1 //===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the InstructionSelector class for
10 /// AMDGPU.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13 
14 #include "AMDGPUInstructionSelector.h"
15 #include "AMDGPU.h"
16 #include "AMDGPUGlobalISelUtils.h"
17 #include "AMDGPUInstrInfo.h"
18 #include "AMDGPURegisterBankInfo.h"
19 #include "AMDGPUTargetMachine.h"
20 #include "SIMachineFunctionInfo.h"
21 #include "Utils/AMDGPUBaseInfo.h"
22 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
23 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
24 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
25 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
26 #include "llvm/CodeGen/MachineFrameInfo.h"
27 #include "llvm/IR/DiagnosticInfo.h"
28 #include "llvm/IR/IntrinsicsAMDGPU.h"
29 
30 #define DEBUG_TYPE "amdgpu-isel"
31 
32 using namespace llvm;
33 using namespace MIPatternMatch;
34 
35 static cl::opt<bool> AllowRiskySelect(
36   "amdgpu-global-isel-risky-select",
37   cl::desc("Allow GlobalISel to select cases that are likely to not work yet"),
38   cl::init(false),
39   cl::ReallyHidden);
40 
41 #define GET_GLOBALISEL_IMPL
42 #define AMDGPUSubtarget GCNSubtarget
43 #include "AMDGPUGenGlobalISel.inc"
44 #undef GET_GLOBALISEL_IMPL
45 #undef AMDGPUSubtarget
46 
47 AMDGPUInstructionSelector::AMDGPUInstructionSelector(
48     const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
49     const AMDGPUTargetMachine &TM)
50     : TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
51       STI(STI),
52       EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
53 #define GET_GLOBALISEL_PREDICATES_INIT
54 #include "AMDGPUGenGlobalISel.inc"
55 #undef GET_GLOBALISEL_PREDICATES_INIT
56 #define GET_GLOBALISEL_TEMPORARIES_INIT
57 #include "AMDGPUGenGlobalISel.inc"
58 #undef GET_GLOBALISEL_TEMPORARIES_INIT
59 {
60 }
61 
62 const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
63 
64 void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
65                                         CodeGenCoverage &CoverageInfo,
66                                         ProfileSummaryInfo *PSI,
67                                         BlockFrequencyInfo *BFI) {
68   MRI = &MF.getRegInfo();
69   Subtarget = &MF.getSubtarget<GCNSubtarget>();
70   InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
71 }
72 
73 bool AMDGPUInstructionSelector::isVCC(Register Reg,
74                                       const MachineRegisterInfo &MRI) const {
75   // The verifier is oblivious to s1 being a valid value for wavesize registers.
76   if (Reg.isPhysical())
77     return false;
78 
79   auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
80   const TargetRegisterClass *RC =
81       RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
82   if (RC) {
83     const LLT Ty = MRI.getType(Reg);
84     if (!Ty.isValid() || Ty.getSizeInBits() != 1)
85       return false;
86     // G_TRUNC s1 result is never vcc.
87     return MRI.getVRegDef(Reg)->getOpcode() != AMDGPU::G_TRUNC &&
88            RC->hasSuperClassEq(TRI.getBoolRC());
89   }
90 
91   const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
92   return RB->getID() == AMDGPU::VCCRegBankID;
93 }
94 
95 bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
96                                                         unsigned NewOpc) const {
97   MI.setDesc(TII.get(NewOpc));
98   MI.removeOperand(1); // Remove intrinsic ID.
99   MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
100 
101   MachineOperand &Dst = MI.getOperand(0);
102   MachineOperand &Src = MI.getOperand(1);
103 
104   // TODO: This should be legalized to s32 if needed
105   if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
106     return false;
107 
108   const TargetRegisterClass *DstRC
109     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
110   const TargetRegisterClass *SrcRC
111     = TRI.getConstrainedRegClassForOperand(Src, *MRI);
112   if (!DstRC || DstRC != SrcRC)
113     return false;
114 
115   return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
116          RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
117 }
118 
119 bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
120   const DebugLoc &DL = I.getDebugLoc();
121   MachineBasicBlock *BB = I.getParent();
122   I.setDesc(TII.get(TargetOpcode::COPY));
123 
124   const MachineOperand &Src = I.getOperand(1);
125   MachineOperand &Dst = I.getOperand(0);
126   Register DstReg = Dst.getReg();
127   Register SrcReg = Src.getReg();
128 
129   if (isVCC(DstReg, *MRI)) {
130     if (SrcReg == AMDGPU::SCC) {
131       const TargetRegisterClass *RC
132         = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
133       if (!RC)
134         return true;
135       return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
136     }
137 
138     if (!isVCC(SrcReg, *MRI)) {
139       // TODO: Should probably leave the copy and let copyPhysReg expand it.
140       if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
141         return false;
142 
143       const TargetRegisterClass *SrcRC
144         = TRI.getConstrainedRegClassForOperand(Src, *MRI);
145 
146       Optional<ValueAndVReg> ConstVal =
147           getIConstantVRegValWithLookThrough(SrcReg, *MRI, true);
148       if (ConstVal) {
149         unsigned MovOpc =
150             STI.isWave64() ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
151         BuildMI(*BB, &I, DL, TII.get(MovOpc), DstReg)
152             .addImm(ConstVal->Value.getBoolValue() ? -1 : 0);
153       } else {
154         Register MaskedReg = MRI->createVirtualRegister(SrcRC);
155 
        // We can't trust the high bits at this point, so clear them.
        // TODO: Skip masking high bits if def is known boolean.
        unsigned AndOpc =
161             TRI.isSGPRClass(SrcRC) ? AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
162         BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
163             .addImm(1)
164             .addReg(SrcReg);
165         BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
166             .addImm(0)
167             .addReg(MaskedReg);
168       }
169 
170       if (!MRI->getRegClassOrNull(SrcReg))
171         MRI->setRegClass(SrcReg, SrcRC);
172       I.eraseFromParent();
173       return true;
174     }
175 
176     const TargetRegisterClass *RC =
177       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
178     if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
179       return false;
180 
181     return true;
182   }
183 
184   for (const MachineOperand &MO : I.operands()) {
185     if (MO.getReg().isPhysical())
186       continue;
187 
188     const TargetRegisterClass *RC =
189             TRI.getConstrainedRegClassForOperand(MO, *MRI);
190     if (!RC)
191       continue;
192     RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
193   }
194   return true;
195 }
196 
197 bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
198   const Register DefReg = I.getOperand(0).getReg();
199   const LLT DefTy = MRI->getType(DefReg);
200   if (DefTy == LLT::scalar(1)) {
201     if (!AllowRiskySelect) {
202       LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n");
203       return false;
204     }
205 
206     LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n");
207   }
208 
  // TODO: Verify this doesn't have insane operands (e.g. a VGPR-to-SGPR copy)
210 
211   const RegClassOrRegBank &RegClassOrBank =
212     MRI->getRegClassOrRegBank(DefReg);
213 
214   const TargetRegisterClass *DefRC
215     = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
216   if (!DefRC) {
217     if (!DefTy.isValid()) {
218       LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
219       return false;
220     }
221 
222     const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
223     DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB);
224     if (!DefRC) {
225       LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
226       return false;
227     }
228   }
229 
230   // TODO: Verify that all registers have the same bank
231   I.setDesc(TII.get(TargetOpcode::PHI));
232   return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
233 }
234 
235 MachineOperand
236 AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
237                                            const TargetRegisterClass &SubRC,
238                                            unsigned SubIdx) const {
239 
240   MachineInstr *MI = MO.getParent();
241   MachineBasicBlock *BB = MO.getParent()->getParent();
242   Register DstReg = MRI->createVirtualRegister(&SubRC);
243 
244   if (MO.isReg()) {
245     unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
246     Register Reg = MO.getReg();
247     BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
248             .addReg(Reg, 0, ComposedSubIdx);
249 
250     return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
251                                      MO.isKill(), MO.isDead(), MO.isUndef(),
252                                      MO.isEarlyClobber(), 0, MO.isDebug(),
253                                      MO.isInternalRead());
254   }
255 
256   assert(MO.isImm());
257 
258   APInt Imm(64, MO.getImm());
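  // The 64-bit immediate is split into two 32-bit halves below. E.g. (assumed
  // value) MO.getImm() == 0x00000001FFFFFFFF yields a sub0 immediate of
  // 0xffffffff and a sub1 immediate of 0x1.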
259 
260   switch (SubIdx) {
261   default:
262     llvm_unreachable("do not know to split immediate with this sub index.");
263   case AMDGPU::sub0:
264     return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
265   case AMDGPU::sub1:
266     return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
267   }
268 }
269 
270 static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
271   switch (Opc) {
272   case AMDGPU::G_AND:
273     return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
274   case AMDGPU::G_OR:
275     return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
276   case AMDGPU::G_XOR:
277     return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
278   default:
279     llvm_unreachable("not a bit op");
280   }
281 }
282 
283 bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
284   Register DstReg = I.getOperand(0).getReg();
285   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
286 
287   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
288   if (DstRB->getID() != AMDGPU::SGPRRegBankID &&
289       DstRB->getID() != AMDGPU::VCCRegBankID)
290     return false;
291 
292   bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID &&
293                             STI.isWave64());
294   I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64)));
295 
296   // Dead implicit-def of scc
297   I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
298                                          true, // isImp
299                                          false, // isKill
300                                          true)); // isDead
301   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
302 }
303 
304 bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
305   MachineBasicBlock *BB = I.getParent();
306   MachineFunction *MF = BB->getParent();
307   Register DstReg = I.getOperand(0).getReg();
308   const DebugLoc &DL = I.getDebugLoc();
309   LLT Ty = MRI->getType(DstReg);
310   if (Ty.isVector())
311     return false;
312 
313   unsigned Size = Ty.getSizeInBits();
314   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
315   const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
316   const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;
317 
318   if (Size == 32) {
319     if (IsSALU) {
320       const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
321       MachineInstr *Add =
322         BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
323         .add(I.getOperand(1))
324         .add(I.getOperand(2));
325       I.eraseFromParent();
326       return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
327     }
328 
329     if (STI.hasAddNoCarry()) {
330       const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
331       I.setDesc(TII.get(Opc));
332       I.addOperand(*MF, MachineOperand::CreateImm(0));
333       I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
334       return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
335     }
336 
    const unsigned Opc =
        Sub ? AMDGPU::V_SUB_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e64;
338 
339     Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
340     MachineInstr *Add
341       = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
342       .addDef(UnusedCarry, RegState::Dead)
343       .add(I.getOperand(1))
344       .add(I.getOperand(2))
345       .addImm(0);
346     I.eraseFromParent();
347     return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
348   }
349 
350   assert(!Sub && "illegal sub should not reach here");
351 
352   const TargetRegisterClass &RC
353     = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
354   const TargetRegisterClass &HalfRC
355     = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;
356 
357   MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
358   MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
359   MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
360   MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));
361 
362   Register DstLo = MRI->createVirtualRegister(&HalfRC);
363   Register DstHi = MRI->createVirtualRegister(&HalfRC);
364 
365   if (IsSALU) {
366     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
367       .add(Lo1)
368       .add(Lo2);
369     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
370       .add(Hi1)
371       .add(Hi2);
372   } else {
373     const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
374     Register CarryReg = MRI->createVirtualRegister(CarryRC);
375     BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo)
376       .addDef(CarryReg)
377       .add(Lo1)
378       .add(Lo2)
379       .addImm(0);
    MachineInstr *Addc =
        BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
381       .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
382       .add(Hi1)
383       .add(Hi2)
384       .addReg(CarryReg, RegState::Kill)
385       .addImm(0);
386 
387     if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
388       return false;
389   }
390 
391   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
392     .addReg(DstLo)
393     .addImm(AMDGPU::sub0)
394     .addReg(DstHi)
    .addImm(AMDGPU::sub1);

  if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
399     return false;
400 
401   I.eraseFromParent();
402   return true;
403 }
404 
405 bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
406   MachineInstr &I) const {
407   MachineBasicBlock *BB = I.getParent();
408   MachineFunction *MF = BB->getParent();
409   const DebugLoc &DL = I.getDebugLoc();
410   Register Dst0Reg = I.getOperand(0).getReg();
411   Register Dst1Reg = I.getOperand(1).getReg();
412   const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
413                      I.getOpcode() == AMDGPU::G_UADDE;
414   const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
415                           I.getOpcode() == AMDGPU::G_USUBE;
416 
417   if (isVCC(Dst1Reg, *MRI)) {
418     unsigned NoCarryOpc =
419         IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
420     unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
421     I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
422     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
423     I.addOperand(*MF, MachineOperand::CreateImm(0));
424     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
425   }
426 
427   Register Src0Reg = I.getOperand(2).getReg();
428   Register Src1Reg = I.getOperand(3).getReg();
429 
430   if (HasCarryIn) {
431     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
432       .addReg(I.getOperand(4).getReg());
433   }
434 
435   unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
436   unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
437 
438   BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
439     .add(I.getOperand(2))
440     .add(I.getOperand(3));
441   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
442     .addReg(AMDGPU::SCC);
443 
444   if (!MRI->getRegClassOrNull(Dst1Reg))
445     MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);
446 
447   if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
448       !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
449       !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
450     return false;
451 
452   if (HasCarryIn &&
453       !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
454                                     AMDGPU::SReg_32RegClass, *MRI))
455     return false;
456 
457   I.eraseFromParent();
458   return true;
459 }
460 
461 bool AMDGPUInstructionSelector::selectG_AMDGPU_MAD_64_32(
462     MachineInstr &I) const {
463   MachineBasicBlock *BB = I.getParent();
464   MachineFunction *MF = BB->getParent();
465   const bool IsUnsigned = I.getOpcode() == AMDGPU::G_AMDGPU_MAD_U64_U32;
466 
467   I.setDesc(TII.get(IsUnsigned ? AMDGPU::V_MAD_U64_U32_e64
468                                : AMDGPU::V_MAD_I64_I32_e64));
469   I.addOperand(*MF, MachineOperand::CreateImm(0));
470   I.addImplicitDefUseOperands(*MF);
471   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
472 }
473 
// TODO: We should probably legalize these to use only 32-bit results.
475 bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
476   MachineBasicBlock *BB = I.getParent();
477   Register DstReg = I.getOperand(0).getReg();
478   Register SrcReg = I.getOperand(1).getReg();
479   LLT DstTy = MRI->getType(DstReg);
480   LLT SrcTy = MRI->getType(SrcReg);
481   const unsigned SrcSize = SrcTy.getSizeInBits();
482   unsigned DstSize = DstTy.getSizeInBits();
483 
484   // TODO: Should handle any multiple of 32 offset.
485   unsigned Offset = I.getOperand(2).getImm();
486   if (Offset % 32 != 0 || DstSize > 128)
487     return false;
488 
489   // 16-bit operations really use 32-bit registers.
490   // FIXME: Probably should not allow 16-bit G_EXTRACT results.
491   if (DstSize == 16)
492     DstSize = 32;
493 
494   const TargetRegisterClass *DstRC =
495     TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
496   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
497     return false;
498 
499   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
500   const TargetRegisterClass *SrcRC =
501       TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank);
502   if (!SrcRC)
503     return false;
504   unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
505                                                          DstSize / 32);
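  // As a sketch: extracting 64 bits at bit offset 64 from a 128-bit source
  // corresponds to getSubRegFromChannel(2, 2), i.e. the sub2_sub3 index.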
506   SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
507   if (!SrcRC)
508     return false;
509 
510   SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
511                                     *SrcRC, I.getOperand(1));
512   const DebugLoc &DL = I.getDebugLoc();
513   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
514     .addReg(SrcReg, 0, SubReg);
515 
516   I.eraseFromParent();
517   return true;
518 }
519 
520 bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
521   MachineBasicBlock *BB = MI.getParent();
522   Register DstReg = MI.getOperand(0).getReg();
523   LLT DstTy = MRI->getType(DstReg);
524   LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());
525 
526   const unsigned SrcSize = SrcTy.getSizeInBits();
527   if (SrcSize < 32)
528     return selectImpl(MI, *CoverageInfo);
529 
530   const DebugLoc &DL = MI.getDebugLoc();
531   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
532   const unsigned DstSize = DstTy.getSizeInBits();
533   const TargetRegisterClass *DstRC =
534       TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
535   if (!DstRC)
536     return false;
537 
538   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
539   MachineInstrBuilder MIB =
540     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
541   for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
542     MachineOperand &Src = MI.getOperand(I + 1);
543     MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
544     MIB.addImm(SubRegs[I]);
545 
546     const TargetRegisterClass *SrcRC
547       = TRI.getConstrainedRegClassForOperand(Src, *MRI);
548     if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
549       return false;
550   }
551 
552   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
553     return false;
554 
555   MI.eraseFromParent();
556   return true;
557 }
558 
559 bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
560   MachineBasicBlock *BB = MI.getParent();
561   const int NumDst = MI.getNumOperands() - 1;
562 
563   MachineOperand &Src = MI.getOperand(NumDst);
564 
565   Register SrcReg = Src.getReg();
566   Register DstReg0 = MI.getOperand(0).getReg();
567   LLT DstTy = MRI->getType(DstReg0);
568   LLT SrcTy = MRI->getType(SrcReg);
569 
570   const unsigned DstSize = DstTy.getSizeInBits();
571   const unsigned SrcSize = SrcTy.getSizeInBits();
572   const DebugLoc &DL = MI.getDebugLoc();
573   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
574 
575   const TargetRegisterClass *SrcRC =
576       TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank);
577   if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
578     return false;
579 
580   // Note we could have mixed SGPR and VGPR destination banks for an SGPR
581   // source, and this relies on the fact that the same subregister indices are
582   // used for both.
583   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
584   for (int I = 0, E = NumDst; I != E; ++I) {
585     MachineOperand &Dst = MI.getOperand(I);
586     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
587       .addReg(SrcReg, 0, SubRegs[I]);
588 
589     // Make sure the subregister index is valid for the source register.
590     SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]);
591     if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
592       return false;
593 
594     const TargetRegisterClass *DstRC =
595       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
596     if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
597       return false;
598   }
599 
600   MI.eraseFromParent();
601   return true;
602 }
603 
604 bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC(
605   MachineInstr &MI) const {
606   if (selectImpl(MI, *CoverageInfo))
607     return true;
608 
609   const LLT S32 = LLT::scalar(32);
610   const LLT V2S16 = LLT::fixed_vector(2, 16);
611 
612   Register Dst = MI.getOperand(0).getReg();
613   if (MRI->getType(Dst) != V2S16)
614     return false;
615 
616   const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
617   if (DstBank->getID() != AMDGPU::SGPRRegBankID)
618     return false;
619 
620   Register Src0 = MI.getOperand(1).getReg();
621   Register Src1 = MI.getOperand(2).getReg();
622   if (MRI->getType(Src0) != S32)
623     return false;
624 
625   const DebugLoc &DL = MI.getDebugLoc();
626   MachineBasicBlock *BB = MI.getParent();
627 
628   auto ConstSrc1 = getAnyConstantVRegValWithLookThrough(Src1, *MRI, true, true);
629   if (ConstSrc1) {
630     auto ConstSrc0 =
631         getAnyConstantVRegValWithLookThrough(Src0, *MRI, true, true);
632     if (ConstSrc0) {
633       const int64_t K0 = ConstSrc0->Value.getSExtValue();
634       const int64_t K1 = ConstSrc1->Value.getSExtValue();
635       uint32_t Lo16 = static_cast<uint32_t>(K0) & 0xffff;
636       uint32_t Hi16 = static_cast<uint32_t>(K1) & 0xffff;
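      // E.g. (assumed constants) K0 = 1 and K1 = 2 pack to the 32-bit
      // immediate 0x00020001.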
637 
638       BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst)
639         .addImm(Lo16 | (Hi16 << 16));
640       MI.eraseFromParent();
641       return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI);
642     }
643   }
644 
645   // TODO: This should probably be a combine somewhere
  // (build_vector_trunc $src0, undef) -> (copy $src0)
647   MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
648   if (Src1Def && Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
649     MI.setDesc(TII.get(AMDGPU::COPY));
650     MI.removeOperand(2);
651     return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI) &&
652            RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI);
653   }
654 
655   Register ShiftSrc0;
656   Register ShiftSrc1;
657 
658   // With multiple uses of the shift, this will duplicate the shift and
659   // increase register pressure.
660   //
  // (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16))
662   //  => (S_PACK_HH_B32_B16 $src0, $src1)
663   // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16))
664   //  => (S_PACK_LH_B32_B16 $src0, $src1)
665   // (build_vector_trunc $src0, $src1)
666   //  => (S_PACK_LL_B32_B16 $src0, $src1)
667 
668   bool Shift0 = mi_match(
669       Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_SpecificICst(16))));
670 
671   bool Shift1 = mi_match(
672       Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_SpecificICst(16))));
673 
674   unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
675   if (Shift0 && Shift1) {
676     Opc = AMDGPU::S_PACK_HH_B32_B16;
677     MI.getOperand(1).setReg(ShiftSrc0);
678     MI.getOperand(2).setReg(ShiftSrc1);
679   } else if (Shift1) {
680     Opc = AMDGPU::S_PACK_LH_B32_B16;
681     MI.getOperand(2).setReg(ShiftSrc1);
682   } else if (Shift0 && ConstSrc1 && ConstSrc1->Value == 0) {
683     // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16
684     auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)
685       .addReg(ShiftSrc0)
686       .addImm(16);
687 
688     MI.eraseFromParent();
689     return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
690   }
691 
692   MI.setDesc(TII.get(Opc));
693   return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
694 }
695 
696 bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
697   return selectG_ADD_SUB(I);
698 }
699 
700 bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
701   const MachineOperand &MO = I.getOperand(0);
702 
703   // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
704   // regbank check here is to know why getConstrainedRegClassForOperand failed.
705   const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
706   if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
707       (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
708     I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
709     return true;
710   }
711 
712   return false;
713 }
714 
715 bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
716   MachineBasicBlock *BB = I.getParent();
717 
718   Register DstReg = I.getOperand(0).getReg();
719   Register Src0Reg = I.getOperand(1).getReg();
720   Register Src1Reg = I.getOperand(2).getReg();
721   LLT Src1Ty = MRI->getType(Src1Reg);
722 
723   unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
724   unsigned InsSize = Src1Ty.getSizeInBits();
725 
726   int64_t Offset = I.getOperand(3).getImm();
727 
728   // FIXME: These cases should have been illegal and unnecessary to check here.
729   if (Offset % 32 != 0 || InsSize % 32 != 0)
730     return false;
731 
732   // Currently not handled by getSubRegFromChannel.
733   if (InsSize > 128)
734     return false;
735 
736   unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
737   if (SubReg == AMDGPU::NoSubRegister)
738     return false;
739 
740   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
741   const TargetRegisterClass *DstRC =
742       TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
743   if (!DstRC)
744     return false;
745 
746   const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
747   const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
748   const TargetRegisterClass *Src0RC =
749       TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank);
750   const TargetRegisterClass *Src1RC =
751       TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank);
752 
753   // Deal with weird cases where the class only partially supports the subreg
754   // index.
755   Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
756   if (!Src0RC || !Src1RC)
757     return false;
758 
759   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
760       !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
761       !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
762     return false;
763 
764   const DebugLoc &DL = I.getDebugLoc();
765   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
766     .addReg(Src0Reg)
767     .addReg(Src1Reg)
768     .addImm(SubReg);
769 
770   I.eraseFromParent();
771   return true;
772 }
773 
774 bool AMDGPUInstructionSelector::selectG_SBFX_UBFX(MachineInstr &MI) const {
775   Register DstReg = MI.getOperand(0).getReg();
776   Register SrcReg = MI.getOperand(1).getReg();
777   Register OffsetReg = MI.getOperand(2).getReg();
778   Register WidthReg = MI.getOperand(3).getReg();
779 
780   assert(RBI.getRegBank(DstReg, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID &&
781          "scalar BFX instructions are expanded in regbankselect");
782   assert(MRI->getType(MI.getOperand(0).getReg()).getSizeInBits() == 32 &&
783          "64-bit vector BFX instructions are expanded in regbankselect");
784 
785   const DebugLoc &DL = MI.getDebugLoc();
786   MachineBasicBlock *MBB = MI.getParent();
787 
788   bool IsSigned = MI.getOpcode() == TargetOpcode::G_SBFX;
789   unsigned Opc = IsSigned ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
790   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), DstReg)
791                  .addReg(SrcReg)
792                  .addReg(OffsetReg)
793                  .addReg(WidthReg);
794   MI.eraseFromParent();
795   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
796 }
797 
798 bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
799   if (STI.getLDSBankCount() != 16)
800     return selectImpl(MI, *CoverageInfo);
801 
802   Register Dst = MI.getOperand(0).getReg();
803   Register Src0 = MI.getOperand(2).getReg();
804   Register M0Val = MI.getOperand(6).getReg();
805   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
806       !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
807       !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
808     return false;
809 
810   // This requires 2 instructions. It is possible to write a pattern to support
811   // this, but the generated isel emitter doesn't correctly deal with multiple
812   // output instructions using the same physical register input. The copy to m0
813   // is incorrectly placed before the second instruction.
814   //
815   // TODO: Match source modifiers.
816 
817   Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
818   const DebugLoc &DL = MI.getDebugLoc();
819   MachineBasicBlock *MBB = MI.getParent();
820 
821   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
822     .addReg(M0Val);
823   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
824     .addImm(2)
825     .addImm(MI.getOperand(4).getImm())  // $attr
826     .addImm(MI.getOperand(3).getImm()); // $attrchan
827 
828   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
829     .addImm(0)                          // $src0_modifiers
830     .addReg(Src0)                       // $src0
831     .addImm(MI.getOperand(4).getImm())  // $attr
832     .addImm(MI.getOperand(3).getImm())  // $attrchan
833     .addImm(0)                          // $src2_modifiers
834     .addReg(InterpMov)                  // $src2 - 2 f16 values selected by high
835     .addImm(MI.getOperand(5).getImm())  // $high
836     .addImm(0)                          // $clamp
837     .addImm(0);                         // $omod
838 
839   MI.eraseFromParent();
840   return true;
841 }
842 
843 // Writelane is special in that it can use SGPR and M0 (which would normally
844 // count as using the constant bus twice - but in this case it is allowed since
845 // the lane selector doesn't count as a use of the constant bus). However, it is
846 // still required to abide by the 1 SGPR rule. Fix this up if we might have
847 // multiple SGPRs.
848 bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const {
849   // With a constant bus limit of at least 2, there's no issue.
850   if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1)
851     return selectImpl(MI, *CoverageInfo);
852 
853   MachineBasicBlock *MBB = MI.getParent();
854   const DebugLoc &DL = MI.getDebugLoc();
855   Register VDst = MI.getOperand(0).getReg();
856   Register Val = MI.getOperand(2).getReg();
857   Register LaneSelect = MI.getOperand(3).getReg();
858   Register VDstIn = MI.getOperand(4).getReg();
859 
860   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst);
861 
862   Optional<ValueAndVReg> ConstSelect =
863       getIConstantVRegValWithLookThrough(LaneSelect, *MRI);
864   if (ConstSelect) {
865     // The selector has to be an inline immediate, so we can use whatever for
866     // the other operands.
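    // The lane index wraps at the wave size: for wave64 this applies
    // maskTrailingOnes<uint64_t>(6) == 0x3f to the selector.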
867     MIB.addReg(Val);
868     MIB.addImm(ConstSelect->Value.getSExtValue() &
869                maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
870   } else {
871     Optional<ValueAndVReg> ConstVal =
872         getIConstantVRegValWithLookThrough(Val, *MRI);
873 
874     // If the value written is an inline immediate, we can get away without a
875     // copy to m0.
876     if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value.getSExtValue(),
877                                                  STI.hasInv2PiInlineImm())) {
878       MIB.addImm(ConstVal->Value.getSExtValue());
879       MIB.addReg(LaneSelect);
880     } else {
881       MIB.addReg(Val);
882 
883       // If the lane selector was originally in a VGPR and copied with
884       // readfirstlane, there's a hazard to read the same SGPR from the
885       // VALU. Constrain to a different SGPR to help avoid needing a nop later.
      RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass,
                                   *MRI);
887 
888       BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
889         .addReg(LaneSelect);
890       MIB.addReg(AMDGPU::M0);
891     }
892   }
893 
894   MIB.addReg(VDstIn);
895 
896   MI.eraseFromParent();
897   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
898 }
899 
900 // We need to handle this here because tablegen doesn't support matching
901 // instructions with multiple outputs.
902 bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
903   Register Dst0 = MI.getOperand(0).getReg();
904   Register Dst1 = MI.getOperand(1).getReg();
905 
906   LLT Ty = MRI->getType(Dst0);
907   unsigned Opc;
908   if (Ty == LLT::scalar(32))
909     Opc = AMDGPU::V_DIV_SCALE_F32_e64;
910   else if (Ty == LLT::scalar(64))
911     Opc = AMDGPU::V_DIV_SCALE_F64_e64;
912   else
913     return false;
914 
915   // TODO: Match source modifiers.
916 
917   const DebugLoc &DL = MI.getDebugLoc();
918   MachineBasicBlock *MBB = MI.getParent();
919 
920   Register Numer = MI.getOperand(3).getReg();
921   Register Denom = MI.getOperand(4).getReg();
922   unsigned ChooseDenom = MI.getOperand(5).getImm();
923 
924   Register Src0 = ChooseDenom != 0 ? Numer : Denom;
925 
926   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0)
927     .addDef(Dst1)
928     .addImm(0)     // $src0_modifiers
929     .addUse(Src0)  // $src0
930     .addImm(0)     // $src1_modifiers
931     .addUse(Denom) // $src1
932     .addImm(0)     // $src2_modifiers
933     .addUse(Numer) // $src2
934     .addImm(0)     // $clamp
935     .addImm(0);    // $omod
936 
937   MI.eraseFromParent();
938   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
939 }
940 
941 bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
942   unsigned IntrinsicID = I.getIntrinsicID();
943   switch (IntrinsicID) {
944   case Intrinsic::amdgcn_if_break: {
945     MachineBasicBlock *BB = I.getParent();
946 
947     // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
948     // SelectionDAG uses for wave32 vs wave64.
949     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
950       .add(I.getOperand(0))
951       .add(I.getOperand(2))
952       .add(I.getOperand(3));
953 
954     Register DstReg = I.getOperand(0).getReg();
955     Register Src0Reg = I.getOperand(2).getReg();
956     Register Src1Reg = I.getOperand(3).getReg();
957 
958     I.eraseFromParent();
959 
960     for (Register Reg : { DstReg, Src0Reg, Src1Reg })
961       MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
962 
963     return true;
964   }
965   case Intrinsic::amdgcn_interp_p1_f16:
966     return selectInterpP1F16(I);
967   case Intrinsic::amdgcn_wqm:
968     return constrainCopyLikeIntrin(I, AMDGPU::WQM);
969   case Intrinsic::amdgcn_softwqm:
970     return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
971   case Intrinsic::amdgcn_strict_wwm:
972   case Intrinsic::amdgcn_wwm:
973     return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WWM);
974   case Intrinsic::amdgcn_strict_wqm:
975     return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WQM);
976   case Intrinsic::amdgcn_writelane:
977     return selectWritelane(I);
978   case Intrinsic::amdgcn_div_scale:
979     return selectDivScale(I);
980   case Intrinsic::amdgcn_icmp:
981     return selectIntrinsicIcmp(I);
982   case Intrinsic::amdgcn_ballot:
983     return selectBallot(I);
984   case Intrinsic::amdgcn_reloc_constant:
985     return selectRelocConstant(I);
986   case Intrinsic::amdgcn_groupstaticsize:
987     return selectGroupStaticSize(I);
988   case Intrinsic::returnaddress:
989     return selectReturnAddress(I);
990   case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16:
991   case Intrinsic::amdgcn_smfmac_f32_32x32x16_f16:
992   case Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16:
993   case Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16:
994   case Intrinsic::amdgcn_smfmac_i32_16x16x64_i8:
995   case Intrinsic::amdgcn_smfmac_i32_32x32x32_i8:
996     return selectSMFMACIntrin(I);
997   default:
998     return selectImpl(I, *CoverageInfo);
999   }
1000 }
1001 
1002 static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
1003   if (Size != 32 && Size != 64)
1004     return -1;
1005   switch (P) {
1006   default:
1007     llvm_unreachable("Unknown condition code!");
1008   case CmpInst::ICMP_NE:
1009     return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
1010   case CmpInst::ICMP_EQ:
1011     return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
1012   case CmpInst::ICMP_SGT:
1013     return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
1014   case CmpInst::ICMP_SGE:
1015     return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
1016   case CmpInst::ICMP_SLT:
1017     return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
1018   case CmpInst::ICMP_SLE:
1019     return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
1020   case CmpInst::ICMP_UGT:
1021     return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
1022   case CmpInst::ICMP_UGE:
1023     return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
1024   case CmpInst::ICMP_ULT:
1025     return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
1026   case CmpInst::ICMP_ULE:
1027     return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
1028   }
1029 }
1030 
1031 int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
1032                                               unsigned Size) const {
1033   if (Size == 64) {
1034     if (!STI.hasScalarCompareEq64())
1035       return -1;
1036 
1037     switch (P) {
1038     case CmpInst::ICMP_NE:
1039       return AMDGPU::S_CMP_LG_U64;
1040     case CmpInst::ICMP_EQ:
1041       return AMDGPU::S_CMP_EQ_U64;
1042     default:
1043       return -1;
1044     }
1045   }
1046 
1047   if (Size != 32)
1048     return -1;
1049 
1050   switch (P) {
1051   case CmpInst::ICMP_NE:
1052     return AMDGPU::S_CMP_LG_U32;
1053   case CmpInst::ICMP_EQ:
1054     return AMDGPU::S_CMP_EQ_U32;
1055   case CmpInst::ICMP_SGT:
1056     return AMDGPU::S_CMP_GT_I32;
1057   case CmpInst::ICMP_SGE:
1058     return AMDGPU::S_CMP_GE_I32;
1059   case CmpInst::ICMP_SLT:
1060     return AMDGPU::S_CMP_LT_I32;
1061   case CmpInst::ICMP_SLE:
1062     return AMDGPU::S_CMP_LE_I32;
1063   case CmpInst::ICMP_UGT:
1064     return AMDGPU::S_CMP_GT_U32;
1065   case CmpInst::ICMP_UGE:
1066     return AMDGPU::S_CMP_GE_U32;
1067   case CmpInst::ICMP_ULT:
1068     return AMDGPU::S_CMP_LT_U32;
1069   case CmpInst::ICMP_ULE:
1070     return AMDGPU::S_CMP_LE_U32;
1071   default:
1072     llvm_unreachable("Unknown condition code!");
1073   }
1074 }
1075 
1076 bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
1077   MachineBasicBlock *BB = I.getParent();
1078   const DebugLoc &DL = I.getDebugLoc();
1079 
1080   Register SrcReg = I.getOperand(2).getReg();
1081   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1082 
1083   auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
1084 
1085   Register CCReg = I.getOperand(0).getReg();
1086   if (!isVCC(CCReg, *MRI)) {
1087     int Opcode = getS_CMPOpcode(Pred, Size);
1088     if (Opcode == -1)
1089       return false;
1090     MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
1091             .add(I.getOperand(2))
1092             .add(I.getOperand(3));
1093     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
1094       .addReg(AMDGPU::SCC);
1095     bool Ret =
1096         constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
1097         RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
1098     I.eraseFromParent();
1099     return Ret;
1100   }
1101 
1102   int Opcode = getV_CMPOpcode(Pred, Size);
1103   if (Opcode == -1)
1104     return false;
1105 
1106   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
1107             I.getOperand(0).getReg())
1108             .add(I.getOperand(2))
1109             .add(I.getOperand(3));
1110   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
1111                                *TRI.getBoolRC(), *MRI);
1112   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1113   I.eraseFromParent();
1114   return Ret;
1115 }
1116 
1117 bool AMDGPUInstructionSelector::selectIntrinsicIcmp(MachineInstr &I) const {
1118   Register Dst = I.getOperand(0).getReg();
1119   if (isVCC(Dst, *MRI))
1120     return false;
1121 
1122   if (MRI->getType(Dst).getSizeInBits() != STI.getWavefrontSize())
1123     return false;
1124 
1125   MachineBasicBlock *BB = I.getParent();
1126   const DebugLoc &DL = I.getDebugLoc();
1127   Register SrcReg = I.getOperand(2).getReg();
1128   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1129 
1130   auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm());
1131   if (!ICmpInst::isIntPredicate(static_cast<ICmpInst::Predicate>(Pred))) {
1132     MachineInstr *ICmp =
1133         BuildMI(*BB, &I, DL, TII.get(AMDGPU::IMPLICIT_DEF), Dst);
1134 
1135     if (!RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
1136                                       *TRI.getBoolRC(), *MRI))
1137       return false;
1138     I.eraseFromParent();
1139     return true;
1140   }
1141 
1142   int Opcode = getV_CMPOpcode(Pred, Size);
1143   if (Opcode == -1)
1144     return false;
1145 
1146   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst)
1147                            .add(I.getOperand(2))
1148                            .add(I.getOperand(3));
1149   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), *TRI.getBoolRC(),
1150                                *MRI);
1151   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1152   I.eraseFromParent();
1153   return Ret;
1154 }
1155 
1156 bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
1157   MachineBasicBlock *BB = I.getParent();
1158   const DebugLoc &DL = I.getDebugLoc();
1159   Register DstReg = I.getOperand(0).getReg();
1160   const unsigned Size = MRI->getType(DstReg).getSizeInBits();
1161   const bool Is64 = Size == 64;
1162 
1163   if (Size != STI.getWavefrontSize())
1164     return false;
1165 
1166   Optional<ValueAndVReg> Arg =
1167       getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI);
1168 
  if (Arg) {
    const int64_t Value = Arg->Value.getSExtValue();
1171     if (Value == 0) {
1172       unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
1173       BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
1174     } else if (Value == -1) { // all ones
1175       Register SrcReg = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
1176       BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1177     } else
1178       return false;
1179   } else {
1180     Register SrcReg = I.getOperand(2).getReg();
1181     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1182   }
1183 
1184   I.eraseFromParent();
1185   return true;
1186 }
1187 
1188 bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const {
1189   Register DstReg = I.getOperand(0).getReg();
1190   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
1191   const TargetRegisterClass *DstRC = TRI.getRegClassForSizeOnBank(32, *DstBank);
1192   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
1193     return false;
1194 
1195   const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID;
1196 
1197   Module *M = MF->getFunction().getParent();
1198   const MDNode *Metadata = I.getOperand(2).getMetadata();
1199   auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString();
1200   auto RelocSymbol = cast<GlobalVariable>(
1201     M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext())));
1202 
1203   MachineBasicBlock *BB = I.getParent();
1204   BuildMI(*BB, &I, I.getDebugLoc(),
1205           TII.get(IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg)
1206     .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO);
1207 
1208   I.eraseFromParent();
1209   return true;
1210 }
1211 
1212 bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const {
1213   Triple::OSType OS = MF->getTarget().getTargetTriple().getOS();
1214 
1215   Register DstReg = I.getOperand(0).getReg();
1216   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1217   unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ?
1218     AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1219 
1220   MachineBasicBlock *MBB = I.getParent();
1221   const DebugLoc &DL = I.getDebugLoc();
1222 
1223   auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg);
1224 
1225   if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) {
1226     const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1227     MIB.addImm(MFI->getLDSSize());
1228   } else {
1229     Module *M = MF->getFunction().getParent();
1230     const GlobalValue *GV
1231       = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize);
1232     MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO);
1233   }
1234 
1235   I.eraseFromParent();
1236   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1237 }
1238 
1239 bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const {
1240   MachineBasicBlock *MBB = I.getParent();
1241   MachineFunction &MF = *MBB->getParent();
1242   const DebugLoc &DL = I.getDebugLoc();
1243 
1244   MachineOperand &Dst = I.getOperand(0);
1245   Register DstReg = Dst.getReg();
1246   unsigned Depth = I.getOperand(2).getImm();
1247 
1248   const TargetRegisterClass *RC
1249     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
1250   if (!RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) ||
1251       !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
1252     return false;
1253 
1254   // Check for kernel and shader functions
1255   if (Depth != 0 ||
1256       MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
1257     BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
1258       .addImm(0);
1259     I.eraseFromParent();
1260     return true;
1261   }
1262 
1263   MachineFrameInfo &MFI = MF.getFrameInfo();
1264   // There is a call to @llvm.returnaddress in this function
1265   MFI.setReturnAddressIsTaken(true);
1266 
1267   // Get the return address reg and mark it as an implicit live-in
1268   Register ReturnAddrReg = TRI.getReturnAddressReg(MF);
1269   Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg,
1270                                              AMDGPU::SReg_64RegClass, DL);
1271   BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
1272     .addReg(LiveIn);
1273   I.eraseFromParent();
1274   return true;
1275 }
1276 
1277 bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
1278   // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
1279   // SelectionDAG uses for wave32 vs wave64.
1280   MachineBasicBlock *BB = MI.getParent();
1281   BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
1282       .add(MI.getOperand(1));
1283 
1284   Register Reg = MI.getOperand(1).getReg();
1285   MI.eraseFromParent();
1286 
1287   if (!MRI->getRegClassOrNull(Reg))
1288     MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
1289   return true;
1290 }
1291 
1292 bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
1293   MachineInstr &MI, Intrinsic::ID IntrID) const {
1294   MachineBasicBlock *MBB = MI.getParent();
1295   MachineFunction *MF = MBB->getParent();
1296   const DebugLoc &DL = MI.getDebugLoc();
1297 
1298   unsigned IndexOperand = MI.getOperand(7).getImm();
1299   bool WaveRelease = MI.getOperand(8).getImm() != 0;
1300   bool WaveDone = MI.getOperand(9).getImm() != 0;
1301 
1302   if (WaveDone && !WaveRelease)
1303     report_fatal_error("ds_ordered_count: wave_done requires wave_release");
1304 
1305   unsigned OrderedCountIndex = IndexOperand & 0x3f;
1306   IndexOperand &= ~0x3f;
1307   unsigned CountDw = 0;
1308 
1309   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
1310     CountDw = (IndexOperand >> 24) & 0xf;
1311     IndexOperand &= ~(0xf << 24);
1312 
1313     if (CountDw < 1 || CountDw > 4) {
1314       report_fatal_error(
1315         "ds_ordered_count: dword count must be between 1 and 4");
1316     }
1317   }
1318 
1319   if (IndexOperand)
1320     report_fatal_error("ds_ordered_count: bad index operand");
1321 
1322   unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
1323   unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF);
1324 
1325   unsigned Offset0 = OrderedCountIndex << 2;
1326   unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
1327                      (Instruction << 4);
1328 
1329   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
1330     Offset1 |= (CountDw - 1) << 6;
1331 
1332   unsigned Offset = Offset0 | (Offset1 << 8);
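  // A sketch of the resulting bit layout, as implied by the shifts above:
  //   Offset[7:2]   = OrderedCountIndex
  //   Offset[8]     = WaveRelease
  //   Offset[9]     = WaveDone
  //   Offset[11:10] = ShaderType
  //   Offset[12]    = Instruction (0 = ordered add, 1 = ordered swap)
  //   Offset[15:14] = CountDw - 1 (GFX10+ only)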
1333 
1334   Register M0Val = MI.getOperand(2).getReg();
1335   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1336     .addReg(M0Val);
1337 
1338   Register DstReg = MI.getOperand(0).getReg();
1339   Register ValReg = MI.getOperand(3).getReg();
1340   MachineInstrBuilder DS =
1341     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
1342       .addReg(ValReg)
1343       .addImm(Offset)
1344       .cloneMemRefs(MI);
1345 
1346   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
1347     return false;
1348 
1349   bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
1350   MI.eraseFromParent();
1351   return Ret;
1352 }
1353 
1354 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
1355   switch (IntrID) {
1356   case Intrinsic::amdgcn_ds_gws_init:
1357     return AMDGPU::DS_GWS_INIT;
1358   case Intrinsic::amdgcn_ds_gws_barrier:
1359     return AMDGPU::DS_GWS_BARRIER;
1360   case Intrinsic::amdgcn_ds_gws_sema_v:
1361     return AMDGPU::DS_GWS_SEMA_V;
1362   case Intrinsic::amdgcn_ds_gws_sema_br:
1363     return AMDGPU::DS_GWS_SEMA_BR;
1364   case Intrinsic::amdgcn_ds_gws_sema_p:
1365     return AMDGPU::DS_GWS_SEMA_P;
1366   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1367     return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
1368   default:
1369     llvm_unreachable("not a gws intrinsic");
1370   }
1371 }
1372 
1373 bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
1374                                                      Intrinsic::ID IID) const {
1375   if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
1376       !STI.hasGWSSemaReleaseAll())
1377     return false;
1378 
1379   // intrinsic ID, vsrc, offset
1380   const bool HasVSrc = MI.getNumOperands() == 3;
1381   assert(HasVSrc || MI.getNumOperands() == 2);
1382 
1383   Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
1384   const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
1385   if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
1386     return false;
1387 
1388   MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1389   assert(OffsetDef);
1390 
1391   unsigned ImmOffset;
1392 
1393   MachineBasicBlock *MBB = MI.getParent();
1394   const DebugLoc &DL = MI.getDebugLoc();
1395 
1396   MachineInstr *Readfirstlane = nullptr;
1397 
1398   // If we legalized the VGPR input, strip out the readfirstlane to analyze the
1399   // incoming offset, in case there's an add of a constant. We'll have to put it
1400   // back later.
1401   if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
1402     Readfirstlane = OffsetDef;
1403     BaseOffset = OffsetDef->getOperand(1).getReg();
1404     OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1405   }
1406 
1407   if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
1408     // If we have a constant offset, try to use the 0 in m0 as the base.
    // TODO: Look into changing the default m0 initialization value. If the
    // default -1 only set the low 16 bits, we could leave it as-is and add 1
    // to the immediate offset.
1412 
1413     ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
1414     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1415       .addImm(0);
1416   } else {
1417     std::tie(BaseOffset, ImmOffset) =
1418         AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset);
1419 
1420     if (Readfirstlane) {
1421       // We have the constant offset now, so put the readfirstlane back on the
1422       // variable component.
1423       if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
1424         return false;
1425 
1426       Readfirstlane->getOperand(1).setReg(BaseOffset);
1427       BaseOffset = Readfirstlane->getOperand(0).getReg();
1428     } else {
1429       if (!RBI.constrainGenericRegister(BaseOffset,
1430                                         AMDGPU::SReg_32RegClass, *MRI))
1431         return false;
1432     }
1433 
1434     Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1435     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
1436       .addReg(BaseOffset)
1437       .addImm(16);
1438 
1439     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1440       .addReg(M0Base);
1441   }
1442 
1443   // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
1444   // offset field) % 64. Some versions of the programming guide omit the m0
1445   // part, or claim it's from offset 0.
1446   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));
1447 
1448   if (HasVSrc) {
1449     Register VSrc = MI.getOperand(1).getReg();
1450     MIB.addReg(VSrc);
1451 
1452     if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
1453       return false;
1454   }
1455 
1456   MIB.addImm(ImmOffset)
1457      .cloneMemRefs(MI);
1458 
1459   TII.enforceOperandRCAlignment(*MIB, AMDGPU::OpName::data0);
1460 
1461   MI.eraseFromParent();
1462   return true;
1463 }
1464 
1465 bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
1466                                                       bool IsAppend) const {
1467   Register PtrBase = MI.getOperand(2).getReg();
1468   LLT PtrTy = MRI->getType(PtrBase);
1469   bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
1470 
1471   unsigned Offset;
1472   std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));
1473 
1474   // TODO: Should this try to look through readfirstlane like GWS?
1475   if (!isDSOffsetLegal(PtrBase, Offset)) {
1476     PtrBase = MI.getOperand(2).getReg();
1477     Offset = 0;
1478   }
1479 
1480   MachineBasicBlock *MBB = MI.getParent();
1481   const DebugLoc &DL = MI.getDebugLoc();
1482   const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
1483 
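  // DS_APPEND/DS_CONSUME take their base address in m0, with the 16-bit
  // instruction offset field covering the rest.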
1484   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1485     .addReg(PtrBase);
1486   if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI))
1487     return false;
1488 
1489   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
1490     .addImm(Offset)
1491     .addImm(IsGDS ? -1 : 0)
1492     .cloneMemRefs(MI);
1493   MI.eraseFromParent();
1494   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1495 }
1496 
1497 bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const {
1498   if (TM.getOptLevel() > CodeGenOpt::None) {
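    // A workgroup that fits in a single wave executes in lockstep, so a real
    // s_barrier is unnecessary; a WAVE_BARRIER pseudo is enough to keep the
    // scheduler from moving memory operations across this point.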
1499     unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second;
1500     if (WGSize <= STI.getWavefrontSize()) {
1501       MachineBasicBlock *MBB = MI.getParent();
1502       const DebugLoc &DL = MI.getDebugLoc();
1503       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER));
1504       MI.eraseFromParent();
1505       return true;
1506     }
1507   }
1508   return selectImpl(MI, *CoverageInfo);
1509 }
1510 
1511 static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
1512                          bool &IsTexFail) {
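  // TexFailCtrl is a bitfield: bit 0 requests TFE and bit 1 requests LWE. Any
  // other set bit is invalid, and the caller treats that as a selection
  // failure.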
1513   if (TexFailCtrl)
1514     IsTexFail = true;
1515 
  TFE = (TexFailCtrl & 0x1) != 0;
  TexFailCtrl &= ~(uint64_t)0x1;
  LWE = (TexFailCtrl & 0x2) != 0;
  TexFailCtrl &= ~(uint64_t)0x2;
1520 
1521   return TexFailCtrl == 0;
1522 }
1523 
1524 bool AMDGPUInstructionSelector::selectImageIntrinsic(
1525   MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
1526   MachineBasicBlock *MBB = MI.getParent();
1527   const DebugLoc &DL = MI.getDebugLoc();
1528 
1529   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
1530     AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
1531 
1532   const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
1533   unsigned IntrOpcode = Intr->BaseOpcode;
1534   const bool IsGFX10Plus = AMDGPU::isGFX10Plus(STI);
1535 
1536   const unsigned ArgOffset = MI.getNumExplicitDefs() + 1;
1537 
1538   Register VDataIn, VDataOut;
1539   LLT VDataTy;
1540   int NumVDataDwords = -1;
1541   bool IsD16 = MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16 ||
1542                MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16;
1543 
1544   bool Unorm;
1545   if (!BaseOpcode->Sampler)
1546     Unorm = true;
1547   else
1548     Unorm = MI.getOperand(ArgOffset + Intr->UnormIndex).getImm() != 0;
1549 
1550   bool TFE;
1551   bool LWE;
1552   bool IsTexFail = false;
1553   if (!parseTexFail(MI.getOperand(ArgOffset + Intr->TexFailCtrlIndex).getImm(),
1554                     TFE, LWE, IsTexFail))
1555     return false;
1556 
1557   const int Flags = MI.getOperand(ArgOffset + Intr->NumArgs).getImm();
1558   const bool IsA16 = (Flags & 1) != 0;
1559   const bool IsG16 = (Flags & 2) != 0;
1560 
  // A16 implies 16-bit gradients if the subtarget doesn't support G16.
1562   if (IsA16 && !STI.hasG16() && !IsG16)
1563     return false;
1564 
1565   unsigned DMask = 0;
1566   unsigned DMaskLanes = 0;
1567 
1568   if (BaseOpcode->Atomic) {
1569     VDataOut = MI.getOperand(0).getReg();
1570     VDataIn = MI.getOperand(2).getReg();
1571     LLT Ty = MRI->getType(VDataIn);
1572 
1573     // Be careful to allow atomic swap on 16-bit element vectors.
1574     const bool Is64Bit = BaseOpcode->AtomicX2 ?
1575       Ty.getSizeInBits() == 128 :
1576       Ty.getSizeInBits() == 64;
1577 
1578     if (BaseOpcode->AtomicX2) {
1579       assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister);
1580 
1581       DMask = Is64Bit ? 0xf : 0x3;
1582       NumVDataDwords = Is64Bit ? 4 : 2;
1583     } else {
1584       DMask = Is64Bit ? 0x3 : 0x1;
1585       NumVDataDwords = Is64Bit ? 2 : 1;
1586     }
1587   } else {
1588     DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm();
1589     DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
1590 
1591     if (BaseOpcode->Store) {
1592       VDataIn = MI.getOperand(1).getReg();
1593       VDataTy = MRI->getType(VDataIn);
1594       NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
1595     } else {
1596       VDataOut = MI.getOperand(0).getReg();
1597       VDataTy = MRI->getType(VDataOut);
1598       NumVDataDwords = DMaskLanes;
1599 
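      // Packed D16 holds two 16-bit components per dword, so round the lane
      // count up to a whole number of dwords.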
1600       if (IsD16 && !STI.hasUnpackedD16VMem())
1601         NumVDataDwords = (DMaskLanes + 1) / 2;
1602     }
1603   }
1604 
1605   // Set G16 opcode
1606   if (IsG16 && !IsA16) {
1607     const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
1608         AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode);
1609     assert(G16MappingInfo);
1610     IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16
1611   }
1612 
1613   // TODO: Check this in verifier.
1614   assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");
1615 
1616   unsigned CPol = MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm();
1617   if (BaseOpcode->Atomic)
1618     CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization
1619   if (CPol & ~AMDGPU::CPol::ALL)
1620     return false;
1621 
1622   int NumVAddrRegs = 0;
1623   int NumVAddrDwords = 0;
1624   for (unsigned I = Intr->VAddrStart; I < Intr->VAddrEnd; I++) {
1625     // Skip the $noregs and 0s inserted during legalization.
1626     MachineOperand &AddrOp = MI.getOperand(ArgOffset + I);
1627     if (!AddrOp.isReg())
1628       continue; // XXX - Break?
1629 
1630     Register Addr = AddrOp.getReg();
1631     if (!Addr)
1632       break;
1633 
1634     ++NumVAddrRegs;
1635     NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32;
1636   }
1637 
  // The legalizer preprocessed the intrinsic arguments. If we aren't using
  // NSA, these should have been packed into a single value in the first
  // address register. With NSA, each address component stays in its own
  // dword-sized register, so it applies only when there is more than one
  // address register and each holds a single dword.
1641   const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs;
1642   if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) {
1643     LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n");
1644     return false;
1645   }
1646 
1647   if (IsTexFail)
1648     ++NumVDataDwords;
1649 
1650   int Opcode = -1;
1651   if (IsGFX10Plus) {
1652     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1653                                    UseNSA ? AMDGPU::MIMGEncGfx10NSA
1654                                           : AMDGPU::MIMGEncGfx10Default,
1655                                    NumVDataDwords, NumVAddrDwords);
1656   } else {
1657     if (STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
1658       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
1659                                      NumVDataDwords, NumVAddrDwords);
1660     if (Opcode == -1)
1661       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
1662                                      NumVDataDwords, NumVAddrDwords);
1663   }
1664   assert(Opcode != -1);
1665 
1666   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode))
1667     .cloneMemRefs(MI);
1668 
1669   if (VDataOut) {
1670     if (BaseOpcode->AtomicX2) {
1671       const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64;
1672 
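      // The X2 variants define twice the data width, but only the low half
      // holds the returned value, so copy it out of a double-width temporary.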
1673       Register TmpReg = MRI->createVirtualRegister(
1674         Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
1675       unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
1676 
1677       MIB.addDef(TmpReg);
1678       if (!MRI->use_empty(VDataOut)) {
1679         BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut)
1680             .addReg(TmpReg, RegState::Kill, SubReg);
1681       }
1682 
1683     } else {
1684       MIB.addDef(VDataOut); // vdata output
1685     }
1686   }
1687 
1688   if (VDataIn)
1689     MIB.addReg(VDataIn); // vdata input
1690 
1691   for (int I = 0; I != NumVAddrRegs; ++I) {
1692     MachineOperand &SrcOp = MI.getOperand(ArgOffset + Intr->VAddrStart + I);
1693     if (SrcOp.isReg()) {
1694       assert(SrcOp.getReg() != 0);
1695       MIB.addReg(SrcOp.getReg());
1696     }
1697   }
1698 
1699   MIB.addReg(MI.getOperand(ArgOffset + Intr->RsrcIndex).getReg());
1700   if (BaseOpcode->Sampler)
1701     MIB.addReg(MI.getOperand(ArgOffset + Intr->SampIndex).getReg());
1702 
1703   MIB.addImm(DMask); // dmask
1704 
1705   if (IsGFX10Plus)
1706     MIB.addImm(DimInfo->Encoding);
1707   MIB.addImm(Unorm);
1708 
1709   MIB.addImm(CPol);
1710   MIB.addImm(IsA16 &&  // a16 or r128
1711              STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0);
1712   if (IsGFX10Plus)
1713     MIB.addImm(IsA16 ? -1 : 0);
1714 
1715   MIB.addImm(TFE); // tfe
1716   MIB.addImm(LWE); // lwe
1717   if (!IsGFX10Plus)
1718     MIB.addImm(DimInfo->DA ? -1 : 0);
1719   if (BaseOpcode->HasD16)
1720     MIB.addImm(IsD16 ? -1 : 0);
1721 
1722   if (IsTexFail) {
1723     // An image load instruction with TFE/LWE only conditionally writes to its
1724     // result registers. Initialize them to zero so that we always get well
1725     // defined result values.
1726     assert(VDataOut && !VDataIn);
1727     Register Tied = MRI->cloneVirtualRegister(VDataOut);
1728     Register Zero = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1729     BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::V_MOV_B32_e32), Zero)
1730       .addImm(0);
1731     auto Parts = TRI.getRegSplitParts(MRI->getRegClass(Tied), 4);
1732     if (STI.usePRTStrictNull()) {
1733       // With enable-prt-strict-null enabled, initialize all result registers to
1734       // zero.
1735       auto RegSeq =
1736           BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1737       for (auto Sub : Parts)
1738         RegSeq.addReg(Zero).addImm(Sub);
1739     } else {
1740       // With enable-prt-strict-null disabled, only initialize the extra TFE/LWE
1741       // result register.
1742       Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1743       BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
1744       auto RegSeq =
1745           BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1746       for (auto Sub : Parts.drop_back(1))
1747         RegSeq.addReg(Undef).addImm(Sub);
1748       RegSeq.addReg(Zero).addImm(Parts.back());
1749     }
1750     MIB.addReg(Tied, RegState::Implicit);
1751     MIB->tieOperands(0, MIB->getNumOperands() - 1);
1752   }
1753 
1754   MI.eraseFromParent();
1755   constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1756   TII.enforceOperandRCAlignment(*MIB, AMDGPU::OpName::vaddr);
1757   return true;
1758 }
1759 
1760 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
1761     MachineInstr &I) const {
1762   unsigned IntrinsicID = I.getIntrinsicID();
1763   switch (IntrinsicID) {
1764   case Intrinsic::amdgcn_end_cf:
1765     return selectEndCfIntrinsic(I);
1766   case Intrinsic::amdgcn_ds_ordered_add:
1767   case Intrinsic::amdgcn_ds_ordered_swap:
1768     return selectDSOrderedIntrinsic(I, IntrinsicID);
1769   case Intrinsic::amdgcn_ds_gws_init:
1770   case Intrinsic::amdgcn_ds_gws_barrier:
1771   case Intrinsic::amdgcn_ds_gws_sema_v:
1772   case Intrinsic::amdgcn_ds_gws_sema_br:
1773   case Intrinsic::amdgcn_ds_gws_sema_p:
1774   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1775     return selectDSGWSIntrinsic(I, IntrinsicID);
1776   case Intrinsic::amdgcn_ds_append:
1777     return selectDSAppendConsume(I, true);
1778   case Intrinsic::amdgcn_ds_consume:
1779     return selectDSAppendConsume(I, false);
1780   case Intrinsic::amdgcn_s_barrier:
1781     return selectSBarrier(I);
1782   case Intrinsic::amdgcn_global_atomic_fadd:
1783     return selectGlobalAtomicFadd(I, I.getOperand(2), I.getOperand(3));
1784   case Intrinsic::amdgcn_raw_buffer_load_lds:
1785   case Intrinsic::amdgcn_struct_buffer_load_lds:
1786     return selectBufferLoadLds(I);
1787   case Intrinsic::amdgcn_global_load_lds:
1788     return selectGlobalLoadLds(I);
1789   default: {
1790     return selectImpl(I, *CoverageInfo);
1791   }
1792   }
1793 }
1794 
1795 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
1796   if (selectImpl(I, *CoverageInfo))
1797     return true;
1798 
1799   MachineBasicBlock *BB = I.getParent();
1800   const DebugLoc &DL = I.getDebugLoc();
1801 
1802   Register DstReg = I.getOperand(0).getReg();
1803   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
1804   assert(Size <= 32 || Size == 64);
1805   const MachineOperand &CCOp = I.getOperand(1);
1806   Register CCReg = CCOp.getReg();
1807   if (!isVCC(CCReg, *MRI)) {
1808     unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
1809                                          AMDGPU::S_CSELECT_B32;
    MachineInstr *CopySCC =
        BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
            .addReg(CCReg);

    // The generic constrainSelectedInstRegOperands doesn't work for the scc
    // register bank, because it does not cover the register class we use to
    // represent it, so we need to manually set the register class here.
    if (!MRI->getRegClassOrNull(CCReg))
      MRI->setRegClass(CCReg,
                       TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
1818     MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
1819             .add(I.getOperand(2))
1820             .add(I.getOperand(3));
1821 
1822     bool Ret = false;
1823     Ret |= constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1824     Ret |= constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
1825     I.eraseFromParent();
1826     return Ret;
1827   }
1828 
1829   // Wide VGPR select should have been split in RegBankSelect.
1830   if (Size > 32)
1831     return false;
1832 
1833   MachineInstr *Select =
1834       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1835               .addImm(0)
1836               .add(I.getOperand(3))
1837               .addImm(0)
1838               .add(I.getOperand(2))
1839               .add(I.getOperand(1));
1840 
1841   bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1842   I.eraseFromParent();
1843   return Ret;
1844 }
1845 
1846 static int sizeToSubRegIndex(unsigned Size) {
1847   switch (Size) {
1848   case 32:
1849     return AMDGPU::sub0;
1850   case 64:
1851     return AMDGPU::sub0_sub1;
1852   case 96:
1853     return AMDGPU::sub0_sub1_sub2;
1854   case 128:
1855     return AMDGPU::sub0_sub1_sub2_sub3;
1856   case 256:
1857     return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
1858   default:
1859     if (Size < 32)
1860       return AMDGPU::sub0;
1861     if (Size > 256)
1862       return -1;
1863     return sizeToSubRegIndex(PowerOf2Ceil(Size));
1864   }
1865 }
1866 
1867 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
1868   Register DstReg = I.getOperand(0).getReg();
1869   Register SrcReg = I.getOperand(1).getReg();
1870   const LLT DstTy = MRI->getType(DstReg);
1871   const LLT SrcTy = MRI->getType(SrcReg);
1872   const LLT S1 = LLT::scalar(1);
1873 
1874   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1875   const RegisterBank *DstRB;
1876   if (DstTy == S1) {
1877     // This is a special case. We don't treat s1 for legalization artifacts as
1878     // vcc booleans.
1879     DstRB = SrcRB;
1880   } else {
1881     DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1882     if (SrcRB != DstRB)
1883       return false;
1884   }
1885 
1886   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
1887 
1888   unsigned DstSize = DstTy.getSizeInBits();
1889   unsigned SrcSize = SrcTy.getSizeInBits();
1890 
1891   const TargetRegisterClass *SrcRC =
1892       TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB);
1893   const TargetRegisterClass *DstRC =
1894       TRI.getRegClassForSizeOnBank(DstSize, *DstRB);
1895   if (!SrcRC || !DstRC)
1896     return false;
1897 
1898   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1899       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1900     LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
1901     return false;
1902   }
1903 
1904   if (DstTy == LLT::fixed_vector(2, 16) && SrcTy == LLT::fixed_vector(2, 32)) {
1905     MachineBasicBlock *MBB = I.getParent();
1906     const DebugLoc &DL = I.getDebugLoc();
1907 
1908     Register LoReg = MRI->createVirtualRegister(DstRC);
1909     Register HiReg = MRI->createVirtualRegister(DstRC);
1910     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg)
1911       .addReg(SrcReg, 0, AMDGPU::sub0);
1912     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg)
1913       .addReg(SrcReg, 0, AMDGPU::sub1);
1914 
1915     if (IsVALU && STI.hasSDWA()) {
1916       // Write the low 16-bits of the high element into the high 16-bits of the
1917       // low element.
1918       MachineInstr *MovSDWA =
1919         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
1920         .addImm(0)                             // $src0_modifiers
1921         .addReg(HiReg)                         // $src0
1922         .addImm(0)                             // $clamp
1923         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
1924         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
1925         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
1926         .addReg(LoReg, RegState::Implicit);
1927       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
1928     } else {
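      // Without SDWA, build the result as (HiReg << 16) | (LoReg & 0xffff).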
1929       Register TmpReg0 = MRI->createVirtualRegister(DstRC);
1930       Register TmpReg1 = MRI->createVirtualRegister(DstRC);
1931       Register ImmReg = MRI->createVirtualRegister(DstRC);
1932       if (IsVALU) {
1933         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0)
1934           .addImm(16)
1935           .addReg(HiReg);
1936       } else {
1937         BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0)
1938           .addReg(HiReg)
1939           .addImm(16);
1940       }
1941 
1942       unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1943       unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
1944       unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32;
1945 
1946       BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg)
1947         .addImm(0xffff);
1948       BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1)
1949         .addReg(LoReg)
1950         .addReg(ImmReg);
1951       BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg)
1952         .addReg(TmpReg0)
1953         .addReg(TmpReg1);
1954     }
1955 
1956     I.eraseFromParent();
1957     return true;
1958   }
1959 
1960   if (!DstTy.isScalar())
1961     return false;
1962 
1963   if (SrcSize > 32) {
1964     int SubRegIdx = sizeToSubRegIndex(DstSize);
1965     if (SubRegIdx == -1)
1966       return false;
1967 
1968     // Deal with weird cases where the class only partially supports the subreg
1969     // index.
1970     const TargetRegisterClass *SrcWithSubRC
1971       = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
1972     if (!SrcWithSubRC)
1973       return false;
1974 
1975     if (SrcWithSubRC != SrcRC) {
1976       if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI))
1977         return false;
1978     }
1979 
1980     I.getOperand(1).setSubReg(SubRegIdx);
1981   }
1982 
1983   I.setDesc(TII.get(TargetOpcode::COPY));
1984   return true;
1985 }
1986 
1987 /// \returns true if a bitmask for \p Size bits will be an inline immediate.
1988 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
1989   Mask = maskTrailingOnes<unsigned>(Size);
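  // AMDGPU inline immediates span the signed range [-16, 64], so e.g. a 4-bit
  // mask (0xf = 15) is free, but an 8-bit mask (0xff = 255) needs a literal.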
1990   int SignedMask = static_cast<int>(Mask);
1991   return SignedMask >= -16 && SignedMask <= 64;
1992 }
1993 
1994 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
1995 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
1996   Register Reg, const MachineRegisterInfo &MRI,
1997   const TargetRegisterInfo &TRI) const {
1998   const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
1999   if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
2000     return RB;
2001 
2002   // Ignore the type, since we don't use vcc in artifacts.
2003   if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
2004     return &RBI.getRegBankFromRegClass(*RC, LLT());
2005   return nullptr;
2006 }
2007 
2008 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
2009   bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG;
2010   bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg;
2011   const DebugLoc &DL = I.getDebugLoc();
2012   MachineBasicBlock &MBB = *I.getParent();
2013   const Register DstReg = I.getOperand(0).getReg();
2014   const Register SrcReg = I.getOperand(1).getReg();
2015 
2016   const LLT DstTy = MRI->getType(DstReg);
2017   const LLT SrcTy = MRI->getType(SrcReg);
2018   const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ?
2019     I.getOperand(2).getImm() : SrcTy.getSizeInBits();
2020   const unsigned DstSize = DstTy.getSizeInBits();
2021   if (!DstTy.isScalar())
2022     return false;
2023 
2024   // Artifact casts should never use vcc.
2025   const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
2026 
2027   // FIXME: This should probably be illegal and split earlier.
2028   if (I.getOpcode() == AMDGPU::G_ANYEXT) {
2029     if (DstSize <= 32)
2030       return selectCOPY(I);
2031 
2032     const TargetRegisterClass *SrcRC =
2033         TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank);
2034     const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
2035     const TargetRegisterClass *DstRC =
2036         TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
2037 
2038     Register UndefReg = MRI->createVirtualRegister(SrcRC);
2039     BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2040     BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2041       .addReg(SrcReg)
2042       .addImm(AMDGPU::sub0)
2043       .addReg(UndefReg)
2044       .addImm(AMDGPU::sub1);
2045     I.eraseFromParent();
2046 
2047     return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) &&
2048            RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI);
2049   }
2050 
2051   if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
2052     // 64-bit should have been split up in RegBankSelect
2053 
2054     // Try to use an and with a mask if it will save code size.
2055     unsigned Mask;
2056     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2057       MachineInstr *ExtI =
2058       BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
2059         .addImm(Mask)
2060         .addReg(SrcReg);
2061       I.eraseFromParent();
2062       return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2063     }
2064 
2065     const unsigned BFE = Signed ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
2066     MachineInstr *ExtI =
2067       BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
2068       .addReg(SrcReg)
2069       .addImm(0) // Offset
2070       .addImm(SrcSize); // Width
2071     I.eraseFromParent();
2072     return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2073   }
2074 
2075   if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
2076     const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ?
2077       AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass;
2078     if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI))
2079       return false;
2080 
2081     if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
2082       const unsigned SextOpc = SrcSize == 8 ?
2083         AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
2084       BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
2085         .addReg(SrcReg);
2086       I.eraseFromParent();
2087       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2088     }
2089 
2090     const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
2091     const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
2092 
    // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16] = width.
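    // E.g. extending from 16 bits uses an immediate of 16 << 16 = 0x100000:
    // offset 0, width 16.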
2094     if (DstSize > 32 && (SrcSize <= 32 || InReg)) {
2095       // We need a 64-bit register source, but the high bits don't matter.
2096       Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
2097       Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2098       unsigned SubReg = InReg ? AMDGPU::sub0 : 0;
2099 
2100       BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2101       BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
2102         .addReg(SrcReg, 0, SubReg)
2103         .addImm(AMDGPU::sub0)
2104         .addReg(UndefReg)
2105         .addImm(AMDGPU::sub1);
2106 
2107       BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
2108         .addReg(ExtReg)
2109         .addImm(SrcSize << 16);
2110 
2111       I.eraseFromParent();
2112       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
2113     }
2114 
2115     unsigned Mask;
2116     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2117       BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
2118         .addReg(SrcReg)
2119         .addImm(Mask);
2120     } else {
2121       BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
2122         .addReg(SrcReg)
2123         .addImm(SrcSize << 16);
2124     }
2125 
2126     I.eraseFromParent();
2127     return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2128   }
2129 
2130   return false;
2131 }
2132 
2133 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
2134   MachineBasicBlock *BB = I.getParent();
2135   MachineOperand &ImmOp = I.getOperand(1);
2136   Register DstReg = I.getOperand(0).getReg();
2137   unsigned Size = MRI->getType(DstReg).getSizeInBits();
2138 
2139   // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
2140   if (ImmOp.isFPImm()) {
2141     const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
2142     ImmOp.ChangeToImmediate(Imm.getZExtValue());
2143   } else if (ImmOp.isCImm()) {
2144     ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue());
2145   } else {
2146     llvm_unreachable("Not supported by g_constants");
2147   }
2148 
2149   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2150   const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID;
2151 
2152   unsigned Opcode;
2153   if (DstRB->getID() == AMDGPU::VCCRegBankID) {
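    // A constant on the vcc bank is a wave mask, so its width follows the
    // wave size rather than the value type.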
2154     Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
2155   } else {
2156     Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
2157 
2158     // We should never produce s1 values on banks other than VCC. If the user of
2159     // this already constrained the register, we may incorrectly think it's VCC
2160     // if it wasn't originally.
2161     if (Size == 1)
2162       return false;
2163   }
2164 
2165   if (Size != 64) {
2166     I.setDesc(TII.get(Opcode));
2167     I.addImplicitDefUseOperands(*MF);
2168     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2169   }
2170 
2171   const DebugLoc &DL = I.getDebugLoc();
2172 
2173   APInt Imm(Size, I.getOperand(1).getImm());
2174 
2175   MachineInstr *ResInst;
2176   if (IsSgpr && TII.isInlineConstant(Imm)) {
2177     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
2178       .addImm(I.getOperand(1).getImm());
2179   } else {
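    // Materialize the constant as two 32-bit moves and recombine the halves
    // with a REG_SEQUENCE.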
2180     const TargetRegisterClass *RC = IsSgpr ?
2181       &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
2182     Register LoReg = MRI->createVirtualRegister(RC);
2183     Register HiReg = MRI->createVirtualRegister(RC);
2184 
2185     BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
2186       .addImm(Imm.trunc(32).getZExtValue());
2187 
2188     BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
2189       .addImm(Imm.ashr(32).getZExtValue());
2190 
2191     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2192       .addReg(LoReg)
2193       .addImm(AMDGPU::sub0)
2194       .addReg(HiReg)
2195       .addImm(AMDGPU::sub1);
2196   }
2197 
  // We can't call constrainSelectedInstRegOperands here, because it doesn't
  // work for target-independent opcodes.
2200   I.eraseFromParent();
2201   const TargetRegisterClass *DstRC =
2202     TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
2203   if (!DstRC)
2204     return true;
2205   return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
2206 }
2207 
2208 bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
2209   // Only manually handle the f64 SGPR case.
2210   //
2211   // FIXME: This is a workaround for 2.5 different tablegen problems. Because
2212   // the bit ops theoretically have a second result due to the implicit def of
2213   // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing
2214   // that is easy by disabling the check. The result works, but uses a
2215   // nonsensical sreg32orlds_and_sreg_1 regclass.
2216   //
  // The DAG emitter is more problematic, and incorrectly adds both results of
  // the S_XOR_B32 to the variadic REG_SEQUENCE operands.
2219 
2220   Register Dst = MI.getOperand(0).getReg();
2221   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2222   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2223       MRI->getType(Dst) != LLT::scalar(64))
2224     return false;
2225 
2226   Register Src = MI.getOperand(1).getReg();
2227   MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI);
2228   if (Fabs)
2229     Src = Fabs->getOperand(1).getReg();
2230 
2231   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2232       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2233     return false;
2234 
2235   MachineBasicBlock *BB = MI.getParent();
2236   const DebugLoc &DL = MI.getDebugLoc();
2237   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2238   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2239   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2240   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2241 
2242   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2243     .addReg(Src, 0, AMDGPU::sub0);
2244   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2245     .addReg(Src, 0, AMDGPU::sub1);
2246   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2247     .addImm(0x80000000);
2248 
2249   // Set or toggle sign bit.
2250   unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
2251   BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
2252     .addReg(HiReg)
2253     .addReg(ConstReg);
2254   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2255     .addReg(LoReg)
2256     .addImm(AMDGPU::sub0)
2257     .addReg(OpReg)
2258     .addImm(AMDGPU::sub1);
2259   MI.eraseFromParent();
2260   return true;
2261 }
2262 
2263 // FIXME: This is a workaround for the same tablegen problems as G_FNEG
2264 bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {
2265   Register Dst = MI.getOperand(0).getReg();
2266   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2267   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2268       MRI->getType(Dst) != LLT::scalar(64))
2269     return false;
2270 
2271   Register Src = MI.getOperand(1).getReg();
2272   MachineBasicBlock *BB = MI.getParent();
2273   const DebugLoc &DL = MI.getDebugLoc();
2274   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2275   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2276   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2277   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2278 
2279   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2280       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2281     return false;
2282 
2283   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2284     .addReg(Src, 0, AMDGPU::sub0);
2285   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2286     .addReg(Src, 0, AMDGPU::sub1);
2287   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2288     .addImm(0x7fffffff);
2289 
2290   // Clear sign bit.
  // TODO: Should this use S_BITSET0_*?
2292   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg)
2293     .addReg(HiReg)
2294     .addReg(ConstReg);
2295   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2296     .addReg(LoReg)
2297     .addImm(AMDGPU::sub0)
2298     .addReg(OpReg)
2299     .addImm(AMDGPU::sub1);
2300 
2301   MI.eraseFromParent();
2302   return true;
2303 }
2304 
2305 static bool isConstant(const MachineInstr &MI) {
2306   return MI.getOpcode() == TargetOpcode::G_CONSTANT;
2307 }
2308 
2309 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
2310     const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
2311 
2312   const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
2313 
2314   assert(PtrMI);
2315 
2316   if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
2317     return;
2318 
2319   GEPInfo GEPInfo(*PtrMI);
2320 
2321   for (unsigned i = 1; i != 3; ++i) {
2322     const MachineOperand &GEPOp = PtrMI->getOperand(i);
2323     const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
2324     assert(OpDef);
2325     if (i == 2 && isConstant(*OpDef)) {
2326       // TODO: Could handle constant base + variable offset, but a combine
2327       // probably should have commuted it.
2328       assert(GEPInfo.Imm == 0);
2329       GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
2330       continue;
2331     }
2332     const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
2333     if (OpBank->getID() == AMDGPU::SGPRRegBankID)
2334       GEPInfo.SgprParts.push_back(GEPOp.getReg());
2335     else
2336       GEPInfo.VgprParts.push_back(GEPOp.getReg());
2337   }
2338 
2339   AddrInfo.push_back(GEPInfo);
2340   getAddrModeInfo(*PtrMI, MRI, AddrInfo);
2341 }
2342 
2343 bool AMDGPUInstructionSelector::isSGPR(Register Reg) const {
2344   return RBI.getRegBank(Reg, *MRI, TRI)->getID() == AMDGPU::SGPRRegBankID;
2345 }
2346 
2347 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
2348   if (!MI.hasOneMemOperand())
2349     return false;
2350 
2351   const MachineMemOperand *MMO = *MI.memoperands_begin();
2352   const Value *Ptr = MMO->getValue();
2353 
2354   // UndefValue means this is a load of a kernel input.  These are uniform.
2355   // Sometimes LDS instructions have constant pointers.
2356   // If Ptr is null, then that means this mem operand contains a
2357   // PseudoSourceValue like GOT.
2358   if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
2359       isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
2360     return true;
2361 
2362   if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
2363     return true;
2364 
2365   const Instruction *I = dyn_cast<Instruction>(Ptr);
2366   return I && I->getMetadata("amdgpu.uniform");
2367 }
2368 
2369 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
2370   for (const GEPInfo &GEPInfo : AddrInfo) {
2371     if (!GEPInfo.VgprParts.empty())
2372       return true;
2373   }
2374   return false;
2375 }
2376 
2377 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
2378   const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2379   unsigned AS = PtrTy.getAddressSpace();
2380   if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
2381       STI.ldsRequiresM0Init()) {
2382     MachineBasicBlock *BB = I.getParent();
2383 
2384     // If DS instructions require M0 initialization, insert it before selecting.
2385     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2386       .addImm(-1);
2387   }
2388 }
2389 
2390 bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW(
2391   MachineInstr &I) const {
2392   if (I.getOpcode() == TargetOpcode::G_ATOMICRMW_FADD) {
2393     const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2394     unsigned AS = PtrTy.getAddressSpace();
2395     if (AS == AMDGPUAS::GLOBAL_ADDRESS)
2396       return selectGlobalAtomicFadd(I, I.getOperand(1), I.getOperand(2));
2397   }
2398 
2399   initM0(I);
2400   return selectImpl(I, *CoverageInfo);
2401 }
2402 
2403 static bool isVCmpResult(Register Reg, MachineRegisterInfo &MRI) {
2404   if (Reg.isPhysical())
2405     return false;
2406 
2407   MachineInstr &MI = *MRI.getUniqueVRegDef(Reg);
2408   const unsigned Opcode = MI.getOpcode();
2409 
2410   if (Opcode == AMDGPU::COPY)
2411     return isVCmpResult(MI.getOperand(1).getReg(), MRI);
2412 
2413   if (Opcode == AMDGPU::G_AND || Opcode == AMDGPU::G_OR ||
2414       Opcode == AMDGPU::G_XOR)
2415     return isVCmpResult(MI.getOperand(1).getReg(), MRI) &&
2416            isVCmpResult(MI.getOperand(2).getReg(), MRI);
2417 
2418   if (Opcode == TargetOpcode::G_INTRINSIC)
2419     return MI.getIntrinsicID() == Intrinsic::amdgcn_class;
2420 
2421   return Opcode == AMDGPU::G_ICMP || Opcode == AMDGPU::G_FCMP;
2422 }
2423 
2424 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
2425   MachineBasicBlock *BB = I.getParent();
2426   MachineOperand &CondOp = I.getOperand(0);
2427   Register CondReg = CondOp.getReg();
2428   const DebugLoc &DL = I.getDebugLoc();
2429 
2430   unsigned BrOpcode;
2431   Register CondPhysReg;
2432   const TargetRegisterClass *ConstrainRC;
2433 
2434   // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
2435   // whether the branch is uniform when selecting the instruction. In
2436   // GlobalISel, we should push that decision into RegBankSelect. Assume for now
2437   // RegBankSelect knows what it's doing if the branch condition is scc, even
2438   // though it currently does not.
2439   if (!isVCC(CondReg, *MRI)) {
2440     if (MRI->getType(CondReg) != LLT::scalar(32))
2441       return false;
2442 
2443     CondPhysReg = AMDGPU::SCC;
2444     BrOpcode = AMDGPU::S_CBRANCH_SCC1;
2445     ConstrainRC = &AMDGPU::SReg_32RegClass;
2446   } else {
    // FIXME: Should scc->vcc copies be ANDed with exec?

    // Unless the value of CondReg is the result of a V_CMP* instruction, we
    // need to AND it with exec: a V_CMP already writes zero for inactive
    // lanes, so its result is implicitly exec-masked.
2451     if (!isVCmpResult(CondReg, *MRI)) {
2452       const bool Is64 = STI.isWave64();
2453       const unsigned Opcode = Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
2454       const Register Exec = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
2455 
2456       Register TmpReg = MRI->createVirtualRegister(TRI.getBoolRC());
2457       BuildMI(*BB, &I, DL, TII.get(Opcode), TmpReg)
2458           .addReg(CondReg)
2459           .addReg(Exec);
2460       CondReg = TmpReg;
2461     }
2462 
2463     CondPhysReg = TRI.getVCC();
2464     BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
2465     ConstrainRC = TRI.getBoolRC();
2466   }
2467 
2468   if (!MRI->getRegClassOrNull(CondReg))
2469     MRI->setRegClass(CondReg, ConstrainRC);
2470 
2471   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
2472     .addReg(CondReg);
2473   BuildMI(*BB, &I, DL, TII.get(BrOpcode))
2474     .addMBB(I.getOperand(1).getMBB());
2475 
2476   I.eraseFromParent();
2477   return true;
2478 }
2479 
2480 bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE(
2481   MachineInstr &I) const {
2482   Register DstReg = I.getOperand(0).getReg();
2483   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2484   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2485   I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
2486   if (IsVGPR)
2487     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
2488 
2489   return RBI.constrainGenericRegister(
2490     DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
2491 }
2492 
2493 bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
2494   Register DstReg = I.getOperand(0).getReg();
2495   Register SrcReg = I.getOperand(1).getReg();
2496   Register MaskReg = I.getOperand(2).getReg();
2497   LLT Ty = MRI->getType(DstReg);
2498   LLT MaskTy = MRI->getType(MaskReg);
2499   MachineBasicBlock *BB = I.getParent();
2500   const DebugLoc &DL = I.getDebugLoc();
2501 
2502   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2503   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2504   const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI);
2505   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2506   if (DstRB != SrcRB) // Should only happen for hand written MIR.
2507     return false;
2508 
2509   // Try to avoid emitting a bit operation when we only need to touch half of
2510   // the 64-bit pointer.
2511   APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zext(64);
2512   const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
2513   const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
2514 
2515   const bool CanCopyLow32 = (MaskOnes & MaskLo32) == MaskLo32;
2516   const bool CanCopyHi32 = (MaskOnes & MaskHi32) == MaskHi32;
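  // E.g. a mask that only clears low alignment bits leaves the whole high
  // half known-ones, so the high half degrades to a plain copy.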
2517 
2518   if (!IsVGPR && Ty.getSizeInBits() == 64 &&
2519       !CanCopyLow32 && !CanCopyHi32) {
2520     auto MIB = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_AND_B64), DstReg)
2521       .addReg(SrcReg)
2522       .addReg(MaskReg);
2523     I.eraseFromParent();
2524     return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2525   }
2526 
2527   unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
2528   const TargetRegisterClass &RegRC
2529     = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2530 
2531   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB);
2532   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB);
2533   const TargetRegisterClass *MaskRC =
2534       TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB);
2535 
2536   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2537       !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2538       !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI))
2539     return false;
2540 
2541   if (Ty.getSizeInBits() == 32) {
2542     assert(MaskTy.getSizeInBits() == 32 &&
2543            "ptrmask should have been narrowed during legalize");
2544 
2545     BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
2546       .addReg(SrcReg)
2547       .addReg(MaskReg);
2548     I.eraseFromParent();
2549     return true;
2550   }
2551 
2552   Register HiReg = MRI->createVirtualRegister(&RegRC);
2553   Register LoReg = MRI->createVirtualRegister(&RegRC);
2554 
2555   // Extract the subregisters from the source pointer.
2556   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
2557     .addReg(SrcReg, 0, AMDGPU::sub0);
2558   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
2559     .addReg(SrcReg, 0, AMDGPU::sub1);
2560 
2561   Register MaskedLo, MaskedHi;
2562 
2563   if (CanCopyLow32) {
2564     // If all the bits in the low half are 1, we only need a copy for it.
2565     MaskedLo = LoReg;
2566   } else {
2567     // Extract the mask subregister and apply the and.
2568     Register MaskLo = MRI->createVirtualRegister(&RegRC);
2569     MaskedLo = MRI->createVirtualRegister(&RegRC);
2570 
2571     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo)
2572       .addReg(MaskReg, 0, AMDGPU::sub0);
2573     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo)
2574       .addReg(LoReg)
2575       .addReg(MaskLo);
2576   }
2577 
2578   if (CanCopyHi32) {
2579     // If all the bits in the high half are 1, we only need a copy for it.
2580     MaskedHi = HiReg;
2581   } else {
2582     Register MaskHi = MRI->createVirtualRegister(&RegRC);
2583     MaskedHi = MRI->createVirtualRegister(&RegRC);
2584 
2585     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi)
2586       .addReg(MaskReg, 0, AMDGPU::sub1);
2587     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi)
2588       .addReg(HiReg)
2589       .addReg(MaskHi);
2590   }
2591 
2592   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2593     .addReg(MaskedLo)
2594     .addImm(AMDGPU::sub0)
2595     .addReg(MaskedHi)
2596     .addImm(AMDGPU::sub1);
2597   I.eraseFromParent();
2598   return true;
2599 }
2600 
2601 /// Return the register to use for the index value, and the subregister to use
2602 /// for the indirectly accessed register.
2603 static std::pair<Register, unsigned>
2604 computeIndirectRegIndex(MachineRegisterInfo &MRI,
2605                         const SIRegisterInfo &TRI,
2606                         const TargetRegisterClass *SuperRC,
2607                         Register IdxReg,
2608                         unsigned EltSize) {
2609   Register IdxBaseReg;
2610   int Offset;
2611 
2612   std::tie(IdxBaseReg, Offset) = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg);
2613   if (IdxBaseReg == AMDGPU::NoRegister) {
2614     // This will happen if the index is a known constant. This should ordinarily
2615     // be legalized out, but handle it as a register just in case.
2616     assert(Offset == 0);
2617     IdxBaseReg = IdxReg;
2618   }
2619 
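  // getRegSplitParts returns the subregister indices that cover SuperRC in
  // EltSize-byte pieces; the constant part of the index picks which piece
  // the access starts at.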
2620   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);
2621 
2622   // Skip out of bounds offsets, or else we would end up using an undefined
2623   // register.
2624   if (static_cast<unsigned>(Offset) >= SubRegs.size())
2625     return std::make_pair(IdxReg, SubRegs[0]);
2626   return std::make_pair(IdxBaseReg, SubRegs[Offset]);
2627 }
2628 
2629 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
2630   MachineInstr &MI) const {
2631   Register DstReg = MI.getOperand(0).getReg();
2632   Register SrcReg = MI.getOperand(1).getReg();
2633   Register IdxReg = MI.getOperand(2).getReg();
2634 
2635   LLT DstTy = MRI->getType(DstReg);
2636   LLT SrcTy = MRI->getType(SrcReg);
2637 
2638   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2639   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2640   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2641 
  // The index must be scalar. If it wasn't, RegBankSelect should have moved
  // this into a waterfall loop.
2644   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2645     return false;
2646 
2647   const TargetRegisterClass *SrcRC =
2648       TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB);
2649   const TargetRegisterClass *DstRC =
2650       TRI.getRegClassForTypeOnBank(DstTy, *DstRB);
2651   if (!SrcRC || !DstRC)
2652     return false;
2653   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2654       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2655       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2656     return false;
2657 
2658   MachineBasicBlock *BB = MI.getParent();
2659   const DebugLoc &DL = MI.getDebugLoc();
2660   const bool Is64 = DstTy.getSizeInBits() == 64;
2661 
2662   unsigned SubReg;
2663   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg,
2664                                                      DstTy.getSizeInBits() / 8);
2665 
2666   if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
2667     if (DstTy.getSizeInBits() != 32 && !Is64)
2668       return false;
2669 
2670     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2671       .addReg(IdxReg);
2672 
2673     unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
2674     BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
2675       .addReg(SrcReg, 0, SubReg)
2676       .addReg(SrcReg, RegState::Implicit);
2677     MI.eraseFromParent();
2678     return true;
2679   }
2680 
2681   if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
2682     return false;
2683 
2684   if (!STI.useVGPRIndexMode()) {
2685     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2686       .addReg(IdxReg);
2687     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
2688       .addReg(SrcReg, 0, SubReg)
2689       .addReg(SrcReg, RegState::Implicit);
2690     MI.eraseFromParent();
2691     return true;
2692   }
2693 
2694   const MCInstrDesc &GPRIDXDesc =
2695       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*SrcRC), true);
2696   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2697       .addReg(SrcReg)
2698       .addReg(IdxReg)
2699       .addImm(SubReg);
2700 
2701   MI.eraseFromParent();
2702   return true;
2703 }
2704 
2705 // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
2706 bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
2707   MachineInstr &MI) const {
2708   Register DstReg = MI.getOperand(0).getReg();
2709   Register VecReg = MI.getOperand(1).getReg();
2710   Register ValReg = MI.getOperand(2).getReg();
2711   Register IdxReg = MI.getOperand(3).getReg();
2712 
2713   LLT VecTy = MRI->getType(DstReg);
2714   LLT ValTy = MRI->getType(ValReg);
2715   unsigned VecSize = VecTy.getSizeInBits();
2716   unsigned ValSize = ValTy.getSizeInBits();
2717 
2718   const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
2719   const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
2720   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2721 
2722   assert(VecTy.getElementType() == ValTy);
2723 
  // The index must be scalar. If it wasn't, RegBankSelect should have moved
  // this into a waterfall loop.
2726   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2727     return false;
2728 
2729   const TargetRegisterClass *VecRC =
2730       TRI.getRegClassForTypeOnBank(VecTy, *VecRB);
2731   const TargetRegisterClass *ValRC =
2732       TRI.getRegClassForTypeOnBank(ValTy, *ValRB);
2733 
2734   if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
2735       !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
2736       !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
2737       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2738     return false;
2739 
2740   if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
2741     return false;
2742 
2743   unsigned SubReg;
2744   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg,
2745                                                      ValSize / 8);
2746 
2747   const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
2748                          STI.useVGPRIndexMode();
2749 
2750   MachineBasicBlock *BB = MI.getParent();
2751   const DebugLoc &DL = MI.getDebugLoc();
2752 
2753   if (!IndexMode) {
2754     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2755       .addReg(IdxReg);
2756 
2757     const MCInstrDesc &RegWriteOp = TII.getIndirectRegWriteMovRelPseudo(
2758         VecSize, ValSize, VecRB->getID() == AMDGPU::SGPRRegBankID);
2759     BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
2760         .addReg(VecReg)
2761         .addReg(ValReg)
2762         .addImm(SubReg);
2763     MI.eraseFromParent();
2764     return true;
2765   }
2766 
2767   const MCInstrDesc &GPRIDXDesc =
2768       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false);
2769   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2770       .addReg(VecReg)
2771       .addReg(ValReg)
2772       .addReg(IdxReg)
2773       .addImm(SubReg);
2774 
2775   MI.eraseFromParent();
2776   return true;
2777 }
2778 
2779 static bool isZeroOrUndef(int X) {
2780   return X == 0 || X == -1;
2781 }
2782 
2783 static bool isOneOrUndef(int X) {
2784   return X == 1 || X == -1;
2785 }
2786 
2787 static bool isZeroOrOneOrUndef(int X) {
2788   return X == 0 || X == 1 || X == -1;
2789 }
2790 
2791 // Normalize a VOP3P shuffle mask to refer to the low/high half of a single
2792 // 32-bit register.
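// E.g. the mask <2, 3> reads both halves of Src1, and is rewritten to <0, 1>
// with Src1 returned as the register to read.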
2793 static Register normalizeVOP3PMask(int NewMask[2], Register Src0, Register Src1,
2794                                    ArrayRef<int> Mask) {
2795   NewMask[0] = Mask[0];
2796   NewMask[1] = Mask[1];
2797   if (isZeroOrOneOrUndef(Mask[0]) && isZeroOrOneOrUndef(Mask[1]))
2798     return Src0;
2799 
2800   assert(NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1);
2801   assert(NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1);
2802 
  // Remap the mask values from 2/3 to 0/1, preserving -1 (undef).
2804   NewMask[0] = NewMask[0] == -1 ? -1 : NewMask[0] - 2;
2805   NewMask[1] = NewMask[1] == -1 ? -1 : NewMask[1] - 2;
2806   return Src1;
2807 }
2808 
2809 // This is only legal with VOP3P instructions as an aid to op_sel matching.
2810 bool AMDGPUInstructionSelector::selectG_SHUFFLE_VECTOR(
2811   MachineInstr &MI) const {
2812   Register DstReg = MI.getOperand(0).getReg();
2813   Register Src0Reg = MI.getOperand(1).getReg();
2814   Register Src1Reg = MI.getOperand(2).getReg();
2815   ArrayRef<int> ShufMask = MI.getOperand(3).getShuffleMask();
2816 
2817   const LLT V2S16 = LLT::fixed_vector(2, 16);
2818   if (MRI->getType(DstReg) != V2S16 || MRI->getType(Src0Reg) != V2S16)
2819     return false;
2820 
2821   if (!AMDGPU::isLegalVOP3PShuffleMask(ShufMask))
2822     return false;
2823 
2824   assert(ShufMask.size() == 2);
2825   assert(STI.hasSDWA() && "no target has VOP3P but not SDWA");
2826 
2827   MachineBasicBlock *MBB = MI.getParent();
2828   const DebugLoc &DL = MI.getDebugLoc();
2829 
2830   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2831   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
2832   const TargetRegisterClass &RC = IsVALU ?
2833     AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2834 
  // Handle the degenerate case, which should have been folded out.
2836   if (ShufMask[0] == -1 && ShufMask[1] == -1) {
2837     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::IMPLICIT_DEF), DstReg);
2838 
2839     MI.eraseFromParent();
2840     return RBI.constrainGenericRegister(DstReg, RC, *MRI);
2841   }
2842 
2843   // A legal VOP3P mask only reads one of the sources.
2844   int Mask[2];
2845   Register SrcVec = normalizeVOP3PMask(Mask, Src0Reg, Src1Reg, ShufMask);
2846 
2847   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI) ||
2848       !RBI.constrainGenericRegister(SrcVec, RC, *MRI))
2849     return false;
2850 
  // TODO: This also should have been folded out.
2852   if (isZeroOrUndef(Mask[0]) && isOneOrUndef(Mask[1])) {
2853     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::COPY), DstReg)
2854       .addReg(SrcVec);
2855 
2856     MI.eraseFromParent();
2857     return true;
2858   }
2859 
2860   if (Mask[0] == 1 && Mask[1] == -1) {
2861     if (IsVALU) {
2862       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
2863         .addImm(16)
2864         .addReg(SrcVec);
2865     } else {
2866       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
2867         .addReg(SrcVec)
2868         .addImm(16);
2869     }
2870   } else if (Mask[0] == -1 && Mask[1] == 0) {
2871     if (IsVALU) {
2872       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), DstReg)
2873         .addImm(16)
2874         .addReg(SrcVec);
2875     } else {
2876       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHL_B32), DstReg)
2877         .addReg(SrcVec)
2878         .addImm(16);
2879     }
2880   } else if (Mask[0] == 0 && Mask[1] == 0) {
2881     if (IsVALU) {
2882       // Write low half of the register into the high half.
2883       MachineInstr *MovSDWA =
2884         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2885         .addImm(0)                             // $src0_modifiers
2886         .addReg(SrcVec)                        // $src0
2887         .addImm(0)                             // $clamp
2888         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
2889         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2890         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
2891         .addReg(SrcVec, RegState::Implicit);
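      // With UNUSED_PRESERVE the untouched half of the destination keeps the
      // prior register contents, which must come from a tied input; tie the
      // implicit SrcVec use to the def to provide it.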
2892       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2893     } else {
2894       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2895         .addReg(SrcVec)
2896         .addReg(SrcVec);
2897     }
2898   } else if (Mask[0] == 1 && Mask[1] == 1) {
2899     if (IsVALU) {
2900       // Write high half of the register into the low half.
2901       MachineInstr *MovSDWA =
2902         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2903         .addImm(0)                             // $src0_modifiers
2904         .addReg(SrcVec)                        // $src0
2905         .addImm(0)                             // $clamp
2906         .addImm(AMDGPU::SDWA::WORD_0)          // $dst_sel
2907         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2908         .addImm(AMDGPU::SDWA::WORD_1)          // $src0_sel
2909         .addReg(SrcVec, RegState::Implicit);
2910       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2911     } else {
2912       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HH_B32_B16), DstReg)
2913         .addReg(SrcVec)
2914         .addReg(SrcVec);
2915     }
2916   } else if (Mask[0] == 1 && Mask[1] == 0) {
2917     if (IsVALU) {
2918       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_ALIGNBIT_B32_e64), DstReg)
2919         .addReg(SrcVec)
2920         .addReg(SrcVec)
2921         .addImm(16);
2922     } else {
2923       Register TmpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2924       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), TmpReg)
2925         .addReg(SrcVec)
2926         .addImm(16);
2927       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2928         .addReg(TmpReg)
2929         .addReg(SrcVec);
2930     }
2931   } else
2932     llvm_unreachable("all shuffle masks should be handled");
2933 
2934   MI.eraseFromParent();
2935   return true;
2936 }
2937 
2938 bool AMDGPUInstructionSelector::selectAMDGPU_BUFFER_ATOMIC_FADD(
2939   MachineInstr &MI) const {
2940   if (STI.hasGFX90AInsts())
2941     return selectImpl(MI, *CoverageInfo);
2942 
2943   MachineBasicBlock *MBB = MI.getParent();
2944   const DebugLoc &DL = MI.getDebugLoc();
2945 
2946   if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
2947     Function &F = MBB->getParent()->getFunction();
2948     DiagnosticInfoUnsupported
2949       NoFpRet(F, "return versions of fp atomics not supported",
2950               MI.getDebugLoc(), DS_Error);
2951     F.getContext().diagnose(NoFpRet);
2952     return false;
2953   }
2954 
  // FIXME: This is only needed because tablegen requires the number of dst
  // operands in the match and replace patterns to be the same. Otherwise these
  // patterns could be exported from the SDag path.
2958   MachineOperand &VDataIn = MI.getOperand(1);
2959   MachineOperand &VIndex = MI.getOperand(3);
2960   MachineOperand &VOffset = MI.getOperand(4);
2961   MachineOperand &SOffset = MI.getOperand(5);
2962   int16_t Offset = MI.getOperand(6).getImm();
2963 
2964   bool HasVOffset = !isOperandImmEqual(VOffset, 0, *MRI);
2965   bool HasVIndex = !isOperandImmEqual(VIndex, 0, *MRI);
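  // A constant zero vindex or voffset can be omitted entirely, selecting one
  // of the OFFEN/IDXEN/OFFSET variants below instead of BOTHEN.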
2966 
2967   unsigned Opcode;
2968   if (HasVOffset) {
2969     Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN
2970                        : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN;
2971   } else {
2972     Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN
2973                        : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET;
2974   }
2975 
2976   if (MRI->getType(VDataIn.getReg()).isVector()) {
2977     switch (Opcode) {
2978     case AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN:
2979       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN;
2980       break;
2981     case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN:
2982       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFEN;
2983       break;
2984     case AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN:
2985       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_IDXEN;
2986       break;
2987     case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET:
2988       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFSET;
2989       break;
2990     }
2991   }
2992 
2993   auto I = BuildMI(*MBB, MI, DL, TII.get(Opcode));
2994   I.add(VDataIn);
2995 
2996   if (Opcode == AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN ||
2997       Opcode == AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN) {
2998     Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class());
2999     BuildMI(*MBB, &*I, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
3000       .addReg(VIndex.getReg())
3001       .addImm(AMDGPU::sub0)
3002       .addReg(VOffset.getReg())
3003       .addImm(AMDGPU::sub1);
3004 
3005     I.addReg(IdxReg);
3006   } else if (HasVIndex) {
3007     I.add(VIndex);
3008   } else if (HasVOffset) {
3009     I.add(VOffset);
3010   }
3011 
3012   I.add(MI.getOperand(2)); // rsrc
3013   I.add(SOffset);
3014   I.addImm(Offset);
3015   I.addImm(MI.getOperand(7).getImm()); // cpol
3016   I.cloneMemRefs(MI);
3017 
3018   MI.eraseFromParent();
3019 
3020   return true;
3021 }
3022 
3023 bool AMDGPUInstructionSelector::selectGlobalAtomicFadd(
3024   MachineInstr &MI, MachineOperand &AddrOp, MachineOperand &DataOp) const {
3025 
3026   if (STI.hasGFX90AInsts()) {
    // gfx90a adds return versions of the global atomic fadd instructions, so
    // no special handling is required.
3029     return selectImpl(MI, *CoverageInfo);
3030   }
3031 
3032   MachineBasicBlock *MBB = MI.getParent();
3033   const DebugLoc &DL = MI.getDebugLoc();
3034 
3035   if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
3036     Function &F = MBB->getParent()->getFunction();
3037     DiagnosticInfoUnsupported
3038       NoFpRet(F, "return versions of fp atomics not supported",
3039               MI.getDebugLoc(), DS_Error);
3040     F.getContext().diagnose(NoFpRet);
3041     return false;
3042   }
3043 
  // FIXME: This is only needed because tablegen requires the number of dst
  // operands in the match and replace patterns to be the same. Otherwise these
  // patterns could be exported from the SDag path.
3047   auto Addr = selectFlatOffsetImpl(AddrOp, SIInstrFlags::FlatGlobal);
3048 
3049   Register Data = DataOp.getReg();
3050   const unsigned Opc = MRI->getType(Data).isVector() ?
3051     AMDGPU::GLOBAL_ATOMIC_PK_ADD_F16 : AMDGPU::GLOBAL_ATOMIC_ADD_F32;
3052   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
3053     .addReg(Addr.first)
3054     .addReg(Data)
3055     .addImm(Addr.second)
3056     .addImm(0) // cpol
3057     .cloneMemRefs(MI);
3058 
3059   MI.eraseFromParent();
3060   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3061 }
3062 
3063 bool AMDGPUInstructionSelector::selectBufferLoadLds(MachineInstr &MI) const {
3064   unsigned Opc;
3065   unsigned Size = MI.getOperand(3).getImm();
3066 
3067   // The struct intrinsic variants add one additional operand over raw.
3068   const bool HasVIndex = MI.getNumOperands() == 9;
3069   Register VIndex;
3070   int OpOffset = 0;
3071   if (HasVIndex) {
3072     VIndex = MI.getOperand(4).getReg();
3073     OpOffset = 1;
3074   }
3075 
3076   Register VOffset = MI.getOperand(4 + OpOffset).getReg();
3077   Optional<ValueAndVReg> MaybeVOffset =
3078       getIConstantVRegValWithLookThrough(VOffset, *MRI);
3079   const bool HasVOffset = !MaybeVOffset || MaybeVOffset->Value.getZExtValue();
3080 
3081   switch (Size) {
3082   default:
3083     return false;
3084   case 1:
3085     Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_BOTHEN
3086                                  : AMDGPU::BUFFER_LOAD_UBYTE_LDS_IDXEN
3087                     : HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFEN
3088                                  : AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFSET;
3089     break;
3090   case 2:
3091     Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_BOTHEN
3092                                  : AMDGPU::BUFFER_LOAD_USHORT_LDS_IDXEN
3093                     : HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFEN
3094                                  : AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFSET;
3095     break;
3096   case 4:
3097     Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_BOTHEN
3098                                  : AMDGPU::BUFFER_LOAD_DWORD_LDS_IDXEN
3099                     : HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFEN
3100                                  : AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFSET;
3101     break;
3102   }
3103 
3104   MachineBasicBlock *MBB = MI.getParent();
3105   const DebugLoc &DL = MI.getDebugLoc();
3106   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
3107     .add(MI.getOperand(2));
3108 
3109   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc));
3110 
3111   if (HasVIndex && HasVOffset) {
3112     Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class());
3113     BuildMI(*MBB, &*MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
3114       .addReg(VIndex)
3115       .addImm(AMDGPU::sub0)
3116       .addReg(VOffset)
3117       .addImm(AMDGPU::sub1);
3118 
3119     MIB.addReg(IdxReg);
3120   } else if (HasVIndex) {
3121     MIB.addReg(VIndex);
3122   } else if (HasVOffset) {
3123     MIB.addReg(VOffset);
3124   }
3125 
3126   MIB.add(MI.getOperand(1));            // rsrc
3127   MIB.add(MI.getOperand(5 + OpOffset)); // soffset
3128   MIB.add(MI.getOperand(6 + OpOffset)); // imm offset
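  // The aux operand packs the cache policy in its low bits and the swizzle
  // enable in bit 3.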
3129   unsigned Aux = MI.getOperand(7 + OpOffset).getImm();
3130   MIB.addImm(Aux & AMDGPU::CPol::ALL);  // cpol
3131   MIB.addImm((Aux >> 3) & 1);           // swz
3132 
3133   MachineMemOperand *LoadMMO = *MI.memoperands_begin();
3134   MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo();
3135   LoadPtrI.Offset = MI.getOperand(6 + OpOffset).getImm();
3136   MachinePointerInfo StorePtrI = LoadPtrI;
3137   StorePtrI.V = nullptr;
3138   StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS;
3139 
3140   auto F = LoadMMO->getFlags() &
3141            ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad);
3142   LoadMMO = MF->getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad,
3143                                      Size, LoadMMO->getBaseAlign());
3144 
3145   MachineMemOperand *StoreMMO =
3146       MF->getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore,
3147                                sizeof(int32_t), LoadMMO->getBaseAlign());
3148 
3149   MIB.setMemRefs({LoadMMO, StoreMMO});
3150 
3151   MI.eraseFromParent();
3152   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3153 }
3154 
/// Match a zero extend from a 32-bit value to 64 bits.
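/// Returns the matched 32-bit source register, or an invalid Register on
/// failure.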
3156 static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) {
3157   Register ZExtSrc;
3158   if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))
3159     return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register();
3160 
3161   // Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0)
3162   const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
  if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES)
    return Register();
3165 
3166   if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt())) {
3167     return Def->getOperand(1).getReg();
3168   }
3169 
3170   return Register();
3171 }
3172 
bool AMDGPUInstructionSelector::selectGlobalLoadLds(MachineInstr &MI) const {
3174   unsigned Opc;
3175   unsigned Size = MI.getOperand(3).getImm();
3176 
3177   switch (Size) {
3178   default:
3179     return false;
3180   case 1:
3181     Opc = AMDGPU::GLOBAL_LOAD_LDS_UBYTE;
3182     break;
3183   case 2:
3184     Opc = AMDGPU::GLOBAL_LOAD_LDS_USHORT;
3185     break;
3186   case 4:
3187     Opc = AMDGPU::GLOBAL_LOAD_LDS_DWORD;
3188     break;
3189   }
3190 
3191   MachineBasicBlock *MBB = MI.getParent();
3192   const DebugLoc &DL = MI.getDebugLoc();
3193   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
3194     .add(MI.getOperand(2));
3195 
3196   Register Addr = MI.getOperand(1).getReg();
3197   Register VOffset;
3198   // Try to split SAddr and VOffset. Global and LDS pointers share the same
3199   // immediate offset, so we cannot use a regular SelectGlobalSAddr().
3200   if (!isSGPR(Addr)) {
3201     auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3202     if (isSGPR(AddrDef->Reg)) {
3203       Addr = AddrDef->Reg;
3204     } else if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3205       Register SAddr =
3206           getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
3207       if (SAddr && isSGPR(SAddr)) {
3208         Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
3209         if (Register Off = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) {
3210           Addr = SAddr;
3211           VOffset = Off;
3212         }
3213       }
3214     }
3215   }
3216 
3217   if (isSGPR(Addr)) {
3218     Opc = AMDGPU::getGlobalSaddrOp(Opc);
3219     if (!VOffset) {
3220       VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3221       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), VOffset)
3222         .addImm(0);
3223     }
3224   }
3225 
3226   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
3227     .addReg(Addr);
3228 
3229   if (isSGPR(Addr))
3230     MIB.addReg(VOffset);
3231 
3232   MIB.add(MI.getOperand(4))  // offset
3233      .add(MI.getOperand(5)); // cpol
3234 
3235   MachineMemOperand *LoadMMO = *MI.memoperands_begin();
3236   MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo();
3237   LoadPtrI.Offset = MI.getOperand(4).getImm();
3238   MachinePointerInfo StorePtrI = LoadPtrI;
3239   LoadPtrI.AddrSpace = AMDGPUAS::GLOBAL_ADDRESS;
3240   StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS;
3241   auto F = LoadMMO->getFlags() &
3242            ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad);
3243   LoadMMO = MF->getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad,
3244                                      Size, LoadMMO->getBaseAlign());
3245   MachineMemOperand *StoreMMO =
3246       MF->getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore,
3247                                sizeof(int32_t), Align(4));
3248 
3249   MIB.setMemRefs({LoadMMO, StoreMMO});
3250 
3251   MI.eraseFromParent();
3252   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3253 }
3254 
bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const {
3256   MI.setDesc(TII.get(MI.getOperand(1).getImm()));
3257   MI.removeOperand(1);
3258   MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3259   return true;
3260 }
3261 
3262 bool AMDGPUInstructionSelector::selectSMFMACIntrin(MachineInstr &MI) const {
3263   unsigned Opc;
3264   switch (MI.getIntrinsicID()) {
3265   case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16:
3266     Opc = AMDGPU::V_SMFMAC_F32_16X16X32_F16_e64;
3267     break;
3268   case Intrinsic::amdgcn_smfmac_f32_32x32x16_f16:
3269     Opc = AMDGPU::V_SMFMAC_F32_32X32X16_F16_e64;
3270     break;
3271   case Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16:
3272     Opc = AMDGPU::V_SMFMAC_F32_16X16X32_BF16_e64;
3273     break;
3274   case Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16:
3275     Opc = AMDGPU::V_SMFMAC_F32_32X32X16_BF16_e64;
3276     break;
3277   case Intrinsic::amdgcn_smfmac_i32_16x16x64_i8:
3278     Opc = AMDGPU::V_SMFMAC_I32_16X16X64_I8_e64;
3279     break;
3280   case Intrinsic::amdgcn_smfmac_i32_32x32x32_i8:
3281     Opc = AMDGPU::V_SMFMAC_I32_32X32X32_I8_e64;
3282     break;
3283   default:
3284     llvm_unreachable("unhandled smfmac intrinsic");
3285   }
3286 
3287   auto VDst_In = MI.getOperand(4);
3288 
3289   MI.setDesc(TII.get(Opc));
3290   MI.removeOperand(4); // VDst_In
3291   MI.removeOperand(1); // Intrinsic ID
  MI.addOperand(VDst_In); // Re-add VDst_In to the end
3293   MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3294   return true;
3295 }
3296 
3297 bool AMDGPUInstructionSelector::selectWaveAddress(MachineInstr &MI) const {
3298   Register DstReg = MI.getOperand(0).getReg();
3299   Register SrcReg = MI.getOperand(1).getReg();
3300   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
3301   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
3302   MachineBasicBlock *MBB = MI.getParent();
3303   const DebugLoc &DL = MI.getDebugLoc();
3304 
3305   if (IsVALU) {
3306     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
3307       .addImm(Subtarget->getWavefrontSizeLog2())
3308       .addReg(SrcReg);
3309   } else {
3310     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
3311       .addReg(SrcReg)
3312       .addImm(Subtarget->getWavefrontSizeLog2());
3313   }
3314 
3315   const TargetRegisterClass &RC =
3316       IsVALU ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
3317   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
3318     return false;
3319 
3320   MI.eraseFromParent();
3321   return true;
3322 }
3323 
3324 bool AMDGPUInstructionSelector::select(MachineInstr &I) {
3325   if (I.isPHI())
3326     return selectPHI(I);
3327 
3328   if (!I.isPreISelOpcode()) {
3329     if (I.isCopy())
3330       return selectCOPY(I);
3331     return true;
3332   }
3333 
3334   switch (I.getOpcode()) {
3335   case TargetOpcode::G_AND:
3336   case TargetOpcode::G_OR:
3337   case TargetOpcode::G_XOR:
3338     if (selectImpl(I, *CoverageInfo))
3339       return true;
3340     return selectG_AND_OR_XOR(I);
3341   case TargetOpcode::G_ADD:
3342   case TargetOpcode::G_SUB:
3343     if (selectImpl(I, *CoverageInfo))
3344       return true;
3345     return selectG_ADD_SUB(I);
3346   case TargetOpcode::G_UADDO:
3347   case TargetOpcode::G_USUBO:
3348   case TargetOpcode::G_UADDE:
3349   case TargetOpcode::G_USUBE:
3350     return selectG_UADDO_USUBO_UADDE_USUBE(I);
3351   case AMDGPU::G_AMDGPU_MAD_U64_U32:
3352   case AMDGPU::G_AMDGPU_MAD_I64_I32:
3353     return selectG_AMDGPU_MAD_64_32(I);
3354   case TargetOpcode::G_INTTOPTR:
3355   case TargetOpcode::G_BITCAST:
3356   case TargetOpcode::G_PTRTOINT:
3357     return selectCOPY(I);
3358   case TargetOpcode::G_CONSTANT:
3359   case TargetOpcode::G_FCONSTANT:
3360     return selectG_CONSTANT(I);
3361   case TargetOpcode::G_FNEG:
3362     if (selectImpl(I, *CoverageInfo))
3363       return true;
3364     return selectG_FNEG(I);
3365   case TargetOpcode::G_FABS:
3366     if (selectImpl(I, *CoverageInfo))
3367       return true;
3368     return selectG_FABS(I);
3369   case TargetOpcode::G_EXTRACT:
3370     return selectG_EXTRACT(I);
3371   case TargetOpcode::G_MERGE_VALUES:
3372   case TargetOpcode::G_BUILD_VECTOR:
3373   case TargetOpcode::G_CONCAT_VECTORS:
3374     return selectG_MERGE_VALUES(I);
3375   case TargetOpcode::G_UNMERGE_VALUES:
3376     return selectG_UNMERGE_VALUES(I);
3377   case TargetOpcode::G_BUILD_VECTOR_TRUNC:
3378     return selectG_BUILD_VECTOR_TRUNC(I);
3379   case TargetOpcode::G_PTR_ADD:
3380     return selectG_PTR_ADD(I);
3381   case TargetOpcode::G_IMPLICIT_DEF:
3382     return selectG_IMPLICIT_DEF(I);
3383   case TargetOpcode::G_FREEZE:
3384     return selectCOPY(I);
3385   case TargetOpcode::G_INSERT:
3386     return selectG_INSERT(I);
3387   case TargetOpcode::G_INTRINSIC:
3388     return selectG_INTRINSIC(I);
3389   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
3390     return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
3391   case TargetOpcode::G_ICMP:
3392     if (selectG_ICMP(I))
3393       return true;
3394     return selectImpl(I, *CoverageInfo);
3395   case TargetOpcode::G_LOAD:
3396   case TargetOpcode::G_STORE:
3397   case TargetOpcode::G_ATOMIC_CMPXCHG:
3398   case TargetOpcode::G_ATOMICRMW_XCHG:
3399   case TargetOpcode::G_ATOMICRMW_ADD:
3400   case TargetOpcode::G_ATOMICRMW_SUB:
3401   case TargetOpcode::G_ATOMICRMW_AND:
3402   case TargetOpcode::G_ATOMICRMW_OR:
3403   case TargetOpcode::G_ATOMICRMW_XOR:
3404   case TargetOpcode::G_ATOMICRMW_MIN:
3405   case TargetOpcode::G_ATOMICRMW_MAX:
3406   case TargetOpcode::G_ATOMICRMW_UMIN:
3407   case TargetOpcode::G_ATOMICRMW_UMAX:
3408   case TargetOpcode::G_ATOMICRMW_FADD:
3409   case AMDGPU::G_AMDGPU_ATOMIC_INC:
3410   case AMDGPU::G_AMDGPU_ATOMIC_DEC:
3411   case AMDGPU::G_AMDGPU_ATOMIC_FMIN:
3412   case AMDGPU::G_AMDGPU_ATOMIC_FMAX:
3413     return selectG_LOAD_STORE_ATOMICRMW(I);
3414   case TargetOpcode::G_SELECT:
3415     return selectG_SELECT(I);
3416   case TargetOpcode::G_TRUNC:
3417     return selectG_TRUNC(I);
3418   case TargetOpcode::G_SEXT:
3419   case TargetOpcode::G_ZEXT:
3420   case TargetOpcode::G_ANYEXT:
3421   case TargetOpcode::G_SEXT_INREG:
3422     if (selectImpl(I, *CoverageInfo))
3423       return true;
3424     return selectG_SZA_EXT(I);
3425   case TargetOpcode::G_BRCOND:
3426     return selectG_BRCOND(I);
3427   case TargetOpcode::G_GLOBAL_VALUE:
3428     return selectG_GLOBAL_VALUE(I);
3429   case TargetOpcode::G_PTRMASK:
3430     return selectG_PTRMASK(I);
3431   case TargetOpcode::G_EXTRACT_VECTOR_ELT:
3432     return selectG_EXTRACT_VECTOR_ELT(I);
3433   case TargetOpcode::G_INSERT_VECTOR_ELT:
3434     return selectG_INSERT_VECTOR_ELT(I);
3435   case TargetOpcode::G_SHUFFLE_VECTOR:
3436     return selectG_SHUFFLE_VECTOR(I);
3437   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
3438   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16:
3439   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE:
3440   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: {
3441     const AMDGPU::ImageDimIntrinsicInfo *Intr
3442       = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID());
3443     assert(Intr && "not an image intrinsic with image pseudo");
3444     return selectImageIntrinsic(I, Intr);
3445   }
3446   case AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY:
3447     return selectBVHIntrinsic(I);
3448   case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD:
3449     return selectAMDGPU_BUFFER_ATOMIC_FADD(I);
3450   case AMDGPU::G_SBFX:
3451   case AMDGPU::G_UBFX:
3452     return selectG_SBFX_UBFX(I);
3453   case AMDGPU::G_SI_CALL:
3454     I.setDesc(TII.get(AMDGPU::SI_CALL));
3455     return true;
3456   case AMDGPU::G_AMDGPU_WAVE_ADDRESS:
3457     return selectWaveAddress(I);
3458   default:
3459     return selectImpl(I, *CoverageInfo);
3460   }
3461   return false;
3462 }
3463 
3464 InstructionSelector::ComplexRendererFns
3465 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}
3471 
3472 std::pair<Register, unsigned>
3473 AMDGPUInstructionSelector::selectVOP3ModsImpl(MachineOperand &Root,
3474                                               bool AllowAbs) const {
3475   Register Src = Root.getReg();
3476   Register OrigSrc = Src;
3477   unsigned Mods = 0;
3478   MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
3479 
3480   if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
3481     Src = MI->getOperand(1).getReg();
3482     Mods |= SISrcMods::NEG;
3483     MI = getDefIgnoringCopies(Src, *MRI);
3484   }
3485 
3486   if (AllowAbs && MI && MI->getOpcode() == AMDGPU::G_FABS) {
3487     Src = MI->getOperand(1).getReg();
3488     Mods |= SISrcMods::ABS;
3489   }
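  // Note: since G_FNEG is matched before G_FABS above, an fneg(fabs(x))
  // source selects both the NEG and ABS modifiers.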
3490 
3491   if (Mods != 0 &&
3492       RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) {
3493     MachineInstr *UseMI = Root.getParent();
3494 
3495     // If we looked through copies to find source modifiers on an SGPR operand,
3496     // we now have an SGPR register source. To avoid potentially violating the
3497     // constant bus restriction, we need to insert a copy to a VGPR.
3498     Register VGPRSrc = MRI->cloneVirtualRegister(OrigSrc);
3499     BuildMI(*UseMI->getParent(), UseMI, UseMI->getDebugLoc(),
3500             TII.get(AMDGPU::COPY), VGPRSrc)
3501       .addReg(Src);
3502     Src = VGPRSrc;
3503   }
3504 
3505   return std::make_pair(Src, Mods);
3506 }
3507 
/// This will select either an SGPR or VGPR operand and will save us from
/// having to write an extra tablegen pattern.
3511 InstructionSelector::ComplexRendererFns
3512 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
3513   return {{
3514       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3515   }};
3516 }
3517 
3518 InstructionSelector::ComplexRendererFns
3519 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
3520   Register Src;
3521   unsigned Mods;
3522   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3523 
3524   return {{
3525       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3526       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3527       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3528       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3529   }};
3530 }
3531 
3532 InstructionSelector::ComplexRendererFns
3533 AMDGPUInstructionSelector::selectVOP3BMods0(MachineOperand &Root) const {
3534   Register Src;
3535   unsigned Mods;
3536   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3537 
3538   return {{
3539       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3540       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3541       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3542       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3543   }};
3544 }
3545 
3546 InstructionSelector::ComplexRendererFns
3547 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
3548   return {{
3549       [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
3550       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3551       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
3552   }};
3553 }
3554 
3555 InstructionSelector::ComplexRendererFns
3556 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
3557   Register Src;
3558   unsigned Mods;
3559   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3560 
3561   return {{
3562       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3563       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3564   }};
3565 }
3566 
3567 InstructionSelector::ComplexRendererFns
3568 AMDGPUInstructionSelector::selectVOP3BMods(MachineOperand &Root) const {
3569   Register Src;
3570   unsigned Mods;
3571   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3572 
3573   return {{
3574       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3575       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3576   }};
3577 }
3578 
3579 InstructionSelector::ComplexRendererFns
3580 AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
3581   Register Reg = Root.getReg();
3582   const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI);
3583   if (Def && (Def->getOpcode() == AMDGPU::G_FNEG ||
3584               Def->getOpcode() == AMDGPU::G_FABS))
3585     return {};
3586   return {{
3587       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3588   }};
3589 }
3590 
3591 std::pair<Register, unsigned>
3592 AMDGPUInstructionSelector::selectVOP3PModsImpl(
3593   Register Src, const MachineRegisterInfo &MRI, bool IsDOT) const {
3594   unsigned Mods = 0;
3595   MachineInstr *MI = MRI.getVRegDef(Src);
3596 
3597   if (MI && MI->getOpcode() == AMDGPU::G_FNEG &&
3598       // It's possible to see an f32 fneg here, but unlikely.
3599       // TODO: Treat f32 fneg as only high bit.
3600       MRI.getType(Src) == LLT::fixed_vector(2, 16)) {
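    // Flip the negate bit for both the low and high halves of the packed
    // operand.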
3601     Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
3602     Src = MI->getOperand(1).getReg();
3603     MI = MRI.getVRegDef(Src);
3604   }
3605 
3606   // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.
3607   (void)IsDOT; // DOTs do not use OPSEL on gfx940+, check ST.hasDOTOpSelHazard()
3608 
3609   // Packed instructions do not have abs modifiers.
3610   Mods |= SISrcMods::OP_SEL_1;
3611 
3612   return std::make_pair(Src, Mods);
3613 }
3614 
3615 InstructionSelector::ComplexRendererFns
3616 AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
3617   MachineRegisterInfo &MRI
3618     = Root.getParent()->getParent()->getParent()->getRegInfo();
3619 
3620   Register Src;
3621   unsigned Mods;
3622   std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);
3623 
3624   return {{
3625       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3626       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3627   }};
3628 }
3629 
3630 InstructionSelector::ComplexRendererFns
3631 AMDGPUInstructionSelector::selectVOP3PModsDOT(MachineOperand &Root) const {
3632   MachineRegisterInfo &MRI
3633     = Root.getParent()->getParent()->getParent()->getRegInfo();
3634 
3635   Register Src;
3636   unsigned Mods;
3637   std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI, true);
3638 
3639   return {{
3640       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3641       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3642   }};
3643 }
3644 
3645 InstructionSelector::ComplexRendererFns
3646 AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
3647   Register Src;
3648   unsigned Mods;
3649   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3650   if (!isKnownNeverNaN(Src, *MRI))
3651     return None;
3652 
3653   return {{
3654       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3655       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3656   }};
3657 }
3658 
3659 InstructionSelector::ComplexRendererFns
3660 AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
3661   // FIXME: Handle op_sel
3662   return {{
3663       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
3664       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
3665   }};
3666 }
3667 
3668 InstructionSelector::ComplexRendererFns
3669 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
3670   SmallVector<GEPInfo, 4> AddrInfo;
3671   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3672 
3673   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3674     return None;
3675 
3676   const GEPInfo &GEPInfo = AddrInfo[0];
3677   Optional<int64_t> EncodedImm =
3678       AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm, false);
3679   if (!EncodedImm)
3680     return None;
3681 
3682   unsigned PtrReg = GEPInfo.SgprParts[0];
3683   return {{
3684     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3685     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3686   }};
3687 }
3688 
3689 InstructionSelector::ComplexRendererFns
3690 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
3691   SmallVector<GEPInfo, 4> AddrInfo;
3692   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3693 
3694   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3695     return None;
3696 
3697   const GEPInfo &GEPInfo = AddrInfo[0];
3698   Register PtrReg = GEPInfo.SgprParts[0];
3699   Optional<int64_t> EncodedImm =
3700       AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
3701   if (!EncodedImm)
3702     return None;
3703 
3704   return {{
3705     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3706     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3707   }};
3708 }
3709 
3710 InstructionSelector::ComplexRendererFns
3711 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
3712   MachineInstr *MI = Root.getParent();
3713   MachineBasicBlock *MBB = MI->getParent();
3714 
3715   SmallVector<GEPInfo, 4> AddrInfo;
3716   getAddrModeInfo(*MI, *MRI, AddrInfo);
3717 
  // FIXME: We should shrink the GEP if the offset is known to be <= 32 bits;
  // then we can select all ptr + 32-bit offsets, not just immediate offsets.
3720   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3721     return None;
3722 
3723   const GEPInfo &GEPInfo = AddrInfo[0];
3724   // SGPR offset is unsigned.
3725   if (!GEPInfo.Imm || GEPInfo.Imm < 0 || !isUInt<32>(GEPInfo.Imm))
3726     return None;
3727 
  // If we make it this far we have a load with a 32-bit immediate offset.
  // It is OK to select this using an SGPR offset, because we have already
  // failed trying to select this load into one of the _IMM variants since
  // the _IMM patterns are considered before the _SGPR patterns.
3732   Register PtrReg = GEPInfo.SgprParts[0];
3733   Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3734   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
3735           .addImm(GEPInfo.Imm);
3736   return {{
3737     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3738     [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
3739   }};
3740 }
3741 
3742 std::pair<Register, int>
3743 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root,
3744                                                 uint64_t FlatVariant) const {
3745   MachineInstr *MI = Root.getParent();
3746 
3747   auto Default = std::make_pair(Root.getReg(), 0);
3748 
3749   if (!STI.hasFlatInstOffsets())
3750     return Default;
3751 
3752   Register PtrBase;
3753   int64_t ConstOffset;
3754   std::tie(PtrBase, ConstOffset) =
3755       getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3756   if (ConstOffset == 0)
3757     return Default;
3758 
3759   unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
3760   if (!TII.isLegalFLATOffset(ConstOffset, AddrSpace, FlatVariant))
3761     return Default;
3762 
3763   return std::make_pair(PtrBase, ConstOffset);
3764 }
3765 
3766 InstructionSelector::ComplexRendererFns
3767 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
3768   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FLAT);
3769 
3770   return {{
3771       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3772       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3773     }};
3774 }
3775 
3776 InstructionSelector::ComplexRendererFns
3777 AMDGPUInstructionSelector::selectGlobalOffset(MachineOperand &Root) const {
3778   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatGlobal);
3779 
3780   return {{
3781       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3782       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3783   }};
3784 }
3785 
3786 InstructionSelector::ComplexRendererFns
3787 AMDGPUInstructionSelector::selectScratchOffset(MachineOperand &Root) const {
3788   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatScratch);
3789 
3790   return {{
3791       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3792       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3793     }};
3794 }
3795 
3796 // Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
3797 InstructionSelector::ComplexRendererFns
3798 AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
3799   Register Addr = Root.getReg();
3800   Register PtrBase;
3801   int64_t ConstOffset;
3802   int64_t ImmOffset = 0;
3803 
3804   // Match the immediate offset first, which canonically is moved as low as
3805   // possible.
3806   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3807 
3808   if (ConstOffset != 0) {
3809     if (TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS,
3810                               SIInstrFlags::FlatGlobal)) {
3811       Addr = PtrBase;
3812       ImmOffset = ConstOffset;
3813     } else {
3814       auto PtrBaseDef = getDefSrcRegIgnoringCopies(PtrBase, *MRI);
3815       if (isSGPR(PtrBaseDef->Reg)) {
3816         if (ConstOffset > 0) {
3817           // Offset is too large.
3818           //
3819           // saddr + large_offset -> saddr +
3820           //                         (voffset = large_offset & ~MaxOffset) +
3821           //                         (large_offset & MaxOffset);
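          //
          // For illustration, if MaxOffset were 4095, an offset of 5000 would
          // split into a voffset of 4096 plus an immediate of 904.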
3822           int64_t SplitImmOffset, RemainderOffset;
3823           std::tie(SplitImmOffset, RemainderOffset) = TII.splitFlatOffset(
3824               ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, SIInstrFlags::FlatGlobal);
3825 
3826           if (isUInt<32>(RemainderOffset)) {
3827             MachineInstr *MI = Root.getParent();
3828             MachineBasicBlock *MBB = MI->getParent();
3829             Register HighBits =
3830                 MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3831 
3832             BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3833                     HighBits)
3834                 .addImm(RemainderOffset);
3835 
3836             return {{
3837                 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrBase); }, // saddr
3838                 [=](MachineInstrBuilder &MIB) {
3839                   MIB.addReg(HighBits);
3840                 }, // voffset
3841                 [=](MachineInstrBuilder &MIB) { MIB.addImm(SplitImmOffset); },
3842             }};
3843           }
3844         }
3845 
        // We are adding a 64-bit SGPR and a constant. If the constant bus
        // limit is 1 we would need to perform 1 or 2 extra moves for each half
        // of the constant, so it is better to do a scalar add and then issue a
        // single VALU instruction to materialize zero. Otherwise it takes
        // fewer instructions to perform VALU adds with immediates or inline
        // literals.
3851         unsigned NumLiterals =
3852             !TII.isInlineConstant(APInt(32, ConstOffset & 0xffffffff)) +
3853             !TII.isInlineConstant(APInt(32, ConstOffset >> 32));
3854         if (STI.getConstantBusLimit(AMDGPU::V_ADD_U32_e64) > NumLiterals)
3855           return None;
3856       }
3857     }
3858   }
3859 
3860   // Match the variable offset.
3861   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3862   if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3863     // Look through the SGPR->VGPR copy.
3864     Register SAddr =
3865         getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
3866 
3867     if (SAddr && isSGPR(SAddr)) {
3868       Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
3869 
3870       // It's possible voffset is an SGPR here, but the copy to VGPR will be
3871       // inserted later.
3872       if (Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) {
3873         return {{[=](MachineInstrBuilder &MIB) { // saddr
3874                    MIB.addReg(SAddr);
3875                  },
3876                  [=](MachineInstrBuilder &MIB) { // voffset
3877                    MIB.addReg(VOffset);
3878                  },
3879                  [=](MachineInstrBuilder &MIB) { // offset
3880                    MIB.addImm(ImmOffset);
3881                  }}};
3882       }
3883     }
3884   }
3885 
3886   // FIXME: We should probably have folded COPY (G_IMPLICIT_DEF) earlier, and
3887   // drop this.
3888   if (AddrDef->MI->getOpcode() == AMDGPU::G_IMPLICIT_DEF ||
3889       AddrDef->MI->getOpcode() == AMDGPU::G_CONSTANT || !isSGPR(AddrDef->Reg))
3890     return None;
3891 
3892   // It's cheaper to materialize a single 32-bit zero for vaddr than the two
3893   // moves required to copy a 64-bit SGPR to VGPR.
3894   MachineInstr *MI = Root.getParent();
3895   MachineBasicBlock *MBB = MI->getParent();
3896   Register VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3897 
3898   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32), VOffset)
3899       .addImm(0);
3900 
3901   return {{
3902       [=](MachineInstrBuilder &MIB) { MIB.addReg(AddrDef->Reg); }, // saddr
3903       [=](MachineInstrBuilder &MIB) { MIB.addReg(VOffset); },      // voffset
3904       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); }     // offset
3905   }};
3906 }
3907 
3908 InstructionSelector::ComplexRendererFns
3909 AMDGPUInstructionSelector::selectScratchSAddr(MachineOperand &Root) const {
3910   Register Addr = Root.getReg();
3911   Register PtrBase;
3912   int64_t ConstOffset;
3913   int64_t ImmOffset = 0;
3914 
3915   // Match the immediate offset first, which canonically is moved as low as
3916   // possible.
3917   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3918 
3919   if (ConstOffset != 0 &&
3920       TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS,
3921                             SIInstrFlags::FlatScratch)) {
3922     Addr = PtrBase;
3923     ImmOffset = ConstOffset;
3924   }
3925 
3926   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3927   if (AddrDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
3928     int FI = AddrDef->MI->getOperand(1).getIndex();
3929     return {{
3930         [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
3931         [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
3932     }};
3933   }
3934 
3935   Register SAddr = AddrDef->Reg;
3936 
3937   if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3938     Register LHS = AddrDef->MI->getOperand(1).getReg();
3939     Register RHS = AddrDef->MI->getOperand(2).getReg();
3940     auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
3941     auto RHSDef = getDefSrcRegIgnoringCopies(RHS, *MRI);
3942 
3943     if (LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX &&
3944         isSGPR(RHSDef->Reg)) {
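      // Fold the frame index and the SGPR offset into a single scalar add so
      // the result can be used directly as saddr.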
3945       int FI = LHSDef->MI->getOperand(1).getIndex();
3946       MachineInstr &I = *Root.getParent();
3947       MachineBasicBlock *BB = I.getParent();
3948       const DebugLoc &DL = I.getDebugLoc();
3949       SAddr = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3950 
3951       BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_I32), SAddr)
3952           .addFrameIndex(FI)
3953           .addReg(RHSDef->Reg);
3954     }
3955   }
3956 
3957   if (!isSGPR(SAddr))
3958     return None;
3959 
3960   return {{
3961       [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); }, // saddr
3962       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
3963   }};
3964 }
3965 
3966 InstructionSelector::ComplexRendererFns
3967 AMDGPUInstructionSelector::selectScratchSVAddr(MachineOperand &Root) const {
3968   Register Addr = Root.getReg();
3969   Register PtrBase;
3970   int64_t ConstOffset;
3971   int64_t ImmOffset = 0;
3972 
3973   // Match the immediate offset first, which canonically is moved as low as
3974   // possible.
3975   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3976 
3977   if (ConstOffset != 0 &&
3978       TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS, true)) {
3979     Addr = PtrBase;
3980     ImmOffset = ConstOffset;
3981   }
3982 
3983   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3984   if (AddrDef->MI->getOpcode() != AMDGPU::G_PTR_ADD)
3985     return None;
3986 
3987   Register RHS = AddrDef->MI->getOperand(2).getReg();
3988   if (RBI.getRegBank(RHS, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID)
3989     return None;
3990 
3991   Register LHS = AddrDef->MI->getOperand(1).getReg();
3992   auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
3993 
3994   if (LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
3995     int FI = LHSDef->MI->getOperand(1).getIndex();
3996     return {{
3997         [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr
3998         [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
3999         [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4000     }};
4001   }
4002 
4003   if (!isSGPR(LHS))
4004     return None;
4005 
4006   return {{
4007       [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr
4008       [=](MachineInstrBuilder &MIB) { MIB.addReg(LHS); }, // saddr
4009       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4010   }};
4011 }
4012 
4013 InstructionSelector::ComplexRendererFns
4014 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
4015   MachineInstr *MI = Root.getParent();
4016   MachineBasicBlock *MBB = MI->getParent();
4017   MachineFunction *MF = MBB->getParent();
4018   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
4019 
4020   int64_t Offset = 0;
4021   if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) &&
4022       Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) {
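    // The MUBUF immediate offset field is only 12 bits, so materialize the
    // upper bits of the address in a VGPR and keep the low 12 bits as the
    // immediate offset.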
4023     Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4024 
4025     // TODO: Should this be inside the render function? The iterator seems to
4026     // move.
4027     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
4028             HighBits)
4029       .addImm(Offset & ~4095);
4030 
4031     return {{[=](MachineInstrBuilder &MIB) { // rsrc
4032                MIB.addReg(Info->getScratchRSrcReg());
4033              },
4034              [=](MachineInstrBuilder &MIB) { // vaddr
4035                MIB.addReg(HighBits);
4036              },
4037              [=](MachineInstrBuilder &MIB) { // soffset
4038                // Use constant zero for soffset and rely on eliminateFrameIndex
4039                // to choose the appropriate frame register if need be.
4040                MIB.addImm(0);
4041              },
4042              [=](MachineInstrBuilder &MIB) { // offset
4043                MIB.addImm(Offset & 4095);
4044              }}};
4045   }
4046 
4047   assert(Offset == 0 || Offset == -1);
4048 
4049   // Try to fold a frame index directly into the MUBUF vaddr field, and any
4050   // offsets.
4051   Optional<int> FI;
4052   Register VAddr = Root.getReg();
4053   if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
4054     Register PtrBase;
4055     int64_t ConstOffset;
4056     std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(VAddr, *MRI);
4057     if (ConstOffset != 0) {
4058       if (SIInstrInfo::isLegalMUBUFImmOffset(ConstOffset) &&
4059           (!STI.privateMemoryResourceIsRangeChecked() ||
4060            KnownBits->signBitIsZero(PtrBase))) {
4061         const MachineInstr *PtrBaseDef = MRI->getVRegDef(PtrBase);
4062         if (PtrBaseDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
4063           FI = PtrBaseDef->getOperand(1).getIndex();
4064         else
4065           VAddr = PtrBase;
4066         Offset = ConstOffset;
4067       }
4068     } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
4069       FI = RootDef->getOperand(1).getIndex();
4070     }
4071   }
4072 
4073   return {{[=](MachineInstrBuilder &MIB) { // rsrc
4074              MIB.addReg(Info->getScratchRSrcReg());
4075            },
4076            [=](MachineInstrBuilder &MIB) { // vaddr
4077              if (FI.hasValue())
4078                MIB.addFrameIndex(FI.getValue());
4079              else
4080                MIB.addReg(VAddr);
4081            },
4082            [=](MachineInstrBuilder &MIB) { // soffset
4083              // Use constant zero for soffset and rely on eliminateFrameIndex
4084              // to choose the appropriate frame register if need be.
4085              MIB.addImm(0);
4086            },
4087            [=](MachineInstrBuilder &MIB) { // offset
4088              MIB.addImm(Offset);
4089            }}};
4090 }
4091 
4092 bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
4093                                                 int64_t Offset) const {
4094   if (!isUInt<16>(Offset))
4095     return false;
4096 
4097   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
4098     return true;
4099 
  // On Southern Islands, instructions with a negative base value and an
  // offset don't seem to work.
4102   return KnownBits->signBitIsZero(Base);
4103 }
4104 
4105 bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0,
4106                                                  int64_t Offset1,
4107                                                  unsigned Size) const {
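  // The read2/write2 offsets are encoded as 8-bit element counts, so each
  // offset must be a multiple of Size and at most 255 * Size bytes
  // (e.g. 1020 for 4-byte elements).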
4108   if (Offset0 % Size != 0 || Offset1 % Size != 0)
4109     return false;
4110   if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
4111     return false;
4112 
4113   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
4114     return true;
4115 
  // On Southern Islands, instructions with a negative base value and an
  // offset don't seem to work.
4118   return KnownBits->signBitIsZero(Base);
4119 }
4120 
4121 bool AMDGPUInstructionSelector::isUnneededShiftMask(const MachineInstr &MI,
4122                                                     unsigned ShAmtBits) const {
4123   assert(MI.getOpcode() == TargetOpcode::G_AND);
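  // The mask is unneeded if every one of the low ShAmtBits bits is either
  // preserved by the mask or already known to be zero.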
4124 
4125   Optional<APInt> RHS = getIConstantVRegVal(MI.getOperand(2).getReg(), *MRI);
4126   if (!RHS)
4127     return false;
4128 
4129   if (RHS->countTrailingOnes() >= ShAmtBits)
4130     return true;
4131 
4132   const APInt &LHSKnownZeros =
4133       KnownBits->getKnownZeroes(MI.getOperand(1).getReg());
4134   return (LHSKnownZeros | *RHS).countTrailingOnes() >= ShAmtBits;
4135 }
4136 
// Return the wave-level SGPR base address if this is a wave address.
4138 static Register getWaveAddress(const MachineInstr *Def) {
4139   return Def->getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS
4140              ? Def->getOperand(1).getReg()
4141              : Register();
4142 }
4143 
4144 InstructionSelector::ComplexRendererFns
4145 AMDGPUInstructionSelector::selectMUBUFScratchOffset(
4146     MachineOperand &Root) const {
4147   Register Reg = Root.getReg();
4148   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
4149 
4150   const MachineInstr *Def = MRI->getVRegDef(Reg);
4151   if (Register WaveBase = getWaveAddress(Def)) {
4152     return {{
4153         [=](MachineInstrBuilder &MIB) { // rsrc
4154           MIB.addReg(Info->getScratchRSrcReg());
4155         },
4156         [=](MachineInstrBuilder &MIB) { // soffset
4157           MIB.addReg(WaveBase);
4158         },
4159         [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // offset
4160     }};
4161   }
4162 
4163   int64_t Offset = 0;
4164 
4165   // FIXME: Copy check is a hack
4166   Register BasePtr;
4167   if (mi_match(Reg, *MRI, m_GPtrAdd(m_Reg(BasePtr), m_Copy(m_ICst(Offset))))) {
4168     if (!SIInstrInfo::isLegalMUBUFImmOffset(Offset))
4169       return {};
4170     const MachineInstr *BasePtrDef = MRI->getVRegDef(BasePtr);
4171     Register WaveBase = getWaveAddress(BasePtrDef);
4172     if (!WaveBase)
4173       return {};
4174 
4175     return {{
4176         [=](MachineInstrBuilder &MIB) { // rsrc
4177           MIB.addReg(Info->getScratchRSrcReg());
4178         },
4179         [=](MachineInstrBuilder &MIB) { // soffset
4180           MIB.addReg(WaveBase);
4181         },
4182         [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
4183     }};
4184   }
4185 
4186   if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
4187       !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
4188     return {};
4189 
4190   return {{
4191       [=](MachineInstrBuilder &MIB) { // rsrc
4192         MIB.addReg(Info->getScratchRSrcReg());
4193       },
4194       [=](MachineInstrBuilder &MIB) { // soffset
4195         MIB.addImm(0);
4196       },
4197       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
4198   }};
4199 }
4200 
4201 std::pair<Register, unsigned>
4202 AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
4203   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
4204   if (!RootDef)
4205     return std::make_pair(Root.getReg(), 0);
4206 
4207   int64_t ConstAddr = 0;
4208 
4209   Register PtrBase;
4210   int64_t Offset;
4211   std::tie(PtrBase, Offset) =
4212     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
4213 
4214   if (Offset) {
4215     if (isDSOffsetLegal(PtrBase, Offset)) {
4216       // (add n0, c0)
4217       return std::make_pair(PtrBase, Offset);
4218     }
4219   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
4220     // TODO
4221 
4222 
4223   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
4224     // TODO
4225 
4226   }
4227 
4228   return std::make_pair(Root.getReg(), 0);
4229 }
4230 
4231 InstructionSelector::ComplexRendererFns
4232 AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
4233   Register Reg;
4234   unsigned Offset;
4235   std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
4236   return {{
4237       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
4238       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
4239     }};
4240 }
4241 
4242 InstructionSelector::ComplexRendererFns
4243 AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
4244   return selectDSReadWrite2(Root, 4);
4245 }
4246 
4247 InstructionSelector::ComplexRendererFns
4248 AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const {
4249   return selectDSReadWrite2(Root, 8);
4250 }
4251 
4252 InstructionSelector::ComplexRendererFns
4253 AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root,
4254                                               unsigned Size) const {
4255   Register Reg;
4256   unsigned Offset;
4257   std::tie(Reg, Offset) = selectDSReadWrite2Impl(Root, Size);
4258   return {{
4259       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
4260       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
4261       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); }
4262     }};
4263 }
4264 
4265 std::pair<Register, unsigned>
4266 AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root,
4267                                                   unsigned Size) const {
4268   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
4269   if (!RootDef)
4270     return std::make_pair(Root.getReg(), 0);
4271 
4272   int64_t ConstAddr = 0;
4273 
4274   Register PtrBase;
4275   int64_t Offset;
4276   std::tie(PtrBase, Offset) =
4277     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
4278 
4279   if (Offset) {
4280     int64_t OffsetValue0 = Offset;
4281     int64_t OffsetValue1 = Offset + Size;
4282     if (isDSOffset2Legal(PtrBase, OffsetValue0, OffsetValue1, Size)) {
4283       // (add n0, c0)
4284       return std::make_pair(PtrBase, OffsetValue0 / Size);
4285     }
4286   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
4287     // TODO
4288 
4289   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
4290     // TODO
4291 
4292   }
4293 
4294   return std::make_pair(Root.getReg(), 0);
4295 }
4296 
4297 /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
4298 /// the base value with the constant offset. There may be intervening copies
4299 /// between \p Root and the identified constant. Returns \p Root, 0 if this does
4300 /// not match the pattern.
4301 std::pair<Register, int64_t>
4302 AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
4303   Register Root, const MachineRegisterInfo &MRI) const {
4304   MachineInstr *RootI = getDefIgnoringCopies(Root, MRI);
4305   if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
4306     return {Root, 0};
4307 
4308   MachineOperand &RHS = RootI->getOperand(2);
4309   Optional<ValueAndVReg> MaybeOffset =
4310       getIConstantVRegValWithLookThrough(RHS.getReg(), MRI);
4311   if (!MaybeOffset)
4312     return {Root, 0};
4313   return {RootI->getOperand(1).getReg(), MaybeOffset->Value.getSExtValue()};
4314 }
4315 
4316 static void addZeroImm(MachineInstrBuilder &MIB) {
4317   MIB.addImm(0);
4318 }
4319 
4320 /// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p
4321 /// BasePtr is not valid, a null base pointer will be used.
4322 static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4323                           uint32_t FormatLo, uint32_t FormatHi,
4324                           Register BasePtr) {
4325   Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4326   Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4327   Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4328   Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
4329 
4330   B.buildInstr(AMDGPU::S_MOV_B32)
4331     .addDef(RSrc2)
4332     .addImm(FormatLo);
4333   B.buildInstr(AMDGPU::S_MOV_B32)
4334     .addDef(RSrc3)
4335     .addImm(FormatHi);
4336 
  // Build the subregister half holding the constants before building the full
  // 128-bit register. If we are building multiple resource descriptors, this
  // will allow CSEing of the 2-component register.
4340   B.buildInstr(AMDGPU::REG_SEQUENCE)
4341     .addDef(RSrcHi)
4342     .addReg(RSrc2)
4343     .addImm(AMDGPU::sub0)
4344     .addReg(RSrc3)
4345     .addImm(AMDGPU::sub1);
4346 
4347   Register RSrcLo = BasePtr;
4348   if (!BasePtr) {
4349     RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4350     B.buildInstr(AMDGPU::S_MOV_B64)
4351       .addDef(RSrcLo)
4352       .addImm(0);
4353   }
4354 
4355   B.buildInstr(AMDGPU::REG_SEQUENCE)
4356     .addDef(RSrc)
4357     .addReg(RSrcLo)
4358     .addImm(AMDGPU::sub0_sub1)
4359     .addReg(RSrcHi)
4360     .addImm(AMDGPU::sub2_sub3);
4361 
4362   return RSrc;
4363 }
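
// Layout of the 128-bit descriptor built above, in terms of the REG_SEQUENCE
// subregisters (an explanatory note; the meaning of the words follows the
// usual buffer-descriptor format):
//   sub0_sub1 (words 0-1): BasePtr, or 0 if no base pointer was supplied
//   sub2      (word 2)   : FormatLo
//   sub3      (word 3)   : FormatHi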

static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                                const SIInstrInfo &TII, Register BasePtr) {
  uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();

  // FIXME: Why are half the "default" bits ignored based on the addressing
  // mode?
  return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
}

static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                               const SIInstrInfo &TII, Register BasePtr) {
  uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();

  // FIXME: Why are half the "default" bits ignored based on the addressing
  // mode?
  return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
}
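
// Note (an assumption based on the common V# layout, not stated in this
// file): word 2 of the descriptor is NUM_RECORDS, so the offset variant
// passes -1 (0xffffffff) to cover the whole addressable range, while the
// addr64 variant passes 0, presumably because the range check is not used in
// that addressing mode (see the FIXMEs above).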

AMDGPUInstructionSelector::MUBUFAddressData
AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
  MUBUFAddressData Data;
  Data.N0 = Src;

  Register PtrBase;
  int64_t Offset;

  std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
  if (isUInt<32>(Offset)) {
    Data.N0 = PtrBase;
    Data.Offset = Offset;
  }

  if (MachineInstr *InputAdd
      = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
    Data.N2 = InputAdd->getOperand(1).getReg();
    Data.N3 = InputAdd->getOperand(2).getReg();

    // FIXME: Need to fix extra SGPR->VGPR copies inserted.
    // FIXME: This assumes the register is defined by operand 0 of its def.
    //
    // TODO: Remove this when we have copy folding optimizations after
    // RegBankSelect.
    Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
    Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
  }

  return Data;
}
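
// For example (a hedged sketch with hypothetical register names), the address
//   %sum:_(p1) = G_PTR_ADD %a, %b
//   %c:_(s64)  = G_CONSTANT i64 32
//   %ptr:_(p1) = G_PTR_ADD %sum, %c
// parses as N0 = %sum, N2 = %a, N3 = %b, Offset = 32, matching the
// (ptr_add (ptr_add N2, N3), C1) form documented in shouldUseAddr64 below.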

/// Return true if the addr64 MUBUF mode should be used for the given address.
bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
  // (ptr_add N2, N3) -> addr64, or
  // (ptr_add (ptr_add N2, N3), C1) -> addr64
  if (Addr.N2)
    return true;

  const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
  return N0Bank->getID() == AMDGPU::VGPRRegBankID;
}

/// Split an immediate offset \p ImmOffset depending on whether it fits in the
/// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
/// component.
void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
  MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
  if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
    return;

  // The offset does not fit in the immediate field; materialize it in SOffset
  // instead.
  SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  B.buildInstr(AMDGPU::S_MOV_B32)
    .addDef(SOffset)
    .addImm(ImmOffset);
  ImmOffset = 0;
}
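
// Worked example (hedged; the exact legal range is whatever
// SIInstrInfo::isLegalMUBUFImmOffset accepts, typically a 12-bit unsigned
// field): an ImmOffset of 8192 does not fit, so it is moved into a fresh
// SGPR with S_MOV_B32 and ImmOffset is reset to 0, while an ImmOffset of 16
// is left in place.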

bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
  MachineOperand &Root, Register &VAddr, Register &RSrcReg,
  Register &SOffset, int64_t &Offset) const {
  // FIXME: Predicates should stop this from reaching here.
  // The addr64 bit was removed for Volcanic Islands.
  if (!STI.hasAddr64() || STI.useFlatForGlobal())
    return false;

  MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
  if (!shouldUseAddr64(AddrData))
    return false;

  Register N0 = AddrData.N0;
  Register N2 = AddrData.N2;
  Register N3 = AddrData.N3;
  Offset = AddrData.Offset;

  // Base pointer for the SRD.
  Register SRDPtr;

  if (N2) {
    if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
      assert(N3);
      if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
        // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
        // addr64, and construct the default resource from a 0 address.
        VAddr = N0;
      } else {
        SRDPtr = N3;
        VAddr = N2;
      }
    } else {
      // N2 is not divergent.
      SRDPtr = N2;
      VAddr = N3;
    }
  } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
    // Use the default null pointer in the resource.
    VAddr = N0;
  } else {
    // N0 -> offset, or
    // (N0 + C1) -> offset
    SRDPtr = N0;
  }

  MachineIRBuilder B(*Root.getParent());
  RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
  splitIllegalMUBUFOffset(B, SOffset, Offset);
  return true;
}

bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
  MachineOperand &Root, Register &RSrcReg, Register &SOffset,
  int64_t &Offset) const {
  // FIXME: Pattern should not reach here.
  if (STI.useFlatForGlobal())
    return false;

  MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
  if (shouldUseAddr64(AddrData))
    return false;

  // N0 -> offset, or
  // (N0 + C1) -> offset
  Register SRDPtr = AddrData.N0;
  Offset = AddrData.Offset;

  // TODO: Look through extensions for 32-bit soffset.
  MachineIRBuilder B(*Root.getParent());

  RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
  splitIllegalMUBUFOffset(B, SOffset, Offset);
  return true;
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
  Register VAddr;
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
    return {};

  // FIXME: Use defaulted operands for trailing 0s and remove from the complex
  // pattern.
  return {{
      [=](MachineInstrBuilder &MIB) { // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // vaddr
        MIB.addReg(VAddr);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { // offset
        MIB.addImm(Offset);
      },
      addZeroImm, //  cpol
      addZeroImm, //  tfe
      addZeroImm  //  swz
    }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
    return {};

  return {{
      [=](MachineInstrBuilder &MIB) { // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
      addZeroImm, //  cpol
      addZeroImm, //  tfe
      addZeroImm  //  swz
    }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const {
  Register VAddr;
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
    return {};

  // FIXME: Use defaulted operands for trailing 0s and remove from the complex
  // pattern.
  return {{
      [=](MachineInstrBuilder &MIB) { // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // vaddr
        MIB.addReg(VAddr);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { // offset
        MIB.addImm(Offset);
      },
      [=](MachineInstrBuilder &MIB) {
        MIB.addImm(AMDGPU::CPol::GLC); // cpol
      }
    }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
    return {};

  return {{
      [=](MachineInstrBuilder &MIB) { // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
      [=](MachineInstrBuilder &MIB) { MIB.addImm(AMDGPU::CPol::GLC); } // cpol
    }};
}

/// Get an immediate that must fit in 32 bits and is treated as zero-extended.
static Optional<uint64_t> getConstantZext32Val(Register Reg,
                                               const MachineRegisterInfo &MRI) {
  // getIConstantVRegSExtVal sign-extends the value, so make sure it fits in
  // 32 bits before taking the low half as the zero-extended immediate.
  Optional<int64_t> OffsetVal = getIConstantVRegSExtVal(Reg, MRI);
  if (!OffsetVal || !isInt<32>(*OffsetVal))
    return None;
  return Lo_32(*OffsetVal);
}
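
// Worked example: a G_CONSTANT of -4 reads back as the sign-extended int64_t
// -4, which passes isInt<32>; Lo_32 then yields 0xfffffffc, the 32-bit
// pattern that is treated as zero-extended by the consumer.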

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
  Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
  if (!OffsetVal)
    return {};

  Optional<int64_t> EncodedImm =
      AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
  if (!EncodedImm)
    return {};

  return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
  assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);

  Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
  if (!OffsetVal)
    return {};

  Optional<int64_t> EncodedImm =
      AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
  if (!EncodedImm)
    return {};

  return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } }};
}

void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
}

void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
}

void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(OpIdx == -1);

  const MachineOperand &Op = MI.getOperand(1);
  if (MI.getOpcode() == TargetOpcode::G_FCONSTANT) {
    MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
  } else {
    assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
    MIB.addImm(Op.getCImm()->getSExtValue());
  }
}
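
// For example: a G_FCONSTANT holding float 1.0 renders as the immediate
// 0x3f800000 (its IEEE-754 bit pattern), while a G_CONSTANT renders its
// sign-extended integer value unchanged.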

void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
}
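
// For example: a G_CONSTANT of 0b1011 (decimal 11) renders as the immediate
// 3, its number of set bits.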

/// This only really exists to satisfy the DAG type-checking machinery, so it
/// is a no-op here.
void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  MIB.addImm(MI.getOperand(OpIdx).getImm());
}

void AMDGPUInstructionSelector::renderExtractCPol(MachineInstrBuilder &MIB,
                                                  const MachineInstr &MI,
                                                  int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
  MIB.addImm(MI.getOperand(OpIdx).getImm() & AMDGPU::CPol::ALL);
}

void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
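  // The swizzle bit sits at bit 3 of the combined cache-policy immediate;
  // shift it down and mask to a single bit.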
  MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1);
}

void AMDGPUInstructionSelector::renderSetGLC(MachineInstrBuilder &MIB,
                                             const MachineInstr &MI,
                                             int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
  MIB.addImm(MI.getOperand(OpIdx).getImm() | AMDGPU::CPol::GLC);
}

void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  MIB.addFrameIndex(MI.getOperand(1).getIndex());
}

bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
  return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
}

bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
  return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
}

bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
  return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
}

bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
  return TII.isInlineConstant(Imm);
}