1 //===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the InstructionSelector class for
10 /// AMDGPU.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13 
14 #include "AMDGPUInstructionSelector.h"
15 #include "AMDGPUInstrInfo.h"
16 #include "AMDGPUGlobalISelUtils.h"
17 #include "AMDGPURegisterBankInfo.h"
18 #include "AMDGPUSubtarget.h"
19 #include "AMDGPUTargetMachine.h"
20 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
21 #include "SIMachineFunctionInfo.h"
22 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
23 #include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
24 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
25 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
26 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
27 #include "llvm/CodeGen/GlobalISel/Utils.h"
28 #include "llvm/CodeGen/MachineBasicBlock.h"
29 #include "llvm/CodeGen/MachineFunction.h"
30 #include "llvm/CodeGen/MachineInstr.h"
31 #include "llvm/CodeGen/MachineInstrBuilder.h"
32 #include "llvm/CodeGen/MachineRegisterInfo.h"
33 #include "llvm/IR/Type.h"
34 #include "llvm/Support/Debug.h"
35 #include "llvm/Support/raw_ostream.h"
36 
37 #define DEBUG_TYPE "amdgpu-isel"
38 
39 using namespace llvm;
40 using namespace MIPatternMatch;
41 
42 static cl::opt<bool> AllowRiskySelect(
43   "amdgpu-global-isel-risky-select",
44   cl::desc("Allow GlobalISel to select cases that are likely to not work yet"),
45   cl::init(false),
46   cl::ReallyHidden);
47 
48 #define GET_GLOBALISEL_IMPL
49 #define AMDGPUSubtarget GCNSubtarget
50 #include "AMDGPUGenGlobalISel.inc"
51 #undef GET_GLOBALISEL_IMPL
52 #undef AMDGPUSubtarget
53 
54 AMDGPUInstructionSelector::AMDGPUInstructionSelector(
55     const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
56     const AMDGPUTargetMachine &TM)
57     : InstructionSelector(), TII(*STI.getInstrInfo()),
58       TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
59       STI(STI),
60       EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
61 #define GET_GLOBALISEL_PREDICATES_INIT
62 #include "AMDGPUGenGlobalISel.inc"
63 #undef GET_GLOBALISEL_PREDICATES_INIT
64 #define GET_GLOBALISEL_TEMPORARIES_INIT
65 #include "AMDGPUGenGlobalISel.inc"
66 #undef GET_GLOBALISEL_TEMPORARIES_INIT
67 {
68 }
69 
70 const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
71 
72 void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits &KB,
73                                         CodeGenCoverage &CoverageInfo) {
74   MRI = &MF.getRegInfo();
75   Subtarget = &MF.getSubtarget<GCNSubtarget>();
76   InstructionSelector::setupMF(MF, KB, CoverageInfo);
77 }
78 
79 bool AMDGPUInstructionSelector::isVCC(Register Reg,
80                                       const MachineRegisterInfo &MRI) const {
81   // The verifier is oblivious to s1 being a valid value for wavesize registers.
82   if (Reg.isPhysical())
83     return false;
84 
85   auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
86   const TargetRegisterClass *RC =
87       RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
88   if (RC) {
89     const LLT Ty = MRI.getType(Reg);
90     return RC->hasSuperClassEq(TRI.getBoolRC()) &&
91            Ty.isValid() && Ty.getSizeInBits() == 1;
92   }
93 
94   const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
95   return RB->getID() == AMDGPU::VCCRegBankID;
96 }
97 
98 bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
99                                                         unsigned NewOpc) const {
100   MI.setDesc(TII.get(NewOpc));
101   MI.RemoveOperand(1); // Remove intrinsic ID.
102   MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
103 
104   MachineOperand &Dst = MI.getOperand(0);
105   MachineOperand &Src = MI.getOperand(1);
106 
107   // TODO: This should be legalized to s32 if needed
108   if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
109     return false;
110 
111   const TargetRegisterClass *DstRC
112     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
113   const TargetRegisterClass *SrcRC
114     = TRI.getConstrainedRegClassForOperand(Src, *MRI);
115   if (!DstRC || DstRC != SrcRC)
116     return false;
117 
118   return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
119          RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
120 }
121 
122 bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
123   const DebugLoc &DL = I.getDebugLoc();
124   MachineBasicBlock *BB = I.getParent();
125   I.setDesc(TII.get(TargetOpcode::COPY));
126 
127   const MachineOperand &Src = I.getOperand(1);
128   MachineOperand &Dst = I.getOperand(0);
129   Register DstReg = Dst.getReg();
130   Register SrcReg = Src.getReg();
131 
132   if (isVCC(DstReg, *MRI)) {
133     if (SrcReg == AMDGPU::SCC) {
134       const TargetRegisterClass *RC
135         = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
136       if (!RC)
137         return true;
138       return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
139     }
140 
141     if (!isVCC(SrcReg, *MRI)) {
142       // TODO: Should probably leave the copy and let copyPhysReg expand it.
143       if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
144         return false;
145 
146       const TargetRegisterClass *SrcRC
147         = TRI.getConstrainedRegClassForOperand(Src, *MRI);
148 
149       Register MaskedReg = MRI->createVirtualRegister(SrcRC);
150 
151       // We can't trust the high bits at this point, so clear them.
152 
153       // TODO: Skip masking high bits if def is known boolean.
154 
155       unsigned AndOpc = TRI.isSGPRClass(SrcRC) ?
156         AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
157       BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
158         .addImm(1)
159         .addReg(SrcReg);
160       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
161         .addImm(0)
162         .addReg(MaskedReg);
163 
164       if (!MRI->getRegClassOrNull(SrcReg))
165         MRI->setRegClass(SrcReg, SrcRC);
166       I.eraseFromParent();
167       return true;
168     }
169 
170     const TargetRegisterClass *RC =
171       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
172     if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
173       return false;
174 
175     return true;
176   }
177 
178   for (const MachineOperand &MO : I.operands()) {
179     if (MO.getReg().isPhysical())
180       continue;
181 
182     const TargetRegisterClass *RC =
183             TRI.getConstrainedRegClassForOperand(MO, *MRI);
184     if (!RC)
185       continue;
186     RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
187   }
188   return true;
189 }
190 
191 bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
192   const Register DefReg = I.getOperand(0).getReg();
193   const LLT DefTy = MRI->getType(DefReg);
194   if (DefTy == LLT::scalar(1)) {
195     if (!AllowRiskySelect) {
196       LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n");
197       return false;
198     }
199 
200     LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n");
201   }
202 
203   // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)
204 
205   const RegClassOrRegBank &RegClassOrBank =
206     MRI->getRegClassOrRegBank(DefReg);
207 
208   const TargetRegisterClass *DefRC
209     = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
210   if (!DefRC) {
211     if (!DefTy.isValid()) {
212       LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
213       return false;
214     }
215 
216     const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
217     DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB, *MRI);
218     if (!DefRC) {
219       LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
220       return false;
221     }
222   }
223 
224   // TODO: Verify that all registers have the same bank
225   I.setDesc(TII.get(TargetOpcode::PHI));
226   return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
227 }
228 
229 MachineOperand
230 AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
231                                            const TargetRegisterClass &SubRC,
232                                            unsigned SubIdx) const {
233 
234   MachineInstr *MI = MO.getParent();
235   MachineBasicBlock *BB = MO.getParent()->getParent();
236   Register DstReg = MRI->createVirtualRegister(&SubRC);
237 
238   if (MO.isReg()) {
239     unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
240     Register Reg = MO.getReg();
241     BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
242             .addReg(Reg, 0, ComposedSubIdx);
243 
244     return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
245                                      MO.isKill(), MO.isDead(), MO.isUndef(),
246                                      MO.isEarlyClobber(), 0, MO.isDebug(),
247                                      MO.isInternalRead());
248   }
249 
250   assert(MO.isImm());
251 
252   APInt Imm(64, MO.getImm());
253 
254   switch (SubIdx) {
255   default:
256     llvm_unreachable("do not know to split immediate with this sub index.");
257   case AMDGPU::sub0:
258     return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
259   case AMDGPU::sub1:
260     return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
261   }
262 }
263 
264 static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
265   switch (Opc) {
266   case AMDGPU::G_AND:
267     return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
268   case AMDGPU::G_OR:
269     return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
270   case AMDGPU::G_XOR:
271     return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
272   default:
273     llvm_unreachable("not a bit op");
274   }
275 }
276 
277 bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
278   Register DstReg = I.getOperand(0).getReg();
279   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
280 
281   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
282   if (DstRB->getID() != AMDGPU::SGPRRegBankID &&
283       DstRB->getID() != AMDGPU::VCCRegBankID)
284     return false;
285 
286   bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID &&
287                             STI.isWave64());
288   I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64)));
289 
290   // Dead implicit-def of scc
291   I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
292                                          true, // isImp
293                                          false, // isKill
294                                          true)); // isDead
295   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
296 }
297 
298 bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
299   MachineBasicBlock *BB = I.getParent();
300   MachineFunction *MF = BB->getParent();
301   Register DstReg = I.getOperand(0).getReg();
302   const DebugLoc &DL = I.getDebugLoc();
303   LLT Ty = MRI->getType(DstReg);
304   if (Ty.isVector())
305     return false;
306 
307   unsigned Size = Ty.getSizeInBits();
308   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
309   const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
310   const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;
311 
312   if (Size == 32) {
313     if (IsSALU) {
314       const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
315       MachineInstr *Add =
316         BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
317         .add(I.getOperand(1))
318         .add(I.getOperand(2));
319       I.eraseFromParent();
320       return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
321     }
322 
323     if (STI.hasAddNoCarry()) {
324       const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
325       I.setDesc(TII.get(Opc));
326       I.addOperand(*MF, MachineOperand::CreateImm(0));
327       I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
328       return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
329     }
330 
331     const unsigned Opc = Sub ? AMDGPU::V_SUB_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e64;
332 
333     Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
334     MachineInstr *Add
335       = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
336       .addDef(UnusedCarry, RegState::Dead)
337       .add(I.getOperand(1))
338       .add(I.getOperand(2))
339       .addImm(0);
340     I.eraseFromParent();
341     return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
342   }
343 
344   assert(!Sub && "illegal sub should not reach here");
345 
346   const TargetRegisterClass &RC
347     = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
348   const TargetRegisterClass &HalfRC
349     = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;
350 
351   MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
352   MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
353   MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
354   MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));
355 
356   Register DstLo = MRI->createVirtualRegister(&HalfRC);
357   Register DstHi = MRI->createVirtualRegister(&HalfRC);
358 
359   if (IsSALU) {
360     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
361       .add(Lo1)
362       .add(Lo2);
363     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
364       .add(Hi1)
365       .add(Hi2);
366   } else {
367     const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
368     Register CarryReg = MRI->createVirtualRegister(CarryRC);
369     BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo)
370       .addDef(CarryReg)
371       .add(Lo1)
372       .add(Lo2)
373       .addImm(0);
374     MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
375       .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
376       .add(Hi1)
377       .add(Hi2)
378       .addReg(CarryReg, RegState::Kill)
379       .addImm(0);
380 
381     if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
382       return false;
383   }
384 
385   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
386     .addReg(DstLo)
387     .addImm(AMDGPU::sub0)
388     .addReg(DstHi)
389     .addImm(AMDGPU::sub1);
390 
391 
392   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
393     return false;
394 
395   I.eraseFromParent();
396   return true;
397 }
398 
399 bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
400   MachineInstr &I) const {
401   MachineBasicBlock *BB = I.getParent();
402   MachineFunction *MF = BB->getParent();
403   const DebugLoc &DL = I.getDebugLoc();
404   Register Dst0Reg = I.getOperand(0).getReg();
405   Register Dst1Reg = I.getOperand(1).getReg();
406   const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
407                      I.getOpcode() == AMDGPU::G_UADDE;
408   const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
409                           I.getOpcode() == AMDGPU::G_USUBE;
410 
411   if (isVCC(Dst1Reg, *MRI)) {
412     unsigned NoCarryOpc =
413         IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
414     unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
415     I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
416     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
417     I.addOperand(*MF, MachineOperand::CreateImm(0));
418     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
419   }
420 
421   Register Src0Reg = I.getOperand(2).getReg();
422   Register Src1Reg = I.getOperand(3).getReg();
423 
424   if (HasCarryIn) {
425     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
426       .addReg(I.getOperand(4).getReg());
427   }
428 
429   unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
430   unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
431 
432   BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
433     .add(I.getOperand(2))
434     .add(I.getOperand(3));
435   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
436     .addReg(AMDGPU::SCC);
437 
438   if (!MRI->getRegClassOrNull(Dst1Reg))
439     MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);
440 
441   if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
442       !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
443       !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
444     return false;
445 
446   if (HasCarryIn &&
447       !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
448                                     AMDGPU::SReg_32RegClass, *MRI))
449     return false;
450 
451   I.eraseFromParent();
452   return true;
453 }
454 
455 // TODO: We should probably legalize these to only using 32-bit results.
456 bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
457   MachineBasicBlock *BB = I.getParent();
458   Register DstReg = I.getOperand(0).getReg();
459   Register SrcReg = I.getOperand(1).getReg();
460   LLT DstTy = MRI->getType(DstReg);
461   LLT SrcTy = MRI->getType(SrcReg);
462   const unsigned SrcSize = SrcTy.getSizeInBits();
463   unsigned DstSize = DstTy.getSizeInBits();
464 
465   // TODO: Should handle any multiple of 32 offset.
466   unsigned Offset = I.getOperand(2).getImm();
467   if (Offset % 32 != 0 || DstSize > 128)
468     return false;
469 
470   // 16-bit operations really use 32-bit registers.
471   // FIXME: Probably should not allow 16-bit G_EXTRACT results.
472   if (DstSize == 16)
473     DstSize = 32;
474 
475   const TargetRegisterClass *DstRC =
476     TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
477   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
478     return false;
479 
480   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
481   const TargetRegisterClass *SrcRC =
482     TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
483   if (!SrcRC)
484     return false;
485   unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
486                                                          DstSize / 32);
487   SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
488   if (!SrcRC)
489     return false;
490 
491   SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
492                                     *SrcRC, I.getOperand(1));
493   const DebugLoc &DL = I.getDebugLoc();
494   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
495     .addReg(SrcReg, 0, SubReg);
496 
497   I.eraseFromParent();
498   return true;
499 }
500 
501 bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
502   MachineBasicBlock *BB = MI.getParent();
503   Register DstReg = MI.getOperand(0).getReg();
504   LLT DstTy = MRI->getType(DstReg);
505   LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());
506 
507   const unsigned SrcSize = SrcTy.getSizeInBits();
508   if (SrcSize < 32)
509     return selectImpl(MI, *CoverageInfo);
510 
511   const DebugLoc &DL = MI.getDebugLoc();
512   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
513   const unsigned DstSize = DstTy.getSizeInBits();
514   const TargetRegisterClass *DstRC =
515     TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
516   if (!DstRC)
517     return false;
518 
519   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
520   MachineInstrBuilder MIB =
521     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
522   for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
523     MachineOperand &Src = MI.getOperand(I + 1);
524     MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
525     MIB.addImm(SubRegs[I]);
526 
527     const TargetRegisterClass *SrcRC
528       = TRI.getConstrainedRegClassForOperand(Src, *MRI);
529     if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
530       return false;
531   }
532 
533   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
534     return false;
535 
536   MI.eraseFromParent();
537   return true;
538 }
539 
540 bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
541   MachineBasicBlock *BB = MI.getParent();
542   const int NumDst = MI.getNumOperands() - 1;
543 
544   MachineOperand &Src = MI.getOperand(NumDst);
545 
546   Register SrcReg = Src.getReg();
547   Register DstReg0 = MI.getOperand(0).getReg();
548   LLT DstTy = MRI->getType(DstReg0);
549   LLT SrcTy = MRI->getType(SrcReg);
550 
551   const unsigned DstSize = DstTy.getSizeInBits();
552   const unsigned SrcSize = SrcTy.getSizeInBits();
553   const DebugLoc &DL = MI.getDebugLoc();
554   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
555 
556   const TargetRegisterClass *SrcRC =
557     TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
558   if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
559     return false;
560 
561   // Note we could have mixed SGPR and VGPR destination banks for an SGPR
562   // source, and this relies on the fact that the same subregister indices are
563   // used for both.
564   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
565   for (int I = 0, E = NumDst; I != E; ++I) {
566     MachineOperand &Dst = MI.getOperand(I);
567     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
568       .addReg(SrcReg, 0, SubRegs[I]);
569 
570     // Make sure the subregister index is valid for the source register.
571     SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]);
572     if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
573       return false;
574 
575     const TargetRegisterClass *DstRC =
576       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
577     if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
578       return false;
579   }
580 
581   MI.eraseFromParent();
582   return true;
583 }
584 
585 bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC(
586   MachineInstr &MI) const {
587   if (selectImpl(MI, *CoverageInfo))
588     return true;
589 
590   const LLT S32 = LLT::scalar(32);
591   const LLT V2S16 = LLT::vector(2, 16);
592 
593   Register Dst = MI.getOperand(0).getReg();
594   if (MRI->getType(Dst) != V2S16)
595     return false;
596 
597   const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
598   if (DstBank->getID() != AMDGPU::SGPRRegBankID)
599     return false;
600 
601   Register Src0 = MI.getOperand(1).getReg();
602   Register Src1 = MI.getOperand(2).getReg();
603   if (MRI->getType(Src0) != S32)
604     return false;
605 
606   const DebugLoc &DL = MI.getDebugLoc();
607   MachineBasicBlock *BB = MI.getParent();
608 
609   auto ConstSrc1 = getConstantVRegValWithLookThrough(Src1, *MRI, true, true);
610   if (ConstSrc1) {
611     auto ConstSrc0 = getConstantVRegValWithLookThrough(Src0, *MRI, true, true);
612     if (ConstSrc0) {
613       uint32_t Lo16 = static_cast<uint32_t>(ConstSrc0->Value) & 0xffff;
614       uint32_t Hi16 = static_cast<uint32_t>(ConstSrc1->Value) & 0xffff;
615 
616       BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst)
617         .addImm(Lo16 | (Hi16 << 16));
618       MI.eraseFromParent();
619       return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI);
620     }
621   }
622 
623   // TODO: This should probably be a combine somewhere
624   // (build_vector_trunc $src0, undef -> copy $src0
625   MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
626   if (Src1Def && Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
627     MI.setDesc(TII.get(AMDGPU::COPY));
628     MI.RemoveOperand(2);
629     return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI) &&
630            RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI);
631   }
632 
633   Register ShiftSrc0;
634   Register ShiftSrc1;
635   int64_t ShiftAmt;
636 
637   // With multiple uses of the shift, this will duplicate the shift and
638   // increase register pressure.
639   //
640   // (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16)
641   //  => (S_PACK_HH_B32_B16 $src0, $src1)
642   // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16))
643   //  => (S_PACK_LH_B32_B16 $src0, $src1)
644   // (build_vector_trunc $src0, $src1)
645   //  => (S_PACK_LL_B32_B16 $src0, $src1)
646 
647   // FIXME: This is an inconvenient way to check a specific value
648   bool Shift0 = mi_match(
649     Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_ICst(ShiftAmt)))) &&
650     ShiftAmt == 16;
651 
652   bool Shift1 = mi_match(
653     Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_ICst(ShiftAmt)))) &&
654     ShiftAmt == 16;
655 
656   unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
657   if (Shift0 && Shift1) {
658     Opc = AMDGPU::S_PACK_HH_B32_B16;
659     MI.getOperand(1).setReg(ShiftSrc0);
660     MI.getOperand(2).setReg(ShiftSrc1);
661   } else if (Shift1) {
662     Opc = AMDGPU::S_PACK_LH_B32_B16;
663     MI.getOperand(2).setReg(ShiftSrc1);
664   } else if (Shift0 && ConstSrc1 && ConstSrc1->Value == 0) {
665     // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16
666     auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)
667       .addReg(ShiftSrc0)
668       .addImm(16);
669 
670     MI.eraseFromParent();
671     return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
672   }
673 
674   MI.setDesc(TII.get(Opc));
675   return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
676 }
677 
678 bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
679   return selectG_ADD_SUB(I);
680 }
681 
682 bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
683   const MachineOperand &MO = I.getOperand(0);
684 
685   // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
686   // regbank check here is to know why getConstrainedRegClassForOperand failed.
687   const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
688   if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
689       (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
690     I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
691     return true;
692   }
693 
694   return false;
695 }
696 
697 bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
698   MachineBasicBlock *BB = I.getParent();
699 
700   Register DstReg = I.getOperand(0).getReg();
701   Register Src0Reg = I.getOperand(1).getReg();
702   Register Src1Reg = I.getOperand(2).getReg();
703   LLT Src1Ty = MRI->getType(Src1Reg);
704 
705   unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
706   unsigned InsSize = Src1Ty.getSizeInBits();
707 
708   int64_t Offset = I.getOperand(3).getImm();
709 
710   // FIXME: These cases should have been illegal and unnecessary to check here.
711   if (Offset % 32 != 0 || InsSize % 32 != 0)
712     return false;
713 
714   // Currently not handled by getSubRegFromChannel.
715   if (InsSize > 128)
716     return false;
717 
718   unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
719   if (SubReg == AMDGPU::NoSubRegister)
720     return false;
721 
722   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
723   const TargetRegisterClass *DstRC =
724     TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
725   if (!DstRC)
726     return false;
727 
728   const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
729   const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
730   const TargetRegisterClass *Src0RC =
731     TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank, *MRI);
732   const TargetRegisterClass *Src1RC =
733     TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank, *MRI);
734 
735   // Deal with weird cases where the class only partially supports the subreg
736   // index.
737   Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
738   if (!Src0RC || !Src1RC)
739     return false;
740 
741   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
742       !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
743       !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
744     return false;
745 
746   const DebugLoc &DL = I.getDebugLoc();
747   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
748     .addReg(Src0Reg)
749     .addReg(Src1Reg)
750     .addImm(SubReg);
751 
752   I.eraseFromParent();
753   return true;
754 }
755 
756 bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
757   if (STI.getLDSBankCount() != 16)
758     return selectImpl(MI, *CoverageInfo);
759 
760   Register Dst = MI.getOperand(0).getReg();
761   Register Src0 = MI.getOperand(2).getReg();
762   Register M0Val = MI.getOperand(6).getReg();
763   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
764       !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
765       !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
766     return false;
767 
768   // This requires 2 instructions. It is possible to write a pattern to support
769   // this, but the generated isel emitter doesn't correctly deal with multiple
770   // output instructions using the same physical register input. The copy to m0
771   // is incorrectly placed before the second instruction.
772   //
773   // TODO: Match source modifiers.
774 
775   Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
776   const DebugLoc &DL = MI.getDebugLoc();
777   MachineBasicBlock *MBB = MI.getParent();
778 
779   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
780     .addReg(M0Val);
781   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
782     .addImm(2)
783     .addImm(MI.getOperand(4).getImm())  // $attr
784     .addImm(MI.getOperand(3).getImm()); // $attrchan
785 
786   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
787     .addImm(0)                          // $src0_modifiers
788     .addReg(Src0)                       // $src0
789     .addImm(MI.getOperand(4).getImm())  // $attr
790     .addImm(MI.getOperand(3).getImm())  // $attrchan
791     .addImm(0)                          // $src2_modifiers
792     .addReg(InterpMov)                  // $src2 - 2 f16 values selected by high
793     .addImm(MI.getOperand(5).getImm())  // $high
794     .addImm(0)                          // $clamp
795     .addImm(0);                         // $omod
796 
797   MI.eraseFromParent();
798   return true;
799 }
800 
801 // Writelane is special in that it can use SGPR and M0 (which would normally
802 // count as using the constant bus twice - but in this case it is allowed since
803 // the lane selector doesn't count as a use of the constant bus). However, it is
804 // still required to abide by the 1 SGPR rule. Fix this up if we might have
805 // multiple SGPRs.
806 bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const {
807   // With a constant bus limit of at least 2, there's no issue.
808   if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1)
809     return selectImpl(MI, *CoverageInfo);
810 
811   MachineBasicBlock *MBB = MI.getParent();
812   const DebugLoc &DL = MI.getDebugLoc();
813   Register VDst = MI.getOperand(0).getReg();
814   Register Val = MI.getOperand(2).getReg();
815   Register LaneSelect = MI.getOperand(3).getReg();
816   Register VDstIn = MI.getOperand(4).getReg();
817 
818   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst);
819 
820   Optional<ValueAndVReg> ConstSelect =
821     getConstantVRegValWithLookThrough(LaneSelect, *MRI, true, true);
822   if (ConstSelect) {
823     // The selector has to be an inline immediate, so we can use whatever for
824     // the other operands.
825     MIB.addReg(Val);
826     MIB.addImm(ConstSelect->Value &
827                maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
828   } else {
829     Optional<ValueAndVReg> ConstVal =
830       getConstantVRegValWithLookThrough(Val, *MRI, true, true);
831 
832     // If the value written is an inline immediate, we can get away without a
833     // copy to m0.
834     if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value,
835                                                  STI.hasInv2PiInlineImm())) {
836       MIB.addImm(ConstVal->Value);
837       MIB.addReg(LaneSelect);
838     } else {
839       MIB.addReg(Val);
840 
841       // If the lane selector was originally in a VGPR and copied with
842       // readfirstlane, there's a hazard to read the same SGPR from the
843       // VALU. Constrain to a different SGPR to help avoid needing a nop later.
844       RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI);
845 
846       BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
847         .addReg(LaneSelect);
848       MIB.addReg(AMDGPU::M0);
849     }
850   }
851 
852   MIB.addReg(VDstIn);
853 
854   MI.eraseFromParent();
855   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
856 }
857 
858 // We need to handle this here because tablegen doesn't support matching
859 // instructions with multiple outputs.
860 bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
861   Register Dst0 = MI.getOperand(0).getReg();
862   Register Dst1 = MI.getOperand(1).getReg();
863 
864   LLT Ty = MRI->getType(Dst0);
865   unsigned Opc;
866   if (Ty == LLT::scalar(32))
867     Opc = AMDGPU::V_DIV_SCALE_F32;
868   else if (Ty == LLT::scalar(64))
869     Opc = AMDGPU::V_DIV_SCALE_F64;
870   else
871     return false;
872 
873   const DebugLoc &DL = MI.getDebugLoc();
874   MachineBasicBlock *MBB = MI.getParent();
875 
876   Register Numer = MI.getOperand(3).getReg();
877   Register Denom = MI.getOperand(4).getReg();
878   unsigned ChooseDenom = MI.getOperand(5).getImm();
879 
880   Register Src0 = ChooseDenom != 0 ? Numer : Denom;
881 
882   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0)
883     .addDef(Dst1)
884     .addUse(Src0)
885     .addUse(Denom)
886     .addUse(Numer);
887 
888   MI.eraseFromParent();
889   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
890 }
891 
892 bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
893   unsigned IntrinsicID = I.getIntrinsicID();
894   switch (IntrinsicID) {
895   case Intrinsic::amdgcn_if_break: {
896     MachineBasicBlock *BB = I.getParent();
897 
898     // FIXME: Manually selecting to avoid dealiing with the SReg_1 trick
899     // SelectionDAG uses for wave32 vs wave64.
900     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
901       .add(I.getOperand(0))
902       .add(I.getOperand(2))
903       .add(I.getOperand(3));
904 
905     Register DstReg = I.getOperand(0).getReg();
906     Register Src0Reg = I.getOperand(2).getReg();
907     Register Src1Reg = I.getOperand(3).getReg();
908 
909     I.eraseFromParent();
910 
911     for (Register Reg : { DstReg, Src0Reg, Src1Reg })
912       MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
913 
914     return true;
915   }
916   case Intrinsic::amdgcn_interp_p1_f16:
917     return selectInterpP1F16(I);
918   case Intrinsic::amdgcn_wqm:
919     return constrainCopyLikeIntrin(I, AMDGPU::WQM);
920   case Intrinsic::amdgcn_softwqm:
921     return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
922   case Intrinsic::amdgcn_wwm:
923     return constrainCopyLikeIntrin(I, AMDGPU::WWM);
924   case Intrinsic::amdgcn_writelane:
925     return selectWritelane(I);
926   case Intrinsic::amdgcn_div_scale:
927     return selectDivScale(I);
928   case Intrinsic::amdgcn_icmp:
929     return selectIntrinsicIcmp(I);
930   case Intrinsic::amdgcn_ballot:
931     return selectBallot(I);
932   case Intrinsic::amdgcn_reloc_constant:
933     return selectRelocConstant(I);
934   case Intrinsic::amdgcn_groupstaticsize:
935     return selectGroupStaticSize(I);
936   case Intrinsic::returnaddress:
937     return selectReturnAddress(I);
938   default:
939     return selectImpl(I, *CoverageInfo);
940   }
941 }
942 
943 static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
944   if (Size != 32 && Size != 64)
945     return -1;
946   switch (P) {
947   default:
948     llvm_unreachable("Unknown condition code!");
949   case CmpInst::ICMP_NE:
950     return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
951   case CmpInst::ICMP_EQ:
952     return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
953   case CmpInst::ICMP_SGT:
954     return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
955   case CmpInst::ICMP_SGE:
956     return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
957   case CmpInst::ICMP_SLT:
958     return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
959   case CmpInst::ICMP_SLE:
960     return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
961   case CmpInst::ICMP_UGT:
962     return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
963   case CmpInst::ICMP_UGE:
964     return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
965   case CmpInst::ICMP_ULT:
966     return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
967   case CmpInst::ICMP_ULE:
968     return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
969   }
970 }
971 
972 int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
973                                               unsigned Size) const {
974   if (Size == 64) {
975     if (!STI.hasScalarCompareEq64())
976       return -1;
977 
978     switch (P) {
979     case CmpInst::ICMP_NE:
980       return AMDGPU::S_CMP_LG_U64;
981     case CmpInst::ICMP_EQ:
982       return AMDGPU::S_CMP_EQ_U64;
983     default:
984       return -1;
985     }
986   }
987 
988   if (Size != 32)
989     return -1;
990 
991   switch (P) {
992   case CmpInst::ICMP_NE:
993     return AMDGPU::S_CMP_LG_U32;
994   case CmpInst::ICMP_EQ:
995     return AMDGPU::S_CMP_EQ_U32;
996   case CmpInst::ICMP_SGT:
997     return AMDGPU::S_CMP_GT_I32;
998   case CmpInst::ICMP_SGE:
999     return AMDGPU::S_CMP_GE_I32;
1000   case CmpInst::ICMP_SLT:
1001     return AMDGPU::S_CMP_LT_I32;
1002   case CmpInst::ICMP_SLE:
1003     return AMDGPU::S_CMP_LE_I32;
1004   case CmpInst::ICMP_UGT:
1005     return AMDGPU::S_CMP_GT_U32;
1006   case CmpInst::ICMP_UGE:
1007     return AMDGPU::S_CMP_GE_U32;
1008   case CmpInst::ICMP_ULT:
1009     return AMDGPU::S_CMP_LT_U32;
1010   case CmpInst::ICMP_ULE:
1011     return AMDGPU::S_CMP_LE_U32;
1012   default:
1013     llvm_unreachable("Unknown condition code!");
1014   }
1015 }
1016 
1017 bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
1018   MachineBasicBlock *BB = I.getParent();
1019   const DebugLoc &DL = I.getDebugLoc();
1020 
1021   Register SrcReg = I.getOperand(2).getReg();
1022   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1023 
1024   auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
1025 
1026   Register CCReg = I.getOperand(0).getReg();
1027   if (!isVCC(CCReg, *MRI)) {
1028     int Opcode = getS_CMPOpcode(Pred, Size);
1029     if (Opcode == -1)
1030       return false;
1031     MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
1032             .add(I.getOperand(2))
1033             .add(I.getOperand(3));
1034     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
1035       .addReg(AMDGPU::SCC);
1036     bool Ret =
1037         constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
1038         RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
1039     I.eraseFromParent();
1040     return Ret;
1041   }
1042 
1043   int Opcode = getV_CMPOpcode(Pred, Size);
1044   if (Opcode == -1)
1045     return false;
1046 
1047   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
1048             I.getOperand(0).getReg())
1049             .add(I.getOperand(2))
1050             .add(I.getOperand(3));
1051   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
1052                                *TRI.getBoolRC(), *MRI);
1053   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1054   I.eraseFromParent();
1055   return Ret;
1056 }
1057 
1058 bool AMDGPUInstructionSelector::selectIntrinsicIcmp(MachineInstr &I) const {
1059   Register Dst = I.getOperand(0).getReg();
1060   if (isVCC(Dst, *MRI))
1061     return false;
1062 
1063   if (MRI->getType(Dst).getSizeInBits() != STI.getWavefrontSize())
1064     return false;
1065 
1066   MachineBasicBlock *BB = I.getParent();
1067   const DebugLoc &DL = I.getDebugLoc();
1068   Register SrcReg = I.getOperand(2).getReg();
1069   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1070   auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm());
1071 
1072   int Opcode = getV_CMPOpcode(Pred, Size);
1073   if (Opcode == -1)
1074     return false;
1075 
1076   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst)
1077                            .add(I.getOperand(2))
1078                            .add(I.getOperand(3));
1079   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), *TRI.getBoolRC(),
1080                                *MRI);
1081   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1082   I.eraseFromParent();
1083   return Ret;
1084 }
1085 
1086 bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
1087   MachineBasicBlock *BB = I.getParent();
1088   const DebugLoc &DL = I.getDebugLoc();
1089   Register DstReg = I.getOperand(0).getReg();
1090   const unsigned Size = MRI->getType(DstReg).getSizeInBits();
1091   const bool Is64 = Size == 64;
1092 
1093   if (Size != STI.getWavefrontSize())
1094     return false;
1095 
1096   Optional<ValueAndVReg> Arg =
1097       getConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI, true);
1098 
1099   if (Arg.hasValue()) {
1100     const int64_t Value = Arg.getValue().Value;
1101     if (Value == 0) {
1102       unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
1103       BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
1104     } else if (Value == -1) { // all ones
1105       Register SrcReg = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
1106       BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1107     } else
1108       return false;
1109   } else {
1110     Register SrcReg = I.getOperand(2).getReg();
1111     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1112   }
1113 
1114   I.eraseFromParent();
1115   return true;
1116 }
1117 
1118 bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const {
1119   Register DstReg = I.getOperand(0).getReg();
1120   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
1121   const TargetRegisterClass *DstRC =
1122     TRI.getRegClassForSizeOnBank(32, *DstBank, *MRI);
1123   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
1124     return false;
1125 
1126   const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID;
1127 
1128   Module *M = MF->getFunction().getParent();
1129   const MDNode *Metadata = I.getOperand(2).getMetadata();
1130   auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString();
1131   auto RelocSymbol = cast<GlobalVariable>(
1132     M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext())));
1133 
1134   MachineBasicBlock *BB = I.getParent();
1135   BuildMI(*BB, &I, I.getDebugLoc(),
1136           TII.get(IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg)
1137     .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO);
1138 
1139   I.eraseFromParent();
1140   return true;
1141 }
1142 
1143 bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const {
1144   Triple::OSType OS = MF->getTarget().getTargetTriple().getOS();
1145 
1146   Register DstReg = I.getOperand(0).getReg();
1147   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1148   unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ?
1149     AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1150 
1151   MachineBasicBlock *MBB = I.getParent();
1152   const DebugLoc &DL = I.getDebugLoc();
1153 
1154   auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg);
1155 
1156   if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) {
1157     const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1158     MIB.addImm(MFI->getLDSSize());
1159   } else {
1160     Module *M = MF->getFunction().getParent();
1161     const GlobalValue *GV
1162       = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize);
1163     MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO);
1164   }
1165 
1166   I.eraseFromParent();
1167   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1168 }
1169 
1170 bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const {
1171   MachineBasicBlock *MBB = I.getParent();
1172   MachineFunction &MF = *MBB->getParent();
1173   const DebugLoc &DL = I.getDebugLoc();
1174 
1175   MachineOperand &Dst = I.getOperand(0);
1176   Register DstReg = Dst.getReg();
1177   unsigned Depth = I.getOperand(2).getImm();
1178 
1179   const TargetRegisterClass *RC
1180     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
1181   if (!RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) ||
1182       !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
1183     return false;
1184 
1185   // Check for kernel and shader functions
1186   if (Depth != 0 ||
1187       MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
1188     BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
1189       .addImm(0);
1190     I.eraseFromParent();
1191     return true;
1192   }
1193 
1194   MachineFrameInfo &MFI = MF.getFrameInfo();
1195   // There is a call to @llvm.returnaddress in this function
1196   MFI.setReturnAddressIsTaken(true);
1197 
1198   // Get the return address reg and mark it as an implicit live-in
1199   Register ReturnAddrReg = TRI.getReturnAddressReg(MF);
1200   Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg,
1201                                              AMDGPU::SReg_64RegClass);
1202   BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
1203     .addReg(LiveIn);
1204   I.eraseFromParent();
1205   return true;
1206 }
1207 
1208 bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
1209   // FIXME: Manually selecting to avoid dealiing with the SReg_1 trick
1210   // SelectionDAG uses for wave32 vs wave64.
1211   MachineBasicBlock *BB = MI.getParent();
1212   BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
1213       .add(MI.getOperand(1));
1214 
1215   Register Reg = MI.getOperand(1).getReg();
1216   MI.eraseFromParent();
1217 
1218   if (!MRI->getRegClassOrNull(Reg))
1219     MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
1220   return true;
1221 }
1222 
1223 bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
1224   MachineInstr &MI, Intrinsic::ID IntrID) const {
1225   MachineBasicBlock *MBB = MI.getParent();
1226   MachineFunction *MF = MBB->getParent();
1227   const DebugLoc &DL = MI.getDebugLoc();
1228 
1229   unsigned IndexOperand = MI.getOperand(7).getImm();
1230   bool WaveRelease = MI.getOperand(8).getImm() != 0;
1231   bool WaveDone = MI.getOperand(9).getImm() != 0;
1232 
1233   if (WaveDone && !WaveRelease)
1234     report_fatal_error("ds_ordered_count: wave_done requires wave_release");
1235 
1236   unsigned OrderedCountIndex = IndexOperand & 0x3f;
1237   IndexOperand &= ~0x3f;
1238   unsigned CountDw = 0;
1239 
1240   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
1241     CountDw = (IndexOperand >> 24) & 0xf;
1242     IndexOperand &= ~(0xf << 24);
1243 
1244     if (CountDw < 1 || CountDw > 4) {
1245       report_fatal_error(
1246         "ds_ordered_count: dword count must be between 1 and 4");
1247     }
1248   }
1249 
1250   if (IndexOperand)
1251     report_fatal_error("ds_ordered_count: bad index operand");
1252 
1253   unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
1254   unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF);
1255 
1256   unsigned Offset0 = OrderedCountIndex << 2;
1257   unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
1258                      (Instruction << 4);
1259 
1260   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
1261     Offset1 |= (CountDw - 1) << 6;
1262 
1263   unsigned Offset = Offset0 | (Offset1 << 8);
1264 
1265   Register M0Val = MI.getOperand(2).getReg();
1266   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1267     .addReg(M0Val);
1268 
1269   Register DstReg = MI.getOperand(0).getReg();
1270   Register ValReg = MI.getOperand(3).getReg();
1271   MachineInstrBuilder DS =
1272     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
1273       .addReg(ValReg)
1274       .addImm(Offset)
1275       .cloneMemRefs(MI);
1276 
1277   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
1278     return false;
1279 
1280   bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
1281   MI.eraseFromParent();
1282   return Ret;
1283 }
1284 
1285 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
1286   switch (IntrID) {
1287   case Intrinsic::amdgcn_ds_gws_init:
1288     return AMDGPU::DS_GWS_INIT;
1289   case Intrinsic::amdgcn_ds_gws_barrier:
1290     return AMDGPU::DS_GWS_BARRIER;
1291   case Intrinsic::amdgcn_ds_gws_sema_v:
1292     return AMDGPU::DS_GWS_SEMA_V;
1293   case Intrinsic::amdgcn_ds_gws_sema_br:
1294     return AMDGPU::DS_GWS_SEMA_BR;
1295   case Intrinsic::amdgcn_ds_gws_sema_p:
1296     return AMDGPU::DS_GWS_SEMA_P;
1297   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1298     return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
1299   default:
1300     llvm_unreachable("not a gws intrinsic");
1301   }
1302 }
1303 
1304 bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
1305                                                      Intrinsic::ID IID) const {
1306   if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
1307       !STI.hasGWSSemaReleaseAll())
1308     return false;
1309 
1310   // intrinsic ID, vsrc, offset
1311   const bool HasVSrc = MI.getNumOperands() == 3;
1312   assert(HasVSrc || MI.getNumOperands() == 2);
1313 
1314   Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
1315   const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
1316   if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
1317     return false;
1318 
1319   MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1320   assert(OffsetDef);
1321 
1322   unsigned ImmOffset;
1323 
1324   MachineBasicBlock *MBB = MI.getParent();
1325   const DebugLoc &DL = MI.getDebugLoc();
1326 
1327   MachineInstr *Readfirstlane = nullptr;
1328 
1329   // If we legalized the VGPR input, strip out the readfirstlane to analyze the
1330   // incoming offset, in case there's an add of a constant. We'll have to put it
1331   // back later.
1332   if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
1333     Readfirstlane = OffsetDef;
1334     BaseOffset = OffsetDef->getOperand(1).getReg();
1335     OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1336   }
1337 
1338   if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
1339     // If we have a constant offset, try to use the 0 in m0 as the base.
1340     // TODO: Look into changing the default m0 initialization value. If the
1341     // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
1342     // the immediate offset.
1343 
1344     ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
1345     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1346       .addImm(0);
1347   } else {
1348     std::tie(BaseOffset, ImmOffset, OffsetDef)
1349       = AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset);
1350 
1351     if (Readfirstlane) {
1352       // We have the constant offset now, so put the readfirstlane back on the
1353       // variable component.
1354       if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
1355         return false;
1356 
1357       Readfirstlane->getOperand(1).setReg(BaseOffset);
1358       BaseOffset = Readfirstlane->getOperand(0).getReg();
1359     } else {
1360       if (!RBI.constrainGenericRegister(BaseOffset,
1361                                         AMDGPU::SReg_32RegClass, *MRI))
1362         return false;
1363     }
1364 
1365     Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1366     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
1367       .addReg(BaseOffset)
1368       .addImm(16);
1369 
1370     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1371       .addReg(M0Base);
1372   }
1373 
1374   // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
1375   // offset field) % 64. Some versions of the programming guide omit the m0
1376   // part, or claim it's from offset 0.
1377   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));
1378 
1379   if (HasVSrc) {
1380     Register VSrc = MI.getOperand(1).getReg();
1381     MIB.addReg(VSrc);
1382     if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
1383       return false;
1384   }
1385 
1386   MIB.addImm(ImmOffset)
1387      .addImm(-1) // $gds
1388      .cloneMemRefs(MI);
1389 
1390   MI.eraseFromParent();
1391   return true;
1392 }
1393 
1394 bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
1395                                                       bool IsAppend) const {
1396   Register PtrBase = MI.getOperand(2).getReg();
1397   LLT PtrTy = MRI->getType(PtrBase);
1398   bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
1399 
1400   unsigned Offset;
1401   std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));
1402 
1403   // TODO: Should this try to look through readfirstlane like GWS?
1404   if (!isDSOffsetLegal(PtrBase, Offset, 16)) {
1405     PtrBase = MI.getOperand(2).getReg();
1406     Offset = 0;
1407   }
1408 
1409   MachineBasicBlock *MBB = MI.getParent();
1410   const DebugLoc &DL = MI.getDebugLoc();
1411   const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
1412 
1413   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1414     .addReg(PtrBase);
1415   if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI))
1416     return false;
1417 
1418   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
1419     .addImm(Offset)
1420     .addImm(IsGDS ? -1 : 0)
1421     .cloneMemRefs(MI);
1422   MI.eraseFromParent();
1423   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1424 }
1425 
1426 bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const {
1427   if (TM.getOptLevel() > CodeGenOpt::None) {
1428     unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second;
1429     if (WGSize <= STI.getWavefrontSize()) {
1430       MachineBasicBlock *MBB = MI.getParent();
1431       const DebugLoc &DL = MI.getDebugLoc();
1432       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER));
1433       MI.eraseFromParent();
1434       return true;
1435     }
1436   }
1437   return selectImpl(MI, *CoverageInfo);
1438 }
1439 
1440 static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
1441                          bool &IsTexFail) {
1442   if (TexFailCtrl)
1443     IsTexFail = true;
1444 
1445   TFE = (TexFailCtrl & 0x1) ? 1 : 0;
1446   TexFailCtrl &= ~(uint64_t)0x1;
1447   LWE = (TexFailCtrl & 0x2) ? 1 : 0;
1448   TexFailCtrl &= ~(uint64_t)0x2;
1449 
1450   return TexFailCtrl == 0;
1451 }
1452 
1453 static bool parseCachePolicy(uint64_t Value,
1454                              bool *GLC, bool *SLC, bool *DLC) {
1455   if (GLC) {
1456     *GLC = (Value & 0x1) ? 1 : 0;
1457     Value &= ~(uint64_t)0x1;
1458   }
1459   if (SLC) {
1460     *SLC = (Value & 0x2) ? 1 : 0;
1461     Value &= ~(uint64_t)0x2;
1462   }
1463   if (DLC) {
1464     *DLC = (Value & 0x4) ? 1 : 0;
1465     Value &= ~(uint64_t)0x4;
1466   }
1467 
1468   return Value == 0;
1469 }
1470 
1471 bool AMDGPUInstructionSelector::selectImageIntrinsic(
1472   MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
1473   MachineBasicBlock *MBB = MI.getParent();
1474   const DebugLoc &DL = MI.getDebugLoc();
1475 
1476   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
1477     AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
1478 
1479   const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
1480   const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
1481       AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
1482   const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo =
1483       AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode);
1484   unsigned IntrOpcode = Intr->BaseOpcode;
1485   const bool IsGFX10 = STI.getGeneration() >= AMDGPUSubtarget::GFX10;
1486 
1487   const int VAddrIdx = getImageVAddrIdxBegin(BaseOpcode,
1488                                              MI.getNumExplicitDefs());
1489   int NumVAddr, NumGradients;
1490   std::tie(NumVAddr, NumGradients) = getImageNumVAddr(Intr, BaseOpcode);
1491 
1492   Register VDataIn, VDataOut;
1493   LLT VDataTy;
1494   int NumVDataDwords = -1;
1495   bool IsD16 = false;
1496 
1497   // XXX - Can we just get the second to last argument for ctrl?
1498   unsigned CtrlIdx; // Index of texfailctrl argument
1499   bool Unorm;
1500   if (!BaseOpcode->Sampler) {
1501     Unorm = true;
1502     CtrlIdx = VAddrIdx + NumVAddr + 1;
1503   } else {
1504     Unorm = MI.getOperand(VAddrIdx + NumVAddr + 2).getImm() != 0;
1505     CtrlIdx = VAddrIdx + NumVAddr + 3;
1506   }
1507 
1508   bool TFE;
1509   bool LWE;
1510   bool IsTexFail = false;
1511   if (!parseTexFail(MI.getOperand(CtrlIdx).getImm(), TFE, LWE, IsTexFail))
1512     return false;
1513 
1514   const int Flags = MI.getOperand(CtrlIdx + 2).getImm();
1515   const bool IsA16 = (Flags & 1) != 0;
1516   const bool IsG16 = (Flags & 2) != 0;
1517 
1518   // A16 implies 16 bit gradients
1519   if (IsA16 && !IsG16)
1520     return false;
1521 
1522   unsigned DMask = 0;
1523   unsigned DMaskLanes = 0;
1524 
1525   if (BaseOpcode->Atomic) {
1526     VDataOut = MI.getOperand(0).getReg();
1527     VDataIn = MI.getOperand(2).getReg();
1528     LLT Ty = MRI->getType(VDataIn);
1529 
1530     // Be careful to allow atomic swap on 16-bit element vectors.
1531     const bool Is64Bit = BaseOpcode->AtomicX2 ?
1532       Ty.getSizeInBits() == 128 :
1533       Ty.getSizeInBits() == 64;
1534 
1535     if (BaseOpcode->AtomicX2) {
1536       assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister);
1537 
1538       DMask = Is64Bit ? 0xf : 0x3;
1539       NumVDataDwords = Is64Bit ? 4 : 2;
1540     } else {
1541       DMask = Is64Bit ? 0x3 : 0x1;
1542       NumVDataDwords = Is64Bit ? 2 : 1;
1543     }
1544   } else {
1545     const int DMaskIdx = 2; // Input/output + intrinsic ID.
1546 
1547     DMask = MI.getOperand(DMaskIdx).getImm();
1548     DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
1549 
1550     if (BaseOpcode->Store) {
1551       VDataIn = MI.getOperand(1).getReg();
1552       VDataTy = MRI->getType(VDataIn);
1553       NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
1554     } else {
1555       VDataOut = MI.getOperand(0).getReg();
1556       VDataTy = MRI->getType(VDataOut);
1557       NumVDataDwords = DMaskLanes;
1558 
1559       // One memoperand is mandatory, except for getresinfo.
1560       // FIXME: Check this in verifier.
1561       if (!MI.memoperands_empty()) {
1562         const MachineMemOperand *MMO = *MI.memoperands_begin();
1563 
1564         // Infer d16 from the memory size, as the register type will be mangled by
1565         // unpacked subtargets, or by TFE.
1566         IsD16 = ((8 * MMO->getSize()) / DMaskLanes) < 32;
1567 
1568         if (IsD16 && !STI.hasUnpackedD16VMem())
1569           NumVDataDwords = (DMaskLanes + 1) / 2;
1570       }
1571     }
1572   }
1573 
1574   // Optimize _L to _LZ when _L is zero
1575   if (LZMappingInfo) {
1576     // The legalizer replaced the register with an immediate 0 if we need to
1577     // change the opcode.
1578     const MachineOperand &Lod = MI.getOperand(VAddrIdx + NumVAddr - 1);
1579     if (Lod.isImm()) {
1580       assert(Lod.getImm() == 0);
1581       IntrOpcode = LZMappingInfo->LZ;  // set new opcode to _lz variant of _l
1582     }
1583   }
1584 
1585   // Optimize _mip away, when 'lod' is zero
1586   if (MIPMappingInfo) {
1587     const MachineOperand &Lod = MI.getOperand(VAddrIdx + NumVAddr - 1);
1588     if (Lod.isImm()) {
1589       assert(Lod.getImm() == 0);
1590       IntrOpcode = MIPMappingInfo->NONMIP;  // set new opcode to variant without _mip
1591     }
1592   }
1593 
1594   // Set G16 opcode
1595   if (IsG16 && !IsA16) {
1596     const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
1597         AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode);
1598     assert(G16MappingInfo);
1599     IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16
1600   }
1601 
1602   // TODO: Check this in verifier.
1603   assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");
1604 
1605   bool GLC = false;
1606   bool SLC = false;
1607   bool DLC = false;
1608   if (BaseOpcode->Atomic) {
1609     GLC = true; // TODO no-return optimization
1610     if (!parseCachePolicy(MI.getOperand(CtrlIdx + 1).getImm(), nullptr, &SLC,
1611                           IsGFX10 ? &DLC : nullptr))
1612       return false;
1613   } else {
1614     if (!parseCachePolicy(MI.getOperand(CtrlIdx + 1).getImm(), &GLC, &SLC,
1615                           IsGFX10 ? &DLC : nullptr))
1616       return false;
1617   }
1618 
1619   int NumVAddrRegs = 0;
1620   int NumVAddrDwords = 0;
1621   for (int I = 0; I < NumVAddr; ++I) {
1622     // Skip the $noregs and 0s inserted during legalization.
1623     MachineOperand &AddrOp = MI.getOperand(VAddrIdx + I);
1624     if (!AddrOp.isReg())
1625       continue; // XXX - Break?
1626 
1627     Register Addr = AddrOp.getReg();
1628     if (!Addr)
1629       break;
1630 
1631     ++NumVAddrRegs;
1632     NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32;
1633   }
1634 
1635   // The legalizer preprocessed the intrinsic arguments. If we aren't using
1636   // NSA, these should have beeen packed into a single value in the first
1637   // address register
1638   const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs;
1639   if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) {
1640     LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n");
1641     return false;
1642   }
1643 
1644   if (IsTexFail)
1645     ++NumVDataDwords;
1646 
1647   int Opcode = -1;
1648   if (IsGFX10) {
1649     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1650                                    UseNSA ? AMDGPU::MIMGEncGfx10NSA
1651                                           : AMDGPU::MIMGEncGfx10Default,
1652                                    NumVDataDwords, NumVAddrDwords);
1653   } else {
1654     if (STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
1655       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
1656                                      NumVDataDwords, NumVAddrDwords);
1657     if (Opcode == -1)
1658       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
1659                                      NumVDataDwords, NumVAddrDwords);
1660   }
1661   assert(Opcode != -1);
1662 
1663   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode))
1664     .cloneMemRefs(MI);
1665 
1666   if (VDataOut) {
1667     if (BaseOpcode->AtomicX2) {
1668       const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64;
1669 
1670       Register TmpReg = MRI->createVirtualRegister(
1671         Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
1672       unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
1673 
1674       MIB.addDef(TmpReg);
1675       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut)
1676         .addReg(TmpReg, RegState::Kill, SubReg);
1677 
1678     } else {
1679       MIB.addDef(VDataOut); // vdata output
1680     }
1681   }
1682 
1683   if (VDataIn)
1684     MIB.addReg(VDataIn); // vdata input
1685 
1686   for (int i = 0; i != NumVAddrRegs; ++i) {
1687     MachineOperand &SrcOp = MI.getOperand(VAddrIdx + i);
1688     if (SrcOp.isReg()) {
1689       assert(SrcOp.getReg() != 0);
1690       MIB.addReg(SrcOp.getReg());
1691     }
1692   }
1693 
1694   MIB.addReg(MI.getOperand(VAddrIdx + NumVAddr).getReg()); // rsrc
1695   if (BaseOpcode->Sampler)
1696     MIB.addReg(MI.getOperand(VAddrIdx + NumVAddr + 1).getReg()); // sampler
1697 
1698   MIB.addImm(DMask); // dmask
1699 
1700   if (IsGFX10)
1701     MIB.addImm(DimInfo->Encoding);
1702   MIB.addImm(Unorm);
1703   if (IsGFX10)
1704     MIB.addImm(DLC);
1705 
1706   MIB.addImm(GLC);
1707   MIB.addImm(SLC);
1708   MIB.addImm(IsA16 &&  // a16 or r128
1709              STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0);
1710   if (IsGFX10)
1711     MIB.addImm(IsA16 ? -1 : 0);
1712 
1713   MIB.addImm(TFE); // tfe
1714   MIB.addImm(LWE); // lwe
1715   if (!IsGFX10)
1716     MIB.addImm(DimInfo->DA ? -1 : 0);
1717   if (BaseOpcode->HasD16)
1718     MIB.addImm(IsD16 ? -1 : 0);
1719 
1720   MI.eraseFromParent();
1721   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1722 }
1723 
1724 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
1725     MachineInstr &I) const {
1726   unsigned IntrinsicID = I.getIntrinsicID();
1727   switch (IntrinsicID) {
1728   case Intrinsic::amdgcn_end_cf:
1729     return selectEndCfIntrinsic(I);
1730   case Intrinsic::amdgcn_ds_ordered_add:
1731   case Intrinsic::amdgcn_ds_ordered_swap:
1732     return selectDSOrderedIntrinsic(I, IntrinsicID);
1733   case Intrinsic::amdgcn_ds_gws_init:
1734   case Intrinsic::amdgcn_ds_gws_barrier:
1735   case Intrinsic::amdgcn_ds_gws_sema_v:
1736   case Intrinsic::amdgcn_ds_gws_sema_br:
1737   case Intrinsic::amdgcn_ds_gws_sema_p:
1738   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1739     return selectDSGWSIntrinsic(I, IntrinsicID);
1740   case Intrinsic::amdgcn_ds_append:
1741     return selectDSAppendConsume(I, true);
1742   case Intrinsic::amdgcn_ds_consume:
1743     return selectDSAppendConsume(I, false);
1744   case Intrinsic::amdgcn_s_barrier:
1745     return selectSBarrier(I);
1746   default: {
1747     return selectImpl(I, *CoverageInfo);
1748   }
1749   }
1750 }
1751 
1752 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
1753   if (selectImpl(I, *CoverageInfo))
1754     return true;
1755 
1756   MachineBasicBlock *BB = I.getParent();
1757   const DebugLoc &DL = I.getDebugLoc();
1758 
1759   Register DstReg = I.getOperand(0).getReg();
1760   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
1761   assert(Size <= 32 || Size == 64);
1762   const MachineOperand &CCOp = I.getOperand(1);
1763   Register CCReg = CCOp.getReg();
1764   if (!isVCC(CCReg, *MRI)) {
1765     unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
1766                                          AMDGPU::S_CSELECT_B32;
1767     MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
1768             .addReg(CCReg);
1769 
1770     // The generic constrainSelectedInstRegOperands doesn't work for the scc register
1771     // bank, because it does not cover the register class that we used to represent
1772     // for it.  So we need to manually set the register class here.
1773     if (!MRI->getRegClassOrNull(CCReg))
1774         MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
1775     MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
1776             .add(I.getOperand(2))
1777             .add(I.getOperand(3));
1778 
1779     bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI) |
1780                constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
1781     I.eraseFromParent();
1782     return Ret;
1783   }
1784 
1785   // Wide VGPR select should have been split in RegBankSelect.
1786   if (Size > 32)
1787     return false;
1788 
1789   MachineInstr *Select =
1790       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1791               .addImm(0)
1792               .add(I.getOperand(3))
1793               .addImm(0)
1794               .add(I.getOperand(2))
1795               .add(I.getOperand(1));
1796 
1797   bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1798   I.eraseFromParent();
1799   return Ret;
1800 }
1801 
1802 static int sizeToSubRegIndex(unsigned Size) {
1803   switch (Size) {
1804   case 32:
1805     return AMDGPU::sub0;
1806   case 64:
1807     return AMDGPU::sub0_sub1;
1808   case 96:
1809     return AMDGPU::sub0_sub1_sub2;
1810   case 128:
1811     return AMDGPU::sub0_sub1_sub2_sub3;
1812   case 256:
1813     return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
1814   default:
1815     if (Size < 32)
1816       return AMDGPU::sub0;
1817     if (Size > 256)
1818       return -1;
1819     return sizeToSubRegIndex(PowerOf2Ceil(Size));
1820   }
1821 }
1822 
1823 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
1824   Register DstReg = I.getOperand(0).getReg();
1825   Register SrcReg = I.getOperand(1).getReg();
1826   const LLT DstTy = MRI->getType(DstReg);
1827   const LLT SrcTy = MRI->getType(SrcReg);
1828   const LLT S1 = LLT::scalar(1);
1829 
1830   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1831   const RegisterBank *DstRB;
1832   if (DstTy == S1) {
1833     // This is a special case. We don't treat s1 for legalization artifacts as
1834     // vcc booleans.
1835     DstRB = SrcRB;
1836   } else {
1837     DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1838     if (SrcRB != DstRB)
1839       return false;
1840   }
1841 
1842   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
1843 
1844   unsigned DstSize = DstTy.getSizeInBits();
1845   unsigned SrcSize = SrcTy.getSizeInBits();
1846 
1847   const TargetRegisterClass *SrcRC
1848     = TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB, *MRI);
1849   const TargetRegisterClass *DstRC
1850     = TRI.getRegClassForSizeOnBank(DstSize, *DstRB, *MRI);
1851   if (!SrcRC || !DstRC)
1852     return false;
1853 
1854   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1855       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1856     LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
1857     return false;
1858   }
1859 
1860   if (DstTy == LLT::vector(2, 16) && SrcTy == LLT::vector(2, 32)) {
1861     MachineBasicBlock *MBB = I.getParent();
1862     const DebugLoc &DL = I.getDebugLoc();
1863 
1864     Register LoReg = MRI->createVirtualRegister(DstRC);
1865     Register HiReg = MRI->createVirtualRegister(DstRC);
1866     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg)
1867       .addReg(SrcReg, 0, AMDGPU::sub0);
1868     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg)
1869       .addReg(SrcReg, 0, AMDGPU::sub1);
1870 
1871     if (IsVALU && STI.hasSDWA()) {
1872       // Write the low 16-bits of the high element into the high 16-bits of the
1873       // low element.
1874       MachineInstr *MovSDWA =
1875         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
1876         .addImm(0)                             // $src0_modifiers
1877         .addReg(HiReg)                         // $src0
1878         .addImm(0)                             // $clamp
1879         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
1880         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
1881         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
1882         .addReg(LoReg, RegState::Implicit);
1883       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
1884     } else {
1885       Register TmpReg0 = MRI->createVirtualRegister(DstRC);
1886       Register TmpReg1 = MRI->createVirtualRegister(DstRC);
1887       Register ImmReg = MRI->createVirtualRegister(DstRC);
1888       if (IsVALU) {
1889         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0)
1890           .addImm(16)
1891           .addReg(HiReg);
1892       } else {
1893         BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0)
1894           .addReg(HiReg)
1895           .addImm(16);
1896       }
1897 
1898       unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1899       unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
1900       unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32;
1901 
1902       BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg)
1903         .addImm(0xffff);
1904       BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1)
1905         .addReg(LoReg)
1906         .addReg(ImmReg);
1907       BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg)
1908         .addReg(TmpReg0)
1909         .addReg(TmpReg1);
1910     }
1911 
1912     I.eraseFromParent();
1913     return true;
1914   }
1915 
1916   if (!DstTy.isScalar())
1917     return false;
1918 
1919   if (SrcSize > 32) {
1920     int SubRegIdx = sizeToSubRegIndex(DstSize);
1921     if (SubRegIdx == -1)
1922       return false;
1923 
1924     // Deal with weird cases where the class only partially supports the subreg
1925     // index.
1926     const TargetRegisterClass *SrcWithSubRC
1927       = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
1928     if (!SrcWithSubRC)
1929       return false;
1930 
1931     if (SrcWithSubRC != SrcRC) {
1932       if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI))
1933         return false;
1934     }
1935 
1936     I.getOperand(1).setSubReg(SubRegIdx);
1937   }
1938 
1939   I.setDesc(TII.get(TargetOpcode::COPY));
1940   return true;
1941 }
1942 
1943 /// \returns true if a bitmask for \p Size bits will be an inline immediate.
1944 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
1945   Mask = maskTrailingOnes<unsigned>(Size);
1946   int SignedMask = static_cast<int>(Mask);
1947   return SignedMask >= -16 && SignedMask <= 64;
1948 }
1949 
1950 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
1951 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
1952   Register Reg, const MachineRegisterInfo &MRI,
1953   const TargetRegisterInfo &TRI) const {
1954   const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
1955   if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
1956     return RB;
1957 
1958   // Ignore the type, since we don't use vcc in artifacts.
1959   if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
1960     return &RBI.getRegBankFromRegClass(*RC, LLT());
1961   return nullptr;
1962 }
1963 
1964 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
1965   bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG;
1966   bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg;
1967   const DebugLoc &DL = I.getDebugLoc();
1968   MachineBasicBlock &MBB = *I.getParent();
1969   const Register DstReg = I.getOperand(0).getReg();
1970   const Register SrcReg = I.getOperand(1).getReg();
1971 
1972   const LLT DstTy = MRI->getType(DstReg);
1973   const LLT SrcTy = MRI->getType(SrcReg);
1974   const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ?
1975     I.getOperand(2).getImm() : SrcTy.getSizeInBits();
1976   const unsigned DstSize = DstTy.getSizeInBits();
1977   if (!DstTy.isScalar())
1978     return false;
1979 
1980   // Artifact casts should never use vcc.
1981   const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
1982 
1983   // FIXME: This should probably be illegal and split earlier.
1984   if (I.getOpcode() == AMDGPU::G_ANYEXT) {
1985     if (DstSize <= 32)
1986       return selectCOPY(I);
1987 
1988     const TargetRegisterClass *SrcRC =
1989         TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank, *MRI);
1990     const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
1991     const TargetRegisterClass *DstRC =
1992         TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
1993 
1994     Register UndefReg = MRI->createVirtualRegister(SrcRC);
1995     BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
1996     BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
1997       .addReg(SrcReg)
1998       .addImm(AMDGPU::sub0)
1999       .addReg(UndefReg)
2000       .addImm(AMDGPU::sub1);
2001     I.eraseFromParent();
2002 
2003     return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) &&
2004            RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI);
2005   }
2006 
2007   if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
2008     // 64-bit should have been split up in RegBankSelect
2009 
2010     // Try to use an and with a mask if it will save code size.
2011     unsigned Mask;
2012     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2013       MachineInstr *ExtI =
2014       BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
2015         .addImm(Mask)
2016         .addReg(SrcReg);
2017       I.eraseFromParent();
2018       return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2019     }
2020 
2021     const unsigned BFE = Signed ? AMDGPU::V_BFE_I32 : AMDGPU::V_BFE_U32;
2022     MachineInstr *ExtI =
2023       BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
2024       .addReg(SrcReg)
2025       .addImm(0) // Offset
2026       .addImm(SrcSize); // Width
2027     I.eraseFromParent();
2028     return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2029   }
2030 
2031   if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
2032     const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ?
2033       AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass;
2034     if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI))
2035       return false;
2036 
2037     if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
2038       const unsigned SextOpc = SrcSize == 8 ?
2039         AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
2040       BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
2041         .addReg(SrcReg);
2042       I.eraseFromParent();
2043       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2044     }
2045 
2046     const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
2047     const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
2048 
2049     // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16]= width.
2050     if (DstSize > 32 && (SrcSize <= 32 || InReg)) {
2051       // We need a 64-bit register source, but the high bits don't matter.
2052       Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
2053       Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2054       unsigned SubReg = InReg ? AMDGPU::sub0 : 0;
2055 
2056       BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2057       BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
2058         .addReg(SrcReg, 0, SubReg)
2059         .addImm(AMDGPU::sub0)
2060         .addReg(UndefReg)
2061         .addImm(AMDGPU::sub1);
2062 
2063       BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
2064         .addReg(ExtReg)
2065         .addImm(SrcSize << 16);
2066 
2067       I.eraseFromParent();
2068       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
2069     }
2070 
2071     unsigned Mask;
2072     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2073       BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
2074         .addReg(SrcReg)
2075         .addImm(Mask);
2076     } else {
2077       BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
2078         .addReg(SrcReg)
2079         .addImm(SrcSize << 16);
2080     }
2081 
2082     I.eraseFromParent();
2083     return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2084   }
2085 
2086   return false;
2087 }
2088 
2089 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
2090   MachineBasicBlock *BB = I.getParent();
2091   MachineOperand &ImmOp = I.getOperand(1);
2092   Register DstReg = I.getOperand(0).getReg();
2093   unsigned Size = MRI->getType(DstReg).getSizeInBits();
2094 
2095   // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
2096   if (ImmOp.isFPImm()) {
2097     const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
2098     ImmOp.ChangeToImmediate(Imm.getZExtValue());
2099   } else if (ImmOp.isCImm()) {
2100     ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue());
2101   } else {
2102     llvm_unreachable("Not supported by g_constants");
2103   }
2104 
2105   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2106   const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID;
2107 
2108   unsigned Opcode;
2109   if (DstRB->getID() == AMDGPU::VCCRegBankID) {
2110     Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
2111   } else {
2112     Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
2113 
2114     // We should never produce s1 values on banks other than VCC. If the user of
2115     // this already constrained the register, we may incorrectly think it's VCC
2116     // if it wasn't originally.
2117     if (Size == 1)
2118       return false;
2119   }
2120 
2121   if (Size != 64) {
2122     I.setDesc(TII.get(Opcode));
2123     I.addImplicitDefUseOperands(*MF);
2124     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2125   }
2126 
2127   const DebugLoc &DL = I.getDebugLoc();
2128 
2129   APInt Imm(Size, I.getOperand(1).getImm());
2130 
2131   MachineInstr *ResInst;
2132   if (IsSgpr && TII.isInlineConstant(Imm)) {
2133     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
2134       .addImm(I.getOperand(1).getImm());
2135   } else {
2136     const TargetRegisterClass *RC = IsSgpr ?
2137       &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
2138     Register LoReg = MRI->createVirtualRegister(RC);
2139     Register HiReg = MRI->createVirtualRegister(RC);
2140 
2141     BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
2142       .addImm(Imm.trunc(32).getZExtValue());
2143 
2144     BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
2145       .addImm(Imm.ashr(32).getZExtValue());
2146 
2147     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2148       .addReg(LoReg)
2149       .addImm(AMDGPU::sub0)
2150       .addReg(HiReg)
2151       .addImm(AMDGPU::sub1);
2152   }
2153 
2154   // We can't call constrainSelectedInstRegOperands here, because it doesn't
2155   // work for target independent opcodes
2156   I.eraseFromParent();
2157   const TargetRegisterClass *DstRC =
2158     TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
2159   if (!DstRC)
2160     return true;
2161   return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
2162 }
2163 
2164 bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
2165   // Only manually handle the f64 SGPR case.
2166   //
2167   // FIXME: This is a workaround for 2.5 different tablegen problems. Because
2168   // the bit ops theoretically have a second result due to the implicit def of
2169   // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing
2170   // that is easy by disabling the check. The result works, but uses a
2171   // nonsensical sreg32orlds_and_sreg_1 regclass.
2172   //
2173   // The DAG emitter is more problematic, and incorrectly adds both S_XOR_B32 to
2174   // the variadic REG_SEQUENCE operands.
2175 
2176   Register Dst = MI.getOperand(0).getReg();
2177   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2178   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2179       MRI->getType(Dst) != LLT::scalar(64))
2180     return false;
2181 
2182   Register Src = MI.getOperand(1).getReg();
2183   MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI);
2184   if (Fabs)
2185     Src = Fabs->getOperand(1).getReg();
2186 
2187   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2188       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2189     return false;
2190 
2191   MachineBasicBlock *BB = MI.getParent();
2192   const DebugLoc &DL = MI.getDebugLoc();
2193   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2194   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2195   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2196   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2197 
2198   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2199     .addReg(Src, 0, AMDGPU::sub0);
2200   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2201     .addReg(Src, 0, AMDGPU::sub1);
2202   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2203     .addImm(0x80000000);
2204 
2205   // Set or toggle sign bit.
2206   unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
2207   BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
2208     .addReg(HiReg)
2209     .addReg(ConstReg);
2210   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2211     .addReg(LoReg)
2212     .addImm(AMDGPU::sub0)
2213     .addReg(OpReg)
2214     .addImm(AMDGPU::sub1);
2215   MI.eraseFromParent();
2216   return true;
2217 }
2218 
2219 // FIXME: This is a workaround for the same tablegen problems as G_FNEG
2220 bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {
2221   Register Dst = MI.getOperand(0).getReg();
2222   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2223   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2224       MRI->getType(Dst) != LLT::scalar(64))
2225     return false;
2226 
2227   Register Src = MI.getOperand(1).getReg();
2228   MachineBasicBlock *BB = MI.getParent();
2229   const DebugLoc &DL = MI.getDebugLoc();
2230   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2231   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2232   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2233   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2234 
2235   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2236       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2237     return false;
2238 
2239   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2240     .addReg(Src, 0, AMDGPU::sub0);
2241   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2242     .addReg(Src, 0, AMDGPU::sub1);
2243   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2244     .addImm(0x7fffffff);
2245 
2246   // Clear sign bit.
2247   // TODO: Should this used S_BITSET0_*?
2248   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg)
2249     .addReg(HiReg)
2250     .addReg(ConstReg);
2251   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2252     .addReg(LoReg)
2253     .addImm(AMDGPU::sub0)
2254     .addReg(OpReg)
2255     .addImm(AMDGPU::sub1);
2256 
2257   MI.eraseFromParent();
2258   return true;
2259 }
2260 
2261 static bool isConstant(const MachineInstr &MI) {
2262   return MI.getOpcode() == TargetOpcode::G_CONSTANT;
2263 }
2264 
2265 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
2266     const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
2267 
2268   const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
2269 
2270   assert(PtrMI);
2271 
2272   if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
2273     return;
2274 
2275   GEPInfo GEPInfo(*PtrMI);
2276 
2277   for (unsigned i = 1; i != 3; ++i) {
2278     const MachineOperand &GEPOp = PtrMI->getOperand(i);
2279     const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
2280     assert(OpDef);
2281     if (i == 2 && isConstant(*OpDef)) {
2282       // TODO: Could handle constant base + variable offset, but a combine
2283       // probably should have commuted it.
2284       assert(GEPInfo.Imm == 0);
2285       GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
2286       continue;
2287     }
2288     const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
2289     if (OpBank->getID() == AMDGPU::SGPRRegBankID)
2290       GEPInfo.SgprParts.push_back(GEPOp.getReg());
2291     else
2292       GEPInfo.VgprParts.push_back(GEPOp.getReg());
2293   }
2294 
2295   AddrInfo.push_back(GEPInfo);
2296   getAddrModeInfo(*PtrMI, MRI, AddrInfo);
2297 }
2298 
2299 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
2300   if (!MI.hasOneMemOperand())
2301     return false;
2302 
2303   const MachineMemOperand *MMO = *MI.memoperands_begin();
2304   const Value *Ptr = MMO->getValue();
2305 
2306   // UndefValue means this is a load of a kernel input.  These are uniform.
2307   // Sometimes LDS instructions have constant pointers.
2308   // If Ptr is null, then that means this mem operand contains a
2309   // PseudoSourceValue like GOT.
2310   if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
2311       isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
2312     return true;
2313 
2314   if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
2315     return true;
2316 
2317   const Instruction *I = dyn_cast<Instruction>(Ptr);
2318   return I && I->getMetadata("amdgpu.uniform");
2319 }
2320 
2321 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
2322   for (const GEPInfo &GEPInfo : AddrInfo) {
2323     if (!GEPInfo.VgprParts.empty())
2324       return true;
2325   }
2326   return false;
2327 }
2328 
2329 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
2330   const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2331   unsigned AS = PtrTy.getAddressSpace();
2332   if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
2333       STI.ldsRequiresM0Init()) {
2334     MachineBasicBlock *BB = I.getParent();
2335 
2336     // If DS instructions require M0 initializtion, insert it before selecting.
2337     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2338       .addImm(-1);
2339   }
2340 }
2341 
2342 bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW(
2343   MachineInstr &I) const {
2344   initM0(I);
2345   return selectImpl(I, *CoverageInfo);
2346 }
2347 
2348 // TODO: No rtn optimization.
2349 bool AMDGPUInstructionSelector::selectG_AMDGPU_ATOMIC_CMPXCHG(
2350   MachineInstr &MI) const {
2351   Register PtrReg = MI.getOperand(1).getReg();
2352   const LLT PtrTy = MRI->getType(PtrReg);
2353   if (PtrTy.getAddressSpace() == AMDGPUAS::FLAT_ADDRESS ||
2354       STI.useFlatForGlobal())
2355     return selectImpl(MI, *CoverageInfo);
2356 
2357   Register DstReg = MI.getOperand(0).getReg();
2358   const LLT Ty = MRI->getType(DstReg);
2359   const bool Is64 = Ty.getSizeInBits() == 64;
2360   const unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
2361   Register TmpReg = MRI->createVirtualRegister(
2362     Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
2363 
2364   const DebugLoc &DL = MI.getDebugLoc();
2365   MachineBasicBlock *BB = MI.getParent();
2366 
2367   Register VAddr, RSrcReg, SOffset;
2368   int64_t Offset = 0;
2369 
2370   unsigned Opcode;
2371   if (selectMUBUFOffsetImpl(MI.getOperand(1), RSrcReg, SOffset, Offset)) {
2372     Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_OFFSET_RTN :
2373                              AMDGPU::BUFFER_ATOMIC_CMPSWAP_OFFSET_RTN;
2374   } else if (selectMUBUFAddr64Impl(MI.getOperand(1), VAddr,
2375                                    RSrcReg, SOffset, Offset)) {
2376     Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_ADDR64_RTN :
2377                     AMDGPU::BUFFER_ATOMIC_CMPSWAP_ADDR64_RTN;
2378   } else
2379     return selectImpl(MI, *CoverageInfo);
2380 
2381   auto MIB = BuildMI(*BB, &MI, DL, TII.get(Opcode), TmpReg)
2382     .addReg(MI.getOperand(2).getReg());
2383 
2384   if (VAddr)
2385     MIB.addReg(VAddr);
2386 
2387   MIB.addReg(RSrcReg);
2388   if (SOffset)
2389     MIB.addReg(SOffset);
2390   else
2391     MIB.addImm(0);
2392 
2393   MIB.addImm(Offset);
2394   MIB.addImm(0); // slc
2395   MIB.cloneMemRefs(MI);
2396 
2397   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), DstReg)
2398     .addReg(TmpReg, RegState::Kill, SubReg);
2399 
2400   MI.eraseFromParent();
2401 
2402   MRI->setRegClass(
2403     DstReg, Is64 ? &AMDGPU::VReg_64RegClass : &AMDGPU::VGPR_32RegClass);
2404   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2405 }
2406 
2407 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
2408   MachineBasicBlock *BB = I.getParent();
2409   MachineOperand &CondOp = I.getOperand(0);
2410   Register CondReg = CondOp.getReg();
2411   const DebugLoc &DL = I.getDebugLoc();
2412 
2413   unsigned BrOpcode;
2414   Register CondPhysReg;
2415   const TargetRegisterClass *ConstrainRC;
2416 
2417   // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
2418   // whether the branch is uniform when selecting the instruction. In
2419   // GlobalISel, we should push that decision into RegBankSelect. Assume for now
2420   // RegBankSelect knows what it's doing if the branch condition is scc, even
2421   // though it currently does not.
2422   if (!isVCC(CondReg, *MRI)) {
2423     if (MRI->getType(CondReg) != LLT::scalar(32))
2424       return false;
2425 
2426     CondPhysReg = AMDGPU::SCC;
2427     BrOpcode = AMDGPU::S_CBRANCH_SCC1;
2428     ConstrainRC = &AMDGPU::SReg_32RegClass;
2429   } else {
2430     // FIXME: Do we have to insert an and with exec here, like in SelectionDAG?
2431     // We sort of know that a VCC producer based on the register bank, that ands
2432     // inactive lanes with 0. What if there was a logical operation with vcc
2433     // producers in different blocks/with different exec masks?
2434     // FIXME: Should scc->vcc copies and with exec?
2435     CondPhysReg = TRI.getVCC();
2436     BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
2437     ConstrainRC = TRI.getBoolRC();
2438   }
2439 
2440   if (!MRI->getRegClassOrNull(CondReg))
2441     MRI->setRegClass(CondReg, ConstrainRC);
2442 
2443   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
2444     .addReg(CondReg);
2445   BuildMI(*BB, &I, DL, TII.get(BrOpcode))
2446     .addMBB(I.getOperand(1).getMBB());
2447 
2448   I.eraseFromParent();
2449   return true;
2450 }
2451 
2452 bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE(
2453   MachineInstr &I) const {
2454   Register DstReg = I.getOperand(0).getReg();
2455   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2456   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2457   I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
2458   if (IsVGPR)
2459     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
2460 
2461   return RBI.constrainGenericRegister(
2462     DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
2463 }
2464 
2465 bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
2466   Register DstReg = I.getOperand(0).getReg();
2467   Register SrcReg = I.getOperand(1).getReg();
2468   Register MaskReg = I.getOperand(2).getReg();
2469   LLT Ty = MRI->getType(DstReg);
2470   LLT MaskTy = MRI->getType(MaskReg);
2471 
2472   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2473   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2474   const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI);
2475   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2476   if (DstRB != SrcRB) // Should only happen for hand written MIR.
2477     return false;
2478 
2479   unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
2480   const TargetRegisterClass &RegRC
2481     = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2482 
2483   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB,
2484                                                                   *MRI);
2485   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB,
2486                                                                   *MRI);
2487   const TargetRegisterClass *MaskRC =
2488       TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB, *MRI);
2489 
2490   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2491       !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2492       !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI))
2493     return false;
2494 
2495   MachineBasicBlock *BB = I.getParent();
2496   const DebugLoc &DL = I.getDebugLoc();
2497   if (Ty.getSizeInBits() == 32) {
2498     assert(MaskTy.getSizeInBits() == 32 &&
2499            "ptrmask should have been narrowed during legalize");
2500 
2501     BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
2502       .addReg(SrcReg)
2503       .addReg(MaskReg);
2504     I.eraseFromParent();
2505     return true;
2506   }
2507 
2508   Register HiReg = MRI->createVirtualRegister(&RegRC);
2509   Register LoReg = MRI->createVirtualRegister(&RegRC);
2510 
2511   // Extract the subregisters from the source pointer.
2512   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
2513     .addReg(SrcReg, 0, AMDGPU::sub0);
2514   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
2515     .addReg(SrcReg, 0, AMDGPU::sub1);
2516 
2517   Register MaskedLo, MaskedHi;
2518 
2519   // Try to avoid emitting a bit operation when we only need to touch half of
2520   // the 64-bit pointer.
2521   APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zextOrSelf(64);
2522 
2523   const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
2524   const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
2525   if ((MaskOnes & MaskLo32) == MaskLo32) {
2526     // If all the bits in the low half are 1, we only need a copy for it.
2527     MaskedLo = LoReg;
2528   } else {
2529     // Extract the mask subregister and apply the and.
2530     Register MaskLo = MRI->createVirtualRegister(&RegRC);
2531     MaskedLo = MRI->createVirtualRegister(&RegRC);
2532 
2533     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo)
2534       .addReg(MaskReg, 0, AMDGPU::sub0);
2535     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo)
2536       .addReg(LoReg)
2537       .addReg(MaskLo);
2538   }
2539 
2540   if ((MaskOnes & MaskHi32) == MaskHi32) {
2541     // If all the bits in the high half are 1, we only need a copy for it.
2542     MaskedHi = HiReg;
2543   } else {
2544     Register MaskHi = MRI->createVirtualRegister(&RegRC);
2545     MaskedHi = MRI->createVirtualRegister(&RegRC);
2546 
2547     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi)
2548       .addReg(MaskReg, 0, AMDGPU::sub1);
2549     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi)
2550       .addReg(HiReg)
2551       .addReg(MaskHi);
2552   }
2553 
2554   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2555     .addReg(MaskedLo)
2556     .addImm(AMDGPU::sub0)
2557     .addReg(MaskedHi)
2558     .addImm(AMDGPU::sub1);
2559   I.eraseFromParent();
2560   return true;
2561 }
2562 
2563 /// Return the register to use for the index value, and the subregister to use
2564 /// for the indirectly accessed register.
2565 static std::pair<Register, unsigned>
2566 computeIndirectRegIndex(MachineRegisterInfo &MRI,
2567                         const SIRegisterInfo &TRI,
2568                         const TargetRegisterClass *SuperRC,
2569                         Register IdxReg,
2570                         unsigned EltSize) {
2571   Register IdxBaseReg;
2572   int Offset;
2573   MachineInstr *Unused;
2574 
2575   std::tie(IdxBaseReg, Offset, Unused)
2576     = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg);
2577   if (IdxBaseReg == AMDGPU::NoRegister) {
2578     // This will happen if the index is a known constant. This should ordinarily
2579     // be legalized out, but handle it as a register just in case.
2580     assert(Offset == 0);
2581     IdxBaseReg = IdxReg;
2582   }
2583 
2584   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);
2585 
2586   // Skip out of bounds offsets, or else we would end up using an undefined
2587   // register.
2588   if (static_cast<unsigned>(Offset) >= SubRegs.size())
2589     return std::make_pair(IdxReg, SubRegs[0]);
2590   return std::make_pair(IdxBaseReg, SubRegs[Offset]);
2591 }
2592 
2593 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
2594   MachineInstr &MI) const {
2595   Register DstReg = MI.getOperand(0).getReg();
2596   Register SrcReg = MI.getOperand(1).getReg();
2597   Register IdxReg = MI.getOperand(2).getReg();
2598 
2599   LLT DstTy = MRI->getType(DstReg);
2600   LLT SrcTy = MRI->getType(SrcReg);
2601 
2602   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2603   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2604   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2605 
2606   // The index must be scalar. If it wasn't RegBankSelect should have moved this
2607   // into a waterfall loop.
2608   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2609     return false;
2610 
2611   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB,
2612                                                                   *MRI);
2613   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(DstTy, *DstRB,
2614                                                                   *MRI);
2615   if (!SrcRC || !DstRC)
2616     return false;
2617   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2618       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2619       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2620     return false;
2621 
2622   MachineBasicBlock *BB = MI.getParent();
2623   const DebugLoc &DL = MI.getDebugLoc();
2624   const bool Is64 = DstTy.getSizeInBits() == 64;
2625 
2626   unsigned SubReg;
2627   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg,
2628                                                      DstTy.getSizeInBits() / 8);
2629 
2630   if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
2631     if (DstTy.getSizeInBits() != 32 && !Is64)
2632       return false;
2633 
2634     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2635       .addReg(IdxReg);
2636 
2637     unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
2638     BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
2639       .addReg(SrcReg, 0, SubReg)
2640       .addReg(SrcReg, RegState::Implicit);
2641     MI.eraseFromParent();
2642     return true;
2643   }
2644 
2645   if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
2646     return false;
2647 
2648   if (!STI.useVGPRIndexMode()) {
2649     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2650       .addReg(IdxReg);
2651     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
2652       .addReg(SrcReg, 0, SubReg)
2653       .addReg(SrcReg, RegState::Implicit);
2654     MI.eraseFromParent();
2655     return true;
2656   }
2657 
2658   BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_ON))
2659     .addReg(IdxReg)
2660     .addImm(AMDGPU::VGPRIndexMode::SRC0_ENABLE);
2661   BuildMI(*BB, MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), DstReg)
2662     .addReg(SrcReg, 0, SubReg)
2663     .addReg(SrcReg, RegState::Implicit)
2664     .addReg(AMDGPU::M0, RegState::Implicit);
2665   BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_OFF));
2666 
2667   MI.eraseFromParent();
2668   return true;
2669 }
2670 
2671 // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
2672 bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
2673   MachineInstr &MI) const {
2674   Register DstReg = MI.getOperand(0).getReg();
2675   Register VecReg = MI.getOperand(1).getReg();
2676   Register ValReg = MI.getOperand(2).getReg();
2677   Register IdxReg = MI.getOperand(3).getReg();
2678 
2679   LLT VecTy = MRI->getType(DstReg);
2680   LLT ValTy = MRI->getType(ValReg);
2681   unsigned VecSize = VecTy.getSizeInBits();
2682   unsigned ValSize = ValTy.getSizeInBits();
2683 
2684   const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
2685   const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
2686   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2687 
2688   assert(VecTy.getElementType() == ValTy);
2689 
2690   // The index must be scalar. If it wasn't RegBankSelect should have moved this
2691   // into a waterfall loop.
2692   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2693     return false;
2694 
2695   const TargetRegisterClass *VecRC = TRI.getRegClassForTypeOnBank(VecTy, *VecRB,
2696                                                                   *MRI);
2697   const TargetRegisterClass *ValRC = TRI.getRegClassForTypeOnBank(ValTy, *ValRB,
2698                                                                   *MRI);
2699 
2700   if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
2701       !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
2702       !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
2703       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2704     return false;
2705 
2706   if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
2707     return false;
2708 
2709   unsigned SubReg;
2710   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg,
2711                                                      ValSize / 8);
2712 
2713   const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
2714                          STI.useVGPRIndexMode();
2715 
2716   MachineBasicBlock *BB = MI.getParent();
2717   const DebugLoc &DL = MI.getDebugLoc();
2718 
2719   if (IndexMode) {
2720     BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_ON))
2721       .addReg(IdxReg)
2722       .addImm(AMDGPU::VGPRIndexMode::DST_ENABLE);
2723   } else {
2724     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2725       .addReg(IdxReg);
2726   }
2727 
2728   const MCInstrDesc &RegWriteOp
2729     = TII.getIndirectRegWritePseudo(VecSize, ValSize,
2730                                     VecRB->getID() == AMDGPU::SGPRRegBankID);
2731   BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
2732     .addReg(VecReg)
2733     .addReg(ValReg)
2734     .addImm(SubReg);
2735 
2736   if (IndexMode)
2737     BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_OFF));
2738 
2739   MI.eraseFromParent();
2740   return true;
2741 }
2742 
2743 static bool isZeroOrUndef(int X) {
2744   return X == 0 || X == -1;
2745 }
2746 
2747 static bool isOneOrUndef(int X) {
2748   return X == 1 || X == -1;
2749 }
2750 
2751 static bool isZeroOrOneOrUndef(int X) {
2752   return X == 0 || X == 1 || X == -1;
2753 }
2754 
2755 // Normalize a VOP3P shuffle mask to refer to the low/high half of a single
2756 // 32-bit register.
2757 static Register normalizeVOP3PMask(int NewMask[2], Register Src0, Register Src1,
2758                                    ArrayRef<int> Mask) {
2759   NewMask[0] = Mask[0];
2760   NewMask[1] = Mask[1];
2761   if (isZeroOrOneOrUndef(Mask[0]) && isZeroOrOneOrUndef(Mask[1]))
2762     return Src0;
2763 
2764   assert(NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1);
2765   assert(NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1);
2766 
2767   // Shift the mask inputs to be 0/1;
2768   NewMask[0] = NewMask[0] == -1 ? -1 : NewMask[0] - 2;
2769   NewMask[1] = NewMask[1] == -1 ? -1 : NewMask[1] - 2;
2770   return Src1;
2771 }
2772 
2773 // This is only legal with VOP3P instructions as an aid to op_sel matching.
2774 bool AMDGPUInstructionSelector::selectG_SHUFFLE_VECTOR(
2775   MachineInstr &MI) const {
2776   Register DstReg = MI.getOperand(0).getReg();
2777   Register Src0Reg = MI.getOperand(1).getReg();
2778   Register Src1Reg = MI.getOperand(2).getReg();
2779   ArrayRef<int> ShufMask = MI.getOperand(3).getShuffleMask();
2780 
2781   const LLT V2S16 = LLT::vector(2, 16);
2782   if (MRI->getType(DstReg) != V2S16 || MRI->getType(Src0Reg) != V2S16)
2783     return false;
2784 
2785   if (!AMDGPU::isLegalVOP3PShuffleMask(ShufMask))
2786     return false;
2787 
2788   assert(ShufMask.size() == 2);
2789   assert(STI.hasSDWA() && "no target has VOP3P but not SDWA");
2790 
2791   MachineBasicBlock *MBB = MI.getParent();
2792   const DebugLoc &DL = MI.getDebugLoc();
2793 
2794   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2795   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
2796   const TargetRegisterClass &RC = IsVALU ?
2797     AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2798 
2799   // Handle the degenerate case which should have folded out.
2800   if (ShufMask[0] == -1 && ShufMask[1] == -1) {
2801     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::IMPLICIT_DEF), DstReg);
2802 
2803     MI.eraseFromParent();
2804     return RBI.constrainGenericRegister(DstReg, RC, *MRI);
2805   }
2806 
2807   // A legal VOP3P mask only reads one of the sources.
2808   int Mask[2];
2809   Register SrcVec = normalizeVOP3PMask(Mask, Src0Reg, Src1Reg, ShufMask);
2810 
2811   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI) ||
2812       !RBI.constrainGenericRegister(SrcVec, RC, *MRI))
2813     return false;
2814 
2815   // TODO: This also should have been folded out
2816   if (isZeroOrUndef(Mask[0]) && isOneOrUndef(Mask[1])) {
2817     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::COPY), DstReg)
2818       .addReg(SrcVec);
2819 
2820     MI.eraseFromParent();
2821     return true;
2822   }
2823 
2824   if (Mask[0] == 1 && Mask[1] == -1) {
2825     if (IsVALU) {
2826       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
2827         .addImm(16)
2828         .addReg(SrcVec);
2829     } else {
2830       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
2831         .addReg(SrcVec)
2832         .addImm(16);
2833     }
2834   } else if (Mask[0] == -1 && Mask[1] == 0) {
2835     if (IsVALU) {
2836       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), DstReg)
2837         .addImm(16)
2838         .addReg(SrcVec);
2839     } else {
2840       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHL_B32), DstReg)
2841         .addReg(SrcVec)
2842         .addImm(16);
2843     }
2844   } else if (Mask[0] == 0 && Mask[1] == 0) {
2845     if (IsVALU) {
2846       // Write low half of the register into the high half.
2847       MachineInstr *MovSDWA =
2848         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2849         .addImm(0)                             // $src0_modifiers
2850         .addReg(SrcVec)                        // $src0
2851         .addImm(0)                             // $clamp
2852         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
2853         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2854         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
2855         .addReg(SrcVec, RegState::Implicit);
2856       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2857     } else {
2858       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2859         .addReg(SrcVec)
2860         .addReg(SrcVec);
2861     }
2862   } else if (Mask[0] == 1 && Mask[1] == 1) {
2863     if (IsVALU) {
2864       // Write high half of the register into the low half.
2865       MachineInstr *MovSDWA =
2866         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2867         .addImm(0)                             // $src0_modifiers
2868         .addReg(SrcVec)                        // $src0
2869         .addImm(0)                             // $clamp
2870         .addImm(AMDGPU::SDWA::WORD_0)          // $dst_sel
2871         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2872         .addImm(AMDGPU::SDWA::WORD_1)          // $src0_sel
2873         .addReg(SrcVec, RegState::Implicit);
2874       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2875     } else {
2876       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HH_B32_B16), DstReg)
2877         .addReg(SrcVec)
2878         .addReg(SrcVec);
2879     }
2880   } else if (Mask[0] == 1 && Mask[1] == 0) {
2881     if (IsVALU) {
2882       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_ALIGNBIT_B32), DstReg)
2883         .addReg(SrcVec)
2884         .addReg(SrcVec)
2885         .addImm(16);
2886     } else {
2887       Register TmpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2888       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), TmpReg)
2889         .addReg(SrcVec)
2890         .addImm(16);
2891       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2892         .addReg(TmpReg)
2893         .addReg(SrcVec);
2894     }
2895   } else
2896     llvm_unreachable("all shuffle masks should be handled");
2897 
2898   MI.eraseFromParent();
2899   return true;
2900 }
2901 
2902 bool AMDGPUInstructionSelector::select(MachineInstr &I) {
2903   if (I.isPHI())
2904     return selectPHI(I);
2905 
2906   if (!I.isPreISelOpcode()) {
2907     if (I.isCopy())
2908       return selectCOPY(I);
2909     return true;
2910   }
2911 
2912   switch (I.getOpcode()) {
2913   case TargetOpcode::G_AND:
2914   case TargetOpcode::G_OR:
2915   case TargetOpcode::G_XOR:
2916     if (selectImpl(I, *CoverageInfo))
2917       return true;
2918     return selectG_AND_OR_XOR(I);
2919   case TargetOpcode::G_ADD:
2920   case TargetOpcode::G_SUB:
2921     if (selectImpl(I, *CoverageInfo))
2922       return true;
2923     return selectG_ADD_SUB(I);
2924   case TargetOpcode::G_UADDO:
2925   case TargetOpcode::G_USUBO:
2926   case TargetOpcode::G_UADDE:
2927   case TargetOpcode::G_USUBE:
2928     return selectG_UADDO_USUBO_UADDE_USUBE(I);
2929   case TargetOpcode::G_INTTOPTR:
2930   case TargetOpcode::G_BITCAST:
2931   case TargetOpcode::G_PTRTOINT:
2932     return selectCOPY(I);
2933   case TargetOpcode::G_CONSTANT:
2934   case TargetOpcode::G_FCONSTANT:
2935     return selectG_CONSTANT(I);
2936   case TargetOpcode::G_FNEG:
2937     if (selectImpl(I, *CoverageInfo))
2938       return true;
2939     return selectG_FNEG(I);
2940   case TargetOpcode::G_FABS:
2941     if (selectImpl(I, *CoverageInfo))
2942       return true;
2943     return selectG_FABS(I);
2944   case TargetOpcode::G_EXTRACT:
2945     return selectG_EXTRACT(I);
2946   case TargetOpcode::G_MERGE_VALUES:
2947   case TargetOpcode::G_BUILD_VECTOR:
2948   case TargetOpcode::G_CONCAT_VECTORS:
2949     return selectG_MERGE_VALUES(I);
2950   case TargetOpcode::G_UNMERGE_VALUES:
2951     return selectG_UNMERGE_VALUES(I);
2952   case TargetOpcode::G_BUILD_VECTOR_TRUNC:
2953     return selectG_BUILD_VECTOR_TRUNC(I);
2954   case TargetOpcode::G_PTR_ADD:
2955     return selectG_PTR_ADD(I);
2956   case TargetOpcode::G_IMPLICIT_DEF:
2957     return selectG_IMPLICIT_DEF(I);
2958   case TargetOpcode::G_FREEZE:
2959     return selectCOPY(I);
2960   case TargetOpcode::G_INSERT:
2961     return selectG_INSERT(I);
2962   case TargetOpcode::G_INTRINSIC:
2963     return selectG_INTRINSIC(I);
2964   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
2965     return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
2966   case TargetOpcode::G_ICMP:
2967     if (selectG_ICMP(I))
2968       return true;
2969     return selectImpl(I, *CoverageInfo);
2970   case TargetOpcode::G_LOAD:
2971   case TargetOpcode::G_STORE:
2972   case TargetOpcode::G_ATOMIC_CMPXCHG:
2973   case TargetOpcode::G_ATOMICRMW_XCHG:
2974   case TargetOpcode::G_ATOMICRMW_ADD:
2975   case TargetOpcode::G_ATOMICRMW_SUB:
2976   case TargetOpcode::G_ATOMICRMW_AND:
2977   case TargetOpcode::G_ATOMICRMW_OR:
2978   case TargetOpcode::G_ATOMICRMW_XOR:
2979   case TargetOpcode::G_ATOMICRMW_MIN:
2980   case TargetOpcode::G_ATOMICRMW_MAX:
2981   case TargetOpcode::G_ATOMICRMW_UMIN:
2982   case TargetOpcode::G_ATOMICRMW_UMAX:
2983   case TargetOpcode::G_ATOMICRMW_FADD:
2984   case AMDGPU::G_AMDGPU_ATOMIC_INC:
2985   case AMDGPU::G_AMDGPU_ATOMIC_DEC:
2986   case AMDGPU::G_AMDGPU_ATOMIC_FMIN:
2987   case AMDGPU::G_AMDGPU_ATOMIC_FMAX:
2988     return selectG_LOAD_STORE_ATOMICRMW(I);
2989   case AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG:
2990     return selectG_AMDGPU_ATOMIC_CMPXCHG(I);
2991   case TargetOpcode::G_SELECT:
2992     return selectG_SELECT(I);
2993   case TargetOpcode::G_TRUNC:
2994     return selectG_TRUNC(I);
2995   case TargetOpcode::G_SEXT:
2996   case TargetOpcode::G_ZEXT:
2997   case TargetOpcode::G_ANYEXT:
2998   case TargetOpcode::G_SEXT_INREG:
2999     if (selectImpl(I, *CoverageInfo))
3000       return true;
3001     return selectG_SZA_EXT(I);
3002   case TargetOpcode::G_BRCOND:
3003     return selectG_BRCOND(I);
3004   case TargetOpcode::G_GLOBAL_VALUE:
3005     return selectG_GLOBAL_VALUE(I);
3006   case TargetOpcode::G_PTRMASK:
3007     return selectG_PTRMASK(I);
3008   case TargetOpcode::G_EXTRACT_VECTOR_ELT:
3009     return selectG_EXTRACT_VECTOR_ELT(I);
3010   case TargetOpcode::G_INSERT_VECTOR_ELT:
3011     return selectG_INSERT_VECTOR_ELT(I);
3012   case TargetOpcode::G_SHUFFLE_VECTOR:
3013     return selectG_SHUFFLE_VECTOR(I);
3014   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
3015   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE: {
3016     const AMDGPU::ImageDimIntrinsicInfo *Intr
3017       = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID());
3018     assert(Intr && "not an image intrinsic with image pseudo");
3019     return selectImageIntrinsic(I, Intr);
3020   }
3021   default:
3022     return selectImpl(I, *CoverageInfo);
3023   }
3024   return false;
3025 }
3026 
3027 InstructionSelector::ComplexRendererFns
3028 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
3029   return {{
3030       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3031   }};
3032 
3033 }
3034 
3035 std::pair<Register, unsigned>
3036 AMDGPUInstructionSelector::selectVOP3ModsImpl(MachineOperand &Root) const {
3037   Register Src = Root.getReg();
3038   Register OrigSrc = Src;
3039   unsigned Mods = 0;
3040   MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
3041 
3042   if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
3043     Src = MI->getOperand(1).getReg();
3044     Mods |= SISrcMods::NEG;
3045     MI = getDefIgnoringCopies(Src, *MRI);
3046   }
3047 
3048   if (MI && MI->getOpcode() == AMDGPU::G_FABS) {
3049     Src = MI->getOperand(1).getReg();
3050     Mods |= SISrcMods::ABS;
3051   }
3052 
3053   if (Mods != 0 &&
3054       RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) {
3055     MachineInstr *UseMI = Root.getParent();
3056 
3057     // If we looked through copies to find source modifiers on an SGPR operand,
3058     // we now have an SGPR register source. To avoid potentially violating the
3059     // constant bus restriction, we need to insert a copy to a VGPR.
3060     Register VGPRSrc = MRI->cloneVirtualRegister(OrigSrc);
3061     BuildMI(*UseMI->getParent(), UseMI, UseMI->getDebugLoc(),
3062             TII.get(AMDGPU::COPY), VGPRSrc)
3063       .addReg(Src);
3064     Src = VGPRSrc;
3065   }
3066 
3067   return std::make_pair(Src, Mods);
3068 }
3069 
3070 ///
3071 /// This will select either an SGPR or VGPR operand and will save us from
3072 /// having to write an extra tablegen pattern.
3073 InstructionSelector::ComplexRendererFns
3074 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
3075   return {{
3076       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3077   }};
3078 }
3079 
3080 InstructionSelector::ComplexRendererFns
3081 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
3082   Register Src;
3083   unsigned Mods;
3084   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3085 
3086   return {{
3087       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3088       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3089       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3090       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3091   }};
3092 }
3093 
3094 InstructionSelector::ComplexRendererFns
3095 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
3096   return {{
3097       [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
3098       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3099       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
3100   }};
3101 }
3102 
3103 InstructionSelector::ComplexRendererFns
3104 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
3105   Register Src;
3106   unsigned Mods;
3107   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3108 
3109   return {{
3110       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3111       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3112   }};
3113 }
3114 
3115 InstructionSelector::ComplexRendererFns
3116 AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
3117   Register Reg = Root.getReg();
3118   const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI);
3119   if (Def && (Def->getOpcode() == AMDGPU::G_FNEG ||
3120               Def->getOpcode() == AMDGPU::G_FABS))
3121     return {};
3122   return {{
3123       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3124   }};
3125 }
3126 
3127 std::pair<Register, unsigned>
3128 AMDGPUInstructionSelector::selectVOP3PModsImpl(
3129   Register Src, const MachineRegisterInfo &MRI) const {
3130   unsigned Mods = 0;
3131   MachineInstr *MI = MRI.getVRegDef(Src);
3132 
3133   if (MI && MI->getOpcode() == AMDGPU::G_FNEG &&
3134       // It's possible to see an f32 fneg here, but unlikely.
3135       // TODO: Treat f32 fneg as only high bit.
3136       MRI.getType(Src) == LLT::vector(2, 16)) {
3137     Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
3138     Src = MI->getOperand(1).getReg();
3139     MI = MRI.getVRegDef(Src);
3140   }
3141 
3142   // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.
3143 
3144   // Packed instructions do not have abs modifiers.
3145   Mods |= SISrcMods::OP_SEL_1;
3146 
3147   return std::make_pair(Src, Mods);
3148 }
3149 
3150 InstructionSelector::ComplexRendererFns
3151 AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
3152   MachineRegisterInfo &MRI
3153     = Root.getParent()->getParent()->getParent()->getRegInfo();
3154 
3155   Register Src;
3156   unsigned Mods;
3157   std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);
3158 
3159   return {{
3160       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3161       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3162   }};
3163 }
3164 
3165 InstructionSelector::ComplexRendererFns
3166 AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
3167   Register Src;
3168   unsigned Mods;
3169   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3170   if (!isKnownNeverNaN(Src, *MRI))
3171     return None;
3172 
3173   return {{
3174       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3175       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3176   }};
3177 }
3178 
3179 InstructionSelector::ComplexRendererFns
3180 AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
3181   // FIXME: Handle op_sel
3182   return {{
3183       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
3184       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
3185   }};
3186 }
3187 
3188 InstructionSelector::ComplexRendererFns
3189 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
3190   SmallVector<GEPInfo, 4> AddrInfo;
3191   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3192 
3193   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3194     return None;
3195 
3196   const GEPInfo &GEPInfo = AddrInfo[0];
3197   Optional<int64_t> EncodedImm =
3198       AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm, false);
3199   if (!EncodedImm)
3200     return None;
3201 
3202   unsigned PtrReg = GEPInfo.SgprParts[0];
3203   return {{
3204     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3205     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3206   }};
3207 }
3208 
3209 InstructionSelector::ComplexRendererFns
3210 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
3211   SmallVector<GEPInfo, 4> AddrInfo;
3212   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3213 
3214   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3215     return None;
3216 
3217   const GEPInfo &GEPInfo = AddrInfo[0];
3218   Register PtrReg = GEPInfo.SgprParts[0];
3219   Optional<int64_t> EncodedImm =
3220       AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
3221   if (!EncodedImm)
3222     return None;
3223 
3224   return {{
3225     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3226     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3227   }};
3228 }
3229 
3230 InstructionSelector::ComplexRendererFns
3231 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
3232   MachineInstr *MI = Root.getParent();
3233   MachineBasicBlock *MBB = MI->getParent();
3234 
3235   SmallVector<GEPInfo, 4> AddrInfo;
3236   getAddrModeInfo(*MI, *MRI, AddrInfo);
3237 
3238   // FIXME: We should shrink the GEP if the offset is known to be <= 32-bits,
3239   // then we can select all ptr + 32-bit offsets not just immediate offsets.
3240   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3241     return None;
3242 
3243   const GEPInfo &GEPInfo = AddrInfo[0];
3244   // SGPR offset is unsigned.
3245   if (!GEPInfo.Imm || GEPInfo.Imm < 0 || !isUInt<32>(GEPInfo.Imm))
3246     return None;
3247 
3248   // If we make it this far we have a load with an 32-bit immediate offset.
3249   // It is OK to select this using a sgpr offset, because we have already
3250   // failed trying to select this load into one of the _IMM variants since
3251   // the _IMM Patterns are considered before the _SGPR patterns.
3252   Register PtrReg = GEPInfo.SgprParts[0];
3253   Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3254   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
3255           .addImm(GEPInfo.Imm);
3256   return {{
3257     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3258     [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
3259   }};
3260 }
3261 
3262 template <bool Signed>
3263 InstructionSelector::ComplexRendererFns
3264 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root) const {
3265   MachineInstr *MI = Root.getParent();
3266 
3267   InstructionSelector::ComplexRendererFns Default = {{
3268       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
3269       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },  // offset
3270     }};
3271 
3272   if (!STI.hasFlatInstOffsets())
3273     return Default;
3274 
3275   const MachineInstr *OpDef = MRI->getVRegDef(Root.getReg());
3276   if (!OpDef || OpDef->getOpcode() != AMDGPU::G_PTR_ADD)
3277     return Default;
3278 
3279   Optional<int64_t> Offset =
3280     getConstantVRegVal(OpDef->getOperand(2).getReg(), *MRI);
3281   if (!Offset.hasValue())
3282     return Default;
3283 
3284   unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
3285   if (!TII.isLegalFLATOffset(Offset.getValue(), AddrSpace, Signed))
3286     return Default;
3287 
3288   Register BasePtr = OpDef->getOperand(1).getReg();
3289 
3290   return {{
3291       [=](MachineInstrBuilder &MIB) { MIB.addReg(BasePtr); },
3292       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset.getValue()); },
3293     }};
3294 }
3295 
3296 InstructionSelector::ComplexRendererFns
3297 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
3298   return selectFlatOffsetImpl<false>(Root);
3299 }
3300 
3301 InstructionSelector::ComplexRendererFns
3302 AMDGPUInstructionSelector::selectFlatOffsetSigned(MachineOperand &Root) const {
3303   return selectFlatOffsetImpl<true>(Root);
3304 }
3305 
3306 /// Match a zero extend from a 32-bit value to 64-bits.
3307 static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) {
3308   Register ZExtSrc;
3309   if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))
3310     return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register();
3311 
3312   // Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0)
3313   const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
3314   if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES)
3315     return false;
3316 
3317   int64_t MergeRHS;
3318   if (mi_match(Def->getOperand(2).getReg(), MRI, m_ICst(MergeRHS)) &&
3319       MergeRHS == 0) {
3320     return Def->getOperand(1).getReg();
3321   }
3322 
3323   return Register();
3324 }
3325 
3326 // Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
3327 InstructionSelector::ComplexRendererFns
3328 AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
3329   Register PtrBase;
3330   int64_t ImmOffset;
3331 
3332   // Match the immediate offset first, which canonically is moved as low as
3333   // possible.
3334   std::tie(PtrBase, ImmOffset) = getPtrBaseWithConstantOffset(Root.getReg(),
3335                                                               *MRI);
3336 
3337   // TODO: Could split larger constant into VGPR offset.
3338   if (ImmOffset != 0 &&
3339       !TII.isLegalFLATOffset(ImmOffset, AMDGPUAS::GLOBAL_ADDRESS, true)) {
3340     PtrBase = Root.getReg();
3341     ImmOffset = 0;
3342   }
3343 
3344   // Match the variable offset.
3345   const MachineInstr *PtrBaseDef = getDefIgnoringCopies(PtrBase, *MRI);
3346   if (PtrBaseDef->getOpcode() != AMDGPU::G_PTR_ADD)
3347     return None;
3348 
3349   // Look through the SGPR->VGPR copy.
3350   Register PtrBaseSrc =
3351     getSrcRegIgnoringCopies(PtrBaseDef->getOperand(1).getReg(), *MRI);
3352   if (!PtrBaseSrc)
3353     return None;
3354 
3355   const RegisterBank *BaseRB = RBI.getRegBank(PtrBaseSrc, *MRI, TRI);
3356   if (BaseRB->getID() != AMDGPU::SGPRRegBankID)
3357     return None;
3358 
3359   Register SAddr = PtrBaseSrc;
3360   Register PtrBaseOffset = PtrBaseDef->getOperand(2).getReg();
3361 
3362   // It's possible voffset is an SGPR here, but the copy to VGPR will be
3363   // inserted later.
3364   Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset);
3365   if (!VOffset)
3366     return None;
3367 
3368   return {{[=](MachineInstrBuilder &MIB) { // saddr
3369              MIB.addReg(SAddr);
3370            },
3371            [=](MachineInstrBuilder &MIB) { // voffset
3372              MIB.addReg(VOffset);
3373            },
3374            [=](MachineInstrBuilder &MIB) { // offset
3375              MIB.addImm(ImmOffset);
3376            }}};
3377 }
3378 
3379 static bool isStackPtrRelative(const MachinePointerInfo &PtrInfo) {
3380   auto PSV = PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
3381   return PSV && PSV->isStack();
3382 }
3383 
3384 InstructionSelector::ComplexRendererFns
3385 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
3386   MachineInstr *MI = Root.getParent();
3387   MachineBasicBlock *MBB = MI->getParent();
3388   MachineFunction *MF = MBB->getParent();
3389   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3390 
3391   int64_t Offset = 0;
3392   if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) &&
3393       Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) {
3394     Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3395 
3396     // TODO: Should this be inside the render function? The iterator seems to
3397     // move.
3398     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3399             HighBits)
3400       .addImm(Offset & ~4095);
3401 
3402     return {{[=](MachineInstrBuilder &MIB) { // rsrc
3403                MIB.addReg(Info->getScratchRSrcReg());
3404              },
3405              [=](MachineInstrBuilder &MIB) { // vaddr
3406                MIB.addReg(HighBits);
3407              },
3408              [=](MachineInstrBuilder &MIB) { // soffset
3409                const MachineMemOperand *MMO = *MI->memoperands_begin();
3410                const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
3411 
3412                if (isStackPtrRelative(PtrInfo))
3413                  MIB.addReg(Info->getStackPtrOffsetReg());
3414                else
3415                  MIB.addImm(0);
3416              },
3417              [=](MachineInstrBuilder &MIB) { // offset
3418                MIB.addImm(Offset & 4095);
3419              }}};
3420   }
3421 
3422   assert(Offset == 0 || Offset == -1);
3423 
3424   // Try to fold a frame index directly into the MUBUF vaddr field, and any
3425   // offsets.
3426   Optional<int> FI;
3427   Register VAddr = Root.getReg();
3428   if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
3429     if (isBaseWithConstantOffset(Root, *MRI)) {
3430       const MachineOperand &LHS = RootDef->getOperand(1);
3431       const MachineOperand &RHS = RootDef->getOperand(2);
3432       const MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
3433       const MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());
3434       if (LHSDef && RHSDef) {
3435         int64_t PossibleOffset =
3436             RHSDef->getOperand(1).getCImm()->getSExtValue();
3437         if (SIInstrInfo::isLegalMUBUFImmOffset(PossibleOffset) &&
3438             (!STI.privateMemoryResourceIsRangeChecked() ||
3439              KnownBits->signBitIsZero(LHS.getReg()))) {
3440           if (LHSDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
3441             FI = LHSDef->getOperand(1).getIndex();
3442           else
3443             VAddr = LHS.getReg();
3444           Offset = PossibleOffset;
3445         }
3446       }
3447     } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
3448       FI = RootDef->getOperand(1).getIndex();
3449     }
3450   }
3451 
3452   return {{[=](MachineInstrBuilder &MIB) { // rsrc
3453              MIB.addReg(Info->getScratchRSrcReg());
3454            },
3455            [=](MachineInstrBuilder &MIB) { // vaddr
3456              if (FI.hasValue())
3457                MIB.addFrameIndex(FI.getValue());
3458              else
3459                MIB.addReg(VAddr);
3460            },
3461            [=](MachineInstrBuilder &MIB) { // soffset
3462              // If we don't know this private access is a local stack object, it
3463              // needs to be relative to the entry point's scratch wave offset.
3464              // TODO: Should split large offsets that don't fit like above.
3465              // TODO: Don't use scratch wave offset just because the offset
3466              // didn't fit.
3467              if (!Info->isEntryFunction() && FI.hasValue())
3468                MIB.addReg(Info->getStackPtrOffsetReg());
3469              else
3470                MIB.addImm(0);
3471            },
3472            [=](MachineInstrBuilder &MIB) { // offset
3473              MIB.addImm(Offset);
3474            }}};
3475 }
3476 
3477 bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
3478                                                 int64_t Offset,
3479                                                 unsigned OffsetBits) const {
3480   if ((OffsetBits == 16 && !isUInt<16>(Offset)) ||
3481       (OffsetBits == 8 && !isUInt<8>(Offset)))
3482     return false;
3483 
3484   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
3485     return true;
3486 
3487   // On Southern Islands instruction with a negative base value and an offset
3488   // don't seem to work.
3489   return KnownBits->signBitIsZero(Base);
3490 }
3491 
3492 InstructionSelector::ComplexRendererFns
3493 AMDGPUInstructionSelector::selectMUBUFScratchOffset(
3494     MachineOperand &Root) const {
3495   MachineInstr *MI = Root.getParent();
3496   MachineBasicBlock *MBB = MI->getParent();
3497 
3498   int64_t Offset = 0;
3499   if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
3500       !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
3501     return {};
3502 
3503   const MachineFunction *MF = MBB->getParent();
3504   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3505   const MachineMemOperand *MMO = *MI->memoperands_begin();
3506   const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
3507 
3508   return {{
3509       [=](MachineInstrBuilder &MIB) { // rsrc
3510         MIB.addReg(Info->getScratchRSrcReg());
3511       },
3512       [=](MachineInstrBuilder &MIB) { // soffset
3513         if (isStackPtrRelative(PtrInfo))
3514           MIB.addReg(Info->getStackPtrOffsetReg());
3515         else
3516           MIB.addImm(0);
3517       },
3518       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
3519   }};
3520 }
3521 
3522 std::pair<Register, unsigned>
3523 AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
3524   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
3525   if (!RootDef)
3526     return std::make_pair(Root.getReg(), 0);
3527 
3528   int64_t ConstAddr = 0;
3529 
3530   Register PtrBase;
3531   int64_t Offset;
3532   std::tie(PtrBase, Offset) =
3533     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3534 
3535   if (Offset) {
3536     if (isDSOffsetLegal(PtrBase, Offset, 16)) {
3537       // (add n0, c0)
3538       return std::make_pair(PtrBase, Offset);
3539     }
3540   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
3541     // TODO
3542 
3543 
3544   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
3545     // TODO
3546 
3547   }
3548 
3549   return std::make_pair(Root.getReg(), 0);
3550 }
3551 
3552 InstructionSelector::ComplexRendererFns
3553 AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
3554   Register Reg;
3555   unsigned Offset;
3556   std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
3557   return {{
3558       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3559       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
3560     }};
3561 }
3562 
3563 InstructionSelector::ComplexRendererFns
3564 AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
3565   return selectDSReadWrite2(Root, false);
3566 }
3567 
3568 InstructionSelector::ComplexRendererFns
3569 AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const {
3570   return selectDSReadWrite2(Root, true);
3571 }
3572 
3573 InstructionSelector::ComplexRendererFns
3574 AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root,
3575                                               bool IsDS128) const {
3576   Register Reg;
3577   unsigned Offset;
3578   std::tie(Reg, Offset) = selectDSReadWrite2Impl(Root, IsDS128);
3579   return {{
3580       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3581       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
3582       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); }
3583     }};
3584 }
3585 
3586 std::pair<Register, unsigned>
3587 AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root,
3588                                                   bool IsDS128) const {
3589   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
3590   if (!RootDef)
3591     return std::make_pair(Root.getReg(), 0);
3592 
3593   int64_t ConstAddr = 0;
3594 
3595   Register PtrBase;
3596   int64_t Offset;
3597   std::tie(PtrBase, Offset) =
3598     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3599 
3600   if (Offset) {
3601     int64_t OffsetValue0 = Offset / (IsDS128 ? 8 : 4);
3602     int64_t OffsetValue1 = OffsetValue0 + 1;
3603     if (isDSOffsetLegal(PtrBase, OffsetValue1, (IsDS128 ? 16 : 8))) {
3604       // (add n0, c0)
3605       return std::make_pair(PtrBase, OffsetValue0);
3606     }
3607   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
3608     // TODO
3609 
3610   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
3611     // TODO
3612 
3613   }
3614 
3615   return std::make_pair(Root.getReg(), 0);
3616 }
3617 
3618 /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
3619 /// the base value with the constant offset. There may be intervening copies
3620 /// between \p Root and the identified constant. Returns \p Root, 0 if this does
3621 /// not match the pattern.
3622 std::pair<Register, int64_t>
3623 AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
3624   Register Root, const MachineRegisterInfo &MRI) const {
3625   MachineInstr *RootI = getDefIgnoringCopies(Root, MRI);
3626   if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
3627     return {Root, 0};
3628 
3629   MachineOperand &RHS = RootI->getOperand(2);
3630   Optional<ValueAndVReg> MaybeOffset
3631     = getConstantVRegValWithLookThrough(RHS.getReg(), MRI, true);
3632   if (!MaybeOffset)
3633     return {Root, 0};
3634   return {RootI->getOperand(1).getReg(), MaybeOffset->Value};
3635 }
3636 
3637 static void addZeroImm(MachineInstrBuilder &MIB) {
3638   MIB.addImm(0);
3639 }
3640 
3641 /// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p
3642 /// BasePtr is not valid, a null base pointer will be used.
3643 static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
3644                           uint32_t FormatLo, uint32_t FormatHi,
3645                           Register BasePtr) {
3646   Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
3647   Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
3648   Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
3649   Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
3650 
3651   B.buildInstr(AMDGPU::S_MOV_B32)
3652     .addDef(RSrc2)
3653     .addImm(FormatLo);
3654   B.buildInstr(AMDGPU::S_MOV_B32)
3655     .addDef(RSrc3)
3656     .addImm(FormatHi);
3657 
3658   // Build the half of the subregister with the constants before building the
3659   // full 128-bit register. If we are building multiple resource descriptors,
3660   // this will allow CSEing of the 2-component register.
3661   B.buildInstr(AMDGPU::REG_SEQUENCE)
3662     .addDef(RSrcHi)
3663     .addReg(RSrc2)
3664     .addImm(AMDGPU::sub0)
3665     .addReg(RSrc3)
3666     .addImm(AMDGPU::sub1);
3667 
3668   Register RSrcLo = BasePtr;
3669   if (!BasePtr) {
3670     RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
3671     B.buildInstr(AMDGPU::S_MOV_B64)
3672       .addDef(RSrcLo)
3673       .addImm(0);
3674   }
3675 
3676   B.buildInstr(AMDGPU::REG_SEQUENCE)
3677     .addDef(RSrc)
3678     .addReg(RSrcLo)
3679     .addImm(AMDGPU::sub0_sub1)
3680     .addReg(RSrcHi)
3681     .addImm(AMDGPU::sub2_sub3);
3682 
3683   return RSrc;
3684 }
3685 
3686 static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
3687                                 const SIInstrInfo &TII, Register BasePtr) {
3688   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
3689 
3690   // FIXME: Why are half the "default" bits ignored based on the addressing
3691   // mode?
3692   return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
3693 }
3694 
3695 static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
3696                                const SIInstrInfo &TII, Register BasePtr) {
3697   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
3698 
3699   // FIXME: Why are half the "default" bits ignored based on the addressing
3700   // mode?
3701   return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
3702 }
3703 
3704 AMDGPUInstructionSelector::MUBUFAddressData
3705 AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
3706   MUBUFAddressData Data;
3707   Data.N0 = Src;
3708 
3709   Register PtrBase;
3710   int64_t Offset;
3711 
3712   std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
3713   if (isUInt<32>(Offset)) {
3714     Data.N0 = PtrBase;
3715     Data.Offset = Offset;
3716   }
3717 
3718   if (MachineInstr *InputAdd
3719       = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
3720     Data.N2 = InputAdd->getOperand(1).getReg();
3721     Data.N3 = InputAdd->getOperand(2).getReg();
3722 
3723     // FIXME: Need to fix extra SGPR->VGPRcopies inserted
3724     // FIXME: Don't know this was defined by operand 0
3725     //
3726     // TODO: Remove this when we have copy folding optimizations after
3727     // RegBankSelect.
3728     Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
3729     Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
3730   }
3731 
3732   return Data;
3733 }
3734 
3735 /// Return if the addr64 mubuf mode should be used for the given address.
3736 bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
3737   // (ptr_add N2, N3) -> addr64, or
3738   // (ptr_add (ptr_add N2, N3), C1) -> addr64
3739   if (Addr.N2)
3740     return true;
3741 
3742   const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
3743   return N0Bank->getID() == AMDGPU::VGPRRegBankID;
3744 }
3745 
3746 /// Split an immediate offset \p ImmOffset depending on whether it fits in the
3747 /// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
3748 /// component.
3749 void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
3750   MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
3751   if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
3752     return;
3753 
3754   // Illegal offset, store it in soffset.
3755   SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3756   B.buildInstr(AMDGPU::S_MOV_B32)
3757     .addDef(SOffset)
3758     .addImm(ImmOffset);
3759   ImmOffset = 0;
3760 }
3761 
3762 bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
3763   MachineOperand &Root, Register &VAddr, Register &RSrcReg,
3764   Register &SOffset, int64_t &Offset) const {
3765   // FIXME: Predicates should stop this from reaching here.
3766   // addr64 bit was removed for volcanic islands.
3767   if (!STI.hasAddr64() || STI.useFlatForGlobal())
3768     return false;
3769 
3770   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
3771   if (!shouldUseAddr64(AddrData))
3772     return false;
3773 
3774   Register N0 = AddrData.N0;
3775   Register N2 = AddrData.N2;
3776   Register N3 = AddrData.N3;
3777   Offset = AddrData.Offset;
3778 
3779   // Base pointer for the SRD.
3780   Register SRDPtr;
3781 
3782   if (N2) {
3783     if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
3784       assert(N3);
3785       if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
3786         // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
3787         // addr64, and construct the default resource from a 0 address.
3788         VAddr = N0;
3789       } else {
3790         SRDPtr = N3;
3791         VAddr = N2;
3792       }
3793     } else {
3794       // N2 is not divergent.
3795       SRDPtr = N2;
3796       VAddr = N3;
3797     }
3798   } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
3799     // Use the default null pointer in the resource
3800     VAddr = N0;
3801   } else {
3802     // N0 -> offset, or
3803     // (N0 + C1) -> offset
3804     SRDPtr = N0;
3805   }
3806 
3807   MachineIRBuilder B(*Root.getParent());
3808   RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
3809   splitIllegalMUBUFOffset(B, SOffset, Offset);
3810   return true;
3811 }
3812 
3813 bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
3814   MachineOperand &Root, Register &RSrcReg, Register &SOffset,
3815   int64_t &Offset) const {
3816 
3817   // FIXME: Pattern should not reach here.
3818   if (STI.useFlatForGlobal())
3819     return false;
3820 
3821   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
3822   if (shouldUseAddr64(AddrData))
3823     return false;
3824 
3825   // N0 -> offset, or
3826   // (N0 + C1) -> offset
3827   Register SRDPtr = AddrData.N0;
3828   Offset = AddrData.Offset;
3829 
3830   // TODO: Look through extensions for 32-bit soffset.
3831   MachineIRBuilder B(*Root.getParent());
3832 
3833   RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
3834   splitIllegalMUBUFOffset(B, SOffset, Offset);
3835   return true;
3836 }
3837 
3838 InstructionSelector::ComplexRendererFns
3839 AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
3840   Register VAddr;
3841   Register RSrcReg;
3842   Register SOffset;
3843   int64_t Offset = 0;
3844 
3845   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
3846     return {};
3847 
3848   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
3849   // pattern.
3850   return {{
3851       [=](MachineInstrBuilder &MIB) {  // rsrc
3852         MIB.addReg(RSrcReg);
3853       },
3854       [=](MachineInstrBuilder &MIB) { // vaddr
3855         MIB.addReg(VAddr);
3856       },
3857       [=](MachineInstrBuilder &MIB) { // soffset
3858         if (SOffset)
3859           MIB.addReg(SOffset);
3860         else
3861           MIB.addImm(0);
3862       },
3863       [=](MachineInstrBuilder &MIB) { // offset
3864         MIB.addImm(Offset);
3865       },
3866       addZeroImm, //  glc
3867       addZeroImm, //  slc
3868       addZeroImm, //  tfe
3869       addZeroImm, //  dlc
3870       addZeroImm  //  swz
3871     }};
3872 }
3873 
3874 InstructionSelector::ComplexRendererFns
3875 AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
3876   Register RSrcReg;
3877   Register SOffset;
3878   int64_t Offset = 0;
3879 
3880   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
3881     return {};
3882 
3883   return {{
3884       [=](MachineInstrBuilder &MIB) {  // rsrc
3885         MIB.addReg(RSrcReg);
3886       },
3887       [=](MachineInstrBuilder &MIB) { // soffset
3888         if (SOffset)
3889           MIB.addReg(SOffset);
3890         else
3891           MIB.addImm(0);
3892       },
3893       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
3894       addZeroImm, //  glc
3895       addZeroImm, //  slc
3896       addZeroImm, //  tfe
3897       addZeroImm, //  dlc
3898       addZeroImm  //  swz
3899     }};
3900 }
3901 
3902 InstructionSelector::ComplexRendererFns
3903 AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const {
3904   Register VAddr;
3905   Register RSrcReg;
3906   Register SOffset;
3907   int64_t Offset = 0;
3908 
3909   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
3910     return {};
3911 
3912   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
3913   // pattern.
3914   return {{
3915       [=](MachineInstrBuilder &MIB) {  // rsrc
3916         MIB.addReg(RSrcReg);
3917       },
3918       [=](MachineInstrBuilder &MIB) { // vaddr
3919         MIB.addReg(VAddr);
3920       },
3921       [=](MachineInstrBuilder &MIB) { // soffset
3922         if (SOffset)
3923           MIB.addReg(SOffset);
3924         else
3925           MIB.addImm(0);
3926       },
3927       [=](MachineInstrBuilder &MIB) { // offset
3928         MIB.addImm(Offset);
3929       },
3930       addZeroImm //  slc
3931     }};
3932 }
3933 
3934 InstructionSelector::ComplexRendererFns
3935 AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
3936   Register RSrcReg;
3937   Register SOffset;
3938   int64_t Offset = 0;
3939 
3940   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
3941     return {};
3942 
3943   return {{
3944       [=](MachineInstrBuilder &MIB) {  // rsrc
3945         MIB.addReg(RSrcReg);
3946       },
3947       [=](MachineInstrBuilder &MIB) { // soffset
3948         if (SOffset)
3949           MIB.addReg(SOffset);
3950         else
3951           MIB.addImm(0);
3952       },
3953       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
3954       addZeroImm //  slc
3955     }};
3956 }
3957 
3958 /// Get an immediate that must be 32-bits, and treated as zero extended.
3959 static Optional<uint64_t> getConstantZext32Val(Register Reg,
3960                                                const MachineRegisterInfo &MRI) {
3961   // getConstantVRegVal sexts any values, so see if that matters.
3962   Optional<int64_t> OffsetVal = getConstantVRegVal(Reg, MRI);
3963   if (!OffsetVal || !isInt<32>(*OffsetVal))
3964     return None;
3965   return Lo_32(*OffsetVal);
3966 }
3967 
3968 InstructionSelector::ComplexRendererFns
3969 AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
3970   Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
3971   if (!OffsetVal)
3972     return {};
3973 
3974   Optional<int64_t> EncodedImm =
3975       AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
3976   if (!EncodedImm)
3977     return {};
3978 
3979   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
3980 }
3981 
3982 InstructionSelector::ComplexRendererFns
3983 AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
3984   assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
3985 
3986   Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
3987   if (!OffsetVal)
3988     return {};
3989 
3990   Optional<int64_t> EncodedImm
3991     = AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
3992   if (!EncodedImm)
3993     return {};
3994 
3995   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
3996 }
3997 
3998 void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
3999                                                  const MachineInstr &MI,
4000                                                  int OpIdx) const {
4001   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4002          "Expected G_CONSTANT");
4003   MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
4004 }
4005 
4006 void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
4007                                                 const MachineInstr &MI,
4008                                                 int OpIdx) const {
4009   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4010          "Expected G_CONSTANT");
4011   MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
4012 }
4013 
4014 void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
4015                                                  const MachineInstr &MI,
4016                                                  int OpIdx) const {
4017   assert(OpIdx == -1);
4018 
4019   const MachineOperand &Op = MI.getOperand(1);
4020   if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
4021     MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
4022   else {
4023     assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
4024     MIB.addImm(Op.getCImm()->getSExtValue());
4025   }
4026 }
4027 
4028 void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
4029                                                 const MachineInstr &MI,
4030                                                 int OpIdx) const {
4031   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4032          "Expected G_CONSTANT");
4033   MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
4034 }
4035 
4036 /// This only really exists to satisfy DAG type checking machinery, so is a
4037 /// no-op here.
4038 void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
4039                                                 const MachineInstr &MI,
4040                                                 int OpIdx) const {
4041   MIB.addImm(MI.getOperand(OpIdx).getImm());
4042 }
4043 
4044 void AMDGPUInstructionSelector::renderExtractGLC(MachineInstrBuilder &MIB,
4045                                                  const MachineInstr &MI,
4046                                                  int OpIdx) const {
4047   assert(OpIdx >= 0 && "expected to match an immediate operand");
4048   MIB.addImm(MI.getOperand(OpIdx).getImm() & 1);
4049 }
4050 
4051 void AMDGPUInstructionSelector::renderExtractSLC(MachineInstrBuilder &MIB,
4052                                                  const MachineInstr &MI,
4053                                                  int OpIdx) const {
4054   assert(OpIdx >= 0 && "expected to match an immediate operand");
4055   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 1) & 1);
4056 }
4057 
4058 void AMDGPUInstructionSelector::renderExtractDLC(MachineInstrBuilder &MIB,
4059                                                  const MachineInstr &MI,
4060                                                  int OpIdx) const {
4061   assert(OpIdx >= 0 && "expected to match an immediate operand");
4062   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 2) & 1);
4063 }
4064 
4065 void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
4066                                                  const MachineInstr &MI,
4067                                                  int OpIdx) const {
4068   assert(OpIdx >= 0 && "expected to match an immediate operand");
4069   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1);
4070 }
4071 
4072 void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
4073                                                  const MachineInstr &MI,
4074                                                  int OpIdx) const {
4075   MIB.addFrameIndex((MI.getOperand(1).getIndex()));
4076 }
4077 
4078 bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
4079   return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
4080 }
4081 
4082 bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
4083   return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
4084 }
4085 
4086 bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
4087   return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
4088 }
4089 
4090 bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
4091   return TII.isInlineConstant(Imm);
4092 }
4093