1 //===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the InstructionSelector class for
10 /// AMDGPU.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13 
14 #include "AMDGPUInstructionSelector.h"
15 #include "AMDGPUInstrInfo.h"
16 #include "AMDGPUGlobalISelUtils.h"
17 #include "AMDGPURegisterBankInfo.h"
18 #include "AMDGPUSubtarget.h"
19 #include "AMDGPUTargetMachine.h"
20 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
21 #include "SIMachineFunctionInfo.h"
22 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
23 #include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
24 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
25 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
26 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
27 #include "llvm/CodeGen/GlobalISel/Utils.h"
28 #include "llvm/CodeGen/MachineBasicBlock.h"
29 #include "llvm/CodeGen/MachineFunction.h"
30 #include "llvm/CodeGen/MachineInstr.h"
31 #include "llvm/CodeGen/MachineInstrBuilder.h"
32 #include "llvm/CodeGen/MachineRegisterInfo.h"
33 #include "llvm/IR/DiagnosticInfo.h"
34 #include "llvm/IR/Type.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/Support/raw_ostream.h"
37 
38 #define DEBUG_TYPE "amdgpu-isel"
39 
40 using namespace llvm;
41 using namespace MIPatternMatch;
42 
43 static cl::opt<bool> AllowRiskySelect(
44   "amdgpu-global-isel-risky-select",
45   cl::desc("Allow GlobalISel to select cases that are likely to not work yet"),
46   cl::init(false),
47   cl::ReallyHidden);
48 
49 #define GET_GLOBALISEL_IMPL
50 #define AMDGPUSubtarget GCNSubtarget
51 #include "AMDGPUGenGlobalISel.inc"
52 #undef GET_GLOBALISEL_IMPL
53 #undef AMDGPUSubtarget
54 
55 AMDGPUInstructionSelector::AMDGPUInstructionSelector(
56     const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
57     const AMDGPUTargetMachine &TM)
58     : InstructionSelector(), TII(*STI.getInstrInfo()),
59       TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
60       STI(STI),
61       EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
62 #define GET_GLOBALISEL_PREDICATES_INIT
63 #include "AMDGPUGenGlobalISel.inc"
64 #undef GET_GLOBALISEL_PREDICATES_INIT
65 #define GET_GLOBALISEL_TEMPORARIES_INIT
66 #include "AMDGPUGenGlobalISel.inc"
67 #undef GET_GLOBALISEL_TEMPORARIES_INIT
68 {
69 }
70 
71 const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
72 
73 void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits &KB,
74                                         CodeGenCoverage &CoverageInfo) {
75   MRI = &MF.getRegInfo();
76   Subtarget = &MF.getSubtarget<GCNSubtarget>();
77   InstructionSelector::setupMF(MF, KB, CoverageInfo);
78 }
79 
80 bool AMDGPUInstructionSelector::isVCC(Register Reg,
81                                       const MachineRegisterInfo &MRI) const {
82   // The verifier is oblivious to s1 being a valid value for wavesize registers.
83   if (Reg.isPhysical())
84     return false;
85 
86   auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
87   const TargetRegisterClass *RC =
88       RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
89   if (RC) {
90     const LLT Ty = MRI.getType(Reg);
91     return RC->hasSuperClassEq(TRI.getBoolRC()) &&
92            Ty.isValid() && Ty.getSizeInBits() == 1;
93   }
94 
95   const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
96   return RB->getID() == AMDGPU::VCCRegBankID;
97 }
98 
99 bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
100                                                         unsigned NewOpc) const {
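  // Rewrite the copy-like intrinsic (e.g. wqm/softwqm/wwm) into NewOpc,
  // dropping the intrinsic ID operand and adding an implicit use of EXEC.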
101   MI.setDesc(TII.get(NewOpc));
102   MI.RemoveOperand(1); // Remove intrinsic ID.
103   MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
104 
105   MachineOperand &Dst = MI.getOperand(0);
106   MachineOperand &Src = MI.getOperand(1);
107 
108   // TODO: This should be legalized to s32 if needed
109   if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
110     return false;
111 
112   const TargetRegisterClass *DstRC
113     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
114   const TargetRegisterClass *SrcRC
115     = TRI.getConstrainedRegClassForOperand(Src, *MRI);
116   if (!DstRC || DstRC != SrcRC)
117     return false;
118 
119   return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
120          RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
121 }
122 
123 bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
124   const DebugLoc &DL = I.getDebugLoc();
125   MachineBasicBlock *BB = I.getParent();
126   I.setDesc(TII.get(TargetOpcode::COPY));
127 
128   const MachineOperand &Src = I.getOperand(1);
129   MachineOperand &Dst = I.getOperand(0);
130   Register DstReg = Dst.getReg();
131   Register SrcReg = Src.getReg();
132 
133   if (isVCC(DstReg, *MRI)) {
134     if (SrcReg == AMDGPU::SCC) {
135       const TargetRegisterClass *RC
136         = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
137       if (!RC)
138         return true;
139       return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
140     }
141 
142     if (!isVCC(SrcReg, *MRI)) {
143       // TODO: Should probably leave the copy and let copyPhysReg expand it.
144       if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
145         return false;
146 
147       const TargetRegisterClass *SrcRC
148         = TRI.getConstrainedRegClassForOperand(Src, *MRI);
149 
150       Register MaskedReg = MRI->createVirtualRegister(SrcRC);
151 
152       // We can't trust the high bits at this point, so clear them.
153 
154       // TODO: Skip masking high bits if def is known boolean.
155 
156       unsigned AndOpc = TRI.isSGPRClass(SrcRC) ?
157         AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
158       BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
159         .addImm(1)
160         .addReg(SrcReg);
161       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
162         .addImm(0)
163         .addReg(MaskedReg);
164 
165       if (!MRI->getRegClassOrNull(SrcReg))
166         MRI->setRegClass(SrcReg, SrcRC);
167       I.eraseFromParent();
168       return true;
169     }
170 
171     const TargetRegisterClass *RC =
172       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
173     if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
174       return false;
175 
176     return true;
177   }
178 
179   for (const MachineOperand &MO : I.operands()) {
180     if (MO.getReg().isPhysical())
181       continue;
182 
183     const TargetRegisterClass *RC =
184             TRI.getConstrainedRegClassForOperand(MO, *MRI);
185     if (!RC)
186       continue;
187     RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
188   }
189   return true;
190 }
191 
192 bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
193   const Register DefReg = I.getOperand(0).getReg();
194   const LLT DefTy = MRI->getType(DefReg);
195   if (DefTy == LLT::scalar(1)) {
196     if (!AllowRiskySelect) {
197       LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n");
198       return false;
199     }
200 
201     LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n");
202   }
203 
204   // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)
205 
206   const RegClassOrRegBank &RegClassOrBank =
207     MRI->getRegClassOrRegBank(DefReg);
208 
209   const TargetRegisterClass *DefRC
210     = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
211   if (!DefRC) {
212     if (!DefTy.isValid()) {
213       LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
214       return false;
215     }
216 
217     const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
218     DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB, *MRI);
219     if (!DefRC) {
220       LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
221       return false;
222     }
223   }
224 
225   // TODO: Verify that all registers have the same bank
226   I.setDesc(TII.get(TargetOpcode::PHI));
227   return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
228 }
229 
230 MachineOperand
231 AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
232                                            const TargetRegisterClass &SubRC,
233                                            unsigned SubIdx) const {
234 
235   MachineInstr *MI = MO.getParent();
236   MachineBasicBlock *BB = MO.getParent()->getParent();
237   Register DstReg = MRI->createVirtualRegister(&SubRC);
238 
239   if (MO.isReg()) {
240     unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
241     Register Reg = MO.getReg();
242     BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
243             .addReg(Reg, 0, ComposedSubIdx);
244 
245     return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
246                                      MO.isKill(), MO.isDead(), MO.isUndef(),
247                                      MO.isEarlyClobber(), 0, MO.isDebug(),
248                                      MO.isInternalRead());
249   }
250 
251   assert(MO.isImm());
252 
253   APInt Imm(64, MO.getImm());
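  // The switch below returns bits [31:0] of the immediate for sub0 and bits
  // [63:32] for sub1; e.g. 0x1111111122222222 splits into 0x22222222 and
  // 0x11111111.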
254 
255   switch (SubIdx) {
256   default:
257     llvm_unreachable("do not know to split immediate with this sub index.");
258   case AMDGPU::sub0:
259     return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
260   case AMDGPU::sub1:
261     return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
262   }
263 }
264 
265 static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
266   switch (Opc) {
267   case AMDGPU::G_AND:
268     return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
269   case AMDGPU::G_OR:
270     return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
271   case AMDGPU::G_XOR:
272     return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
273   default:
274     llvm_unreachable("not a bit op");
275   }
276 }
277 
278 bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
279   Register DstReg = I.getOperand(0).getReg();
280   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
281 
282   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
283   if (DstRB->getID() != AMDGPU::SGPRRegBankID &&
284       DstRB->getID() != AMDGPU::VCCRegBankID)
285     return false;
286 
287   bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID &&
288                             STI.isWave64());
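  // A boolean on the VCC bank is a full wave mask, so it also needs the
  // 64-bit opcode on wave64 even though the IR type is only s1.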
289   I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64)));
290 
291   // Dead implicit-def of scc
292   I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
293                                          true, // isImp
294                                          false, // isKill
295                                          true)); // isDead
296   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
297 }
298 
299 bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
300   MachineBasicBlock *BB = I.getParent();
301   MachineFunction *MF = BB->getParent();
302   Register DstReg = I.getOperand(0).getReg();
303   const DebugLoc &DL = I.getDebugLoc();
304   LLT Ty = MRI->getType(DstReg);
305   if (Ty.isVector())
306     return false;
307 
308   unsigned Size = Ty.getSizeInBits();
309   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
310   const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
311   const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;
312 
313   if (Size == 32) {
314     if (IsSALU) {
315       const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
316       MachineInstr *Add =
317         BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
318         .add(I.getOperand(1))
319         .add(I.getOperand(2));
320       I.eraseFromParent();
321       return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
322     }
323 
324     if (STI.hasAddNoCarry()) {
325       const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
326       I.setDesc(TII.get(Opc));
327       I.addOperand(*MF, MachineOperand::CreateImm(0));
328       I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
329       return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
330     }
331 
    const unsigned Opc = Sub ? AMDGPU::V_SUB_CO_U32_e64
                             : AMDGPU::V_ADD_CO_U32_e64;
333 
334     Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
335     MachineInstr *Add
336       = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
337       .addDef(UnusedCarry, RegState::Dead)
338       .add(I.getOperand(1))
339       .add(I.getOperand(2))
340       .addImm(0);
341     I.eraseFromParent();
342     return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
343   }
344 
345   assert(!Sub && "illegal sub should not reach here");
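  // The remaining case is a 64-bit add: split both operands into 32-bit
  // halves, add the low halves, chain the carry into the high halves
  // (S_ADD_U32/S_ADDC_U32 on the SALU path, V_ADD_CO_U32/V_ADDC_U32 on the
  // VALU path), then recombine the result with a REG_SEQUENCE.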
346 
347   const TargetRegisterClass &RC
348     = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
349   const TargetRegisterClass &HalfRC
350     = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;
351 
352   MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
353   MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
354   MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
355   MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));
356 
357   Register DstLo = MRI->createVirtualRegister(&HalfRC);
358   Register DstHi = MRI->createVirtualRegister(&HalfRC);
359 
360   if (IsSALU) {
361     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
362       .add(Lo1)
363       .add(Lo2);
364     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
365       .add(Hi1)
366       .add(Hi2);
367   } else {
368     const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
369     Register CarryReg = MRI->createVirtualRegister(CarryRC);
370     BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo)
371       .addDef(CarryReg)
372       .add(Lo1)
373       .add(Lo2)
374       .addImm(0);
375     MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
376       .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
377       .add(Hi1)
378       .add(Hi2)
379       .addReg(CarryReg, RegState::Kill)
380       .addImm(0);
381 
382     if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
383       return false;
384   }
385 
386   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
387     .addReg(DstLo)
388     .addImm(AMDGPU::sub0)
389     .addReg(DstHi)
    .addImm(AMDGPU::sub1);

393   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
394     return false;
395 
396   I.eraseFromParent();
397   return true;
398 }
399 
400 bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
401   MachineInstr &I) const {
402   MachineBasicBlock *BB = I.getParent();
403   MachineFunction *MF = BB->getParent();
404   const DebugLoc &DL = I.getDebugLoc();
405   Register Dst0Reg = I.getOperand(0).getReg();
406   Register Dst1Reg = I.getOperand(1).getReg();
407   const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
408                      I.getOpcode() == AMDGPU::G_UADDE;
409   const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
410                           I.getOpcode() == AMDGPU::G_USUBE;
411 
412   if (isVCC(Dst1Reg, *MRI)) {
413     unsigned NoCarryOpc =
414         IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
415     unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
416     I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
417     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
418     I.addOperand(*MF, MachineOperand::CreateImm(0));
419     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
420   }
421 
422   Register Src0Reg = I.getOperand(2).getReg();
423   Register Src1Reg = I.getOperand(3).getReg();
424 
425   if (HasCarryIn) {
426     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
427       .addReg(I.getOperand(4).getReg());
428   }
429 
430   unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
431   unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
432 
433   BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
434     .add(I.getOperand(2))
435     .add(I.getOperand(3));
436   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
437     .addReg(AMDGPU::SCC);
438 
439   if (!MRI->getRegClassOrNull(Dst1Reg))
440     MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);
441 
442   if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
443       !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
444       !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
445     return false;
446 
447   if (HasCarryIn &&
448       !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
449                                     AMDGPU::SReg_32RegClass, *MRI))
450     return false;
451 
452   I.eraseFromParent();
453   return true;
454 }
455 
// TODO: We should probably legalize these to use only 32-bit results.
457 bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
458   MachineBasicBlock *BB = I.getParent();
459   Register DstReg = I.getOperand(0).getReg();
460   Register SrcReg = I.getOperand(1).getReg();
461   LLT DstTy = MRI->getType(DstReg);
462   LLT SrcTy = MRI->getType(SrcReg);
463   const unsigned SrcSize = SrcTy.getSizeInBits();
464   unsigned DstSize = DstTy.getSizeInBits();
465 
466   // TODO: Should handle any multiple of 32 offset.
467   unsigned Offset = I.getOperand(2).getImm();
468   if (Offset % 32 != 0 || DstSize > 128)
469     return false;
470 
471   // 16-bit operations really use 32-bit registers.
472   // FIXME: Probably should not allow 16-bit G_EXTRACT results.
473   if (DstSize == 16)
474     DstSize = 32;
475 
476   const TargetRegisterClass *DstRC =
477     TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
478   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
479     return false;
480 
481   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
482   const TargetRegisterClass *SrcRC =
483     TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
484   if (!SrcRC)
485     return false;
486   unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
487                                                          DstSize / 32);
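  // E.g. extracting 32 bits starting at bit offset 32 selects the sub1 index.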
488   SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
489   if (!SrcRC)
490     return false;
491 
492   SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
493                                     *SrcRC, I.getOperand(1));
494   const DebugLoc &DL = I.getDebugLoc();
495   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
496     .addReg(SrcReg, 0, SubReg);
497 
498   I.eraseFromParent();
499   return true;
500 }
501 
502 bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
503   MachineBasicBlock *BB = MI.getParent();
504   Register DstReg = MI.getOperand(0).getReg();
505   LLT DstTy = MRI->getType(DstReg);
506   LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());
507 
508   const unsigned SrcSize = SrcTy.getSizeInBits();
509   if (SrcSize < 32)
510     return selectImpl(MI, *CoverageInfo);
511 
512   const DebugLoc &DL = MI.getDebugLoc();
513   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
514   const unsigned DstSize = DstTy.getSizeInBits();
515   const TargetRegisterClass *DstRC =
516     TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
517   if (!DstRC)
518     return false;
519 
520   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
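  // Build the wide value with a REG_SEQUENCE; e.g. merging two s32 sources
  // into an s64 becomes REG_SEQUENCE %dst, %src0, sub0, %src1, sub1.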
521   MachineInstrBuilder MIB =
522     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
523   for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
524     MachineOperand &Src = MI.getOperand(I + 1);
525     MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
526     MIB.addImm(SubRegs[I]);
527 
528     const TargetRegisterClass *SrcRC
529       = TRI.getConstrainedRegClassForOperand(Src, *MRI);
530     if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
531       return false;
532   }
533 
534   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
535     return false;
536 
537   MI.eraseFromParent();
538   return true;
539 }
540 
541 bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
542   MachineBasicBlock *BB = MI.getParent();
543   const int NumDst = MI.getNumOperands() - 1;
544 
545   MachineOperand &Src = MI.getOperand(NumDst);
546 
547   Register SrcReg = Src.getReg();
548   Register DstReg0 = MI.getOperand(0).getReg();
549   LLT DstTy = MRI->getType(DstReg0);
550   LLT SrcTy = MRI->getType(SrcReg);
551 
552   const unsigned DstSize = DstTy.getSizeInBits();
553   const unsigned SrcSize = SrcTy.getSizeInBits();
554   const DebugLoc &DL = MI.getDebugLoc();
555   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
556 
557   const TargetRegisterClass *SrcRC =
558     TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
559   if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
560     return false;
561 
562   // Note we could have mixed SGPR and VGPR destination banks for an SGPR
563   // source, and this relies on the fact that the same subregister indices are
564   // used for both.
565   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
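  // Each destination becomes a subregister copy of the source; e.g. unmerging
  // an s64 into two s32 values copies out sub0 and sub1.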
566   for (int I = 0, E = NumDst; I != E; ++I) {
567     MachineOperand &Dst = MI.getOperand(I);
568     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
569       .addReg(SrcReg, 0, SubRegs[I]);
570 
571     // Make sure the subregister index is valid for the source register.
572     SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]);
573     if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
574       return false;
575 
576     const TargetRegisterClass *DstRC =
577       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
578     if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
579       return false;
580   }
581 
582   MI.eraseFromParent();
583   return true;
584 }
585 
586 bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC(
587   MachineInstr &MI) const {
588   if (selectImpl(MI, *CoverageInfo))
589     return true;
590 
591   const LLT S32 = LLT::scalar(32);
592   const LLT V2S16 = LLT::vector(2, 16);
593 
594   Register Dst = MI.getOperand(0).getReg();
595   if (MRI->getType(Dst) != V2S16)
596     return false;
597 
598   const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
599   if (DstBank->getID() != AMDGPU::SGPRRegBankID)
600     return false;
601 
602   Register Src0 = MI.getOperand(1).getReg();
603   Register Src1 = MI.getOperand(2).getReg();
604   if (MRI->getType(Src0) != S32)
605     return false;
606 
607   const DebugLoc &DL = MI.getDebugLoc();
608   MachineBasicBlock *BB = MI.getParent();
609 
610   auto ConstSrc1 = getConstantVRegValWithLookThrough(Src1, *MRI, true, true);
611   if (ConstSrc1) {
612     auto ConstSrc0 = getConstantVRegValWithLookThrough(Src0, *MRI, true, true);
613     if (ConstSrc0) {
614       uint32_t Lo16 = static_cast<uint32_t>(ConstSrc0->Value) & 0xffff;
615       uint32_t Hi16 = static_cast<uint32_t>(ConstSrc1->Value) & 0xffff;
616 
617       BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst)
618         .addImm(Lo16 | (Hi16 << 16));
619       MI.eraseFromParent();
620       return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI);
621     }
622   }
623 
624   // TODO: This should probably be a combine somewhere
  // (build_vector_trunc $src0, undef) -> (copy $src0)
626   MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
627   if (Src1Def && Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
628     MI.setDesc(TII.get(AMDGPU::COPY));
629     MI.RemoveOperand(2);
630     return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI) &&
631            RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI);
632   }
633 
634   Register ShiftSrc0;
635   Register ShiftSrc1;
636   int64_t ShiftAmt;
637 
638   // With multiple uses of the shift, this will duplicate the shift and
639   // increase register pressure.
640   //
  // (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16))
642   //  => (S_PACK_HH_B32_B16 $src0, $src1)
643   // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16))
644   //  => (S_PACK_LH_B32_B16 $src0, $src1)
645   // (build_vector_trunc $src0, $src1)
646   //  => (S_PACK_LL_B32_B16 $src0, $src1)
647 
648   // FIXME: This is an inconvenient way to check a specific value
649   bool Shift0 = mi_match(
650     Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_ICst(ShiftAmt)))) &&
651     ShiftAmt == 16;
652 
653   bool Shift1 = mi_match(
654     Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_ICst(ShiftAmt)))) &&
655     ShiftAmt == 16;
656 
657   unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
658   if (Shift0 && Shift1) {
659     Opc = AMDGPU::S_PACK_HH_B32_B16;
660     MI.getOperand(1).setReg(ShiftSrc0);
661     MI.getOperand(2).setReg(ShiftSrc1);
662   } else if (Shift1) {
663     Opc = AMDGPU::S_PACK_LH_B32_B16;
664     MI.getOperand(2).setReg(ShiftSrc1);
665   } else if (Shift0 && ConstSrc1 && ConstSrc1->Value == 0) {
666     // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16
667     auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)
668       .addReg(ShiftSrc0)
669       .addImm(16);
670 
671     MI.eraseFromParent();
672     return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
673   }
674 
675   MI.setDesc(TII.get(Opc));
676   return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
677 }
678 
679 bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
680   return selectG_ADD_SUB(I);
681 }
682 
683 bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
684   const MachineOperand &MO = I.getOperand(0);
685 
686   // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
687   // regbank check here is to know why getConstrainedRegClassForOperand failed.
688   const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
689   if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
690       (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
691     I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
692     return true;
693   }
694 
695   return false;
696 }
697 
698 bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
699   MachineBasicBlock *BB = I.getParent();
700 
701   Register DstReg = I.getOperand(0).getReg();
702   Register Src0Reg = I.getOperand(1).getReg();
703   Register Src1Reg = I.getOperand(2).getReg();
704   LLT Src1Ty = MRI->getType(Src1Reg);
705 
706   unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
707   unsigned InsSize = Src1Ty.getSizeInBits();
708 
709   int64_t Offset = I.getOperand(3).getImm();
710 
711   // FIXME: These cases should have been illegal and unnecessary to check here.
712   if (Offset % 32 != 0 || InsSize % 32 != 0)
713     return false;
714 
715   // Currently not handled by getSubRegFromChannel.
716   if (InsSize > 128)
717     return false;
718 
719   unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
720   if (SubReg == AMDGPU::NoSubRegister)
721     return false;
722 
723   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
724   const TargetRegisterClass *DstRC =
725     TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
726   if (!DstRC)
727     return false;
728 
729   const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
730   const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
731   const TargetRegisterClass *Src0RC =
732     TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank, *MRI);
733   const TargetRegisterClass *Src1RC =
734     TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank, *MRI);
735 
736   // Deal with weird cases where the class only partially supports the subreg
737   // index.
738   Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
739   if (!Src0RC || !Src1RC)
740     return false;
741 
742   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
743       !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
744       !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
745     return false;
746 
747   const DebugLoc &DL = I.getDebugLoc();
748   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
749     .addReg(Src0Reg)
750     .addReg(Src1Reg)
751     .addImm(SubReg);
752 
753   I.eraseFromParent();
754   return true;
755 }
756 
757 bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
758   if (STI.getLDSBankCount() != 16)
759     return selectImpl(MI, *CoverageInfo);
760 
761   Register Dst = MI.getOperand(0).getReg();
762   Register Src0 = MI.getOperand(2).getReg();
763   Register M0Val = MI.getOperand(6).getReg();
764   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
765       !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
766       !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
767     return false;
768 
769   // This requires 2 instructions. It is possible to write a pattern to support
770   // this, but the generated isel emitter doesn't correctly deal with multiple
771   // output instructions using the same physical register input. The copy to m0
772   // is incorrectly placed before the second instruction.
773   //
774   // TODO: Match source modifiers.
775 
776   Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
777   const DebugLoc &DL = MI.getDebugLoc();
778   MachineBasicBlock *MBB = MI.getParent();
779 
780   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
781     .addReg(M0Val);
782   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
783     .addImm(2)
784     .addImm(MI.getOperand(4).getImm())  // $attr
785     .addImm(MI.getOperand(3).getImm()); // $attrchan
786 
787   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
788     .addImm(0)                          // $src0_modifiers
789     .addReg(Src0)                       // $src0
790     .addImm(MI.getOperand(4).getImm())  // $attr
791     .addImm(MI.getOperand(3).getImm())  // $attrchan
792     .addImm(0)                          // $src2_modifiers
793     .addReg(InterpMov)                  // $src2 - 2 f16 values selected by high
794     .addImm(MI.getOperand(5).getImm())  // $high
795     .addImm(0)                          // $clamp
796     .addImm(0);                         // $omod
797 
798   MI.eraseFromParent();
799   return true;
800 }
801 
802 // Writelane is special in that it can use SGPR and M0 (which would normally
803 // count as using the constant bus twice - but in this case it is allowed since
804 // the lane selector doesn't count as a use of the constant bus). However, it is
805 // still required to abide by the 1 SGPR rule. Fix this up if we might have
806 // multiple SGPRs.
807 bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const {
808   // With a constant bus limit of at least 2, there's no issue.
809   if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1)
810     return selectImpl(MI, *CoverageInfo);
811 
812   MachineBasicBlock *MBB = MI.getParent();
813   const DebugLoc &DL = MI.getDebugLoc();
814   Register VDst = MI.getOperand(0).getReg();
815   Register Val = MI.getOperand(2).getReg();
816   Register LaneSelect = MI.getOperand(3).getReg();
817   Register VDstIn = MI.getOperand(4).getReg();
818 
819   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst);
820 
821   Optional<ValueAndVReg> ConstSelect =
822     getConstantVRegValWithLookThrough(LaneSelect, *MRI, true, true);
823   if (ConstSelect) {
824     // The selector has to be an inline immediate, so we can use whatever for
825     // the other operands.
826     MIB.addReg(Val);
827     MIB.addImm(ConstSelect->Value &
828                maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
829   } else {
830     Optional<ValueAndVReg> ConstVal =
831       getConstantVRegValWithLookThrough(Val, *MRI, true, true);
832 
833     // If the value written is an inline immediate, we can get away without a
834     // copy to m0.
835     if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value,
836                                                  STI.hasInv2PiInlineImm())) {
837       MIB.addImm(ConstVal->Value);
838       MIB.addReg(LaneSelect);
839     } else {
840       MIB.addReg(Val);
841 
842       // If the lane selector was originally in a VGPR and copied with
843       // readfirstlane, there's a hazard to read the same SGPR from the
844       // VALU. Constrain to a different SGPR to help avoid needing a nop later.
845       RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI);
846 
847       BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
848         .addReg(LaneSelect);
849       MIB.addReg(AMDGPU::M0);
850     }
851   }
852 
853   MIB.addReg(VDstIn);
854 
855   MI.eraseFromParent();
856   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
857 }
858 
859 // We need to handle this here because tablegen doesn't support matching
860 // instructions with multiple outputs.
861 bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
862   Register Dst0 = MI.getOperand(0).getReg();
863   Register Dst1 = MI.getOperand(1).getReg();
864 
865   LLT Ty = MRI->getType(Dst0);
866   unsigned Opc;
867   if (Ty == LLT::scalar(32))
868     Opc = AMDGPU::V_DIV_SCALE_F32;
869   else if (Ty == LLT::scalar(64))
870     Opc = AMDGPU::V_DIV_SCALE_F64;
871   else
872     return false;
873 
874   const DebugLoc &DL = MI.getDebugLoc();
875   MachineBasicBlock *MBB = MI.getParent();
876 
877   Register Numer = MI.getOperand(3).getReg();
878   Register Denom = MI.getOperand(4).getReg();
879   unsigned ChooseDenom = MI.getOperand(5).getImm();
880 
881   Register Src0 = ChooseDenom != 0 ? Numer : Denom;
882 
883   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0)
884     .addDef(Dst1)
885     .addUse(Src0)
886     .addUse(Denom)
887     .addUse(Numer);
888 
889   MI.eraseFromParent();
890   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
891 }
892 
893 bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
894   unsigned IntrinsicID = I.getIntrinsicID();
895   switch (IntrinsicID) {
896   case Intrinsic::amdgcn_if_break: {
897     MachineBasicBlock *BB = I.getParent();
898 
    // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
900     // SelectionDAG uses for wave32 vs wave64.
901     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
902       .add(I.getOperand(0))
903       .add(I.getOperand(2))
904       .add(I.getOperand(3));
905 
906     Register DstReg = I.getOperand(0).getReg();
907     Register Src0Reg = I.getOperand(2).getReg();
908     Register Src1Reg = I.getOperand(3).getReg();
909 
910     I.eraseFromParent();
911 
912     for (Register Reg : { DstReg, Src0Reg, Src1Reg })
913       MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
914 
915     return true;
916   }
917   case Intrinsic::amdgcn_interp_p1_f16:
918     return selectInterpP1F16(I);
919   case Intrinsic::amdgcn_wqm:
920     return constrainCopyLikeIntrin(I, AMDGPU::WQM);
921   case Intrinsic::amdgcn_softwqm:
922     return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
923   case Intrinsic::amdgcn_wwm:
924     return constrainCopyLikeIntrin(I, AMDGPU::WWM);
925   case Intrinsic::amdgcn_writelane:
926     return selectWritelane(I);
927   case Intrinsic::amdgcn_div_scale:
928     return selectDivScale(I);
929   case Intrinsic::amdgcn_icmp:
930     return selectIntrinsicIcmp(I);
931   case Intrinsic::amdgcn_ballot:
932     return selectBallot(I);
933   case Intrinsic::amdgcn_reloc_constant:
934     return selectRelocConstant(I);
935   case Intrinsic::amdgcn_groupstaticsize:
936     return selectGroupStaticSize(I);
937   case Intrinsic::returnaddress:
938     return selectReturnAddress(I);
939   default:
940     return selectImpl(I, *CoverageInfo);
941   }
942 }
943 
944 static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
945   if (Size != 32 && Size != 64)
946     return -1;
947   switch (P) {
948   default:
949     llvm_unreachable("Unknown condition code!");
950   case CmpInst::ICMP_NE:
951     return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
952   case CmpInst::ICMP_EQ:
953     return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
954   case CmpInst::ICMP_SGT:
955     return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
956   case CmpInst::ICMP_SGE:
957     return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
958   case CmpInst::ICMP_SLT:
959     return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
960   case CmpInst::ICMP_SLE:
961     return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
962   case CmpInst::ICMP_UGT:
963     return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
964   case CmpInst::ICMP_UGE:
965     return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
966   case CmpInst::ICMP_ULT:
967     return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
968   case CmpInst::ICMP_ULE:
969     return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
970   }
971 }
972 
973 int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
974                                               unsigned Size) const {
975   if (Size == 64) {
976     if (!STI.hasScalarCompareEq64())
977       return -1;
978 
979     switch (P) {
980     case CmpInst::ICMP_NE:
981       return AMDGPU::S_CMP_LG_U64;
982     case CmpInst::ICMP_EQ:
983       return AMDGPU::S_CMP_EQ_U64;
984     default:
985       return -1;
986     }
987   }
988 
989   if (Size != 32)
990     return -1;
991 
992   switch (P) {
993   case CmpInst::ICMP_NE:
994     return AMDGPU::S_CMP_LG_U32;
995   case CmpInst::ICMP_EQ:
996     return AMDGPU::S_CMP_EQ_U32;
997   case CmpInst::ICMP_SGT:
998     return AMDGPU::S_CMP_GT_I32;
999   case CmpInst::ICMP_SGE:
1000     return AMDGPU::S_CMP_GE_I32;
1001   case CmpInst::ICMP_SLT:
1002     return AMDGPU::S_CMP_LT_I32;
1003   case CmpInst::ICMP_SLE:
1004     return AMDGPU::S_CMP_LE_I32;
1005   case CmpInst::ICMP_UGT:
1006     return AMDGPU::S_CMP_GT_U32;
1007   case CmpInst::ICMP_UGE:
1008     return AMDGPU::S_CMP_GE_U32;
1009   case CmpInst::ICMP_ULT:
1010     return AMDGPU::S_CMP_LT_U32;
1011   case CmpInst::ICMP_ULE:
1012     return AMDGPU::S_CMP_LE_U32;
1013   default:
1014     llvm_unreachable("Unknown condition code!");
1015   }
1016 }
1017 
1018 bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
1019   MachineBasicBlock *BB = I.getParent();
1020   const DebugLoc &DL = I.getDebugLoc();
1021 
1022   Register SrcReg = I.getOperand(2).getReg();
1023   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1024 
1025   auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
1026 
1027   Register CCReg = I.getOperand(0).getReg();
1028   if (!isVCC(CCReg, *MRI)) {
1029     int Opcode = getS_CMPOpcode(Pred, Size);
1030     if (Opcode == -1)
1031       return false;
1032     MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
1033             .add(I.getOperand(2))
1034             .add(I.getOperand(3));
1035     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
1036       .addReg(AMDGPU::SCC);
1037     bool Ret =
1038         constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
1039         RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
1040     I.eraseFromParent();
1041     return Ret;
1042   }
1043 
1044   int Opcode = getV_CMPOpcode(Pred, Size);
1045   if (Opcode == -1)
1046     return false;
1047 
1048   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
1049             I.getOperand(0).getReg())
1050             .add(I.getOperand(2))
1051             .add(I.getOperand(3));
1052   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
1053                                *TRI.getBoolRC(), *MRI);
1054   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1055   I.eraseFromParent();
1056   return Ret;
1057 }
1058 
1059 bool AMDGPUInstructionSelector::selectIntrinsicIcmp(MachineInstr &I) const {
1060   Register Dst = I.getOperand(0).getReg();
1061   if (isVCC(Dst, *MRI))
1062     return false;
1063 
1064   if (MRI->getType(Dst).getSizeInBits() != STI.getWavefrontSize())
1065     return false;
1066 
1067   MachineBasicBlock *BB = I.getParent();
1068   const DebugLoc &DL = I.getDebugLoc();
1069   Register SrcReg = I.getOperand(2).getReg();
1070   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1071   auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm());
1072 
1073   int Opcode = getV_CMPOpcode(Pred, Size);
1074   if (Opcode == -1)
1075     return false;
1076 
1077   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst)
1078                            .add(I.getOperand(2))
1079                            .add(I.getOperand(3));
1080   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), *TRI.getBoolRC(),
1081                                *MRI);
1082   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1083   I.eraseFromParent();
1084   return Ret;
1085 }
1086 
1087 bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
1088   MachineBasicBlock *BB = I.getParent();
1089   const DebugLoc &DL = I.getDebugLoc();
1090   Register DstReg = I.getOperand(0).getReg();
1091   const unsigned Size = MRI->getType(DstReg).getSizeInBits();
1092   const bool Is64 = Size == 64;
1093 
1094   if (Size != STI.getWavefrontSize())
1095     return false;
1096 
1097   Optional<ValueAndVReg> Arg =
1098       getConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI, true);
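  // Below: a constant-zero argument folds to 0, a constant -1 (true) folds to
  // a copy of EXEC, any other constant is rejected, and a non-constant
  // argument is simply copied.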
1099 
1100   if (Arg.hasValue()) {
1101     const int64_t Value = Arg.getValue().Value;
1102     if (Value == 0) {
1103       unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
1104       BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
1105     } else if (Value == -1) { // all ones
1106       Register SrcReg = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
1107       BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1108     } else
1109       return false;
1110   } else {
1111     Register SrcReg = I.getOperand(2).getReg();
1112     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1113   }
1114 
1115   I.eraseFromParent();
1116   return true;
1117 }
1118 
1119 bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const {
1120   Register DstReg = I.getOperand(0).getReg();
1121   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
1122   const TargetRegisterClass *DstRC =
1123     TRI.getRegClassForSizeOnBank(32, *DstBank, *MRI);
1124   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
1125     return false;
1126 
1127   const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID;
1128 
1129   Module *M = MF->getFunction().getParent();
1130   const MDNode *Metadata = I.getOperand(2).getMetadata();
1131   auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString();
1132   auto RelocSymbol = cast<GlobalVariable>(
1133     M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext())));
1134 
1135   MachineBasicBlock *BB = I.getParent();
1136   BuildMI(*BB, &I, I.getDebugLoc(),
1137           TII.get(IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg)
1138     .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO);
1139 
1140   I.eraseFromParent();
1141   return true;
1142 }
1143 
1144 bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const {
1145   Triple::OSType OS = MF->getTarget().getTargetTriple().getOS();
1146 
1147   Register DstReg = I.getOperand(0).getReg();
1148   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1149   unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ?
1150     AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1151 
1152   MachineBasicBlock *MBB = I.getParent();
1153   const DebugLoc &DL = I.getDebugLoc();
1154 
1155   auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg);
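  // On AMDHSA and AMDPAL the LDS size is known here and emitted as an
  // immediate; otherwise emit an ABS32_LO relocation against the intrinsic's
  // symbol.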
1156 
1157   if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) {
1158     const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1159     MIB.addImm(MFI->getLDSSize());
1160   } else {
1161     Module *M = MF->getFunction().getParent();
1162     const GlobalValue *GV
1163       = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize);
1164     MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO);
1165   }
1166 
1167   I.eraseFromParent();
1168   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1169 }
1170 
1171 bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const {
1172   MachineBasicBlock *MBB = I.getParent();
1173   MachineFunction &MF = *MBB->getParent();
1174   const DebugLoc &DL = I.getDebugLoc();
1175 
1176   MachineOperand &Dst = I.getOperand(0);
1177   Register DstReg = Dst.getReg();
1178   unsigned Depth = I.getOperand(2).getImm();
1179 
1180   const TargetRegisterClass *RC
1181     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
1182   if (!RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) ||
1183       !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
1184     return false;
1185 
1186   // Check for kernel and shader functions
1187   if (Depth != 0 ||
1188       MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
1189     BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
1190       .addImm(0);
1191     I.eraseFromParent();
1192     return true;
1193   }
1194 
1195   MachineFrameInfo &MFI = MF.getFrameInfo();
1196   // There is a call to @llvm.returnaddress in this function
1197   MFI.setReturnAddressIsTaken(true);
1198 
1199   // Get the return address reg and mark it as an implicit live-in
1200   Register ReturnAddrReg = TRI.getReturnAddressReg(MF);
1201   Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg,
1202                                              AMDGPU::SReg_64RegClass);
1203   BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
1204     .addReg(LiveIn);
1205   I.eraseFromParent();
1206   return true;
1207 }
1208 
1209 bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
  // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
1211   // SelectionDAG uses for wave32 vs wave64.
1212   MachineBasicBlock *BB = MI.getParent();
1213   BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
1214       .add(MI.getOperand(1));
1215 
1216   Register Reg = MI.getOperand(1).getReg();
1217   MI.eraseFromParent();
1218 
1219   if (!MRI->getRegClassOrNull(Reg))
1220     MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
1221   return true;
1222 }
1223 
1224 bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
1225   MachineInstr &MI, Intrinsic::ID IntrID) const {
1226   MachineBasicBlock *MBB = MI.getParent();
1227   MachineFunction *MF = MBB->getParent();
1228   const DebugLoc &DL = MI.getDebugLoc();
1229 
1230   unsigned IndexOperand = MI.getOperand(7).getImm();
1231   bool WaveRelease = MI.getOperand(8).getImm() != 0;
1232   bool WaveDone = MI.getOperand(9).getImm() != 0;
1233 
1234   if (WaveDone && !WaveRelease)
1235     report_fatal_error("ds_ordered_count: wave_done requires wave_release");
1236 
1237   unsigned OrderedCountIndex = IndexOperand & 0x3f;
1238   IndexOperand &= ~0x3f;
1239   unsigned CountDw = 0;
1240 
1241   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
1242     CountDw = (IndexOperand >> 24) & 0xf;
1243     IndexOperand &= ~(0xf << 24);
1244 
1245     if (CountDw < 1 || CountDw > 4) {
1246       report_fatal_error(
1247         "ds_ordered_count: dword count must be between 1 and 4");
1248     }
1249   }
1250 
1251   if (IndexOperand)
1252     report_fatal_error("ds_ordered_count: bad index operand");
1253 
1254   unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
1255   unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF);
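  // The offset operand below packs offset0 (ordered count index << 2) and
  // offset1 (wave_release, wave_done, shader type, instruction, and dword
  // count fields); instruction is 0 for ds_ordered_add and 1 for
  // ds_ordered_swap.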
1256 
1257   unsigned Offset0 = OrderedCountIndex << 2;
1258   unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
1259                      (Instruction << 4);
1260 
1261   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
1262     Offset1 |= (CountDw - 1) << 6;
1263 
1264   unsigned Offset = Offset0 | (Offset1 << 8);
1265 
1266   Register M0Val = MI.getOperand(2).getReg();
1267   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1268     .addReg(M0Val);
1269 
1270   Register DstReg = MI.getOperand(0).getReg();
1271   Register ValReg = MI.getOperand(3).getReg();
1272   MachineInstrBuilder DS =
1273     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
1274       .addReg(ValReg)
1275       .addImm(Offset)
1276       .cloneMemRefs(MI);
1277 
1278   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
1279     return false;
1280 
1281   bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
1282   MI.eraseFromParent();
1283   return Ret;
1284 }
1285 
1286 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
1287   switch (IntrID) {
1288   case Intrinsic::amdgcn_ds_gws_init:
1289     return AMDGPU::DS_GWS_INIT;
1290   case Intrinsic::amdgcn_ds_gws_barrier:
1291     return AMDGPU::DS_GWS_BARRIER;
1292   case Intrinsic::amdgcn_ds_gws_sema_v:
1293     return AMDGPU::DS_GWS_SEMA_V;
1294   case Intrinsic::amdgcn_ds_gws_sema_br:
1295     return AMDGPU::DS_GWS_SEMA_BR;
1296   case Intrinsic::amdgcn_ds_gws_sema_p:
1297     return AMDGPU::DS_GWS_SEMA_P;
1298   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1299     return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
1300   default:
1301     llvm_unreachable("not a gws intrinsic");
1302   }
1303 }
1304 
1305 bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
1306                                                      Intrinsic::ID IID) const {
1307   if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
1308       !STI.hasGWSSemaReleaseAll())
1309     return false;
1310 
1311   // intrinsic ID, vsrc, offset
1312   const bool HasVSrc = MI.getNumOperands() == 3;
1313   assert(HasVSrc || MI.getNumOperands() == 2);
1314 
1315   Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
1316   const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
1317   if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
1318     return false;
1319 
1320   MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1321   assert(OffsetDef);
1322 
1323   unsigned ImmOffset;
1324 
1325   MachineBasicBlock *MBB = MI.getParent();
1326   const DebugLoc &DL = MI.getDebugLoc();
1327 
1328   MachineInstr *Readfirstlane = nullptr;
1329 
1330   // If we legalized the VGPR input, strip out the readfirstlane to analyze the
1331   // incoming offset, in case there's an add of a constant. We'll have to put it
1332   // back later.
1333   if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
1334     Readfirstlane = OffsetDef;
1335     BaseOffset = OffsetDef->getOperand(1).getReg();
1336     OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1337   }
1338 
1339   if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
1340     // If we have a constant offset, try to use the 0 in m0 as the base.
1341     // TODO: Look into changing the default m0 initialization value. If the
1342     // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
1343     // the immediate offset.
1344 
1345     ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
1346     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1347       .addImm(0);
1348   } else {
1349     std::tie(BaseOffset, ImmOffset, OffsetDef)
1350       = AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset);
1351 
1352     if (Readfirstlane) {
1353       // We have the constant offset now, so put the readfirstlane back on the
1354       // variable component.
1355       if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
1356         return false;
1357 
1358       Readfirstlane->getOperand(1).setReg(BaseOffset);
1359       BaseOffset = Readfirstlane->getOperand(0).getReg();
1360     } else {
1361       if (!RBI.constrainGenericRegister(BaseOffset,
1362                                         AMDGPU::SReg_32RegClass, *MRI))
1363         return false;
1364     }
1365 
1366     Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1367     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
1368       .addReg(BaseOffset)
1369       .addImm(16);
1370 
1371     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1372       .addReg(M0Base);
1373   }
1374 
1375   // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
1376   // offset field) % 64. Some versions of the programming guide omit the m0
1377   // part, or claim it's from offset 0.
1378   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));
1379 
1380   if (HasVSrc) {
1381     Register VSrc = MI.getOperand(1).getReg();
1382     MIB.addReg(VSrc);
1383     if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
1384       return false;
1385   }
1386 
1387   MIB.addImm(ImmOffset)
1388      .addImm(-1) // $gds
1389      .cloneMemRefs(MI);
1390 
1391   MI.eraseFromParent();
1392   return true;
1393 }
1394 
1395 bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
1396                                                       bool IsAppend) const {
1397   Register PtrBase = MI.getOperand(2).getReg();
1398   LLT PtrTy = MRI->getType(PtrBase);
1399   bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
1400 
1401   unsigned Offset;
1402   std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));
1403 
1404   // TODO: Should this try to look through readfirstlane like GWS?
1405   if (!isDSOffsetLegal(PtrBase, Offset, 16)) {
1406     PtrBase = MI.getOperand(2).getReg();
1407     Offset = 0;
1408   }
1409 
1410   MachineBasicBlock *MBB = MI.getParent();
1411   const DebugLoc &DL = MI.getDebugLoc();
1412   const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
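  // The pointer base is copied into M0; the remaining constant offset is
  // encoded in the instruction's offset field.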
1413 
1414   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1415     .addReg(PtrBase);
1416   if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI))
1417     return false;
1418 
1419   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
1420     .addImm(Offset)
1421     .addImm(IsGDS ? -1 : 0)
1422     .cloneMemRefs(MI);
1423   MI.eraseFromParent();
1424   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1425 }
1426 
1427 bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const {
1428   if (TM.getOptLevel() > CodeGenOpt::None) {
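    // If the whole workgroup fits in one wave, there is no cross-wave
    // synchronization to do; a WAVE_BARRIER pseudo that only blocks code
    // motion is sufficient.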
1429     unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second;
1430     if (WGSize <= STI.getWavefrontSize()) {
1431       MachineBasicBlock *MBB = MI.getParent();
1432       const DebugLoc &DL = MI.getDebugLoc();
1433       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER));
1434       MI.eraseFromParent();
1435       return true;
1436     }
1437   }
1438   return selectImpl(MI, *CoverageInfo);
1439 }
1440 
1441 static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
1442                          bool &IsTexFail) {
1443   if (TexFailCtrl)
1444     IsTexFail = true;
1445 
1446   TFE = (TexFailCtrl & 0x1) ? 1 : 0;
1447   TexFailCtrl &= ~(uint64_t)0x1;
1448   LWE = (TexFailCtrl & 0x2) ? 1 : 0;
1449   TexFailCtrl &= ~(uint64_t)0x2;
1450 
1451   return TexFailCtrl == 0;
1452 }
1453 
1454 static bool parseCachePolicy(uint64_t Value,
1455                              bool *GLC, bool *SLC, bool *DLC) {
1456   if (GLC) {
1457     *GLC = (Value & 0x1) ? 1 : 0;
1458     Value &= ~(uint64_t)0x1;
1459   }
1460   if (SLC) {
1461     *SLC = (Value & 0x2) ? 1 : 0;
1462     Value &= ~(uint64_t)0x2;
1463   }
1464   if (DLC) {
1465     *DLC = (Value & 0x4) ? 1 : 0;
1466     Value &= ~(uint64_t)0x4;
1467   }
1468 
1469   return Value == 0;
1470 }
1471 
1472 bool AMDGPUInstructionSelector::selectImageIntrinsic(
1473   MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
1474   MachineBasicBlock *MBB = MI.getParent();
1475   const DebugLoc &DL = MI.getDebugLoc();
1476 
1477   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
1478     AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
1479 
1480   const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
1481   const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
1482       AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
1483   const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo =
1484       AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode);
1485   unsigned IntrOpcode = Intr->BaseOpcode;
1486   const bool IsGFX10 = STI.getGeneration() >= AMDGPUSubtarget::GFX10;
1487 
1488   const int VAddrIdx = getImageVAddrIdxBegin(BaseOpcode,
1489                                              MI.getNumExplicitDefs());
1490   int NumVAddr, NumGradients;
1491   std::tie(NumVAddr, NumGradients) = getImageNumVAddr(Intr, BaseOpcode);
1492 
1493   Register VDataIn, VDataOut;
1494   LLT VDataTy;
1495   int NumVDataDwords = -1;
1496   bool IsD16 = false;
1497 
1498   // XXX - Can we just get the second to last argument for ctrl?
1499   unsigned CtrlIdx; // Index of texfailctrl argument
1500   bool Unorm;
1501   if (!BaseOpcode->Sampler) {
1502     Unorm = true;
1503     CtrlIdx = VAddrIdx + NumVAddr + 1;
1504   } else {
1505     Unorm = MI.getOperand(VAddrIdx + NumVAddr + 2).getImm() != 0;
1506     CtrlIdx = VAddrIdx + NumVAddr + 3;
1507   }
1508 
1509   bool TFE;
1510   bool LWE;
1511   bool IsTexFail = false;
1512   if (!parseTexFail(MI.getOperand(CtrlIdx).getImm(), TFE, LWE, IsTexFail))
1513     return false;
1514 
1515   const int Flags = MI.getOperand(CtrlIdx + 2).getImm();
1516   const bool IsA16 = (Flags & 1) != 0;
1517   const bool IsG16 = (Flags & 2) != 0;
1518 
  // A16 implies 16-bit gradients
1520   if (IsA16 && !IsG16)
1521     return false;
1522 
1523   unsigned DMask = 0;
1524   unsigned DMaskLanes = 0;
1525 
1526   if (BaseOpcode->Atomic) {
1527     VDataOut = MI.getOperand(0).getReg();
1528     VDataIn = MI.getOperand(2).getReg();
1529     LLT Ty = MRI->getType(VDataIn);
1530 
1531     // Be careful to allow atomic swap on 16-bit element vectors.
1532     const bool Is64Bit = BaseOpcode->AtomicX2 ?
1533       Ty.getSizeInBits() == 128 :
1534       Ty.getSizeInBits() == 64;
1535 
1536     if (BaseOpcode->AtomicX2) {
1537       assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister);
1538 
1539       DMask = Is64Bit ? 0xf : 0x3;
1540       NumVDataDwords = Is64Bit ? 4 : 2;
1541     } else {
1542       DMask = Is64Bit ? 0x3 : 0x1;
1543       NumVDataDwords = Is64Bit ? 2 : 1;
1544     }
1545   } else {
1546     const int DMaskIdx = 2; // Input/output + intrinsic ID.
1547 
1548     DMask = MI.getOperand(DMaskIdx).getImm();
1549     DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
1550 
1551     if (BaseOpcode->Store) {
1552       VDataIn = MI.getOperand(1).getReg();
1553       VDataTy = MRI->getType(VDataIn);
1554       NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
1555     } else {
1556       VDataOut = MI.getOperand(0).getReg();
1557       VDataTy = MRI->getType(VDataOut);
1558       NumVDataDwords = DMaskLanes;
1559 
1560       // One memoperand is mandatory, except for getresinfo.
1561       // FIXME: Check this in verifier.
1562       if (!MI.memoperands_empty()) {
1563         const MachineMemOperand *MMO = *MI.memoperands_begin();
1564 
        // Infer d16 from the memory size, as the register type will be
        // mangled by unpacked subtargets, or by TFE.
1567         IsD16 = ((8 * MMO->getSize()) / DMaskLanes) < 32;
1568 
1569         if (IsD16 && !STI.hasUnpackedD16VMem())
1570           NumVDataDwords = (DMaskLanes + 1) / 2;
1571       }
1572     }
1573   }
1574 
1575   // Optimize _L to _LZ when _L is zero
1576   if (LZMappingInfo) {
1577     // The legalizer replaced the register with an immediate 0 if we need to
1578     // change the opcode.
1579     const MachineOperand &Lod = MI.getOperand(VAddrIdx + NumVAddr - 1);
1580     if (Lod.isImm()) {
1581       assert(Lod.getImm() == 0);
1582       IntrOpcode = LZMappingInfo->LZ;  // set new opcode to _lz variant of _l
1583     }
1584   }
1585 
1586   // Optimize _mip away, when 'lod' is zero
1587   if (MIPMappingInfo) {
1588     const MachineOperand &Lod = MI.getOperand(VAddrIdx + NumVAddr - 1);
1589     if (Lod.isImm()) {
1590       assert(Lod.getImm() == 0);
1591       IntrOpcode = MIPMappingInfo->NONMIP;  // set new opcode to variant without _mip
1592     }
1593   }
1594 
1595   // Set G16 opcode
1596   if (IsG16 && !IsA16) {
1597     const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
1598         AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode);
1599     assert(G16MappingInfo);
1600     IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16
1601   }
1602 
1603   // TODO: Check this in verifier.
1604   assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");
1605 
1606   bool GLC = false;
1607   bool SLC = false;
1608   bool DLC = false;
1609   if (BaseOpcode->Atomic) {
1610     GLC = true; // TODO no-return optimization
1611     if (!parseCachePolicy(MI.getOperand(CtrlIdx + 1).getImm(), nullptr, &SLC,
1612                           IsGFX10 ? &DLC : nullptr))
1613       return false;
1614   } else {
1615     if (!parseCachePolicy(MI.getOperand(CtrlIdx + 1).getImm(), &GLC, &SLC,
1616                           IsGFX10 ? &DLC : nullptr))
1617       return false;
1618   }
1619 
1620   int NumVAddrRegs = 0;
1621   int NumVAddrDwords = 0;
1622   for (int I = 0; I < NumVAddr; ++I) {
1623     // Skip the $noregs and 0s inserted during legalization.
1624     MachineOperand &AddrOp = MI.getOperand(VAddrIdx + I);
1625     if (!AddrOp.isReg())
1626       continue; // XXX - Break?
1627 
1628     Register Addr = AddrOp.getReg();
1629     if (!Addr)
1630       break;
1631 
1632     ++NumVAddrRegs;
1633     NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32;
1634   }
1635 
1636   // The legalizer preprocessed the intrinsic arguments. If we aren't using
  // NSA, these should have been packed into a single value in the first
  // address register.
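  // Use NSA only when there is more than one address register and each one
  // holds exactly one dword.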
1639   const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs;
1640   if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) {
1641     LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n");
1642     return false;
1643   }
1644 
1645   if (IsTexFail)
1646     ++NumVDataDwords;
1647 
1648   int Opcode = -1;
1649   if (IsGFX10) {
1650     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1651                                    UseNSA ? AMDGPU::MIMGEncGfx10NSA
1652                                           : AMDGPU::MIMGEncGfx10Default,
1653                                    NumVDataDwords, NumVAddrDwords);
1654   } else {
1655     if (STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
1656       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
1657                                      NumVDataDwords, NumVAddrDwords);
1658     if (Opcode == -1)
1659       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
1660                                      NumVDataDwords, NumVAddrDwords);
1661   }
1662   assert(Opcode != -1);
1663 
1664   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode))
1665     .cloneMemRefs(MI);
1666 
1667   if (VDataOut) {
1668     if (BaseOpcode->AtomicX2) {
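      // X2 atomics return the combined data/compare pair; the actual result
      // is the low half, so write into a double-width temporary and copy it
      // out.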
1669       const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64;
1670 
1671       Register TmpReg = MRI->createVirtualRegister(
1672         Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
1673       unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
1674 
1675       MIB.addDef(TmpReg);
1676       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut)
1677         .addReg(TmpReg, RegState::Kill, SubReg);
1678 
1679     } else {
1680       MIB.addDef(VDataOut); // vdata output
1681     }
1682   }
1683 
1684   if (VDataIn)
1685     MIB.addReg(VDataIn); // vdata input
1686 
1687   for (int i = 0; i != NumVAddrRegs; ++i) {
1688     MachineOperand &SrcOp = MI.getOperand(VAddrIdx + i);
1689     if (SrcOp.isReg()) {
1690       assert(SrcOp.getReg() != 0);
1691       MIB.addReg(SrcOp.getReg());
1692     }
1693   }
1694 
1695   MIB.addReg(MI.getOperand(VAddrIdx + NumVAddr).getReg()); // rsrc
1696   if (BaseOpcode->Sampler)
1697     MIB.addReg(MI.getOperand(VAddrIdx + NumVAddr + 1).getReg()); // sampler
1698 
1699   MIB.addImm(DMask); // dmask
1700 
1701   if (IsGFX10)
1702     MIB.addImm(DimInfo->Encoding);
1703   MIB.addImm(Unorm);
1704   if (IsGFX10)
1705     MIB.addImm(DLC);
1706 
1707   MIB.addImm(GLC);
1708   MIB.addImm(SLC);
1709   MIB.addImm(IsA16 &&  // a16 or r128
1710              STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0);
1711   if (IsGFX10)
1712     MIB.addImm(IsA16 ? -1 : 0);
1713 
1714   MIB.addImm(TFE); // tfe
1715   MIB.addImm(LWE); // lwe
1716   if (!IsGFX10)
1717     MIB.addImm(DimInfo->DA ? -1 : 0);
1718   if (BaseOpcode->HasD16)
1719     MIB.addImm(IsD16 ? -1 : 0);
1720 
1721   MI.eraseFromParent();
1722   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1723 }
1724 
1725 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
1726     MachineInstr &I) const {
1727   unsigned IntrinsicID = I.getIntrinsicID();
1728   switch (IntrinsicID) {
1729   case Intrinsic::amdgcn_end_cf:
1730     return selectEndCfIntrinsic(I);
1731   case Intrinsic::amdgcn_ds_ordered_add:
1732   case Intrinsic::amdgcn_ds_ordered_swap:
1733     return selectDSOrderedIntrinsic(I, IntrinsicID);
1734   case Intrinsic::amdgcn_ds_gws_init:
1735   case Intrinsic::amdgcn_ds_gws_barrier:
1736   case Intrinsic::amdgcn_ds_gws_sema_v:
1737   case Intrinsic::amdgcn_ds_gws_sema_br:
1738   case Intrinsic::amdgcn_ds_gws_sema_p:
1739   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1740     return selectDSGWSIntrinsic(I, IntrinsicID);
1741   case Intrinsic::amdgcn_ds_append:
1742     return selectDSAppendConsume(I, true);
1743   case Intrinsic::amdgcn_ds_consume:
1744     return selectDSAppendConsume(I, false);
1745   case Intrinsic::amdgcn_s_barrier:
1746     return selectSBarrier(I);
1747   case Intrinsic::amdgcn_global_atomic_fadd:
1748     return selectGlobalAtomicFaddIntrinsic(I);
1749   default: {
1750     return selectImpl(I, *CoverageInfo);
1751   }
1752   }
1753 }
1754 
1755 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
1756   if (selectImpl(I, *CoverageInfo))
1757     return true;
1758 
1759   MachineBasicBlock *BB = I.getParent();
1760   const DebugLoc &DL = I.getDebugLoc();
1761 
1762   Register DstReg = I.getOperand(0).getReg();
1763   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
1764   assert(Size <= 32 || Size == 64);
1765   const MachineOperand &CCOp = I.getOperand(1);
1766   Register CCReg = CCOp.getReg();
1767   if (!isVCC(CCReg, *MRI)) {
1768     unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
1769                                          AMDGPU::S_CSELECT_B32;
    MachineInstr *CopySCC =
        BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
            .addReg(CCReg);

    // The generic constrainSelectedInstRegOperands doesn't work for the scc
    // register bank, because it does not cover the register class that we use
    // to represent it. So we need to manually set the register class here.
    if (!MRI->getRegClassOrNull(CCReg))
      MRI->setRegClass(CCReg,
                       TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
1778     MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
1779             .add(I.getOperand(2))
1780             .add(I.getOperand(3));
1781 
1782     bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI) |
1783                constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
1784     I.eraseFromParent();
1785     return Ret;
1786   }
1787 
1788   // Wide VGPR select should have been split in RegBankSelect.
1789   if (Size > 32)
1790     return false;
1791 
1792   MachineInstr *Select =
1793       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1794               .addImm(0)
1795               .add(I.getOperand(3))
1796               .addImm(0)
1797               .add(I.getOperand(2))
1798               .add(I.getOperand(1));
1799 
1800   bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1801   I.eraseFromParent();
1802   return Ret;
1803 }
1804 
1805 static int sizeToSubRegIndex(unsigned Size) {
1806   switch (Size) {
1807   case 32:
1808     return AMDGPU::sub0;
1809   case 64:
1810     return AMDGPU::sub0_sub1;
1811   case 96:
1812     return AMDGPU::sub0_sub1_sub2;
1813   case 128:
1814     return AMDGPU::sub0_sub1_sub2_sub3;
1815   case 256:
1816     return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
1817   default:
1818     if (Size < 32)
1819       return AMDGPU::sub0;
1820     if (Size > 256)
1821       return -1;
1822     return sizeToSubRegIndex(PowerOf2Ceil(Size));
1823   }
1824 }
1825 
1826 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
1827   Register DstReg = I.getOperand(0).getReg();
1828   Register SrcReg = I.getOperand(1).getReg();
1829   const LLT DstTy = MRI->getType(DstReg);
1830   const LLT SrcTy = MRI->getType(SrcReg);
1831   const LLT S1 = LLT::scalar(1);
1832 
1833   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1834   const RegisterBank *DstRB;
1835   if (DstTy == S1) {
1836     // This is a special case. We don't treat s1 for legalization artifacts as
1837     // vcc booleans.
1838     DstRB = SrcRB;
1839   } else {
1840     DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1841     if (SrcRB != DstRB)
1842       return false;
1843   }
1844 
1845   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
1846 
1847   unsigned DstSize = DstTy.getSizeInBits();
1848   unsigned SrcSize = SrcTy.getSizeInBits();
1849 
1850   const TargetRegisterClass *SrcRC
1851     = TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB, *MRI);
1852   const TargetRegisterClass *DstRC
1853     = TRI.getRegClassForSizeOnBank(DstSize, *DstRB, *MRI);
1854   if (!SrcRC || !DstRC)
1855     return false;
1856 
1857   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1858       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1859     LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
1860     return false;
1861   }
1862 
1863   if (DstTy == LLT::vector(2, 16) && SrcTy == LLT::vector(2, 32)) {
1864     MachineBasicBlock *MBB = I.getParent();
1865     const DebugLoc &DL = I.getDebugLoc();
1866 
1867     Register LoReg = MRI->createVirtualRegister(DstRC);
1868     Register HiReg = MRI->createVirtualRegister(DstRC);
1869     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg)
1870       .addReg(SrcReg, 0, AMDGPU::sub0);
1871     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg)
1872       .addReg(SrcReg, 0, AMDGPU::sub1);
1873 
1874     if (IsVALU && STI.hasSDWA()) {
1875       // Write the low 16-bits of the high element into the high 16-bits of the
1876       // low element.
1877       MachineInstr *MovSDWA =
1878         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
1879         .addImm(0)                             // $src0_modifiers
1880         .addReg(HiReg)                         // $src0
1881         .addImm(0)                             // $clamp
1882         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
1883         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
1884         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
1885         .addReg(LoReg, RegState::Implicit);
1886       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
1887     } else {
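      // Without SDWA, assemble the result as (Hi << 16) | (Lo & 0xffff).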
1888       Register TmpReg0 = MRI->createVirtualRegister(DstRC);
1889       Register TmpReg1 = MRI->createVirtualRegister(DstRC);
1890       Register ImmReg = MRI->createVirtualRegister(DstRC);
1891       if (IsVALU) {
1892         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0)
1893           .addImm(16)
1894           .addReg(HiReg);
1895       } else {
1896         BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0)
1897           .addReg(HiReg)
1898           .addImm(16);
1899       }
1900 
1901       unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1902       unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
1903       unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32;
1904 
1905       BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg)
1906         .addImm(0xffff);
1907       BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1)
1908         .addReg(LoReg)
1909         .addReg(ImmReg);
1910       BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg)
1911         .addReg(TmpReg0)
1912         .addReg(TmpReg1);
1913     }
1914 
1915     I.eraseFromParent();
1916     return true;
1917   }
1918 
1919   if (!DstTy.isScalar())
1920     return false;
1921 
1922   if (SrcSize > 32) {
1923     int SubRegIdx = sizeToSubRegIndex(DstSize);
1924     if (SubRegIdx == -1)
1925       return false;
1926 
1927     // Deal with weird cases where the class only partially supports the subreg
1928     // index.
1929     const TargetRegisterClass *SrcWithSubRC
1930       = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
1931     if (!SrcWithSubRC)
1932       return false;
1933 
1934     if (SrcWithSubRC != SrcRC) {
1935       if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI))
1936         return false;
1937     }
1938 
1939     I.getOperand(1).setSubReg(SubRegIdx);
1940   }
1941 
1942   I.setDesc(TII.get(TargetOpcode::COPY));
1943   return true;
1944 }
1945 
1946 /// \returns true if a bitmask for \p Size bits will be an inline immediate.
1947 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
1948   Mask = maskTrailingOnes<unsigned>(Size);
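  // Integer inline constants cover the range [-16, 64], so only masks in that
  // range avoid a literal operand.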
1949   int SignedMask = static_cast<int>(Mask);
1950   return SignedMask >= -16 && SignedMask <= 64;
1951 }
1952 
1953 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
1954 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
1955   Register Reg, const MachineRegisterInfo &MRI,
1956   const TargetRegisterInfo &TRI) const {
1957   const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
1958   if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
1959     return RB;
1960 
1961   // Ignore the type, since we don't use vcc in artifacts.
1962   if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
1963     return &RBI.getRegBankFromRegClass(*RC, LLT());
1964   return nullptr;
1965 }
1966 
1967 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
1968   bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG;
1969   bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg;
1970   const DebugLoc &DL = I.getDebugLoc();
1971   MachineBasicBlock &MBB = *I.getParent();
1972   const Register DstReg = I.getOperand(0).getReg();
1973   const Register SrcReg = I.getOperand(1).getReg();
1974 
1975   const LLT DstTy = MRI->getType(DstReg);
1976   const LLT SrcTy = MRI->getType(SrcReg);
1977   const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ?
1978     I.getOperand(2).getImm() : SrcTy.getSizeInBits();
1979   const unsigned DstSize = DstTy.getSizeInBits();
1980   if (!DstTy.isScalar())
1981     return false;
1982 
1983   // Artifact casts should never use vcc.
1984   const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
1985 
1986   // FIXME: This should probably be illegal and split earlier.
1987   if (I.getOpcode() == AMDGPU::G_ANYEXT) {
1988     if (DstSize <= 32)
1989       return selectCOPY(I);
1990 
1991     const TargetRegisterClass *SrcRC =
1992         TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank, *MRI);
1993     const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
1994     const TargetRegisterClass *DstRC =
1995         TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
1996 
1997     Register UndefReg = MRI->createVirtualRegister(SrcRC);
1998     BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
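    // Place the 32-bit source in the low half and leave the high half
    // undefined; the extra bits of an anyext are unspecified.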
1999     BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2000       .addReg(SrcReg)
2001       .addImm(AMDGPU::sub0)
2002       .addReg(UndefReg)
2003       .addImm(AMDGPU::sub1);
2004     I.eraseFromParent();
2005 
2006     return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) &&
2007            RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI);
2008   }
2009 
2010   if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
2011     // 64-bit should have been split up in RegBankSelect
2012 
2013     // Try to use an and with a mask if it will save code size.
2014     unsigned Mask;
2015     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2016       MachineInstr *ExtI =
2017       BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
2018         .addImm(Mask)
2019         .addReg(SrcReg);
2020       I.eraseFromParent();
2021       return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2022     }
2023 
2024     const unsigned BFE = Signed ? AMDGPU::V_BFE_I32 : AMDGPU::V_BFE_U32;
2025     MachineInstr *ExtI =
2026       BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
2027       .addReg(SrcReg)
2028       .addImm(0) // Offset
2029       .addImm(SrcSize); // Width
2030     I.eraseFromParent();
2031     return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2032   }
2033 
2034   if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
2035     const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ?
2036       AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass;
2037     if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI))
2038       return false;
2039 
2040     if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
2041       const unsigned SextOpc = SrcSize == 8 ?
2042         AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
2043       BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
2044         .addReg(SrcReg);
2045       I.eraseFromParent();
2046       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2047     }
2048 
2049     const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
2050     const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
2051 
    // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16] = width.
2053     if (DstSize > 32 && (SrcSize <= 32 || InReg)) {
2054       // We need a 64-bit register source, but the high bits don't matter.
2055       Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
2056       Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
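      // For G_SEXT_INREG the source is already 64-bit, so read only its low
      // half; otherwise the 32-bit source is used whole.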
2057       unsigned SubReg = InReg ? AMDGPU::sub0 : 0;
2058 
2059       BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2060       BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
2061         .addReg(SrcReg, 0, SubReg)
2062         .addImm(AMDGPU::sub0)
2063         .addReg(UndefReg)
2064         .addImm(AMDGPU::sub1);
2065 
2066       BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
2067         .addReg(ExtReg)
2068         .addImm(SrcSize << 16);
2069 
2070       I.eraseFromParent();
2071       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
2072     }
2073 
2074     unsigned Mask;
2075     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2076       BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
2077         .addReg(SrcReg)
2078         .addImm(Mask);
2079     } else {
2080       BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
2081         .addReg(SrcReg)
2082         .addImm(SrcSize << 16);
2083     }
2084 
2085     I.eraseFromParent();
2086     return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2087   }
2088 
2089   return false;
2090 }
2091 
2092 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
2093   MachineBasicBlock *BB = I.getParent();
2094   MachineOperand &ImmOp = I.getOperand(1);
2095   Register DstReg = I.getOperand(0).getReg();
2096   unsigned Size = MRI->getType(DstReg).getSizeInBits();
2097 
2098   // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
2099   if (ImmOp.isFPImm()) {
2100     const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
2101     ImmOp.ChangeToImmediate(Imm.getZExtValue());
2102   } else if (ImmOp.isCImm()) {
2103     ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue());
2104   } else {
2105     llvm_unreachable("Not supported by g_constants");
2106   }
2107 
2108   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2109   const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID;
2110 
2111   unsigned Opcode;
2112   if (DstRB->getID() == AMDGPU::VCCRegBankID) {
2113     Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
2114   } else {
2115     Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
2116 
2117     // We should never produce s1 values on banks other than VCC. If the user of
2118     // this already constrained the register, we may incorrectly think it's VCC
2119     // if it wasn't originally.
2120     if (Size == 1)
2121       return false;
2122   }
2123 
2124   if (Size != 64) {
2125     I.setDesc(TII.get(Opcode));
2126     I.addImplicitDefUseOperands(*MF);
2127     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2128   }
2129 
2130   const DebugLoc &DL = I.getDebugLoc();
2131 
2132   APInt Imm(Size, I.getOperand(1).getImm());
2133 
2134   MachineInstr *ResInst;
2135   if (IsSgpr && TII.isInlineConstant(Imm)) {
2136     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
2137       .addImm(I.getOperand(1).getImm());
2138   } else {
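    // Otherwise split the 64-bit value into two 32-bit moves and recombine
    // the halves with a REG_SEQUENCE.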
2139     const TargetRegisterClass *RC = IsSgpr ?
2140       &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
2141     Register LoReg = MRI->createVirtualRegister(RC);
2142     Register HiReg = MRI->createVirtualRegister(RC);
2143 
2144     BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
2145       .addImm(Imm.trunc(32).getZExtValue());
2146 
2147     BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
2148       .addImm(Imm.ashr(32).getZExtValue());
2149 
2150     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2151       .addReg(LoReg)
2152       .addImm(AMDGPU::sub0)
2153       .addReg(HiReg)
2154       .addImm(AMDGPU::sub1);
2155   }
2156 
2157   // We can't call constrainSelectedInstRegOperands here, because it doesn't
2158   // work for target independent opcodes
2159   I.eraseFromParent();
2160   const TargetRegisterClass *DstRC =
2161     TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
2162   if (!DstRC)
2163     return true;
2164   return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
2165 }
2166 
2167 bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
2168   // Only manually handle the f64 SGPR case.
2169   //
2170   // FIXME: This is a workaround for 2.5 different tablegen problems. Because
2171   // the bit ops theoretically have a second result due to the implicit def of
2172   // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing
2173   // that is easy by disabling the check. The result works, but uses a
2174   // nonsensical sreg32orlds_and_sreg_1 regclass.
2175   //
2176   // The DAG emitter is more problematic, and incorrectly adds both S_XOR_B32 to
2177   // the variadic REG_SEQUENCE operands.
2178 
2179   Register Dst = MI.getOperand(0).getReg();
2180   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2181   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2182       MRI->getType(Dst) != LLT::scalar(64))
2183     return false;
2184 
2185   Register Src = MI.getOperand(1).getReg();
2186   MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI);
2187   if (Fabs)
2188     Src = Fabs->getOperand(1).getReg();
2189 
2190   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2191       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2192     return false;
2193 
2194   MachineBasicBlock *BB = MI.getParent();
2195   const DebugLoc &DL = MI.getDebugLoc();
2196   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2197   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2198   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2199   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2200 
2201   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2202     .addReg(Src, 0, AMDGPU::sub0);
2203   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2204     .addReg(Src, 0, AMDGPU::sub1);
2205   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2206     .addImm(0x80000000);
2207 
2208   // Set or toggle sign bit.
2209   unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
2210   BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
2211     .addReg(HiReg)
2212     .addReg(ConstReg);
2213   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2214     .addReg(LoReg)
2215     .addImm(AMDGPU::sub0)
2216     .addReg(OpReg)
2217     .addImm(AMDGPU::sub1);
2218   MI.eraseFromParent();
2219   return true;
2220 }
2221 
2222 // FIXME: This is a workaround for the same tablegen problems as G_FNEG
2223 bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {
2224   Register Dst = MI.getOperand(0).getReg();
2225   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2226   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2227       MRI->getType(Dst) != LLT::scalar(64))
2228     return false;
2229 
2230   Register Src = MI.getOperand(1).getReg();
2231   MachineBasicBlock *BB = MI.getParent();
2232   const DebugLoc &DL = MI.getDebugLoc();
2233   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2234   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2235   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2236   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2237 
2238   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2239       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2240     return false;
2241 
2242   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2243     .addReg(Src, 0, AMDGPU::sub0);
2244   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2245     .addReg(Src, 0, AMDGPU::sub1);
2246   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2247     .addImm(0x7fffffff);
2248 
2249   // Clear sign bit.
  // TODO: Should this use S_BITSET0_*?
2251   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg)
2252     .addReg(HiReg)
2253     .addReg(ConstReg);
2254   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2255     .addReg(LoReg)
2256     .addImm(AMDGPU::sub0)
2257     .addReg(OpReg)
2258     .addImm(AMDGPU::sub1);
2259 
2260   MI.eraseFromParent();
2261   return true;
2262 }
2263 
2264 static bool isConstant(const MachineInstr &MI) {
2265   return MI.getOpcode() == TargetOpcode::G_CONSTANT;
2266 }
2267 
2268 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
2269     const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
2270 
2271   const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
2272 
2273   assert(PtrMI);
2274 
2275   if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
2276     return;
2277 
2278   GEPInfo GEPInfo(*PtrMI);
2279 
2280   for (unsigned i = 1; i != 3; ++i) {
2281     const MachineOperand &GEPOp = PtrMI->getOperand(i);
2282     const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
2283     assert(OpDef);
2284     if (i == 2 && isConstant(*OpDef)) {
2285       // TODO: Could handle constant base + variable offset, but a combine
2286       // probably should have commuted it.
2287       assert(GEPInfo.Imm == 0);
2288       GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
2289       continue;
2290     }
2291     const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
2292     if (OpBank->getID() == AMDGPU::SGPRRegBankID)
2293       GEPInfo.SgprParts.push_back(GEPOp.getReg());
2294     else
2295       GEPInfo.VgprParts.push_back(GEPOp.getReg());
2296   }
2297 
2298   AddrInfo.push_back(GEPInfo);
2299   getAddrModeInfo(*PtrMI, MRI, AddrInfo);
2300 }
2301 
2302 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
2303   if (!MI.hasOneMemOperand())
2304     return false;
2305 
2306   const MachineMemOperand *MMO = *MI.memoperands_begin();
2307   const Value *Ptr = MMO->getValue();
2308 
2309   // UndefValue means this is a load of a kernel input.  These are uniform.
2310   // Sometimes LDS instructions have constant pointers.
2311   // If Ptr is null, then that means this mem operand contains a
2312   // PseudoSourceValue like GOT.
2313   if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
2314       isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
2315     return true;
2316 
2317   if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
2318     return true;
2319 
2320   const Instruction *I = dyn_cast<Instruction>(Ptr);
2321   return I && I->getMetadata("amdgpu.uniform");
2322 }
2323 
2324 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
2325   for (const GEPInfo &GEPInfo : AddrInfo) {
2326     if (!GEPInfo.VgprParts.empty())
2327       return true;
2328   }
2329   return false;
2330 }
2331 
2332 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
2333   const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2334   unsigned AS = PtrTy.getAddressSpace();
2335   if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
2336       STI.ldsRequiresM0Init()) {
2337     MachineBasicBlock *BB = I.getParent();
2338 
    // If DS instructions require M0 initialization, insert it before
    // selecting.
2340     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2341       .addImm(-1);
2342   }
2343 }
2344 
2345 bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW(
2346   MachineInstr &I) const {
2347   initM0(I);
2348   return selectImpl(I, *CoverageInfo);
2349 }
2350 
2351 // TODO: No rtn optimization.
2352 bool AMDGPUInstructionSelector::selectG_AMDGPU_ATOMIC_CMPXCHG(
2353   MachineInstr &MI) const {
2354   Register PtrReg = MI.getOperand(1).getReg();
2355   const LLT PtrTy = MRI->getType(PtrReg);
2356   if (PtrTy.getAddressSpace() == AMDGPUAS::FLAT_ADDRESS ||
2357       STI.useFlatForGlobal())
2358     return selectImpl(MI, *CoverageInfo);
2359 
2360   Register DstReg = MI.getOperand(0).getReg();
2361   const LLT Ty = MRI->getType(DstReg);
2362   const bool Is64 = Ty.getSizeInBits() == 64;
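  // The cmpswap writes back the combined data/compare pair; the old value
  // lands in the low half, so use a double-width temporary and copy it out.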
2363   const unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
2364   Register TmpReg = MRI->createVirtualRegister(
2365     Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
2366 
2367   const DebugLoc &DL = MI.getDebugLoc();
2368   MachineBasicBlock *BB = MI.getParent();
2369 
2370   Register VAddr, RSrcReg, SOffset;
2371   int64_t Offset = 0;
2372 
2373   unsigned Opcode;
2374   if (selectMUBUFOffsetImpl(MI.getOperand(1), RSrcReg, SOffset, Offset)) {
    Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_OFFSET_RTN :
                    AMDGPU::BUFFER_ATOMIC_CMPSWAP_OFFSET_RTN;
2377   } else if (selectMUBUFAddr64Impl(MI.getOperand(1), VAddr,
2378                                    RSrcReg, SOffset, Offset)) {
2379     Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_ADDR64_RTN :
2380                     AMDGPU::BUFFER_ATOMIC_CMPSWAP_ADDR64_RTN;
2381   } else
2382     return selectImpl(MI, *CoverageInfo);
2383 
2384   auto MIB = BuildMI(*BB, &MI, DL, TII.get(Opcode), TmpReg)
2385     .addReg(MI.getOperand(2).getReg());
2386 
2387   if (VAddr)
2388     MIB.addReg(VAddr);
2389 
2390   MIB.addReg(RSrcReg);
2391   if (SOffset)
2392     MIB.addReg(SOffset);
2393   else
2394     MIB.addImm(0);
2395 
2396   MIB.addImm(Offset);
2397   MIB.addImm(0); // slc
2398   MIB.cloneMemRefs(MI);
2399 
2400   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), DstReg)
2401     .addReg(TmpReg, RegState::Kill, SubReg);
2402 
2403   MI.eraseFromParent();
2404 
2405   MRI->setRegClass(
2406     DstReg, Is64 ? &AMDGPU::VReg_64RegClass : &AMDGPU::VGPR_32RegClass);
2407   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2408 }
2409 
2410 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
2411   MachineBasicBlock *BB = I.getParent();
2412   MachineOperand &CondOp = I.getOperand(0);
2413   Register CondReg = CondOp.getReg();
2414   const DebugLoc &DL = I.getDebugLoc();
2415 
2416   unsigned BrOpcode;
2417   Register CondPhysReg;
2418   const TargetRegisterClass *ConstrainRC;
2419 
2420   // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
2421   // whether the branch is uniform when selecting the instruction. In
2422   // GlobalISel, we should push that decision into RegBankSelect. Assume for now
2423   // RegBankSelect knows what it's doing if the branch condition is scc, even
2424   // though it currently does not.
2425   if (!isVCC(CondReg, *MRI)) {
2426     if (MRI->getType(CondReg) != LLT::scalar(32))
2427       return false;
2428 
2429     CondPhysReg = AMDGPU::SCC;
2430     BrOpcode = AMDGPU::S_CBRANCH_SCC1;
2431     ConstrainRC = &AMDGPU::SReg_32RegClass;
2432   } else {
2433     // FIXME: Do we have to insert an and with exec here, like in SelectionDAG?
    // Based on the register bank, we sort of know that a VCC producer ands
    // inactive lanes with 0. What if there was a logical operation with vcc
    // producers in different blocks/with different exec masks?
2437     // FIXME: Should scc->vcc copies and with exec?
2438     CondPhysReg = TRI.getVCC();
2439     BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
2440     ConstrainRC = TRI.getBoolRC();
2441   }
2442 
2443   if (!MRI->getRegClassOrNull(CondReg))
2444     MRI->setRegClass(CondReg, ConstrainRC);
2445 
2446   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
2447     .addReg(CondReg);
2448   BuildMI(*BB, &I, DL, TII.get(BrOpcode))
2449     .addMBB(I.getOperand(1).getMBB());
2450 
2451   I.eraseFromParent();
2452   return true;
2453 }
2454 
2455 bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE(
2456   MachineInstr &I) const {
2457   Register DstReg = I.getOperand(0).getReg();
2458   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2459   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2460   I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
2461   if (IsVGPR)
2462     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
2463 
2464   return RBI.constrainGenericRegister(
2465     DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
2466 }
2467 
2468 bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
2469   Register DstReg = I.getOperand(0).getReg();
2470   Register SrcReg = I.getOperand(1).getReg();
2471   Register MaskReg = I.getOperand(2).getReg();
2472   LLT Ty = MRI->getType(DstReg);
2473   LLT MaskTy = MRI->getType(MaskReg);
2474 
2475   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2476   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2477   const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI);
2478   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2479   if (DstRB != SrcRB) // Should only happen for hand written MIR.
2480     return false;
2481 
2482   unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
2483   const TargetRegisterClass &RegRC
2484     = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2485 
2486   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB,
2487                                                                   *MRI);
2488   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB,
2489                                                                   *MRI);
2490   const TargetRegisterClass *MaskRC =
2491       TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB, *MRI);
2492 
2493   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2494       !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2495       !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI))
2496     return false;
2497 
2498   MachineBasicBlock *BB = I.getParent();
2499   const DebugLoc &DL = I.getDebugLoc();
2500   if (Ty.getSizeInBits() == 32) {
2501     assert(MaskTy.getSizeInBits() == 32 &&
2502            "ptrmask should have been narrowed during legalize");
2503 
2504     BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
2505       .addReg(SrcReg)
2506       .addReg(MaskReg);
2507     I.eraseFromParent();
2508     return true;
2509   }
2510 
2511   Register HiReg = MRI->createVirtualRegister(&RegRC);
2512   Register LoReg = MRI->createVirtualRegister(&RegRC);
2513 
2514   // Extract the subregisters from the source pointer.
2515   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
2516     .addReg(SrcReg, 0, AMDGPU::sub0);
2517   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
2518     .addReg(SrcReg, 0, AMDGPU::sub1);
2519 
2520   Register MaskedLo, MaskedHi;
2521 
2522   // Try to avoid emitting a bit operation when we only need to touch half of
2523   // the 64-bit pointer.
2524   APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zextOrSelf(64);
2525 
2526   const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
2527   const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
2528   if ((MaskOnes & MaskLo32) == MaskLo32) {
2529     // If all the bits in the low half are 1, we only need a copy for it.
2530     MaskedLo = LoReg;
2531   } else {
2532     // Extract the mask subregister and apply the and.
2533     Register MaskLo = MRI->createVirtualRegister(&RegRC);
2534     MaskedLo = MRI->createVirtualRegister(&RegRC);
2535 
2536     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo)
2537       .addReg(MaskReg, 0, AMDGPU::sub0);
2538     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo)
2539       .addReg(LoReg)
2540       .addReg(MaskLo);
2541   }
2542 
2543   if ((MaskOnes & MaskHi32) == MaskHi32) {
2544     // If all the bits in the high half are 1, we only need a copy for it.
2545     MaskedHi = HiReg;
2546   } else {
2547     Register MaskHi = MRI->createVirtualRegister(&RegRC);
2548     MaskedHi = MRI->createVirtualRegister(&RegRC);
2549 
2550     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi)
2551       .addReg(MaskReg, 0, AMDGPU::sub1);
2552     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi)
2553       .addReg(HiReg)
2554       .addReg(MaskHi);
2555   }
2556 
2557   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2558     .addReg(MaskedLo)
2559     .addImm(AMDGPU::sub0)
2560     .addReg(MaskedHi)
2561     .addImm(AMDGPU::sub1);
2562   I.eraseFromParent();
2563   return true;
2564 }
2565 
2566 /// Return the register to use for the index value, and the subregister to use
2567 /// for the indirectly accessed register.
2568 static std::pair<Register, unsigned>
2569 computeIndirectRegIndex(MachineRegisterInfo &MRI,
2570                         const SIRegisterInfo &TRI,
2571                         const TargetRegisterClass *SuperRC,
2572                         Register IdxReg,
2573                         unsigned EltSize) {
2574   Register IdxBaseReg;
2575   int Offset;
2576   MachineInstr *Unused;
2577 
2578   std::tie(IdxBaseReg, Offset, Unused)
2579     = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg);
2580   if (IdxBaseReg == AMDGPU::NoRegister) {
2581     // This will happen if the index is a known constant. This should ordinarily
2582     // be legalized out, but handle it as a register just in case.
2583     assert(Offset == 0);
2584     IdxBaseReg = IdxReg;
2585   }
2586 
2587   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);
2588 
2589   // Skip out of bounds offsets, or else we would end up using an undefined
2590   // register.
2591   if (static_cast<unsigned>(Offset) >= SubRegs.size())
2592     return std::make_pair(IdxReg, SubRegs[0]);
2593   return std::make_pair(IdxBaseReg, SubRegs[Offset]);
2594 }
2595 
2596 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
2597   MachineInstr &MI) const {
2598   Register DstReg = MI.getOperand(0).getReg();
2599   Register SrcReg = MI.getOperand(1).getReg();
2600   Register IdxReg = MI.getOperand(2).getReg();
2601 
2602   LLT DstTy = MRI->getType(DstReg);
2603   LLT SrcTy = MRI->getType(SrcReg);
2604 
2605   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2606   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2607   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2608 
2609   // The index must be scalar. If it wasn't RegBankSelect should have moved this
2610   // into a waterfall loop.
2611   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2612     return false;
2613 
2614   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB,
2615                                                                   *MRI);
2616   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(DstTy, *DstRB,
2617                                                                   *MRI);
2618   if (!SrcRC || !DstRC)
2619     return false;
2620   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2621       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2622       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2623     return false;
2624 
2625   MachineBasicBlock *BB = MI.getParent();
2626   const DebugLoc &DL = MI.getDebugLoc();
2627   const bool Is64 = DstTy.getSizeInBits() == 64;
2628 
2629   unsigned SubReg;
2630   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg,
2631                                                      DstTy.getSizeInBits() / 8);
2632 
2633   if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
2634     if (DstTy.getSizeInBits() != 32 && !Is64)
2635       return false;
2636 
2637     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2638       .addReg(IdxReg);
2639 
2640     unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
2641     BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
2642       .addReg(SrcReg, 0, SubReg)
2643       .addReg(SrcReg, RegState::Implicit);
2644     MI.eraseFromParent();
2645     return true;
2646   }
2647 
2648   if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
2649     return false;
2650 
2651   if (!STI.useVGPRIndexMode()) {
2652     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2653       .addReg(IdxReg);
2654     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
2655       .addReg(SrcReg, 0, SubReg)
2656       .addReg(SrcReg, RegState::Implicit);
2657     MI.eraseFromParent();
2658     return true;
2659   }
2660 
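  // Use VGPR indexing mode: enable SRC0 indexing from IdxReg, read the
  // selected element with a plain V_MOV, then switch the mode back off.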
2661   BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_ON))
2662     .addReg(IdxReg)
2663     .addImm(AMDGPU::VGPRIndexMode::SRC0_ENABLE);
2664   BuildMI(*BB, MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), DstReg)
2665     .addReg(SrcReg, 0, SubReg)
2666     .addReg(SrcReg, RegState::Implicit)
2667     .addReg(AMDGPU::M0, RegState::Implicit);
2668   BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_OFF));
2669 
2670   MI.eraseFromParent();
2671   return true;
2672 }
2673 
2674 // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
2675 bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
2676   MachineInstr &MI) const {
2677   Register DstReg = MI.getOperand(0).getReg();
2678   Register VecReg = MI.getOperand(1).getReg();
2679   Register ValReg = MI.getOperand(2).getReg();
2680   Register IdxReg = MI.getOperand(3).getReg();
2681 
2682   LLT VecTy = MRI->getType(DstReg);
2683   LLT ValTy = MRI->getType(ValReg);
2684   unsigned VecSize = VecTy.getSizeInBits();
2685   unsigned ValSize = ValTy.getSizeInBits();
2686 
2687   const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
2688   const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
2689   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2690 
2691   assert(VecTy.getElementType() == ValTy);
2692 
2693   // The index must be scalar. If it wasn't RegBankSelect should have moved this
2694   // into a waterfall loop.
2695   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2696     return false;
2697 
2698   const TargetRegisterClass *VecRC = TRI.getRegClassForTypeOnBank(VecTy, *VecRB,
2699                                                                   *MRI);
2700   const TargetRegisterClass *ValRC = TRI.getRegClassForTypeOnBank(ValTy, *ValRB,
2701                                                                   *MRI);
2702 
2703   if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
2704       !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
2705       !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
2706       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2707     return false;
2708 
2709   if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
2710     return false;
2711 
2712   unsigned SubReg;
2713   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg,
2714                                                      ValSize / 8);
2715 
2716   const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
2717                          STI.useVGPRIndexMode();
2718 
2719   MachineBasicBlock *BB = MI.getParent();
2720   const DebugLoc &DL = MI.getDebugLoc();
2721 
2722   if (IndexMode) {
2723     BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_ON))
2724       .addReg(IdxReg)
2725       .addImm(AMDGPU::VGPRIndexMode::DST_ENABLE);
2726   } else {
2727     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2728       .addReg(IdxReg);
2729   }
2730 
2731   const MCInstrDesc &RegWriteOp
2732     = TII.getIndirectRegWritePseudo(VecSize, ValSize,
2733                                     VecRB->getID() == AMDGPU::SGPRRegBankID);
2734   BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
2735     .addReg(VecReg)
2736     .addReg(ValReg)
2737     .addImm(SubReg);
2738 
2739   if (IndexMode)
2740     BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_OFF));
2741 
2742   MI.eraseFromParent();
2743   return true;
2744 }
2745 
2746 static bool isZeroOrUndef(int X) {
2747   return X == 0 || X == -1;
2748 }
2749 
2750 static bool isOneOrUndef(int X) {
2751   return X == 1 || X == -1;
2752 }
2753 
2754 static bool isZeroOrOneOrUndef(int X) {
2755   return X == 0 || X == 1 || X == -1;
2756 }
2757 
2758 // Normalize a VOP3P shuffle mask to refer to the low/high half of a single
2759 // 32-bit register.
2760 static Register normalizeVOP3PMask(int NewMask[2], Register Src0, Register Src1,
2761                                    ArrayRef<int> Mask) {
2762   NewMask[0] = Mask[0];
2763   NewMask[1] = Mask[1];
2764   if (isZeroOrOneOrUndef(Mask[0]) && isZeroOrOneOrUndef(Mask[1]))
2765     return Src0;
2766 
2767   assert(NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1);
2768   assert(NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1);
2769 
  // Shift the mask inputs to be 0/1.
2771   NewMask[0] = NewMask[0] == -1 ? -1 : NewMask[0] - 2;
2772   NewMask[1] = NewMask[1] == -1 ? -1 : NewMask[1] - 2;
2773   return Src1;
2774 }
2775 
2776 // This is only legal with VOP3P instructions as an aid to op_sel matching.
2777 bool AMDGPUInstructionSelector::selectG_SHUFFLE_VECTOR(
2778   MachineInstr &MI) const {
2779   Register DstReg = MI.getOperand(0).getReg();
2780   Register Src0Reg = MI.getOperand(1).getReg();
2781   Register Src1Reg = MI.getOperand(2).getReg();
2782   ArrayRef<int> ShufMask = MI.getOperand(3).getShuffleMask();
2783 
2784   const LLT V2S16 = LLT::vector(2, 16);
2785   if (MRI->getType(DstReg) != V2S16 || MRI->getType(Src0Reg) != V2S16)
2786     return false;
2787 
2788   if (!AMDGPU::isLegalVOP3PShuffleMask(ShufMask))
2789     return false;
2790 
2791   assert(ShufMask.size() == 2);
2792   assert(STI.hasSDWA() && "no target has VOP3P but not SDWA");
2793 
2794   MachineBasicBlock *MBB = MI.getParent();
2795   const DebugLoc &DL = MI.getDebugLoc();
2796 
2797   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2798   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
2799   const TargetRegisterClass &RC = IsVALU ?
2800     AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2801 
2802   // Handle the degenerate case which should have folded out.
2803   if (ShufMask[0] == -1 && ShufMask[1] == -1) {
2804     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::IMPLICIT_DEF), DstReg);
2805 
2806     MI.eraseFromParent();
2807     return RBI.constrainGenericRegister(DstReg, RC, *MRI);
2808   }
2809 
2810   // A legal VOP3P mask only reads one of the sources.
2811   int Mask[2];
2812   Register SrcVec = normalizeVOP3PMask(Mask, Src0Reg, Src1Reg, ShufMask);
2813 
2814   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI) ||
2815       !RBI.constrainGenericRegister(SrcVec, RC, *MRI))
2816     return false;
2817 
2818   // TODO: This also should have been folded out
2819   if (isZeroOrUndef(Mask[0]) && isOneOrUndef(Mask[1])) {
2820     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::COPY), DstReg)
2821       .addReg(SrcVec);
2822 
2823     MI.eraseFromParent();
2824     return true;
2825   }
2826 
2827   if (Mask[0] == 1 && Mask[1] == -1) {
2828     if (IsVALU) {
2829       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
2830         .addImm(16)
2831         .addReg(SrcVec);
2832     } else {
2833       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
2834         .addReg(SrcVec)
2835         .addImm(16);
2836     }
2837   } else if (Mask[0] == -1 && Mask[1] == 0) {
2838     if (IsVALU) {
2839       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), DstReg)
2840         .addImm(16)
2841         .addReg(SrcVec);
2842     } else {
2843       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHL_B32), DstReg)
2844         .addReg(SrcVec)
2845         .addImm(16);
2846     }
2847   } else if (Mask[0] == 0 && Mask[1] == 0) {
2848     if (IsVALU) {
2849       // Write low half of the register into the high half.
2850       MachineInstr *MovSDWA =
2851         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2852         .addImm(0)                             // $src0_modifiers
2853         .addReg(SrcVec)                        // $src0
2854         .addImm(0)                             // $clamp
2855         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
2856         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2857         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
2858         .addReg(SrcVec, RegState::Implicit);
2859       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2860     } else {
2861       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2862         .addReg(SrcVec)
2863         .addReg(SrcVec);
2864     }
2865   } else if (Mask[0] == 1 && Mask[1] == 1) {
2866     if (IsVALU) {
2867       // Write high half of the register into the low half.
2868       MachineInstr *MovSDWA =
2869         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2870         .addImm(0)                             // $src0_modifiers
2871         .addReg(SrcVec)                        // $src0
2872         .addImm(0)                             // $clamp
2873         .addImm(AMDGPU::SDWA::WORD_0)          // $dst_sel
2874         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2875         .addImm(AMDGPU::SDWA::WORD_1)          // $src0_sel
2876         .addReg(SrcVec, RegState::Implicit);
2877       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2878     } else {
2879       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HH_B32_B16), DstReg)
2880         .addReg(SrcVec)
2881         .addReg(SrcVec);
2882     }
2883   } else if (Mask[0] == 1 && Mask[1] == 0) {
2884     if (IsVALU) {
2885       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_ALIGNBIT_B32), DstReg)
2886         .addReg(SrcVec)
2887         .addReg(SrcVec)
2888         .addImm(16);
2889     } else {
2890       Register TmpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2891       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), TmpReg)
2892         .addReg(SrcVec)
2893         .addImm(16);
2894       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2895         .addReg(TmpReg)
2896         .addReg(SrcVec);
2897     }
2898   } else
2899     llvm_unreachable("all shuffle masks should be handled");
2900 
2901   MI.eraseFromParent();
2902   return true;
2903 }
2904 
2905 bool AMDGPUInstructionSelector::selectAMDGPU_BUFFER_ATOMIC_FADD(
2906   MachineInstr &MI) const {
2907 
2908   MachineBasicBlock *MBB = MI.getParent();
2909   const DebugLoc &DL = MI.getDebugLoc();
2910 
2911   if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
2912     Function &F = MBB->getParent()->getFunction();
2913     DiagnosticInfoUnsupported
2914       NoFpRet(F, "return versions of fp atomics not supported",
2915               MI.getDebugLoc(), DS_Error);
2916     F.getContext().diagnose(NoFpRet);
2917     return false;
2918   }
2919 
2920   // FIXME: This is only needed because tablegen requires the number of dst
2921   // operands in the match and replace patterns to be the same. Otherwise the
2922   // patterns could be exported from the SDag path.
2923   MachineOperand &VDataIn = MI.getOperand(1);
2924   MachineOperand &VIndex = MI.getOperand(3);
2925   MachineOperand &VOffset = MI.getOperand(4);
2926   MachineOperand &SOffset = MI.getOperand(5);
2927   int16_t Offset = MI.getOperand(6).getImm();
2928 
2929   bool HasVOffset = !isOperandImmEqual(VOffset, 0, *MRI);
2930   bool HasVIndex = !isOperandImmEqual(VIndex, 0, *MRI);
2931 
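  // Pick the addressing variant based on which VGPR address components are
  // present: BOTHEN (vindex + voffset), OFFEN (voffset only), IDXEN (vindex
  // only) or OFFSET (neither).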
2932   unsigned Opcode;
2933   if (HasVOffset) {
2934     Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN
2935                        : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN;
2936   } else {
2937     Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN
2938                        : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET;
2939   }
2940 
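  // Vector (packed half) data uses the packed f16 atomic add variants.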
2941   if (MRI->getType(VDataIn.getReg()).isVector()) {
2942     switch (Opcode) {
2943     case AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN:
2944       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN;
2945       break;
2946     case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN:
2947       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFEN;
2948       break;
2949     case AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN:
2950       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_IDXEN;
2951       break;
2952     case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET:
2953       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFSET;
2954       break;
2955     }
2956   }
2957 
2958   auto I = BuildMI(*MBB, MI, DL, TII.get(Opcode));
2959   I.add(VDataIn);
2960 
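  // The BOTHEN variants take vindex and voffset packed into a 64-bit register
  // pair.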
2961   if (Opcode == AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN ||
2962       Opcode == AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN) {
2963     Register IdxReg = MRI->createVirtualRegister(&AMDGPU::VReg_64RegClass);
2964     BuildMI(*MBB, &*I, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
2965       .addReg(VIndex.getReg())
2966       .addImm(AMDGPU::sub0)
2967       .addReg(VOffset.getReg())
2968       .addImm(AMDGPU::sub1);
2969 
2970     I.addReg(IdxReg);
2971   } else if (HasVIndex) {
2972     I.add(VIndex);
2973   } else if (HasVOffset) {
2974     I.add(VOffset);
2975   }
2976 
2977   I.add(MI.getOperand(2)); // rsrc
2978   I.add(SOffset);
2979   I.addImm(Offset);
2980   renderExtractSLC(I, MI, 7);
2981   I.cloneMemRefs(MI);
2982 
2983   MI.eraseFromParent();
2984 
2985   return true;
2986 }
2987 
2988 bool AMDGPUInstructionSelector::selectGlobalAtomicFaddIntrinsic(
2989   MachineInstr &MI) const {
2990 
2991   MachineBasicBlock *MBB = MI.getParent();
2992   const DebugLoc &DL = MI.getDebugLoc();
2993 
2994   if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
2995     Function &F = MBB->getParent()->getFunction();
2996     DiagnosticInfoUnsupported
2997       NoFpRet(F, "return versions of fp atomics not supported",
2998               MI.getDebugLoc(), DS_Error);
2999     F.getContext().diagnose(NoFpRet);
3000     return false;
3001   }
3002 
3003   // FIXME: This is only needed because tablegen requires the number of dst
3004   // operands in the match and replace patterns to be the same. Otherwise the
3005   // patterns could be exported from the SDag path.
3006   auto Addr = selectFlatOffsetImpl<true>(MI.getOperand(2));
3007 
3008   Register Data = MI.getOperand(3).getReg();
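  // Vector (packed half) data selects the packed f16 form of the global atomic
  // add.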
3009   const unsigned Opc = MRI->getType(Data).isVector() ?
3010     AMDGPU::GLOBAL_ATOMIC_PK_ADD_F16 : AMDGPU::GLOBAL_ATOMIC_ADD_F32;
3011   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
3012     .addReg(Addr.first)
3013     .addReg(Data)
3014     .addImm(Addr.second)
3015     .addImm(0) // SLC
3016     .cloneMemRefs(MI);
3017 
3018   MI.eraseFromParent();
3019   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3020 }
3021 
3022 bool AMDGPUInstructionSelector::select(MachineInstr &I) {
3023   if (I.isPHI())
3024     return selectPHI(I);
3025 
3026   if (!I.isPreISelOpcode()) {
3027     if (I.isCopy())
3028       return selectCOPY(I);
3029     return true;
3030   }
3031 
3032   switch (I.getOpcode()) {
3033   case TargetOpcode::G_AND:
3034   case TargetOpcode::G_OR:
3035   case TargetOpcode::G_XOR:
3036     if (selectImpl(I, *CoverageInfo))
3037       return true;
3038     return selectG_AND_OR_XOR(I);
3039   case TargetOpcode::G_ADD:
3040   case TargetOpcode::G_SUB:
3041     if (selectImpl(I, *CoverageInfo))
3042       return true;
3043     return selectG_ADD_SUB(I);
3044   case TargetOpcode::G_UADDO:
3045   case TargetOpcode::G_USUBO:
3046   case TargetOpcode::G_UADDE:
3047   case TargetOpcode::G_USUBE:
3048     return selectG_UADDO_USUBO_UADDE_USUBE(I);
3049   case TargetOpcode::G_INTTOPTR:
3050   case TargetOpcode::G_BITCAST:
3051   case TargetOpcode::G_PTRTOINT:
3052     return selectCOPY(I);
3053   case TargetOpcode::G_CONSTANT:
3054   case TargetOpcode::G_FCONSTANT:
3055     return selectG_CONSTANT(I);
3056   case TargetOpcode::G_FNEG:
3057     if (selectImpl(I, *CoverageInfo))
3058       return true;
3059     return selectG_FNEG(I);
3060   case TargetOpcode::G_FABS:
3061     if (selectImpl(I, *CoverageInfo))
3062       return true;
3063     return selectG_FABS(I);
3064   case TargetOpcode::G_EXTRACT:
3065     return selectG_EXTRACT(I);
3066   case TargetOpcode::G_MERGE_VALUES:
3067   case TargetOpcode::G_BUILD_VECTOR:
3068   case TargetOpcode::G_CONCAT_VECTORS:
3069     return selectG_MERGE_VALUES(I);
3070   case TargetOpcode::G_UNMERGE_VALUES:
3071     return selectG_UNMERGE_VALUES(I);
3072   case TargetOpcode::G_BUILD_VECTOR_TRUNC:
3073     return selectG_BUILD_VECTOR_TRUNC(I);
3074   case TargetOpcode::G_PTR_ADD:
3075     return selectG_PTR_ADD(I);
3076   case TargetOpcode::G_IMPLICIT_DEF:
3077     return selectG_IMPLICIT_DEF(I);
3078   case TargetOpcode::G_FREEZE:
3079     return selectCOPY(I);
3080   case TargetOpcode::G_INSERT:
3081     return selectG_INSERT(I);
3082   case TargetOpcode::G_INTRINSIC:
3083     return selectG_INTRINSIC(I);
3084   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
3085     return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
3086   case TargetOpcode::G_ICMP:
3087     if (selectG_ICMP(I))
3088       return true;
3089     return selectImpl(I, *CoverageInfo);
3090   case TargetOpcode::G_LOAD:
3091   case TargetOpcode::G_STORE:
3092   case TargetOpcode::G_ATOMIC_CMPXCHG:
3093   case TargetOpcode::G_ATOMICRMW_XCHG:
3094   case TargetOpcode::G_ATOMICRMW_ADD:
3095   case TargetOpcode::G_ATOMICRMW_SUB:
3096   case TargetOpcode::G_ATOMICRMW_AND:
3097   case TargetOpcode::G_ATOMICRMW_OR:
3098   case TargetOpcode::G_ATOMICRMW_XOR:
3099   case TargetOpcode::G_ATOMICRMW_MIN:
3100   case TargetOpcode::G_ATOMICRMW_MAX:
3101   case TargetOpcode::G_ATOMICRMW_UMIN:
3102   case TargetOpcode::G_ATOMICRMW_UMAX:
3103   case TargetOpcode::G_ATOMICRMW_FADD:
3104   case AMDGPU::G_AMDGPU_ATOMIC_INC:
3105   case AMDGPU::G_AMDGPU_ATOMIC_DEC:
3106   case AMDGPU::G_AMDGPU_ATOMIC_FMIN:
3107   case AMDGPU::G_AMDGPU_ATOMIC_FMAX:
3108     return selectG_LOAD_STORE_ATOMICRMW(I);
3109   case AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG:
3110     return selectG_AMDGPU_ATOMIC_CMPXCHG(I);
3111   case TargetOpcode::G_SELECT:
3112     return selectG_SELECT(I);
3113   case TargetOpcode::G_TRUNC:
3114     return selectG_TRUNC(I);
3115   case TargetOpcode::G_SEXT:
3116   case TargetOpcode::G_ZEXT:
3117   case TargetOpcode::G_ANYEXT:
3118   case TargetOpcode::G_SEXT_INREG:
3119     if (selectImpl(I, *CoverageInfo))
3120       return true;
3121     return selectG_SZA_EXT(I);
3122   case TargetOpcode::G_BRCOND:
3123     return selectG_BRCOND(I);
3124   case TargetOpcode::G_GLOBAL_VALUE:
3125     return selectG_GLOBAL_VALUE(I);
3126   case TargetOpcode::G_PTRMASK:
3127     return selectG_PTRMASK(I);
3128   case TargetOpcode::G_EXTRACT_VECTOR_ELT:
3129     return selectG_EXTRACT_VECTOR_ELT(I);
3130   case TargetOpcode::G_INSERT_VECTOR_ELT:
3131     return selectG_INSERT_VECTOR_ELT(I);
3132   case TargetOpcode::G_SHUFFLE_VECTOR:
3133     return selectG_SHUFFLE_VECTOR(I);
3134   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
3135   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE: {
3136     const AMDGPU::ImageDimIntrinsicInfo *Intr
3137       = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID());
3138     assert(Intr && "not an image intrinsic with image pseudo");
3139     return selectImageIntrinsic(I, Intr);
3140   }
3141   case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD:
3142     return selectAMDGPU_BUFFER_ATOMIC_FADD(I);
3143   default:
3144     return selectImpl(I, *CoverageInfo);
3145   }
3146   return false;
3147 }
3148 
3149 InstructionSelector::ComplexRendererFns
3150 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
3151   return {{
3152       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3153   }};
3154 
3155 }
3156 
3157 std::pair<Register, unsigned>
3158 AMDGPUInstructionSelector::selectVOP3ModsImpl(MachineOperand &Root) const {
3159   Register Src = Root.getReg();
3160   Register OrigSrc = Src;
3161   unsigned Mods = 0;
3162   MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
3163 
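  // Fold fneg and fabs on the source into VOP3 source modifiers.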
3164   if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
3165     Src = MI->getOperand(1).getReg();
3166     Mods |= SISrcMods::NEG;
3167     MI = getDefIgnoringCopies(Src, *MRI);
3168   }
3169 
3170   if (MI && MI->getOpcode() == AMDGPU::G_FABS) {
3171     Src = MI->getOperand(1).getReg();
3172     Mods |= SISrcMods::ABS;
3173   }
3174 
3175   if (Mods != 0 &&
3176       RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) {
3177     MachineInstr *UseMI = Root.getParent();
3178 
3179     // If we looked through copies to find source modifiers on an SGPR operand,
3180     // we now have an SGPR register source. To avoid potentially violating the
3181     // constant bus restriction, we need to insert a copy to a VGPR.
3182     Register VGPRSrc = MRI->cloneVirtualRegister(OrigSrc);
3183     BuildMI(*UseMI->getParent(), UseMI, UseMI->getDebugLoc(),
3184             TII.get(AMDGPU::COPY), VGPRSrc)
3185       .addReg(Src);
3186     Src = VGPRSrc;
3187   }
3188 
3189   return std::make_pair(Src, Mods);
3190 }
3191 
3192 ///
3193 /// This will select either an SGPR or VGPR operand and will save us from
3194 /// having to write an extra tablegen pattern.
3195 InstructionSelector::ComplexRendererFns
3196 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
3197   return {{
3198       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3199   }};
3200 }
3201 
3202 InstructionSelector::ComplexRendererFns
3203 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
3204   Register Src;
3205   unsigned Mods;
3206   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3207 
3208   return {{
3209       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3210       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3211       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3212       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3213   }};
3214 }
3215 
3216 InstructionSelector::ComplexRendererFns
3217 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
3218   return {{
3219       [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
3220       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3221       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
3222   }};
3223 }
3224 
3225 InstructionSelector::ComplexRendererFns
3226 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
3227   Register Src;
3228   unsigned Mods;
3229   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3230 
3231   return {{
3232       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3233       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3234   }};
3235 }
3236 
3237 InstructionSelector::ComplexRendererFns
3238 AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
3239   Register Reg = Root.getReg();
3240   const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI);
3241   if (Def && (Def->getOpcode() == AMDGPU::G_FNEG ||
3242               Def->getOpcode() == AMDGPU::G_FABS))
3243     return {};
3244   return {{
3245       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3246   }};
3247 }
3248 
3249 std::pair<Register, unsigned>
3250 AMDGPUInstructionSelector::selectVOP3PModsImpl(
3251   Register Src, const MachineRegisterInfo &MRI) const {
3252   unsigned Mods = 0;
3253   MachineInstr *MI = MRI.getVRegDef(Src);
3254 
3255   if (MI && MI->getOpcode() == AMDGPU::G_FNEG &&
3256       // It's possible to see an f32 fneg here, but unlikely.
3257       // TODO: Treat f32 fneg as only high bit.
3258       MRI.getType(Src) == LLT::vector(2, 16)) {
3259     Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
3260     Src = MI->getOperand(1).getReg();
3261     MI = MRI.getVRegDef(Src);
3262   }
3263 
3264   // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.
3265 
3266   // Packed instructions do not have abs modifiers.
3267   Mods |= SISrcMods::OP_SEL_1;
3268 
3269   return std::make_pair(Src, Mods);
3270 }
3271 
3272 InstructionSelector::ComplexRendererFns
3273 AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
3274   MachineRegisterInfo &MRI
3275     = Root.getParent()->getParent()->getParent()->getRegInfo();
3276 
3277   Register Src;
3278   unsigned Mods;
3279   std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);
3280 
3281   return {{
3282       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3283       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3284   }};
3285 }
3286 
3287 InstructionSelector::ComplexRendererFns
3288 AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
3289   Register Src;
3290   unsigned Mods;
3291   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3292   if (!isKnownNeverNaN(Src, *MRI))
3293     return None;
3294 
3295   return {{
3296       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3297       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3298   }};
3299 }
3300 
3301 InstructionSelector::ComplexRendererFns
3302 AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
3303   // FIXME: Handle op_sel
3304   return {{
3305       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
3306       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
3307   }};
3308 }
3309 
3310 InstructionSelector::ComplexRendererFns
3311 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
3312   SmallVector<GEPInfo, 4> AddrInfo;
3313   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3314 
3315   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3316     return None;
3317 
3318   const GEPInfo &GEPInfo = AddrInfo[0];
3319   Optional<int64_t> EncodedImm =
3320       AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm, false);
3321   if (!EncodedImm)
3322     return None;
3323 
3324   unsigned PtrReg = GEPInfo.SgprParts[0];
3325   return {{
3326     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3327     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3328   }};
3329 }
3330 
3331 InstructionSelector::ComplexRendererFns
3332 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
3333   SmallVector<GEPInfo, 4> AddrInfo;
3334   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3335 
3336   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3337     return None;
3338 
3339   const GEPInfo &GEPInfo = AddrInfo[0];
3340   Register PtrReg = GEPInfo.SgprParts[0];
3341   Optional<int64_t> EncodedImm =
3342       AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
3343   if (!EncodedImm)
3344     return None;
3345 
3346   return {{
3347     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3348     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3349   }};
3350 }
3351 
3352 InstructionSelector::ComplexRendererFns
3353 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
3354   MachineInstr *MI = Root.getParent();
3355   MachineBasicBlock *MBB = MI->getParent();
3356 
3357   SmallVector<GEPInfo, 4> AddrInfo;
3358   getAddrModeInfo(*MI, *MRI, AddrInfo);
3359 
3360   // FIXME: We should shrink the GEP if the offset is known to fit in 32 bits;
3361   // then we can select all ptr + 32-bit offsets, not just immediate offsets.
3362   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3363     return None;
3364 
3365   const GEPInfo &GEPInfo = AddrInfo[0];
3366   // SGPR offset is unsigned.
3367   if (!GEPInfo.Imm || GEPInfo.Imm < 0 || !isUInt<32>(GEPInfo.Imm))
3368     return None;
3369 
3370   // If we make it this far we have a load with a 32-bit immediate offset.
3371   // It is OK to select this using a sgpr offset, because we have already
3372   // failed trying to select this load into one of the _IMM variants since
3373   // the _IMM Patterns are considered before the _SGPR patterns.
3374   Register PtrReg = GEPInfo.SgprParts[0];
3375   Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3376   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
3377           .addImm(GEPInfo.Imm);
3378   return {{
3379     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3380     [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
3381   }};
3382 }
3383 
3384 template <bool Signed>
3385 std::pair<Register, int>
3386 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root) const {
3387   MachineInstr *MI = Root.getParent();
3388 
3389   auto Default = std::make_pair(Root.getReg(), 0);
3390 
3391   if (!STI.hasFlatInstOffsets())
3392     return Default;
3393 
3394   const MachineInstr *OpDef = MRI->getVRegDef(Root.getReg());
3395   if (!OpDef || OpDef->getOpcode() != AMDGPU::G_PTR_ADD)
3396     return Default;
3397 
3398   Optional<int64_t> Offset =
3399     getConstantVRegVal(OpDef->getOperand(2).getReg(), *MRI);
3400   if (!Offset.hasValue())
3401     return Default;
3402 
3403   unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
3404   if (!TII.isLegalFLATOffset(Offset.getValue(), AddrSpace, Signed))
3405     return Default;
3406 
3407   Register BasePtr = OpDef->getOperand(1).getReg();
3408 
3409   return std::make_pair(BasePtr, Offset.getValue());
3410 }
3411 
3412 InstructionSelector::ComplexRendererFns
3413 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
3414   auto PtrWithOffset = selectFlatOffsetImpl<false>(Root);
3415 
3416   return {{
3417       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3418       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3419     }};
3420 }
3421 
3422 InstructionSelector::ComplexRendererFns
3423 AMDGPUInstructionSelector::selectFlatOffsetSigned(MachineOperand &Root) const {
3424   auto PtrWithOffset = selectFlatOffsetImpl<true>(Root);
3425 
3426   return {{
3427       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3428       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3429     }};
3430 }
3431 
3432 /// Match a zero extend from a 32-bit value to 64-bits.
3433 static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) {
3434   Register ZExtSrc;
3435   if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))
3436     return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register();
3437 
3438   // Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0)
3439   const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
3440   if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES)
3441     return Register();
3442 
3443   int64_t MergeRHS;
3444   if (mi_match(Def->getOperand(2).getReg(), MRI, m_ICst(MergeRHS)) &&
3445       MergeRHS == 0) {
3446     return Def->getOperand(1).getReg();
3447   }
3448 
3449   return Register();
3450 }
3451 
3452 // Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
3453 InstructionSelector::ComplexRendererFns
3454 AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
3455   Register PtrBase;
3456   int64_t ImmOffset;
3457 
3458   // Match the immediate offset first, which canonically is moved as low as
3459   // possible.
3460   std::tie(PtrBase, ImmOffset) = getPtrBaseWithConstantOffset(Root.getReg(),
3461                                                               *MRI);
3462 
3463   // TODO: Could split larger constant into VGPR offset.
3464   if (ImmOffset != 0 &&
3465       !TII.isLegalFLATOffset(ImmOffset, AMDGPUAS::GLOBAL_ADDRESS, true)) {
3466     PtrBase = Root.getReg();
3467     ImmOffset = 0;
3468   }
3469 
3470   // Match the variable offset.
3471   const MachineInstr *PtrBaseDef = getDefIgnoringCopies(PtrBase, *MRI);
3472   if (PtrBaseDef->getOpcode() != AMDGPU::G_PTR_ADD)
3473     return None;
3474 
3475   // Look through the SGPR->VGPR copy.
3476   Register PtrBaseSrc =
3477     getSrcRegIgnoringCopies(PtrBaseDef->getOperand(1).getReg(), *MRI);
3478   if (!PtrBaseSrc)
3479     return None;
3480 
3481   const RegisterBank *BaseRB = RBI.getRegBank(PtrBaseSrc, *MRI, TRI);
3482   if (BaseRB->getID() != AMDGPU::SGPRRegBankID)
3483     return None;
3484 
3485   Register SAddr = PtrBaseSrc;
3486   Register PtrBaseOffset = PtrBaseDef->getOperand(2).getReg();
3487 
3488   // It's possible voffset is an SGPR here, but the copy to VGPR will be
3489   // inserted later.
3490   Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset);
3491   if (!VOffset)
3492     return None;
3493 
3494   return {{[=](MachineInstrBuilder &MIB) { // saddr
3495              MIB.addReg(SAddr);
3496            },
3497            [=](MachineInstrBuilder &MIB) { // voffset
3498              MIB.addReg(VOffset);
3499            },
3500            [=](MachineInstrBuilder &MIB) { // offset
3501              MIB.addImm(ImmOffset);
3502            }}};
3503 }
3504 
3505 static bool isStackPtrRelative(const MachinePointerInfo &PtrInfo) {
3506   auto PSV = PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
3507   return PSV && PSV->isStack();
3508 }
3509 
3510 InstructionSelector::ComplexRendererFns
3511 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
3512   MachineInstr *MI = Root.getParent();
3513   MachineBasicBlock *MBB = MI->getParent();
3514   MachineFunction *MF = MBB->getParent();
3515   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3516 
3517   int64_t Offset = 0;
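  // If the address is a known constant (other than the private null value),
  // materialize the aligned high bits with a VGPR move and fold the low 12
  // bits into the immediate offset field.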
3518   if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) &&
3519       Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) {
3520     Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3521 
3522     // TODO: Should this be inside the render function? The iterator seems to
3523     // move.
3524     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3525             HighBits)
3526       .addImm(Offset & ~4095);
3527 
3528     return {{[=](MachineInstrBuilder &MIB) { // rsrc
3529                MIB.addReg(Info->getScratchRSrcReg());
3530              },
3531              [=](MachineInstrBuilder &MIB) { // vaddr
3532                MIB.addReg(HighBits);
3533              },
3534              [=](MachineInstrBuilder &MIB) { // soffset
3535                const MachineMemOperand *MMO = *MI->memoperands_begin();
3536                const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
3537 
3538                if (isStackPtrRelative(PtrInfo))
3539                  MIB.addReg(Info->getStackPtrOffsetReg());
3540                else
3541                  MIB.addImm(0);
3542              },
3543              [=](MachineInstrBuilder &MIB) { // offset
3544                MIB.addImm(Offset & 4095);
3545              }}};
3546   }
3547 
3548   assert(Offset == 0 || Offset == -1);
3549 
3550   // Try to fold a frame index directly into the MUBUF vaddr field, and any
3551   // offsets.
3552   Optional<int> FI;
3553   Register VAddr = Root.getReg();
3554   if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
3555     if (isBaseWithConstantOffset(Root, *MRI)) {
3556       const MachineOperand &LHS = RootDef->getOperand(1);
3557       const MachineOperand &RHS = RootDef->getOperand(2);
3558       const MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
3559       const MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());
3560       if (LHSDef && RHSDef) {
3561         int64_t PossibleOffset =
3562             RHSDef->getOperand(1).getCImm()->getSExtValue();
3563         if (SIInstrInfo::isLegalMUBUFImmOffset(PossibleOffset) &&
3564             (!STI.privateMemoryResourceIsRangeChecked() ||
3565              KnownBits->signBitIsZero(LHS.getReg()))) {
3566           if (LHSDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
3567             FI = LHSDef->getOperand(1).getIndex();
3568           else
3569             VAddr = LHS.getReg();
3570           Offset = PossibleOffset;
3571         }
3572       }
3573     } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
3574       FI = RootDef->getOperand(1).getIndex();
3575     }
3576   }
3577 
3578   return {{[=](MachineInstrBuilder &MIB) { // rsrc
3579              MIB.addReg(Info->getScratchRSrcReg());
3580            },
3581            [=](MachineInstrBuilder &MIB) { // vaddr
3582              if (FI.hasValue())
3583                MIB.addFrameIndex(FI.getValue());
3584              else
3585                MIB.addReg(VAddr);
3586            },
3587            [=](MachineInstrBuilder &MIB) { // soffset
3588              // If we don't know this private access is a local stack object, it
3589              // needs to be relative to the entry point's scratch wave offset.
3590              // TODO: Should split large offsets that don't fit like above.
3591              // TODO: Don't use scratch wave offset just because the offset
3592              // didn't fit.
3593              if (!Info->isEntryFunction() && FI.hasValue())
3594                MIB.addReg(Info->getStackPtrOffsetReg());
3595              else
3596                MIB.addImm(0);
3597            },
3598            [=](MachineInstrBuilder &MIB) { // offset
3599              MIB.addImm(Offset);
3600            }}};
3601 }
3602 
3603 bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
3604                                                 int64_t Offset,
3605                                                 unsigned OffsetBits) const {
3606   if ((OffsetBits == 16 && !isUInt<16>(Offset)) ||
3607       (OffsetBits == 8 && !isUInt<8>(Offset)))
3608     return false;
3609 
3610   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
3611     return true;
3612 
3613   // On Southern Islands, instructions with a negative base value and an offset
3614   // don't seem to work.
3615   return KnownBits->signBitIsZero(Base);
3616 }
3617 
3618 InstructionSelector::ComplexRendererFns
3619 AMDGPUInstructionSelector::selectMUBUFScratchOffset(
3620     MachineOperand &Root) const {
3621   MachineInstr *MI = Root.getParent();
3622   MachineBasicBlock *MBB = MI->getParent();
3623 
3624   int64_t Offset = 0;
3625   if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
3626       !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
3627     return {};
3628 
3629   const MachineFunction *MF = MBB->getParent();
3630   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3631   const MachineMemOperand *MMO = *MI->memoperands_begin();
3632   const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
3633 
3634   return {{
3635       [=](MachineInstrBuilder &MIB) { // rsrc
3636         MIB.addReg(Info->getScratchRSrcReg());
3637       },
3638       [=](MachineInstrBuilder &MIB) { // soffset
3639         if (isStackPtrRelative(PtrInfo))
3640           MIB.addReg(Info->getStackPtrOffsetReg());
3641         else
3642           MIB.addImm(0);
3643       },
3644       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
3645   }};
3646 }
3647 
3648 std::pair<Register, unsigned>
3649 AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
3650   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
3651   if (!RootDef)
3652     return std::make_pair(Root.getReg(), 0);
3653 
3654   int64_t ConstAddr = 0;
3655 
3656   Register PtrBase;
3657   int64_t Offset;
3658   std::tie(PtrBase, Offset) =
3659     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3660 
3661   if (Offset) {
3662     if (isDSOffsetLegal(PtrBase, Offset, 16)) {
3663       // (add n0, c0)
3664       return std::make_pair(PtrBase, Offset);
3665     }
3666   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
3667     // TODO
3668 
3669 
3670   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
3671     // TODO
3672 
3673   }
3674 
3675   return std::make_pair(Root.getReg(), 0);
3676 }
3677 
3678 InstructionSelector::ComplexRendererFns
3679 AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
3680   Register Reg;
3681   unsigned Offset;
3682   std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
3683   return {{
3684       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3685       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
3686     }};
3687 }
3688 
3689 InstructionSelector::ComplexRendererFns
3690 AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
3691   return selectDSReadWrite2(Root, false);
3692 }
3693 
3694 InstructionSelector::ComplexRendererFns
3695 AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const {
3696   return selectDSReadWrite2(Root, true);
3697 }
3698 
3699 InstructionSelector::ComplexRendererFns
3700 AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root,
3701                                               bool IsDS128) const {
3702   Register Reg;
3703   unsigned Offset;
3704   std::tie(Reg, Offset) = selectDSReadWrite2Impl(Root, IsDS128);
3705   return {{
3706       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3707       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
3708       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); }
3709     }};
3710 }
3711 
3712 std::pair<Register, unsigned>
3713 AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root,
3714                                                   bool IsDS128) const {
3715   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
3716   if (!RootDef)
3717     return std::make_pair(Root.getReg(), 0);
3718 
3719   int64_t ConstAddr = 0;
3720 
3721   Register PtrBase;
3722   int64_t Offset;
3723   std::tie(PtrBase, Offset) =
3724     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3725 
3726   if (Offset) {
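    // read2/write2 offsets are encoded in units of the element size: 8 bytes
    // for the 128-bit forms, 4 bytes otherwise.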
3727     int64_t OffsetValue0 = Offset / (IsDS128 ? 8 : 4);
3728     int64_t OffsetValue1 = OffsetValue0 + 1;
3729     if (isDSOffsetLegal(PtrBase, OffsetValue1, (IsDS128 ? 16 : 8))) {
3730       // (add n0, c0)
3731       return std::make_pair(PtrBase, OffsetValue0);
3732     }
3733   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
3734     // TODO
3735 
3736   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
3737     // TODO
3738 
3739   }
3740 
3741   return std::make_pair(Root.getReg(), 0);
3742 }
3743 
3744 /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
3745 /// the base value with the constant offset. There may be intervening copies
3746 /// between \p Root and the identified constant. Returns \p Root, 0 if this does
3747 /// not match the pattern.
3748 std::pair<Register, int64_t>
3749 AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
3750   Register Root, const MachineRegisterInfo &MRI) const {
3751   MachineInstr *RootI = getDefIgnoringCopies(Root, MRI);
3752   if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
3753     return {Root, 0};
3754 
3755   MachineOperand &RHS = RootI->getOperand(2);
3756   Optional<ValueAndVReg> MaybeOffset
3757     = getConstantVRegValWithLookThrough(RHS.getReg(), MRI, true);
3758   if (!MaybeOffset)
3759     return {Root, 0};
3760   return {RootI->getOperand(1).getReg(), MaybeOffset->Value};
3761 }
3762 
3763 static void addZeroImm(MachineInstrBuilder &MIB) {
3764   MIB.addImm(0);
3765 }
3766 
3767 /// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p
3768 /// BasePtr is not valid, a null base pointer will be used.
3769 static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
3770                           uint32_t FormatLo, uint32_t FormatHi,
3771                           Register BasePtr) {
3772   Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
3773   Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
3774   Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
3775   Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
3776 
3777   B.buildInstr(AMDGPU::S_MOV_B32)
3778     .addDef(RSrc2)
3779     .addImm(FormatLo);
3780   B.buildInstr(AMDGPU::S_MOV_B32)
3781     .addDef(RSrc3)
3782     .addImm(FormatHi);
3783 
3784   // Build the half of the subregister with the constants before building the
3785   // full 128-bit register. If we are building multiple resource descriptors,
3786   // this will allow CSEing of the 2-component register.
3787   B.buildInstr(AMDGPU::REG_SEQUENCE)
3788     .addDef(RSrcHi)
3789     .addReg(RSrc2)
3790     .addImm(AMDGPU::sub0)
3791     .addReg(RSrc3)
3792     .addImm(AMDGPU::sub1);
3793 
3794   Register RSrcLo = BasePtr;
3795   if (!BasePtr) {
3796     RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
3797     B.buildInstr(AMDGPU::S_MOV_B64)
3798       .addDef(RSrcLo)
3799       .addImm(0);
3800   }
3801 
3802   B.buildInstr(AMDGPU::REG_SEQUENCE)
3803     .addDef(RSrc)
3804     .addReg(RSrcLo)
3805     .addImm(AMDGPU::sub0_sub1)
3806     .addReg(RSrcHi)
3807     .addImm(AMDGPU::sub2_sub3);
3808 
3809   return RSrc;
3810 }
3811 
3812 static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
3813                                 const SIInstrInfo &TII, Register BasePtr) {
3814   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
3815 
3816   // FIXME: Why are half the "default" bits ignored based on the addressing
3817   // mode?
3818   return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
3819 }
3820 
3821 static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
3822                                const SIInstrInfo &TII, Register BasePtr) {
3823   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
3824 
3825   // FIXME: Why are half the "default" bits ignored based on the addressing
3826   // mode?
3827   return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
3828 }
3829 
3830 AMDGPUInstructionSelector::MUBUFAddressData
3831 AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
3832   MUBUFAddressData Data;
3833   Data.N0 = Src;
3834 
3835   Register PtrBase;
3836   int64_t Offset;
3837 
3838   std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
3839   if (isUInt<32>(Offset)) {
3840     Data.N0 = PtrBase;
3841     Data.Offset = Offset;
3842   }
3843 
3844   if (MachineInstr *InputAdd
3845       = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
3846     Data.N2 = InputAdd->getOperand(1).getReg();
3847     Data.N3 = InputAdd->getOperand(2).getReg();
3848 
3849     // FIXME: Need to fix extra SGPR->VGPR copies inserted
3850     // FIXME: We don't know that this was defined by operand 0
3851     //
3852     // TODO: Remove this when we have copy folding optimizations after
3853     // RegBankSelect.
3854     Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
3855     Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
3856   }
3857 
3858   return Data;
3859 }
3860 
3861 /// Return true if the addr64 MUBUF mode should be used for the given address.
3862 bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
3863   // (ptr_add N2, N3) -> addr64, or
3864   // (ptr_add (ptr_add N2, N3), C1) -> addr64
3865   if (Addr.N2)
3866     return true;
3867 
3868   const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
3869   return N0Bank->getID() == AMDGPU::VGPRRegBankID;
3870 }
3871 
3872 /// Split an immediate offset \p ImmOffset depending on whether it fits in the
3873 /// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
3874 /// component.
3875 void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
3876   MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
3877   if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
3878     return;
3879 
3880   // Illegal offset, store it in soffset.
3881   SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3882   B.buildInstr(AMDGPU::S_MOV_B32)
3883     .addDef(SOffset)
3884     .addImm(ImmOffset);
3885   ImmOffset = 0;
3886 }
3887 
3888 bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
3889   MachineOperand &Root, Register &VAddr, Register &RSrcReg,
3890   Register &SOffset, int64_t &Offset) const {
3891   // FIXME: Predicates should stop this from reaching here.
3892   // The addr64 bit was removed for Volcanic Islands.
3893   if (!STI.hasAddr64() || STI.useFlatForGlobal())
3894     return false;
3895 
3896   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
3897   if (!shouldUseAddr64(AddrData))
3898     return false;
3899 
3900   Register N0 = AddrData.N0;
3901   Register N2 = AddrData.N2;
3902   Register N3 = AddrData.N3;
3903   Offset = AddrData.Offset;
3904 
3905   // Base pointer for the SRD.
3906   Register SRDPtr;
3907 
3908   if (N2) {
3909     if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
3910       assert(N3);
3911       if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
3912         // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
3913         // addr64, and construct the default resource from a 0 address.
3914         VAddr = N0;
3915       } else {
3916         SRDPtr = N3;
3917         VAddr = N2;
3918       }
3919     } else {
3920       // N2 is not divergent.
3921       SRDPtr = N2;
3922       VAddr = N3;
3923     }
3924   } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
3925     // Use the default null pointer in the resource
3926     VAddr = N0;
3927   } else {
3928     // N0 -> offset, or
3929     // (N0 + C1) -> offset
3930     SRDPtr = N0;
3931   }
3932 
3933   MachineIRBuilder B(*Root.getParent());
3934   RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
3935   splitIllegalMUBUFOffset(B, SOffset, Offset);
3936   return true;
3937 }
3938 
3939 bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
3940   MachineOperand &Root, Register &RSrcReg, Register &SOffset,
3941   int64_t &Offset) const {
3942 
3943   // FIXME: Pattern should not reach here.
3944   if (STI.useFlatForGlobal())
3945     return false;
3946 
3947   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
3948   if (shouldUseAddr64(AddrData))
3949     return false;
3950 
3951   // N0 -> offset, or
3952   // (N0 + C1) -> offset
3953   Register SRDPtr = AddrData.N0;
3954   Offset = AddrData.Offset;
3955 
3956   // TODO: Look through extensions for 32-bit soffset.
3957   MachineIRBuilder B(*Root.getParent());
3958 
3959   RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
3960   splitIllegalMUBUFOffset(B, SOffset, Offset);
3961   return true;
3962 }
3963 
3964 InstructionSelector::ComplexRendererFns
3965 AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
3966   Register VAddr;
3967   Register RSrcReg;
3968   Register SOffset;
3969   int64_t Offset = 0;
3970 
3971   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
3972     return {};
3973 
3974   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
3975   // pattern.
3976   return {{
3977       [=](MachineInstrBuilder &MIB) {  // rsrc
3978         MIB.addReg(RSrcReg);
3979       },
3980       [=](MachineInstrBuilder &MIB) { // vaddr
3981         MIB.addReg(VAddr);
3982       },
3983       [=](MachineInstrBuilder &MIB) { // soffset
3984         if (SOffset)
3985           MIB.addReg(SOffset);
3986         else
3987           MIB.addImm(0);
3988       },
3989       [=](MachineInstrBuilder &MIB) { // offset
3990         MIB.addImm(Offset);
3991       },
3992       addZeroImm, //  glc
3993       addZeroImm, //  slc
3994       addZeroImm, //  tfe
3995       addZeroImm, //  dlc
3996       addZeroImm  //  swz
3997     }};
3998 }
3999 
4000 InstructionSelector::ComplexRendererFns
4001 AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
4002   Register RSrcReg;
4003   Register SOffset;
4004   int64_t Offset = 0;
4005 
4006   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4007     return {};
4008 
4009   return {{
4010       [=](MachineInstrBuilder &MIB) {  // rsrc
4011         MIB.addReg(RSrcReg);
4012       },
4013       [=](MachineInstrBuilder &MIB) { // soffset
4014         if (SOffset)
4015           MIB.addReg(SOffset);
4016         else
4017           MIB.addImm(0);
4018       },
4019       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4020       addZeroImm, //  glc
4021       addZeroImm, //  slc
4022       addZeroImm, //  tfe
4023       addZeroImm, //  dlc
4024       addZeroImm  //  swz
4025     }};
4026 }
4027 
4028 InstructionSelector::ComplexRendererFns
4029 AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const {
4030   Register VAddr;
4031   Register RSrcReg;
4032   Register SOffset;
4033   int64_t Offset = 0;
4034 
4035   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
4036     return {};
4037 
4038   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
4039   // pattern.
4040   return {{
4041       [=](MachineInstrBuilder &MIB) {  // rsrc
4042         MIB.addReg(RSrcReg);
4043       },
4044       [=](MachineInstrBuilder &MIB) { // vaddr
4045         MIB.addReg(VAddr);
4046       },
4047       [=](MachineInstrBuilder &MIB) { // soffset
4048         if (SOffset)
4049           MIB.addReg(SOffset);
4050         else
4051           MIB.addImm(0);
4052       },
4053       [=](MachineInstrBuilder &MIB) { // offset
4054         MIB.addImm(Offset);
4055       },
4056       addZeroImm //  slc
4057     }};
4058 }
4059 
4060 InstructionSelector::ComplexRendererFns
4061 AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
4062   Register RSrcReg;
4063   Register SOffset;
4064   int64_t Offset = 0;
4065 
4066   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4067     return {};
4068 
4069   return {{
4070       [=](MachineInstrBuilder &MIB) {  // rsrc
4071         MIB.addReg(RSrcReg);
4072       },
4073       [=](MachineInstrBuilder &MIB) { // soffset
4074         if (SOffset)
4075           MIB.addReg(SOffset);
4076         else
4077           MIB.addImm(0);
4078       },
4079       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4080       addZeroImm //  slc
4081     }};
4082 }
4083 
4084 /// Get an immediate that must fit in 32 bits and is treated as zero-extended.
4085 static Optional<uint64_t> getConstantZext32Val(Register Reg,
4086                                                const MachineRegisterInfo &MRI) {
4087   // getConstantVRegVal sexts any values, so see if that matters.
4088   Optional<int64_t> OffsetVal = getConstantVRegVal(Reg, MRI);
4089   if (!OffsetVal || !isInt<32>(*OffsetVal))
4090     return None;
4091   return Lo_32(*OffsetVal);
4092 }
4093 
4094 InstructionSelector::ComplexRendererFns
4095 AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
4096   Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
4097   if (!OffsetVal)
4098     return {};
4099 
4100   Optional<int64_t> EncodedImm =
4101       AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
4102   if (!EncodedImm)
4103     return {};
4104 
4105   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
4106 }
4107 
4108 InstructionSelector::ComplexRendererFns
4109 AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
4110   assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
4111 
4112   Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
4113   if (!OffsetVal)
4114     return {};
4115 
4116   Optional<int64_t> EncodedImm
4117     = AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
4118   if (!EncodedImm)
4119     return {};
4120 
4121   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
4122 }
4123 
4124 void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
4125                                                  const MachineInstr &MI,
4126                                                  int OpIdx) const {
4127   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4128          "Expected G_CONSTANT");
4129   MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
4130 }
4131 
4132 void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
4133                                                 const MachineInstr &MI,
4134                                                 int OpIdx) const {
4135   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4136          "Expected G_CONSTANT");
4137   MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
4138 }
4139 
4140 void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
4141                                                  const MachineInstr &MI,
4142                                                  int OpIdx) const {
4143   assert(OpIdx == -1);
4144 
4145   const MachineOperand &Op = MI.getOperand(1);
4146   if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
4147     MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
4148   else {
4149     assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
4150     MIB.addImm(Op.getCImm()->getSExtValue());
4151   }
4152 }
4153 
4154 void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
4155                                                 const MachineInstr &MI,
4156                                                 int OpIdx) const {
4157   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4158          "Expected G_CONSTANT");
4159   MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
4160 }
4161 
4162 /// This only really exists to satisfy DAG type checking machinery, so is a
4163 /// no-op here.
4164 void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
4165                                                 const MachineInstr &MI,
4166                                                 int OpIdx) const {
4167   MIB.addImm(MI.getOperand(OpIdx).getImm());
4168 }
4169 
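// The cache policy immediate packs glc in bit 0, slc in bit 1, dlc in bit 2
// and swz in bit 3; the renderers below extract the individual bits.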
4170 void AMDGPUInstructionSelector::renderExtractGLC(MachineInstrBuilder &MIB,
4171                                                  const MachineInstr &MI,
4172                                                  int OpIdx) const {
4173   assert(OpIdx >= 0 && "expected to match an immediate operand");
4174   MIB.addImm(MI.getOperand(OpIdx).getImm() & 1);
4175 }
4176 
4177 void AMDGPUInstructionSelector::renderExtractSLC(MachineInstrBuilder &MIB,
4178                                                  const MachineInstr &MI,
4179                                                  int OpIdx) const {
4180   assert(OpIdx >= 0 && "expected to match an immediate operand");
4181   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 1) & 1);
4182 }
4183 
4184 void AMDGPUInstructionSelector::renderExtractDLC(MachineInstrBuilder &MIB,
4185                                                  const MachineInstr &MI,
4186                                                  int OpIdx) const {
4187   assert(OpIdx >= 0 && "expected to match an immediate operand");
4188   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 2) & 1);
4189 }
4190 
4191 void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
4192                                                  const MachineInstr &MI,
4193                                                  int OpIdx) const {
4194   assert(OpIdx >= 0 && "expected to match an immediate operand");
4195   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1);
4196 }
4197 
4198 void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
4199                                                  const MachineInstr &MI,
4200                                                  int OpIdx) const {
4201   MIB.addFrameIndex(MI.getOperand(1).getIndex());
4202 }
4203 
4204 bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
4205   return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
4206 }
4207 
4208 bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
4209   return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
4210 }
4211 
4212 bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
4213   return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
4214 }
4215 
4216 bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
4217   return TII.isInlineConstant(Imm);
4218 }
4219