1 //===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the InstructionSelector class for
10 /// AMDGPU.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13 
14 #include "AMDGPUInstructionSelector.h"
15 #include "AMDGPUInstrInfo.h"
16 #include "AMDGPUGlobalISelUtils.h"
17 #include "AMDGPURegisterBankInfo.h"
18 #include "AMDGPUSubtarget.h"
19 #include "AMDGPUTargetMachine.h"
20 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
21 #include "SIMachineFunctionInfo.h"
22 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
23 #include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
24 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
25 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
26 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
27 #include "llvm/CodeGen/GlobalISel/Utils.h"
28 #include "llvm/CodeGen/MachineBasicBlock.h"
29 #include "llvm/CodeGen/MachineFunction.h"
30 #include "llvm/CodeGen/MachineInstr.h"
31 #include "llvm/CodeGen/MachineInstrBuilder.h"
32 #include "llvm/CodeGen/MachineRegisterInfo.h"
33 #include "llvm/IR/DiagnosticInfo.h"
34 #include "llvm/IR/Type.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/Support/raw_ostream.h"
37 
38 #define DEBUG_TYPE "amdgpu-isel"
39 
40 using namespace llvm;
41 using namespace MIPatternMatch;
42 
43 static cl::opt<bool> AllowRiskySelect(
44   "amdgpu-global-isel-risky-select",
45   cl::desc("Allow GlobalISel to select cases that are likely to not work yet"),
46   cl::init(false),
47   cl::ReallyHidden);
48 
49 #define GET_GLOBALISEL_IMPL
50 #define AMDGPUSubtarget GCNSubtarget
51 #include "AMDGPUGenGlobalISel.inc"
52 #undef GET_GLOBALISEL_IMPL
53 #undef AMDGPUSubtarget
54 
55 AMDGPUInstructionSelector::AMDGPUInstructionSelector(
56     const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
57     const AMDGPUTargetMachine &TM)
58     : InstructionSelector(), TII(*STI.getInstrInfo()),
59       TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
60       STI(STI),
61       EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
62 #define GET_GLOBALISEL_PREDICATES_INIT
63 #include "AMDGPUGenGlobalISel.inc"
64 #undef GET_GLOBALISEL_PREDICATES_INIT
65 #define GET_GLOBALISEL_TEMPORARIES_INIT
66 #include "AMDGPUGenGlobalISel.inc"
67 #undef GET_GLOBALISEL_TEMPORARIES_INIT
68 {
69 }
70 
71 const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
72 
73 void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits &KB,
74                                         CodeGenCoverage &CoverageInfo) {
75   MRI = &MF.getRegInfo();
76   Subtarget = &MF.getSubtarget<GCNSubtarget>();
77   InstructionSelector::setupMF(MF, KB, CoverageInfo);
78 }
79 
80 bool AMDGPUInstructionSelector::isVCC(Register Reg,
81                                       const MachineRegisterInfo &MRI) const {
82   // The verifier is oblivious to s1 being a valid value for wavesize registers.
83   if (Reg.isPhysical())
84     return false;
85 
86   auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
87   const TargetRegisterClass *RC =
88       RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
89   if (RC) {
90     const LLT Ty = MRI.getType(Reg);
91     return RC->hasSuperClassEq(TRI.getBoolRC()) &&
92            Ty.isValid() && Ty.getSizeInBits() == 1;
93   }
94 
95   const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
96   return RB->getID() == AMDGPU::VCCRegBankID;
97 }
98 
99 bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
100                                                         unsigned NewOpc) const {
101   MI.setDesc(TII.get(NewOpc));
102   MI.RemoveOperand(1); // Remove intrinsic ID.
103   MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
104 
105   MachineOperand &Dst = MI.getOperand(0);
106   MachineOperand &Src = MI.getOperand(1);
107 
108   // TODO: This should be legalized to s32 if needed
109   if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
110     return false;
111 
112   const TargetRegisterClass *DstRC
113     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
114   const TargetRegisterClass *SrcRC
115     = TRI.getConstrainedRegClassForOperand(Src, *MRI);
116   if (!DstRC || DstRC != SrcRC)
117     return false;
118 
119   return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
120          RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
121 }
122 
123 bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
124   const DebugLoc &DL = I.getDebugLoc();
125   MachineBasicBlock *BB = I.getParent();
126   I.setDesc(TII.get(TargetOpcode::COPY));
127 
128   const MachineOperand &Src = I.getOperand(1);
129   MachineOperand &Dst = I.getOperand(0);
130   Register DstReg = Dst.getReg();
131   Register SrcReg = Src.getReg();
132 
133   if (isVCC(DstReg, *MRI)) {
134     if (SrcReg == AMDGPU::SCC) {
135       const TargetRegisterClass *RC
136         = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
137       if (!RC)
138         return true;
139       return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
140     }
141 
142     if (!isVCC(SrcReg, *MRI)) {
143       // TODO: Should probably leave the copy and let copyPhysReg expand it.
144       if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
145         return false;
146 
147       const TargetRegisterClass *SrcRC
148         = TRI.getConstrainedRegClassForOperand(Src, *MRI);
149 
150       Register MaskedReg = MRI->createVirtualRegister(SrcRC);
151 
152       // We can't trust the high bits at this point, so clear them.
153 
154       // TODO: Skip masking high bits if def is known boolean.
155 
156       unsigned AndOpc = TRI.isSGPRClass(SrcRC) ?
157         AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
158       BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
159         .addImm(1)
160         .addReg(SrcReg);
161       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
162         .addImm(0)
163         .addReg(MaskedReg);
164 
165       if (!MRI->getRegClassOrNull(SrcReg))
166         MRI->setRegClass(SrcReg, SrcRC);
167       I.eraseFromParent();
168       return true;
169     }
170 
171     const TargetRegisterClass *RC =
172       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
173     if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
174       return false;
175 
176     return true;
177   }
178 
179   for (const MachineOperand &MO : I.operands()) {
180     if (MO.getReg().isPhysical())
181       continue;
182 
183     const TargetRegisterClass *RC =
184             TRI.getConstrainedRegClassForOperand(MO, *MRI);
185     if (!RC)
186       continue;
187     RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
188   }
189   return true;
190 }
191 
192 bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
193   const Register DefReg = I.getOperand(0).getReg();
194   const LLT DefTy = MRI->getType(DefReg);
195   if (DefTy == LLT::scalar(1)) {
196     if (!AllowRiskySelect) {
197       LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n");
198       return false;
199     }
200 
201     LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n");
202   }
203 
204   // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)
205 
206   const RegClassOrRegBank &RegClassOrBank =
207     MRI->getRegClassOrRegBank(DefReg);
208 
209   const TargetRegisterClass *DefRC
210     = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
211   if (!DefRC) {
212     if (!DefTy.isValid()) {
213       LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
214       return false;
215     }
216 
217     const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
218     DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB, *MRI);
219     if (!DefRC) {
220       LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
221       return false;
222     }
223   }
224 
225   // TODO: Verify that all registers have the same bank
226   I.setDesc(TII.get(TargetOpcode::PHI));
227   return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
228 }
229 
230 MachineOperand
231 AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
232                                            const TargetRegisterClass &SubRC,
233                                            unsigned SubIdx) const {
234 
235   MachineInstr *MI = MO.getParent();
236   MachineBasicBlock *BB = MO.getParent()->getParent();
237   Register DstReg = MRI->createVirtualRegister(&SubRC);
238 
239   if (MO.isReg()) {
240     unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
241     Register Reg = MO.getReg();
242     BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
243             .addReg(Reg, 0, ComposedSubIdx);
244 
245     return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
246                                      MO.isKill(), MO.isDead(), MO.isUndef(),
247                                      MO.isEarlyClobber(), 0, MO.isDebug(),
248                                      MO.isInternalRead());
249   }
250 
251   assert(MO.isImm());
252 
253   APInt Imm(64, MO.getImm());
254 
255   switch (SubIdx) {
256   default:
257     llvm_unreachable("do not know to split immediate with this sub index.");
258   case AMDGPU::sub0:
259     return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
260   case AMDGPU::sub1:
261     return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
262   }
263 }
264 
265 static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
266   switch (Opc) {
267   case AMDGPU::G_AND:
268     return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
269   case AMDGPU::G_OR:
270     return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
271   case AMDGPU::G_XOR:
272     return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
273   default:
274     llvm_unreachable("not a bit op");
275   }
276 }
277 
278 bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
279   Register DstReg = I.getOperand(0).getReg();
280   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
281 
282   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
283   if (DstRB->getID() != AMDGPU::SGPRRegBankID &&
284       DstRB->getID() != AMDGPU::VCCRegBankID)
285     return false;
286 
287   bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID &&
288                             STI.isWave64());
289   I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64)));
290 
291   // Dead implicit-def of scc
292   I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
293                                          true, // isImp
294                                          false, // isKill
295                                          true)); // isDead
296   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
297 }
298 
299 bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
300   MachineBasicBlock *BB = I.getParent();
301   MachineFunction *MF = BB->getParent();
302   Register DstReg = I.getOperand(0).getReg();
303   const DebugLoc &DL = I.getDebugLoc();
304   LLT Ty = MRI->getType(DstReg);
305   if (Ty.isVector())
306     return false;
307 
308   unsigned Size = Ty.getSizeInBits();
309   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
310   const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
311   const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;
312 
313   if (Size == 32) {
314     if (IsSALU) {
315       const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
316       MachineInstr *Add =
317         BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
318         .add(I.getOperand(1))
319         .add(I.getOperand(2));
320       I.eraseFromParent();
321       return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
322     }
323 
324     if (STI.hasAddNoCarry()) {
325       const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
326       I.setDesc(TII.get(Opc));
327       I.addOperand(*MF, MachineOperand::CreateImm(0));
328       I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
329       return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
330     }
331 
332     const unsigned Opc = Sub ? AMDGPU::V_SUB_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e64;
333 
334     Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
335     MachineInstr *Add
336       = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
337       .addDef(UnusedCarry, RegState::Dead)
338       .add(I.getOperand(1))
339       .add(I.getOperand(2))
340       .addImm(0);
341     I.eraseFromParent();
342     return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
343   }
344 
345   assert(!Sub && "illegal sub should not reach here");
346 
347   const TargetRegisterClass &RC
348     = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
349   const TargetRegisterClass &HalfRC
350     = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;
351 
352   MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
353   MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
354   MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
355   MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));
356 
357   Register DstLo = MRI->createVirtualRegister(&HalfRC);
358   Register DstHi = MRI->createVirtualRegister(&HalfRC);
359 
360   if (IsSALU) {
361     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
362       .add(Lo1)
363       .add(Lo2);
364     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
365       .add(Hi1)
366       .add(Hi2);
367   } else {
368     const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
369     Register CarryReg = MRI->createVirtualRegister(CarryRC);
370     BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo)
371       .addDef(CarryReg)
372       .add(Lo1)
373       .add(Lo2)
374       .addImm(0);
375     MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
376       .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
377       .add(Hi1)
378       .add(Hi2)
379       .addReg(CarryReg, RegState::Kill)
380       .addImm(0);
381 
382     if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
383       return false;
384   }
385 
386   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
387     .addReg(DstLo)
388     .addImm(AMDGPU::sub0)
389     .addReg(DstHi)
390     .addImm(AMDGPU::sub1);
391 
392 
393   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
394     return false;
395 
396   I.eraseFromParent();
397   return true;
398 }
399 
400 bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
401   MachineInstr &I) const {
402   MachineBasicBlock *BB = I.getParent();
403   MachineFunction *MF = BB->getParent();
404   const DebugLoc &DL = I.getDebugLoc();
405   Register Dst0Reg = I.getOperand(0).getReg();
406   Register Dst1Reg = I.getOperand(1).getReg();
407   const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
408                      I.getOpcode() == AMDGPU::G_UADDE;
409   const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
410                           I.getOpcode() == AMDGPU::G_USUBE;
411 
412   if (isVCC(Dst1Reg, *MRI)) {
413     unsigned NoCarryOpc =
414         IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
415     unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
416     I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
417     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
418     I.addOperand(*MF, MachineOperand::CreateImm(0));
419     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
420   }
421 
422   Register Src0Reg = I.getOperand(2).getReg();
423   Register Src1Reg = I.getOperand(3).getReg();
424 
425   if (HasCarryIn) {
426     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
427       .addReg(I.getOperand(4).getReg());
428   }
429 
430   unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
431   unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
432 
433   BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
434     .add(I.getOperand(2))
435     .add(I.getOperand(3));
436   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
437     .addReg(AMDGPU::SCC);
438 
439   if (!MRI->getRegClassOrNull(Dst1Reg))
440     MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);
441 
442   if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
443       !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
444       !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
445     return false;
446 
447   if (HasCarryIn &&
448       !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
449                                     AMDGPU::SReg_32RegClass, *MRI))
450     return false;
451 
452   I.eraseFromParent();
453   return true;
454 }
455 
456 // TODO: We should probably legalize these to only using 32-bit results.
457 bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
458   MachineBasicBlock *BB = I.getParent();
459   Register DstReg = I.getOperand(0).getReg();
460   Register SrcReg = I.getOperand(1).getReg();
461   LLT DstTy = MRI->getType(DstReg);
462   LLT SrcTy = MRI->getType(SrcReg);
463   const unsigned SrcSize = SrcTy.getSizeInBits();
464   unsigned DstSize = DstTy.getSizeInBits();
465 
466   // TODO: Should handle any multiple of 32 offset.
467   unsigned Offset = I.getOperand(2).getImm();
468   if (Offset % 32 != 0 || DstSize > 128)
469     return false;
470 
471   // 16-bit operations really use 32-bit registers.
472   // FIXME: Probably should not allow 16-bit G_EXTRACT results.
473   if (DstSize == 16)
474     DstSize = 32;
475 
476   const TargetRegisterClass *DstRC =
477     TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
478   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
479     return false;
480 
481   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
482   const TargetRegisterClass *SrcRC =
483     TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
484   if (!SrcRC)
485     return false;
486   unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
487                                                          DstSize / 32);
488   SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
489   if (!SrcRC)
490     return false;
491 
492   SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
493                                     *SrcRC, I.getOperand(1));
494   const DebugLoc &DL = I.getDebugLoc();
495   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
496     .addReg(SrcReg, 0, SubReg);
497 
498   I.eraseFromParent();
499   return true;
500 }
501 
502 bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
503   MachineBasicBlock *BB = MI.getParent();
504   Register DstReg = MI.getOperand(0).getReg();
505   LLT DstTy = MRI->getType(DstReg);
506   LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());
507 
508   const unsigned SrcSize = SrcTy.getSizeInBits();
509   if (SrcSize < 32)
510     return selectImpl(MI, *CoverageInfo);
511 
512   const DebugLoc &DL = MI.getDebugLoc();
513   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
514   const unsigned DstSize = DstTy.getSizeInBits();
515   const TargetRegisterClass *DstRC =
516     TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
517   if (!DstRC)
518     return false;
519 
520   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
521   MachineInstrBuilder MIB =
522     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
523   for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
524     MachineOperand &Src = MI.getOperand(I + 1);
525     MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
526     MIB.addImm(SubRegs[I]);
527 
528     const TargetRegisterClass *SrcRC
529       = TRI.getConstrainedRegClassForOperand(Src, *MRI);
530     if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
531       return false;
532   }
533 
534   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
535     return false;
536 
537   MI.eraseFromParent();
538   return true;
539 }
540 
541 bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
542   MachineBasicBlock *BB = MI.getParent();
543   const int NumDst = MI.getNumOperands() - 1;
544 
545   MachineOperand &Src = MI.getOperand(NumDst);
546 
547   Register SrcReg = Src.getReg();
548   Register DstReg0 = MI.getOperand(0).getReg();
549   LLT DstTy = MRI->getType(DstReg0);
550   LLT SrcTy = MRI->getType(SrcReg);
551 
552   const unsigned DstSize = DstTy.getSizeInBits();
553   const unsigned SrcSize = SrcTy.getSizeInBits();
554   const DebugLoc &DL = MI.getDebugLoc();
555   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
556 
557   const TargetRegisterClass *SrcRC =
558     TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
559   if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
560     return false;
561 
562   // Note we could have mixed SGPR and VGPR destination banks for an SGPR
563   // source, and this relies on the fact that the same subregister indices are
564   // used for both.
565   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
566   for (int I = 0, E = NumDst; I != E; ++I) {
567     MachineOperand &Dst = MI.getOperand(I);
568     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
569       .addReg(SrcReg, 0, SubRegs[I]);
570 
571     // Make sure the subregister index is valid for the source register.
572     SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]);
573     if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
574       return false;
575 
576     const TargetRegisterClass *DstRC =
577       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
578     if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
579       return false;
580   }
581 
582   MI.eraseFromParent();
583   return true;
584 }
585 
586 bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC(
587   MachineInstr &MI) const {
588   if (selectImpl(MI, *CoverageInfo))
589     return true;
590 
591   const LLT S32 = LLT::scalar(32);
592   const LLT V2S16 = LLT::vector(2, 16);
593 
594   Register Dst = MI.getOperand(0).getReg();
595   if (MRI->getType(Dst) != V2S16)
596     return false;
597 
598   const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
599   if (DstBank->getID() != AMDGPU::SGPRRegBankID)
600     return false;
601 
602   Register Src0 = MI.getOperand(1).getReg();
603   Register Src1 = MI.getOperand(2).getReg();
604   if (MRI->getType(Src0) != S32)
605     return false;
606 
607   const DebugLoc &DL = MI.getDebugLoc();
608   MachineBasicBlock *BB = MI.getParent();
609 
610   auto ConstSrc1 = getConstantVRegValWithLookThrough(Src1, *MRI, true, true);
611   if (ConstSrc1) {
612     auto ConstSrc0 = getConstantVRegValWithLookThrough(Src0, *MRI, true, true);
613     if (ConstSrc0) {
614       uint32_t Lo16 = static_cast<uint32_t>(ConstSrc0->Value) & 0xffff;
615       uint32_t Hi16 = static_cast<uint32_t>(ConstSrc1->Value) & 0xffff;
616 
617       BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst)
618         .addImm(Lo16 | (Hi16 << 16));
619       MI.eraseFromParent();
620       return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI);
621     }
622   }
623 
624   // TODO: This should probably be a combine somewhere
625   // (build_vector_trunc $src0, undef -> copy $src0
626   MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
627   if (Src1Def && Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
628     MI.setDesc(TII.get(AMDGPU::COPY));
629     MI.RemoveOperand(2);
630     return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI) &&
631            RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI);
632   }
633 
634   Register ShiftSrc0;
635   Register ShiftSrc1;
636 
637   // With multiple uses of the shift, this will duplicate the shift and
638   // increase register pressure.
639   //
640   // (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16)
641   //  => (S_PACK_HH_B32_B16 $src0, $src1)
642   // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16))
643   //  => (S_PACK_LH_B32_B16 $src0, $src1)
644   // (build_vector_trunc $src0, $src1)
645   //  => (S_PACK_LL_B32_B16 $src0, $src1)
646 
647   bool Shift0 = mi_match(
648       Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_SpecificICst(16))));
649 
650   bool Shift1 = mi_match(
651       Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_SpecificICst(16))));
652 
653   unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
654   if (Shift0 && Shift1) {
655     Opc = AMDGPU::S_PACK_HH_B32_B16;
656     MI.getOperand(1).setReg(ShiftSrc0);
657     MI.getOperand(2).setReg(ShiftSrc1);
658   } else if (Shift1) {
659     Opc = AMDGPU::S_PACK_LH_B32_B16;
660     MI.getOperand(2).setReg(ShiftSrc1);
661   } else if (Shift0 && ConstSrc1 && ConstSrc1->Value == 0) {
662     // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16
663     auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)
664       .addReg(ShiftSrc0)
665       .addImm(16);
666 
667     MI.eraseFromParent();
668     return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
669   }
670 
671   MI.setDesc(TII.get(Opc));
672   return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
673 }
674 
675 bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
676   return selectG_ADD_SUB(I);
677 }
678 
679 bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
680   const MachineOperand &MO = I.getOperand(0);
681 
682   // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
683   // regbank check here is to know why getConstrainedRegClassForOperand failed.
684   const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
685   if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
686       (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
687     I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
688     return true;
689   }
690 
691   return false;
692 }
693 
694 bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
695   MachineBasicBlock *BB = I.getParent();
696 
697   Register DstReg = I.getOperand(0).getReg();
698   Register Src0Reg = I.getOperand(1).getReg();
699   Register Src1Reg = I.getOperand(2).getReg();
700   LLT Src1Ty = MRI->getType(Src1Reg);
701 
702   unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
703   unsigned InsSize = Src1Ty.getSizeInBits();
704 
705   int64_t Offset = I.getOperand(3).getImm();
706 
707   // FIXME: These cases should have been illegal and unnecessary to check here.
708   if (Offset % 32 != 0 || InsSize % 32 != 0)
709     return false;
710 
711   // Currently not handled by getSubRegFromChannel.
712   if (InsSize > 128)
713     return false;
714 
715   unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
716   if (SubReg == AMDGPU::NoSubRegister)
717     return false;
718 
719   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
720   const TargetRegisterClass *DstRC =
721     TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
722   if (!DstRC)
723     return false;
724 
725   const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
726   const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
727   const TargetRegisterClass *Src0RC =
728     TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank, *MRI);
729   const TargetRegisterClass *Src1RC =
730     TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank, *MRI);
731 
732   // Deal with weird cases where the class only partially supports the subreg
733   // index.
734   Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
735   if (!Src0RC || !Src1RC)
736     return false;
737 
738   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
739       !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
740       !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
741     return false;
742 
743   const DebugLoc &DL = I.getDebugLoc();
744   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
745     .addReg(Src0Reg)
746     .addReg(Src1Reg)
747     .addImm(SubReg);
748 
749   I.eraseFromParent();
750   return true;
751 }
752 
753 bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
754   if (STI.getLDSBankCount() != 16)
755     return selectImpl(MI, *CoverageInfo);
756 
757   Register Dst = MI.getOperand(0).getReg();
758   Register Src0 = MI.getOperand(2).getReg();
759   Register M0Val = MI.getOperand(6).getReg();
760   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
761       !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
762       !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
763     return false;
764 
765   // This requires 2 instructions. It is possible to write a pattern to support
766   // this, but the generated isel emitter doesn't correctly deal with multiple
767   // output instructions using the same physical register input. The copy to m0
768   // is incorrectly placed before the second instruction.
769   //
770   // TODO: Match source modifiers.
771 
772   Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
773   const DebugLoc &DL = MI.getDebugLoc();
774   MachineBasicBlock *MBB = MI.getParent();
775 
776   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
777     .addReg(M0Val);
778   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
779     .addImm(2)
780     .addImm(MI.getOperand(4).getImm())  // $attr
781     .addImm(MI.getOperand(3).getImm()); // $attrchan
782 
783   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
784     .addImm(0)                          // $src0_modifiers
785     .addReg(Src0)                       // $src0
786     .addImm(MI.getOperand(4).getImm())  // $attr
787     .addImm(MI.getOperand(3).getImm())  // $attrchan
788     .addImm(0)                          // $src2_modifiers
789     .addReg(InterpMov)                  // $src2 - 2 f16 values selected by high
790     .addImm(MI.getOperand(5).getImm())  // $high
791     .addImm(0)                          // $clamp
792     .addImm(0);                         // $omod
793 
794   MI.eraseFromParent();
795   return true;
796 }
797 
798 // Writelane is special in that it can use SGPR and M0 (which would normally
799 // count as using the constant bus twice - but in this case it is allowed since
800 // the lane selector doesn't count as a use of the constant bus). However, it is
801 // still required to abide by the 1 SGPR rule. Fix this up if we might have
802 // multiple SGPRs.
803 bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const {
804   // With a constant bus limit of at least 2, there's no issue.
805   if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1)
806     return selectImpl(MI, *CoverageInfo);
807 
808   MachineBasicBlock *MBB = MI.getParent();
809   const DebugLoc &DL = MI.getDebugLoc();
810   Register VDst = MI.getOperand(0).getReg();
811   Register Val = MI.getOperand(2).getReg();
812   Register LaneSelect = MI.getOperand(3).getReg();
813   Register VDstIn = MI.getOperand(4).getReg();
814 
815   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst);
816 
817   Optional<ValueAndVReg> ConstSelect =
818     getConstantVRegValWithLookThrough(LaneSelect, *MRI, true, true);
819   if (ConstSelect) {
820     // The selector has to be an inline immediate, so we can use whatever for
821     // the other operands.
822     MIB.addReg(Val);
823     MIB.addImm(ConstSelect->Value &
824                maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
825   } else {
826     Optional<ValueAndVReg> ConstVal =
827       getConstantVRegValWithLookThrough(Val, *MRI, true, true);
828 
829     // If the value written is an inline immediate, we can get away without a
830     // copy to m0.
831     if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value,
832                                                  STI.hasInv2PiInlineImm())) {
833       MIB.addImm(ConstVal->Value);
834       MIB.addReg(LaneSelect);
835     } else {
836       MIB.addReg(Val);
837 
838       // If the lane selector was originally in a VGPR and copied with
839       // readfirstlane, there's a hazard to read the same SGPR from the
840       // VALU. Constrain to a different SGPR to help avoid needing a nop later.
841       RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI);
842 
843       BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
844         .addReg(LaneSelect);
845       MIB.addReg(AMDGPU::M0);
846     }
847   }
848 
849   MIB.addReg(VDstIn);
850 
851   MI.eraseFromParent();
852   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
853 }
854 
855 // We need to handle this here because tablegen doesn't support matching
856 // instructions with multiple outputs.
857 bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
858   Register Dst0 = MI.getOperand(0).getReg();
859   Register Dst1 = MI.getOperand(1).getReg();
860 
861   LLT Ty = MRI->getType(Dst0);
862   unsigned Opc;
863   if (Ty == LLT::scalar(32))
864     Opc = AMDGPU::V_DIV_SCALE_F32;
865   else if (Ty == LLT::scalar(64))
866     Opc = AMDGPU::V_DIV_SCALE_F64;
867   else
868     return false;
869 
870   // TODO: Match source modifiers.
871 
872   const DebugLoc &DL = MI.getDebugLoc();
873   MachineBasicBlock *MBB = MI.getParent();
874 
875   Register Numer = MI.getOperand(3).getReg();
876   Register Denom = MI.getOperand(4).getReg();
877   unsigned ChooseDenom = MI.getOperand(5).getImm();
878 
879   Register Src0 = ChooseDenom != 0 ? Numer : Denom;
880 
881   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0)
882     .addDef(Dst1)
883     .addImm(0)     // $src0_modifiers
884     .addUse(Src0)  // $src0
885     .addImm(0)     // $src1_modifiers
886     .addUse(Denom) // $src1
887     .addImm(0)     // $src2_modifiers
888     .addUse(Numer) // $src2
889     .addImm(0)     // $clamp
890     .addImm(0);    // $omod
891 
892   MI.eraseFromParent();
893   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
894 }
895 
896 bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
897   unsigned IntrinsicID = I.getIntrinsicID();
898   switch (IntrinsicID) {
899   case Intrinsic::amdgcn_if_break: {
900     MachineBasicBlock *BB = I.getParent();
901 
902     // FIXME: Manually selecting to avoid dealiing with the SReg_1 trick
903     // SelectionDAG uses for wave32 vs wave64.
904     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
905       .add(I.getOperand(0))
906       .add(I.getOperand(2))
907       .add(I.getOperand(3));
908 
909     Register DstReg = I.getOperand(0).getReg();
910     Register Src0Reg = I.getOperand(2).getReg();
911     Register Src1Reg = I.getOperand(3).getReg();
912 
913     I.eraseFromParent();
914 
915     for (Register Reg : { DstReg, Src0Reg, Src1Reg })
916       MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
917 
918     return true;
919   }
920   case Intrinsic::amdgcn_interp_p1_f16:
921     return selectInterpP1F16(I);
922   case Intrinsic::amdgcn_wqm:
923     return constrainCopyLikeIntrin(I, AMDGPU::WQM);
924   case Intrinsic::amdgcn_softwqm:
925     return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
926   case Intrinsic::amdgcn_wwm:
927     return constrainCopyLikeIntrin(I, AMDGPU::WWM);
928   case Intrinsic::amdgcn_writelane:
929     return selectWritelane(I);
930   case Intrinsic::amdgcn_div_scale:
931     return selectDivScale(I);
932   case Intrinsic::amdgcn_icmp:
933     return selectIntrinsicIcmp(I);
934   case Intrinsic::amdgcn_ballot:
935     return selectBallot(I);
936   case Intrinsic::amdgcn_reloc_constant:
937     return selectRelocConstant(I);
938   case Intrinsic::amdgcn_groupstaticsize:
939     return selectGroupStaticSize(I);
940   case Intrinsic::returnaddress:
941     return selectReturnAddress(I);
942   default:
943     return selectImpl(I, *CoverageInfo);
944   }
945 }
946 
947 static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
948   if (Size != 32 && Size != 64)
949     return -1;
950   switch (P) {
951   default:
952     llvm_unreachable("Unknown condition code!");
953   case CmpInst::ICMP_NE:
954     return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
955   case CmpInst::ICMP_EQ:
956     return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
957   case CmpInst::ICMP_SGT:
958     return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
959   case CmpInst::ICMP_SGE:
960     return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
961   case CmpInst::ICMP_SLT:
962     return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
963   case CmpInst::ICMP_SLE:
964     return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
965   case CmpInst::ICMP_UGT:
966     return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
967   case CmpInst::ICMP_UGE:
968     return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
969   case CmpInst::ICMP_ULT:
970     return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
971   case CmpInst::ICMP_ULE:
972     return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
973   }
974 }
975 
976 int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
977                                               unsigned Size) const {
978   if (Size == 64) {
979     if (!STI.hasScalarCompareEq64())
980       return -1;
981 
982     switch (P) {
983     case CmpInst::ICMP_NE:
984       return AMDGPU::S_CMP_LG_U64;
985     case CmpInst::ICMP_EQ:
986       return AMDGPU::S_CMP_EQ_U64;
987     default:
988       return -1;
989     }
990   }
991 
992   if (Size != 32)
993     return -1;
994 
995   switch (P) {
996   case CmpInst::ICMP_NE:
997     return AMDGPU::S_CMP_LG_U32;
998   case CmpInst::ICMP_EQ:
999     return AMDGPU::S_CMP_EQ_U32;
1000   case CmpInst::ICMP_SGT:
1001     return AMDGPU::S_CMP_GT_I32;
1002   case CmpInst::ICMP_SGE:
1003     return AMDGPU::S_CMP_GE_I32;
1004   case CmpInst::ICMP_SLT:
1005     return AMDGPU::S_CMP_LT_I32;
1006   case CmpInst::ICMP_SLE:
1007     return AMDGPU::S_CMP_LE_I32;
1008   case CmpInst::ICMP_UGT:
1009     return AMDGPU::S_CMP_GT_U32;
1010   case CmpInst::ICMP_UGE:
1011     return AMDGPU::S_CMP_GE_U32;
1012   case CmpInst::ICMP_ULT:
1013     return AMDGPU::S_CMP_LT_U32;
1014   case CmpInst::ICMP_ULE:
1015     return AMDGPU::S_CMP_LE_U32;
1016   default:
1017     llvm_unreachable("Unknown condition code!");
1018   }
1019 }
1020 
1021 bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
1022   MachineBasicBlock *BB = I.getParent();
1023   const DebugLoc &DL = I.getDebugLoc();
1024 
1025   Register SrcReg = I.getOperand(2).getReg();
1026   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1027 
1028   auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
1029 
1030   Register CCReg = I.getOperand(0).getReg();
1031   if (!isVCC(CCReg, *MRI)) {
1032     int Opcode = getS_CMPOpcode(Pred, Size);
1033     if (Opcode == -1)
1034       return false;
1035     MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
1036             .add(I.getOperand(2))
1037             .add(I.getOperand(3));
1038     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
1039       .addReg(AMDGPU::SCC);
1040     bool Ret =
1041         constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
1042         RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
1043     I.eraseFromParent();
1044     return Ret;
1045   }
1046 
1047   int Opcode = getV_CMPOpcode(Pred, Size);
1048   if (Opcode == -1)
1049     return false;
1050 
1051   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
1052             I.getOperand(0).getReg())
1053             .add(I.getOperand(2))
1054             .add(I.getOperand(3));
1055   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
1056                                *TRI.getBoolRC(), *MRI);
1057   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1058   I.eraseFromParent();
1059   return Ret;
1060 }
1061 
1062 bool AMDGPUInstructionSelector::selectIntrinsicIcmp(MachineInstr &I) const {
1063   Register Dst = I.getOperand(0).getReg();
1064   if (isVCC(Dst, *MRI))
1065     return false;
1066 
1067   if (MRI->getType(Dst).getSizeInBits() != STI.getWavefrontSize())
1068     return false;
1069 
1070   MachineBasicBlock *BB = I.getParent();
1071   const DebugLoc &DL = I.getDebugLoc();
1072   Register SrcReg = I.getOperand(2).getReg();
1073   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1074   auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm());
1075 
1076   int Opcode = getV_CMPOpcode(Pred, Size);
1077   if (Opcode == -1)
1078     return false;
1079 
1080   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst)
1081                            .add(I.getOperand(2))
1082                            .add(I.getOperand(3));
1083   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), *TRI.getBoolRC(),
1084                                *MRI);
1085   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1086   I.eraseFromParent();
1087   return Ret;
1088 }
1089 
1090 bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
1091   MachineBasicBlock *BB = I.getParent();
1092   const DebugLoc &DL = I.getDebugLoc();
1093   Register DstReg = I.getOperand(0).getReg();
1094   const unsigned Size = MRI->getType(DstReg).getSizeInBits();
1095   const bool Is64 = Size == 64;
1096 
1097   if (Size != STI.getWavefrontSize())
1098     return false;
1099 
1100   Optional<ValueAndVReg> Arg =
1101       getConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI, true);
1102 
1103   if (Arg.hasValue()) {
1104     const int64_t Value = Arg.getValue().Value;
1105     if (Value == 0) {
1106       unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
1107       BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
1108     } else if (Value == -1) { // all ones
1109       Register SrcReg = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
1110       BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1111     } else
1112       return false;
1113   } else {
1114     Register SrcReg = I.getOperand(2).getReg();
1115     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1116   }
1117 
1118   I.eraseFromParent();
1119   return true;
1120 }
1121 
1122 bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const {
1123   Register DstReg = I.getOperand(0).getReg();
1124   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
1125   const TargetRegisterClass *DstRC =
1126     TRI.getRegClassForSizeOnBank(32, *DstBank, *MRI);
1127   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
1128     return false;
1129 
1130   const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID;
1131 
1132   Module *M = MF->getFunction().getParent();
1133   const MDNode *Metadata = I.getOperand(2).getMetadata();
1134   auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString();
1135   auto RelocSymbol = cast<GlobalVariable>(
1136     M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext())));
1137 
1138   MachineBasicBlock *BB = I.getParent();
1139   BuildMI(*BB, &I, I.getDebugLoc(),
1140           TII.get(IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg)
1141     .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO);
1142 
1143   I.eraseFromParent();
1144   return true;
1145 }
1146 
1147 bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const {
1148   Triple::OSType OS = MF->getTarget().getTargetTriple().getOS();
1149 
1150   Register DstReg = I.getOperand(0).getReg();
1151   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1152   unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ?
1153     AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1154 
1155   MachineBasicBlock *MBB = I.getParent();
1156   const DebugLoc &DL = I.getDebugLoc();
1157 
1158   auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg);
1159 
1160   if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) {
1161     const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1162     MIB.addImm(MFI->getLDSSize());
1163   } else {
1164     Module *M = MF->getFunction().getParent();
1165     const GlobalValue *GV
1166       = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize);
1167     MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO);
1168   }
1169 
1170   I.eraseFromParent();
1171   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1172 }
1173 
1174 bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const {
1175   MachineBasicBlock *MBB = I.getParent();
1176   MachineFunction &MF = *MBB->getParent();
1177   const DebugLoc &DL = I.getDebugLoc();
1178 
1179   MachineOperand &Dst = I.getOperand(0);
1180   Register DstReg = Dst.getReg();
1181   unsigned Depth = I.getOperand(2).getImm();
1182 
1183   const TargetRegisterClass *RC
1184     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
1185   if (!RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) ||
1186       !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
1187     return false;
1188 
1189   // Check for kernel and shader functions
1190   if (Depth != 0 ||
1191       MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
1192     BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
1193       .addImm(0);
1194     I.eraseFromParent();
1195     return true;
1196   }
1197 
1198   MachineFrameInfo &MFI = MF.getFrameInfo();
1199   // There is a call to @llvm.returnaddress in this function
1200   MFI.setReturnAddressIsTaken(true);
1201 
1202   // Get the return address reg and mark it as an implicit live-in
1203   Register ReturnAddrReg = TRI.getReturnAddressReg(MF);
1204   Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg,
1205                                              AMDGPU::SReg_64RegClass);
1206   BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
1207     .addReg(LiveIn);
1208   I.eraseFromParent();
1209   return true;
1210 }
1211 
1212 bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
1213   // FIXME: Manually selecting to avoid dealiing with the SReg_1 trick
1214   // SelectionDAG uses for wave32 vs wave64.
1215   MachineBasicBlock *BB = MI.getParent();
1216   BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
1217       .add(MI.getOperand(1));
1218 
1219   Register Reg = MI.getOperand(1).getReg();
1220   MI.eraseFromParent();
1221 
1222   if (!MRI->getRegClassOrNull(Reg))
1223     MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
1224   return true;
1225 }
1226 
1227 bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
1228   MachineInstr &MI, Intrinsic::ID IntrID) const {
1229   MachineBasicBlock *MBB = MI.getParent();
1230   MachineFunction *MF = MBB->getParent();
1231   const DebugLoc &DL = MI.getDebugLoc();
1232 
1233   unsigned IndexOperand = MI.getOperand(7).getImm();
1234   bool WaveRelease = MI.getOperand(8).getImm() != 0;
1235   bool WaveDone = MI.getOperand(9).getImm() != 0;
1236 
1237   if (WaveDone && !WaveRelease)
1238     report_fatal_error("ds_ordered_count: wave_done requires wave_release");
1239 
1240   unsigned OrderedCountIndex = IndexOperand & 0x3f;
1241   IndexOperand &= ~0x3f;
1242   unsigned CountDw = 0;
1243 
1244   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
1245     CountDw = (IndexOperand >> 24) & 0xf;
1246     IndexOperand &= ~(0xf << 24);
1247 
1248     if (CountDw < 1 || CountDw > 4) {
1249       report_fatal_error(
1250         "ds_ordered_count: dword count must be between 1 and 4");
1251     }
1252   }
1253 
1254   if (IndexOperand)
1255     report_fatal_error("ds_ordered_count: bad index operand");
1256 
1257   unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
1258   unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF);
1259 
1260   unsigned Offset0 = OrderedCountIndex << 2;
1261   unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
1262                      (Instruction << 4);
1263 
1264   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
1265     Offset1 |= (CountDw - 1) << 6;
1266 
1267   unsigned Offset = Offset0 | (Offset1 << 8);
1268 
1269   Register M0Val = MI.getOperand(2).getReg();
1270   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1271     .addReg(M0Val);
1272 
1273   Register DstReg = MI.getOperand(0).getReg();
1274   Register ValReg = MI.getOperand(3).getReg();
1275   MachineInstrBuilder DS =
1276     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
1277       .addReg(ValReg)
1278       .addImm(Offset)
1279       .cloneMemRefs(MI);
1280 
1281   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
1282     return false;
1283 
1284   bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
1285   MI.eraseFromParent();
1286   return Ret;
1287 }
1288 
1289 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
1290   switch (IntrID) {
1291   case Intrinsic::amdgcn_ds_gws_init:
1292     return AMDGPU::DS_GWS_INIT;
1293   case Intrinsic::amdgcn_ds_gws_barrier:
1294     return AMDGPU::DS_GWS_BARRIER;
1295   case Intrinsic::amdgcn_ds_gws_sema_v:
1296     return AMDGPU::DS_GWS_SEMA_V;
1297   case Intrinsic::amdgcn_ds_gws_sema_br:
1298     return AMDGPU::DS_GWS_SEMA_BR;
1299   case Intrinsic::amdgcn_ds_gws_sema_p:
1300     return AMDGPU::DS_GWS_SEMA_P;
1301   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1302     return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
1303   default:
1304     llvm_unreachable("not a gws intrinsic");
1305   }
1306 }
1307 
1308 bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
1309                                                      Intrinsic::ID IID) const {
1310   if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
1311       !STI.hasGWSSemaReleaseAll())
1312     return false;
1313 
1314   // intrinsic ID, vsrc, offset
1315   const bool HasVSrc = MI.getNumOperands() == 3;
1316   assert(HasVSrc || MI.getNumOperands() == 2);
1317 
1318   Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
1319   const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
1320   if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
1321     return false;
1322 
1323   MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1324   assert(OffsetDef);
1325 
1326   unsigned ImmOffset;
1327 
1328   MachineBasicBlock *MBB = MI.getParent();
1329   const DebugLoc &DL = MI.getDebugLoc();
1330 
1331   MachineInstr *Readfirstlane = nullptr;
1332 
1333   // If we legalized the VGPR input, strip out the readfirstlane to analyze the
1334   // incoming offset, in case there's an add of a constant. We'll have to put it
1335   // back later.
1336   if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
1337     Readfirstlane = OffsetDef;
1338     BaseOffset = OffsetDef->getOperand(1).getReg();
1339     OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1340   }
1341 
1342   if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
1343     // If we have a constant offset, try to use the 0 in m0 as the base.
1344     // TODO: Look into changing the default m0 initialization value. If the
1345     // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
1346     // the immediate offset.
1347 
1348     ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
1349     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1350       .addImm(0);
1351   } else {
1352     std::tie(BaseOffset, ImmOffset) =
1353         AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset);
1354 
1355     if (Readfirstlane) {
1356       // We have the constant offset now, so put the readfirstlane back on the
1357       // variable component.
1358       if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
1359         return false;
1360 
1361       Readfirstlane->getOperand(1).setReg(BaseOffset);
1362       BaseOffset = Readfirstlane->getOperand(0).getReg();
1363     } else {
1364       if (!RBI.constrainGenericRegister(BaseOffset,
1365                                         AMDGPU::SReg_32RegClass, *MRI))
1366         return false;
1367     }
1368 
1369     Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1370     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
1371       .addReg(BaseOffset)
1372       .addImm(16);
1373 
1374     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1375       .addReg(M0Base);
1376   }
1377 
1378   // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
1379   // offset field) % 64. Some versions of the programming guide omit the m0
1380   // part, or claim it's from offset 0.
1381   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));
1382 
1383   if (HasVSrc) {
1384     Register VSrc = MI.getOperand(1).getReg();
1385     MIB.addReg(VSrc);
1386     if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
1387       return false;
1388   }
1389 
1390   MIB.addImm(ImmOffset)
1391      .cloneMemRefs(MI);
1392 
1393   MI.eraseFromParent();
1394   return true;
1395 }
1396 
1397 bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
1398                                                       bool IsAppend) const {
1399   Register PtrBase = MI.getOperand(2).getReg();
1400   LLT PtrTy = MRI->getType(PtrBase);
1401   bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
1402 
1403   unsigned Offset;
1404   std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));
1405 
1406   // TODO: Should this try to look through readfirstlane like GWS?
1407   if (!isDSOffsetLegal(PtrBase, Offset)) {
1408     PtrBase = MI.getOperand(2).getReg();
1409     Offset = 0;
1410   }
1411 
1412   MachineBasicBlock *MBB = MI.getParent();
1413   const DebugLoc &DL = MI.getDebugLoc();
1414   const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
1415 
1416   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1417     .addReg(PtrBase);
1418   if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI))
1419     return false;
1420 
1421   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
1422     .addImm(Offset)
1423     .addImm(IsGDS ? -1 : 0)
1424     .cloneMemRefs(MI);
1425   MI.eraseFromParent();
1426   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1427 }
1428 
1429 bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const {
1430   if (TM.getOptLevel() > CodeGenOpt::None) {
1431     unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second;
1432     if (WGSize <= STI.getWavefrontSize()) {
1433       MachineBasicBlock *MBB = MI.getParent();
1434       const DebugLoc &DL = MI.getDebugLoc();
1435       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER));
1436       MI.eraseFromParent();
1437       return true;
1438     }
1439   }
1440   return selectImpl(MI, *CoverageInfo);
1441 }
1442 
1443 static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
1444                          bool &IsTexFail) {
1445   if (TexFailCtrl)
1446     IsTexFail = true;
1447 
1448   TFE = (TexFailCtrl & 0x1) ? 1 : 0;
1449   TexFailCtrl &= ~(uint64_t)0x1;
1450   LWE = (TexFailCtrl & 0x2) ? 1 : 0;
1451   TexFailCtrl &= ~(uint64_t)0x2;
1452 
1453   return TexFailCtrl == 0;
1454 }
1455 
1456 static bool parseCachePolicy(uint64_t Value,
1457                              bool *GLC, bool *SLC, bool *DLC) {
1458   if (GLC) {
1459     *GLC = (Value & 0x1) ? 1 : 0;
1460     Value &= ~(uint64_t)0x1;
1461   }
1462   if (SLC) {
1463     *SLC = (Value & 0x2) ? 1 : 0;
1464     Value &= ~(uint64_t)0x2;
1465   }
1466   if (DLC) {
1467     *DLC = (Value & 0x4) ? 1 : 0;
1468     Value &= ~(uint64_t)0x4;
1469   }
1470 
1471   return Value == 0;
1472 }
1473 
1474 bool AMDGPUInstructionSelector::selectImageIntrinsic(
1475   MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
1476   MachineBasicBlock *MBB = MI.getParent();
1477   const DebugLoc &DL = MI.getDebugLoc();
1478 
1479   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
1480     AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
1481 
1482   const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
1483   const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
1484       AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
1485   const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo =
1486       AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode);
1487   unsigned IntrOpcode = Intr->BaseOpcode;
1488   const bool IsGFX10Plus = AMDGPU::isGFX10Plus(STI);
1489 
1490   const unsigned ArgOffset = MI.getNumExplicitDefs() + 1;
1491 
1492   Register VDataIn, VDataOut;
1493   LLT VDataTy;
1494   int NumVDataDwords = -1;
1495   bool IsD16 = false;
1496 
1497   bool Unorm;
1498   if (!BaseOpcode->Sampler)
1499     Unorm = true;
1500   else
1501     Unorm = MI.getOperand(ArgOffset + Intr->UnormIndex).getImm() != 0;
1502 
1503   bool TFE;
1504   bool LWE;
1505   bool IsTexFail = false;
1506   if (!parseTexFail(MI.getOperand(ArgOffset + Intr->TexFailCtrlIndex).getImm(),
1507                     TFE, LWE, IsTexFail))
1508     return false;
1509 
1510   const int Flags = MI.getOperand(ArgOffset + Intr->NumArgs).getImm();
1511   const bool IsA16 = (Flags & 1) != 0;
1512   const bool IsG16 = (Flags & 2) != 0;
1513 
1514   // A16 implies 16 bit gradients
1515   if (IsA16 && !IsG16)
1516     return false;
1517 
1518   unsigned DMask = 0;
1519   unsigned DMaskLanes = 0;
1520 
1521   if (BaseOpcode->Atomic) {
1522     VDataOut = MI.getOperand(0).getReg();
1523     VDataIn = MI.getOperand(2).getReg();
1524     LLT Ty = MRI->getType(VDataIn);
1525 
1526     // Be careful to allow atomic swap on 16-bit element vectors.
1527     const bool Is64Bit = BaseOpcode->AtomicX2 ?
1528       Ty.getSizeInBits() == 128 :
1529       Ty.getSizeInBits() == 64;
1530 
1531     if (BaseOpcode->AtomicX2) {
1532       assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister);
1533 
1534       DMask = Is64Bit ? 0xf : 0x3;
1535       NumVDataDwords = Is64Bit ? 4 : 2;
1536     } else {
1537       DMask = Is64Bit ? 0x3 : 0x1;
1538       NumVDataDwords = Is64Bit ? 2 : 1;
1539     }
1540   } else {
1541     DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm();
1542     DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
1543 
1544     // One memoperand is mandatory, except for getresinfo.
1545     // FIXME: Check this in verifier.
1546     if (!MI.memoperands_empty()) {
1547       const MachineMemOperand *MMO = *MI.memoperands_begin();
1548 
1549       // Infer d16 from the memory size, as the register type will be mangled by
1550       // unpacked subtargets, or by TFE.
1551       IsD16 = ((8 * MMO->getSize()) / DMaskLanes) < 32;
1552     }
1553 
1554     if (BaseOpcode->Store) {
1555       VDataIn = MI.getOperand(1).getReg();
1556       VDataTy = MRI->getType(VDataIn);
1557       NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
1558     } else {
1559       VDataOut = MI.getOperand(0).getReg();
1560       VDataTy = MRI->getType(VDataOut);
1561       NumVDataDwords = DMaskLanes;
1562 
1563       if (IsD16 && !STI.hasUnpackedD16VMem())
1564         NumVDataDwords = (DMaskLanes + 1) / 2;
1565     }
1566   }
1567 
1568   // Optimize _L to _LZ when _L is zero
1569   if (LZMappingInfo) {
1570     // The legalizer replaced the register with an immediate 0 if we need to
1571     // change the opcode.
1572     const MachineOperand &Lod = MI.getOperand(ArgOffset + Intr->LodIndex);
1573     if (Lod.isImm()) {
1574       assert(Lod.getImm() == 0);
1575       IntrOpcode = LZMappingInfo->LZ;  // set new opcode to _lz variant of _l
1576     }
1577   }
1578 
1579   // Optimize _mip away, when 'lod' is zero
1580   if (MIPMappingInfo) {
1581     const MachineOperand &Lod = MI.getOperand(ArgOffset + Intr->MipIndex);
1582     if (Lod.isImm()) {
1583       assert(Lod.getImm() == 0);
1584       IntrOpcode = MIPMappingInfo->NONMIP;  // set new opcode to variant without _mip
1585     }
1586   }
1587 
1588   // Set G16 opcode
1589   if (IsG16 && !IsA16) {
1590     const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
1591         AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode);
1592     assert(G16MappingInfo);
1593     IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16
1594   }
1595 
1596   // TODO: Check this in verifier.
1597   assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");
1598 
1599   bool GLC = false;
1600   bool SLC = false;
1601   bool DLC = false;
1602   if (BaseOpcode->Atomic) {
1603     GLC = true; // TODO no-return optimization
1604     if (!parseCachePolicy(
1605             MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm(), nullptr,
1606             &SLC, IsGFX10Plus ? &DLC : nullptr))
1607       return false;
1608   } else {
1609     if (!parseCachePolicy(
1610             MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm(), &GLC,
1611             &SLC, IsGFX10Plus ? &DLC : nullptr))
1612       return false;
1613   }
1614 
1615   int NumVAddrRegs = 0;
1616   int NumVAddrDwords = 0;
1617   for (unsigned I = Intr->VAddrStart; I < Intr->VAddrEnd; I++) {
1618     // Skip the $noregs and 0s inserted during legalization.
1619     MachineOperand &AddrOp = MI.getOperand(ArgOffset + I);
1620     if (!AddrOp.isReg())
1621       continue; // XXX - Break?
1622 
1623     Register Addr = AddrOp.getReg();
1624     if (!Addr)
1625       break;
1626 
1627     ++NumVAddrRegs;
1628     NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32;
1629   }
1630 
1631   // The legalizer preprocessed the intrinsic arguments. If we aren't using
1632   // NSA, these should have beeen packed into a single value in the first
1633   // address register
1634   const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs;
1635   if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) {
1636     LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n");
1637     return false;
1638   }
1639 
1640   if (IsTexFail)
1641     ++NumVDataDwords;
1642 
1643   int Opcode = -1;
1644   if (IsGFX10Plus) {
1645     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1646                                    UseNSA ? AMDGPU::MIMGEncGfx10NSA
1647                                           : AMDGPU::MIMGEncGfx10Default,
1648                                    NumVDataDwords, NumVAddrDwords);
1649   } else {
1650     if (STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
1651       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
1652                                      NumVDataDwords, NumVAddrDwords);
1653     if (Opcode == -1)
1654       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
1655                                      NumVDataDwords, NumVAddrDwords);
1656   }
1657   assert(Opcode != -1);
1658 
1659   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode))
1660     .cloneMemRefs(MI);
1661 
1662   if (VDataOut) {
1663     if (BaseOpcode->AtomicX2) {
1664       const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64;
1665 
1666       Register TmpReg = MRI->createVirtualRegister(
1667         Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
1668       unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
1669 
1670       MIB.addDef(TmpReg);
1671       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut)
1672         .addReg(TmpReg, RegState::Kill, SubReg);
1673 
1674     } else {
1675       MIB.addDef(VDataOut); // vdata output
1676     }
1677   }
1678 
1679   if (VDataIn)
1680     MIB.addReg(VDataIn); // vdata input
1681 
1682   for (int I = 0; I != NumVAddrRegs; ++I) {
1683     MachineOperand &SrcOp = MI.getOperand(ArgOffset + Intr->VAddrStart + I);
1684     if (SrcOp.isReg()) {
1685       assert(SrcOp.getReg() != 0);
1686       MIB.addReg(SrcOp.getReg());
1687     }
1688   }
1689 
1690   MIB.addReg(MI.getOperand(ArgOffset + Intr->RsrcIndex).getReg());
1691   if (BaseOpcode->Sampler)
1692     MIB.addReg(MI.getOperand(ArgOffset + Intr->SampIndex).getReg());
1693 
1694   MIB.addImm(DMask); // dmask
1695 
1696   if (IsGFX10Plus)
1697     MIB.addImm(DimInfo->Encoding);
1698   MIB.addImm(Unorm);
1699   if (IsGFX10Plus)
1700     MIB.addImm(DLC);
1701 
1702   MIB.addImm(GLC);
1703   MIB.addImm(SLC);
1704   MIB.addImm(IsA16 &&  // a16 or r128
1705              STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0);
1706   if (IsGFX10Plus)
1707     MIB.addImm(IsA16 ? -1 : 0);
1708 
1709   MIB.addImm(TFE); // tfe
1710   MIB.addImm(LWE); // lwe
1711   if (!IsGFX10Plus)
1712     MIB.addImm(DimInfo->DA ? -1 : 0);
1713   if (BaseOpcode->HasD16)
1714     MIB.addImm(IsD16 ? -1 : 0);
1715 
1716   MI.eraseFromParent();
1717   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1718 }
1719 
1720 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
1721     MachineInstr &I) const {
1722   unsigned IntrinsicID = I.getIntrinsicID();
1723   switch (IntrinsicID) {
1724   case Intrinsic::amdgcn_end_cf:
1725     return selectEndCfIntrinsic(I);
1726   case Intrinsic::amdgcn_ds_ordered_add:
1727   case Intrinsic::amdgcn_ds_ordered_swap:
1728     return selectDSOrderedIntrinsic(I, IntrinsicID);
1729   case Intrinsic::amdgcn_ds_gws_init:
1730   case Intrinsic::amdgcn_ds_gws_barrier:
1731   case Intrinsic::amdgcn_ds_gws_sema_v:
1732   case Intrinsic::amdgcn_ds_gws_sema_br:
1733   case Intrinsic::amdgcn_ds_gws_sema_p:
1734   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1735     return selectDSGWSIntrinsic(I, IntrinsicID);
1736   case Intrinsic::amdgcn_ds_append:
1737     return selectDSAppendConsume(I, true);
1738   case Intrinsic::amdgcn_ds_consume:
1739     return selectDSAppendConsume(I, false);
1740   case Intrinsic::amdgcn_s_barrier:
1741     return selectSBarrier(I);
1742   case Intrinsic::amdgcn_global_atomic_fadd:
1743     return selectGlobalAtomicFaddIntrinsic(I);
1744   default: {
1745     return selectImpl(I, *CoverageInfo);
1746   }
1747   }
1748 }
1749 
1750 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
1751   if (selectImpl(I, *CoverageInfo))
1752     return true;
1753 
1754   MachineBasicBlock *BB = I.getParent();
1755   const DebugLoc &DL = I.getDebugLoc();
1756 
1757   Register DstReg = I.getOperand(0).getReg();
1758   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
1759   assert(Size <= 32 || Size == 64);
1760   const MachineOperand &CCOp = I.getOperand(1);
1761   Register CCReg = CCOp.getReg();
1762   if (!isVCC(CCReg, *MRI)) {
1763     unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
1764                                          AMDGPU::S_CSELECT_B32;
1765     MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
1766             .addReg(CCReg);
1767 
1768     // The generic constrainSelectedInstRegOperands doesn't work for the scc register
1769     // bank, because it does not cover the register class that we used to represent
1770     // for it.  So we need to manually set the register class here.
1771     if (!MRI->getRegClassOrNull(CCReg))
1772         MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
1773     MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
1774             .add(I.getOperand(2))
1775             .add(I.getOperand(3));
1776 
1777     bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI) |
1778                constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
1779     I.eraseFromParent();
1780     return Ret;
1781   }
1782 
1783   // Wide VGPR select should have been split in RegBankSelect.
1784   if (Size > 32)
1785     return false;
1786 
1787   MachineInstr *Select =
1788       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1789               .addImm(0)
1790               .add(I.getOperand(3))
1791               .addImm(0)
1792               .add(I.getOperand(2))
1793               .add(I.getOperand(1));
1794 
1795   bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1796   I.eraseFromParent();
1797   return Ret;
1798 }
1799 
1800 static int sizeToSubRegIndex(unsigned Size) {
1801   switch (Size) {
1802   case 32:
1803     return AMDGPU::sub0;
1804   case 64:
1805     return AMDGPU::sub0_sub1;
1806   case 96:
1807     return AMDGPU::sub0_sub1_sub2;
1808   case 128:
1809     return AMDGPU::sub0_sub1_sub2_sub3;
1810   case 256:
1811     return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
1812   default:
1813     if (Size < 32)
1814       return AMDGPU::sub0;
1815     if (Size > 256)
1816       return -1;
1817     return sizeToSubRegIndex(PowerOf2Ceil(Size));
1818   }
1819 }
1820 
1821 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
1822   Register DstReg = I.getOperand(0).getReg();
1823   Register SrcReg = I.getOperand(1).getReg();
1824   const LLT DstTy = MRI->getType(DstReg);
1825   const LLT SrcTy = MRI->getType(SrcReg);
1826   const LLT S1 = LLT::scalar(1);
1827 
1828   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1829   const RegisterBank *DstRB;
1830   if (DstTy == S1) {
1831     // This is a special case. We don't treat s1 for legalization artifacts as
1832     // vcc booleans.
1833     DstRB = SrcRB;
1834   } else {
1835     DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1836     if (SrcRB != DstRB)
1837       return false;
1838   }
1839 
1840   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
1841 
1842   unsigned DstSize = DstTy.getSizeInBits();
1843   unsigned SrcSize = SrcTy.getSizeInBits();
1844 
1845   const TargetRegisterClass *SrcRC
1846     = TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB, *MRI);
1847   const TargetRegisterClass *DstRC
1848     = TRI.getRegClassForSizeOnBank(DstSize, *DstRB, *MRI);
1849   if (!SrcRC || !DstRC)
1850     return false;
1851 
1852   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1853       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1854     LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
1855     return false;
1856   }
1857 
1858   if (DstTy == LLT::vector(2, 16) && SrcTy == LLT::vector(2, 32)) {
1859     MachineBasicBlock *MBB = I.getParent();
1860     const DebugLoc &DL = I.getDebugLoc();
1861 
1862     Register LoReg = MRI->createVirtualRegister(DstRC);
1863     Register HiReg = MRI->createVirtualRegister(DstRC);
1864     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg)
1865       .addReg(SrcReg, 0, AMDGPU::sub0);
1866     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg)
1867       .addReg(SrcReg, 0, AMDGPU::sub1);
1868 
1869     if (IsVALU && STI.hasSDWA()) {
1870       // Write the low 16-bits of the high element into the high 16-bits of the
1871       // low element.
1872       MachineInstr *MovSDWA =
1873         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
1874         .addImm(0)                             // $src0_modifiers
1875         .addReg(HiReg)                         // $src0
1876         .addImm(0)                             // $clamp
1877         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
1878         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
1879         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
1880         .addReg(LoReg, RegState::Implicit);
1881       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
1882     } else {
1883       Register TmpReg0 = MRI->createVirtualRegister(DstRC);
1884       Register TmpReg1 = MRI->createVirtualRegister(DstRC);
1885       Register ImmReg = MRI->createVirtualRegister(DstRC);
1886       if (IsVALU) {
1887         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0)
1888           .addImm(16)
1889           .addReg(HiReg);
1890       } else {
1891         BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0)
1892           .addReg(HiReg)
1893           .addImm(16);
1894       }
1895 
1896       unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1897       unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
1898       unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32;
1899 
1900       BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg)
1901         .addImm(0xffff);
1902       BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1)
1903         .addReg(LoReg)
1904         .addReg(ImmReg);
1905       BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg)
1906         .addReg(TmpReg0)
1907         .addReg(TmpReg1);
1908     }
1909 
1910     I.eraseFromParent();
1911     return true;
1912   }
1913 
1914   if (!DstTy.isScalar())
1915     return false;
1916 
1917   if (SrcSize > 32) {
1918     int SubRegIdx = sizeToSubRegIndex(DstSize);
1919     if (SubRegIdx == -1)
1920       return false;
1921 
1922     // Deal with weird cases where the class only partially supports the subreg
1923     // index.
1924     const TargetRegisterClass *SrcWithSubRC
1925       = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
1926     if (!SrcWithSubRC)
1927       return false;
1928 
1929     if (SrcWithSubRC != SrcRC) {
1930       if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI))
1931         return false;
1932     }
1933 
1934     I.getOperand(1).setSubReg(SubRegIdx);
1935   }
1936 
1937   I.setDesc(TII.get(TargetOpcode::COPY));
1938   return true;
1939 }
1940 
1941 /// \returns true if a bitmask for \p Size bits will be an inline immediate.
1942 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
1943   Mask = maskTrailingOnes<unsigned>(Size);
1944   int SignedMask = static_cast<int>(Mask);
1945   return SignedMask >= -16 && SignedMask <= 64;
1946 }
1947 
1948 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
1949 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
1950   Register Reg, const MachineRegisterInfo &MRI,
1951   const TargetRegisterInfo &TRI) const {
1952   const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
1953   if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
1954     return RB;
1955 
1956   // Ignore the type, since we don't use vcc in artifacts.
1957   if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
1958     return &RBI.getRegBankFromRegClass(*RC, LLT());
1959   return nullptr;
1960 }
1961 
1962 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
1963   bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG;
1964   bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg;
1965   const DebugLoc &DL = I.getDebugLoc();
1966   MachineBasicBlock &MBB = *I.getParent();
1967   const Register DstReg = I.getOperand(0).getReg();
1968   const Register SrcReg = I.getOperand(1).getReg();
1969 
1970   const LLT DstTy = MRI->getType(DstReg);
1971   const LLT SrcTy = MRI->getType(SrcReg);
1972   const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ?
1973     I.getOperand(2).getImm() : SrcTy.getSizeInBits();
1974   const unsigned DstSize = DstTy.getSizeInBits();
1975   if (!DstTy.isScalar())
1976     return false;
1977 
1978   // Artifact casts should never use vcc.
1979   const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
1980 
1981   // FIXME: This should probably be illegal and split earlier.
1982   if (I.getOpcode() == AMDGPU::G_ANYEXT) {
1983     if (DstSize <= 32)
1984       return selectCOPY(I);
1985 
1986     const TargetRegisterClass *SrcRC =
1987         TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank, *MRI);
1988     const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
1989     const TargetRegisterClass *DstRC =
1990         TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
1991 
1992     Register UndefReg = MRI->createVirtualRegister(SrcRC);
1993     BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
1994     BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
1995       .addReg(SrcReg)
1996       .addImm(AMDGPU::sub0)
1997       .addReg(UndefReg)
1998       .addImm(AMDGPU::sub1);
1999     I.eraseFromParent();
2000 
2001     return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) &&
2002            RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI);
2003   }
2004 
2005   if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
2006     // 64-bit should have been split up in RegBankSelect
2007 
2008     // Try to use an and with a mask if it will save code size.
2009     unsigned Mask;
2010     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2011       MachineInstr *ExtI =
2012       BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
2013         .addImm(Mask)
2014         .addReg(SrcReg);
2015       I.eraseFromParent();
2016       return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2017     }
2018 
2019     const unsigned BFE = Signed ? AMDGPU::V_BFE_I32 : AMDGPU::V_BFE_U32;
2020     MachineInstr *ExtI =
2021       BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
2022       .addReg(SrcReg)
2023       .addImm(0) // Offset
2024       .addImm(SrcSize); // Width
2025     I.eraseFromParent();
2026     return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2027   }
2028 
2029   if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
2030     const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ?
2031       AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass;
2032     if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI))
2033       return false;
2034 
2035     if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
2036       const unsigned SextOpc = SrcSize == 8 ?
2037         AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
2038       BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
2039         .addReg(SrcReg);
2040       I.eraseFromParent();
2041       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2042     }
2043 
2044     const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
2045     const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
2046 
2047     // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16]= width.
2048     if (DstSize > 32 && (SrcSize <= 32 || InReg)) {
2049       // We need a 64-bit register source, but the high bits don't matter.
2050       Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
2051       Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2052       unsigned SubReg = InReg ? AMDGPU::sub0 : 0;
2053 
2054       BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2055       BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
2056         .addReg(SrcReg, 0, SubReg)
2057         .addImm(AMDGPU::sub0)
2058         .addReg(UndefReg)
2059         .addImm(AMDGPU::sub1);
2060 
2061       BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
2062         .addReg(ExtReg)
2063         .addImm(SrcSize << 16);
2064 
2065       I.eraseFromParent();
2066       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
2067     }
2068 
2069     unsigned Mask;
2070     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2071       BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
2072         .addReg(SrcReg)
2073         .addImm(Mask);
2074     } else {
2075       BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
2076         .addReg(SrcReg)
2077         .addImm(SrcSize << 16);
2078     }
2079 
2080     I.eraseFromParent();
2081     return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2082   }
2083 
2084   return false;
2085 }
2086 
2087 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
2088   MachineBasicBlock *BB = I.getParent();
2089   MachineOperand &ImmOp = I.getOperand(1);
2090   Register DstReg = I.getOperand(0).getReg();
2091   unsigned Size = MRI->getType(DstReg).getSizeInBits();
2092 
2093   // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
2094   if (ImmOp.isFPImm()) {
2095     const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
2096     ImmOp.ChangeToImmediate(Imm.getZExtValue());
2097   } else if (ImmOp.isCImm()) {
2098     ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue());
2099   } else {
2100     llvm_unreachable("Not supported by g_constants");
2101   }
2102 
2103   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2104   const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID;
2105 
2106   unsigned Opcode;
2107   if (DstRB->getID() == AMDGPU::VCCRegBankID) {
2108     Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
2109   } else {
2110     Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
2111 
2112     // We should never produce s1 values on banks other than VCC. If the user of
2113     // this already constrained the register, we may incorrectly think it's VCC
2114     // if it wasn't originally.
2115     if (Size == 1)
2116       return false;
2117   }
2118 
2119   if (Size != 64) {
2120     I.setDesc(TII.get(Opcode));
2121     I.addImplicitDefUseOperands(*MF);
2122     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2123   }
2124 
2125   const DebugLoc &DL = I.getDebugLoc();
2126 
2127   APInt Imm(Size, I.getOperand(1).getImm());
2128 
2129   MachineInstr *ResInst;
2130   if (IsSgpr && TII.isInlineConstant(Imm)) {
2131     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
2132       .addImm(I.getOperand(1).getImm());
2133   } else {
2134     const TargetRegisterClass *RC = IsSgpr ?
2135       &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
2136     Register LoReg = MRI->createVirtualRegister(RC);
2137     Register HiReg = MRI->createVirtualRegister(RC);
2138 
2139     BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
2140       .addImm(Imm.trunc(32).getZExtValue());
2141 
2142     BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
2143       .addImm(Imm.ashr(32).getZExtValue());
2144 
2145     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2146       .addReg(LoReg)
2147       .addImm(AMDGPU::sub0)
2148       .addReg(HiReg)
2149       .addImm(AMDGPU::sub1);
2150   }
2151 
2152   // We can't call constrainSelectedInstRegOperands here, because it doesn't
2153   // work for target independent opcodes
2154   I.eraseFromParent();
2155   const TargetRegisterClass *DstRC =
2156     TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
2157   if (!DstRC)
2158     return true;
2159   return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
2160 }
2161 
2162 bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
2163   // Only manually handle the f64 SGPR case.
2164   //
2165   // FIXME: This is a workaround for 2.5 different tablegen problems. Because
2166   // the bit ops theoretically have a second result due to the implicit def of
2167   // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing
2168   // that is easy by disabling the check. The result works, but uses a
2169   // nonsensical sreg32orlds_and_sreg_1 regclass.
2170   //
2171   // The DAG emitter is more problematic, and incorrectly adds both S_XOR_B32 to
2172   // the variadic REG_SEQUENCE operands.
2173 
2174   Register Dst = MI.getOperand(0).getReg();
2175   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2176   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2177       MRI->getType(Dst) != LLT::scalar(64))
2178     return false;
2179 
2180   Register Src = MI.getOperand(1).getReg();
2181   MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI);
2182   if (Fabs)
2183     Src = Fabs->getOperand(1).getReg();
2184 
2185   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2186       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2187     return false;
2188 
2189   MachineBasicBlock *BB = MI.getParent();
2190   const DebugLoc &DL = MI.getDebugLoc();
2191   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2192   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2193   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2194   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2195 
2196   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2197     .addReg(Src, 0, AMDGPU::sub0);
2198   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2199     .addReg(Src, 0, AMDGPU::sub1);
2200   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2201     .addImm(0x80000000);
2202 
2203   // Set or toggle sign bit.
2204   unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
2205   BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
2206     .addReg(HiReg)
2207     .addReg(ConstReg);
2208   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2209     .addReg(LoReg)
2210     .addImm(AMDGPU::sub0)
2211     .addReg(OpReg)
2212     .addImm(AMDGPU::sub1);
2213   MI.eraseFromParent();
2214   return true;
2215 }
2216 
2217 // FIXME: This is a workaround for the same tablegen problems as G_FNEG
2218 bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {
2219   Register Dst = MI.getOperand(0).getReg();
2220   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2221   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2222       MRI->getType(Dst) != LLT::scalar(64))
2223     return false;
2224 
2225   Register Src = MI.getOperand(1).getReg();
2226   MachineBasicBlock *BB = MI.getParent();
2227   const DebugLoc &DL = MI.getDebugLoc();
2228   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2229   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2230   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2231   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2232 
2233   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2234       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2235     return false;
2236 
2237   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2238     .addReg(Src, 0, AMDGPU::sub0);
2239   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2240     .addReg(Src, 0, AMDGPU::sub1);
2241   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2242     .addImm(0x7fffffff);
2243 
2244   // Clear sign bit.
2245   // TODO: Should this used S_BITSET0_*?
2246   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg)
2247     .addReg(HiReg)
2248     .addReg(ConstReg);
2249   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2250     .addReg(LoReg)
2251     .addImm(AMDGPU::sub0)
2252     .addReg(OpReg)
2253     .addImm(AMDGPU::sub1);
2254 
2255   MI.eraseFromParent();
2256   return true;
2257 }
2258 
2259 static bool isConstant(const MachineInstr &MI) {
2260   return MI.getOpcode() == TargetOpcode::G_CONSTANT;
2261 }
2262 
2263 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
2264     const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
2265 
2266   const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
2267 
2268   assert(PtrMI);
2269 
2270   if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
2271     return;
2272 
2273   GEPInfo GEPInfo(*PtrMI);
2274 
2275   for (unsigned i = 1; i != 3; ++i) {
2276     const MachineOperand &GEPOp = PtrMI->getOperand(i);
2277     const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
2278     assert(OpDef);
2279     if (i == 2 && isConstant(*OpDef)) {
2280       // TODO: Could handle constant base + variable offset, but a combine
2281       // probably should have commuted it.
2282       assert(GEPInfo.Imm == 0);
2283       GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
2284       continue;
2285     }
2286     const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
2287     if (OpBank->getID() == AMDGPU::SGPRRegBankID)
2288       GEPInfo.SgprParts.push_back(GEPOp.getReg());
2289     else
2290       GEPInfo.VgprParts.push_back(GEPOp.getReg());
2291   }
2292 
2293   AddrInfo.push_back(GEPInfo);
2294   getAddrModeInfo(*PtrMI, MRI, AddrInfo);
2295 }
2296 
2297 bool AMDGPUInstructionSelector::isSGPR(Register Reg) const {
2298   return RBI.getRegBank(Reg, *MRI, TRI)->getID() == AMDGPU::SGPRRegBankID;
2299 }
2300 
2301 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
2302   if (!MI.hasOneMemOperand())
2303     return false;
2304 
2305   const MachineMemOperand *MMO = *MI.memoperands_begin();
2306   const Value *Ptr = MMO->getValue();
2307 
2308   // UndefValue means this is a load of a kernel input.  These are uniform.
2309   // Sometimes LDS instructions have constant pointers.
2310   // If Ptr is null, then that means this mem operand contains a
2311   // PseudoSourceValue like GOT.
2312   if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
2313       isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
2314     return true;
2315 
2316   if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
2317     return true;
2318 
2319   const Instruction *I = dyn_cast<Instruction>(Ptr);
2320   return I && I->getMetadata("amdgpu.uniform");
2321 }
2322 
2323 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
2324   for (const GEPInfo &GEPInfo : AddrInfo) {
2325     if (!GEPInfo.VgprParts.empty())
2326       return true;
2327   }
2328   return false;
2329 }
2330 
2331 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
2332   const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2333   unsigned AS = PtrTy.getAddressSpace();
2334   if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
2335       STI.ldsRequiresM0Init()) {
2336     MachineBasicBlock *BB = I.getParent();
2337 
2338     // If DS instructions require M0 initializtion, insert it before selecting.
2339     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2340       .addImm(-1);
2341   }
2342 }
2343 
2344 bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW(
2345   MachineInstr &I) const {
2346   initM0(I);
2347   return selectImpl(I, *CoverageInfo);
2348 }
2349 
2350 // TODO: No rtn optimization.
2351 bool AMDGPUInstructionSelector::selectG_AMDGPU_ATOMIC_CMPXCHG(
2352   MachineInstr &MI) const {
2353   Register PtrReg = MI.getOperand(1).getReg();
2354   const LLT PtrTy = MRI->getType(PtrReg);
2355   if (PtrTy.getAddressSpace() == AMDGPUAS::FLAT_ADDRESS ||
2356       STI.useFlatForGlobal())
2357     return selectImpl(MI, *CoverageInfo);
2358 
2359   Register DstReg = MI.getOperand(0).getReg();
2360   const LLT Ty = MRI->getType(DstReg);
2361   const bool Is64 = Ty.getSizeInBits() == 64;
2362   const unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
2363   Register TmpReg = MRI->createVirtualRegister(
2364     Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
2365 
2366   const DebugLoc &DL = MI.getDebugLoc();
2367   MachineBasicBlock *BB = MI.getParent();
2368 
2369   Register VAddr, RSrcReg, SOffset;
2370   int64_t Offset = 0;
2371 
2372   unsigned Opcode;
2373   if (selectMUBUFOffsetImpl(MI.getOperand(1), RSrcReg, SOffset, Offset)) {
2374     Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_OFFSET_RTN :
2375                              AMDGPU::BUFFER_ATOMIC_CMPSWAP_OFFSET_RTN;
2376   } else if (selectMUBUFAddr64Impl(MI.getOperand(1), VAddr,
2377                                    RSrcReg, SOffset, Offset)) {
2378     Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_ADDR64_RTN :
2379                     AMDGPU::BUFFER_ATOMIC_CMPSWAP_ADDR64_RTN;
2380   } else
2381     return selectImpl(MI, *CoverageInfo);
2382 
2383   auto MIB = BuildMI(*BB, &MI, DL, TII.get(Opcode), TmpReg)
2384     .addReg(MI.getOperand(2).getReg());
2385 
2386   if (VAddr)
2387     MIB.addReg(VAddr);
2388 
2389   MIB.addReg(RSrcReg);
2390   if (SOffset)
2391     MIB.addReg(SOffset);
2392   else
2393     MIB.addImm(0);
2394 
2395   MIB.addImm(Offset);
2396   MIB.addImm(1); // glc
2397   MIB.addImm(0); // slc
2398   MIB.cloneMemRefs(MI);
2399 
2400   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), DstReg)
2401     .addReg(TmpReg, RegState::Kill, SubReg);
2402 
2403   MI.eraseFromParent();
2404 
2405   MRI->setRegClass(
2406     DstReg, Is64 ? &AMDGPU::VReg_64RegClass : &AMDGPU::VGPR_32RegClass);
2407   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2408 }
2409 
2410 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
2411   MachineBasicBlock *BB = I.getParent();
2412   MachineOperand &CondOp = I.getOperand(0);
2413   Register CondReg = CondOp.getReg();
2414   const DebugLoc &DL = I.getDebugLoc();
2415 
2416   unsigned BrOpcode;
2417   Register CondPhysReg;
2418   const TargetRegisterClass *ConstrainRC;
2419 
2420   // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
2421   // whether the branch is uniform when selecting the instruction. In
2422   // GlobalISel, we should push that decision into RegBankSelect. Assume for now
2423   // RegBankSelect knows what it's doing if the branch condition is scc, even
2424   // though it currently does not.
2425   if (!isVCC(CondReg, *MRI)) {
2426     if (MRI->getType(CondReg) != LLT::scalar(32))
2427       return false;
2428 
2429     CondPhysReg = AMDGPU::SCC;
2430     BrOpcode = AMDGPU::S_CBRANCH_SCC1;
2431     ConstrainRC = &AMDGPU::SReg_32RegClass;
2432   } else {
2433     // FIXME: Do we have to insert an and with exec here, like in SelectionDAG?
2434     // We sort of know that a VCC producer based on the register bank, that ands
2435     // inactive lanes with 0. What if there was a logical operation with vcc
2436     // producers in different blocks/with different exec masks?
2437     // FIXME: Should scc->vcc copies and with exec?
2438     CondPhysReg = TRI.getVCC();
2439     BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
2440     ConstrainRC = TRI.getBoolRC();
2441   }
2442 
2443   if (!MRI->getRegClassOrNull(CondReg))
2444     MRI->setRegClass(CondReg, ConstrainRC);
2445 
2446   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
2447     .addReg(CondReg);
2448   BuildMI(*BB, &I, DL, TII.get(BrOpcode))
2449     .addMBB(I.getOperand(1).getMBB());
2450 
2451   I.eraseFromParent();
2452   return true;
2453 }
2454 
2455 bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE(
2456   MachineInstr &I) const {
2457   Register DstReg = I.getOperand(0).getReg();
2458   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2459   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2460   I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
2461   if (IsVGPR)
2462     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
2463 
2464   return RBI.constrainGenericRegister(
2465     DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
2466 }
2467 
2468 bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
2469   Register DstReg = I.getOperand(0).getReg();
2470   Register SrcReg = I.getOperand(1).getReg();
2471   Register MaskReg = I.getOperand(2).getReg();
2472   LLT Ty = MRI->getType(DstReg);
2473   LLT MaskTy = MRI->getType(MaskReg);
2474 
2475   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2476   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2477   const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI);
2478   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2479   if (DstRB != SrcRB) // Should only happen for hand written MIR.
2480     return false;
2481 
2482   unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
2483   const TargetRegisterClass &RegRC
2484     = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2485 
2486   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB,
2487                                                                   *MRI);
2488   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB,
2489                                                                   *MRI);
2490   const TargetRegisterClass *MaskRC =
2491       TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB, *MRI);
2492 
2493   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2494       !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2495       !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI))
2496     return false;
2497 
2498   MachineBasicBlock *BB = I.getParent();
2499   const DebugLoc &DL = I.getDebugLoc();
2500   if (Ty.getSizeInBits() == 32) {
2501     assert(MaskTy.getSizeInBits() == 32 &&
2502            "ptrmask should have been narrowed during legalize");
2503 
2504     BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
2505       .addReg(SrcReg)
2506       .addReg(MaskReg);
2507     I.eraseFromParent();
2508     return true;
2509   }
2510 
2511   Register HiReg = MRI->createVirtualRegister(&RegRC);
2512   Register LoReg = MRI->createVirtualRegister(&RegRC);
2513 
2514   // Extract the subregisters from the source pointer.
2515   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
2516     .addReg(SrcReg, 0, AMDGPU::sub0);
2517   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
2518     .addReg(SrcReg, 0, AMDGPU::sub1);
2519 
2520   Register MaskedLo, MaskedHi;
2521 
2522   // Try to avoid emitting a bit operation when we only need to touch half of
2523   // the 64-bit pointer.
2524   APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zextOrSelf(64);
2525 
2526   const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
2527   const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
2528   if ((MaskOnes & MaskLo32) == MaskLo32) {
2529     // If all the bits in the low half are 1, we only need a copy for it.
2530     MaskedLo = LoReg;
2531   } else {
2532     // Extract the mask subregister and apply the and.
2533     Register MaskLo = MRI->createVirtualRegister(&RegRC);
2534     MaskedLo = MRI->createVirtualRegister(&RegRC);
2535 
2536     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo)
2537       .addReg(MaskReg, 0, AMDGPU::sub0);
2538     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo)
2539       .addReg(LoReg)
2540       .addReg(MaskLo);
2541   }
2542 
2543   if ((MaskOnes & MaskHi32) == MaskHi32) {
2544     // If all the bits in the high half are 1, we only need a copy for it.
2545     MaskedHi = HiReg;
2546   } else {
2547     Register MaskHi = MRI->createVirtualRegister(&RegRC);
2548     MaskedHi = MRI->createVirtualRegister(&RegRC);
2549 
2550     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi)
2551       .addReg(MaskReg, 0, AMDGPU::sub1);
2552     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi)
2553       .addReg(HiReg)
2554       .addReg(MaskHi);
2555   }
2556 
2557   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2558     .addReg(MaskedLo)
2559     .addImm(AMDGPU::sub0)
2560     .addReg(MaskedHi)
2561     .addImm(AMDGPU::sub1);
2562   I.eraseFromParent();
2563   return true;
2564 }
2565 
2566 /// Return the register to use for the index value, and the subregister to use
2567 /// for the indirectly accessed register.
2568 static std::pair<Register, unsigned>
2569 computeIndirectRegIndex(MachineRegisterInfo &MRI,
2570                         const SIRegisterInfo &TRI,
2571                         const TargetRegisterClass *SuperRC,
2572                         Register IdxReg,
2573                         unsigned EltSize) {
2574   Register IdxBaseReg;
2575   int Offset;
2576 
2577   std::tie(IdxBaseReg, Offset) = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg);
2578   if (IdxBaseReg == AMDGPU::NoRegister) {
2579     // This will happen if the index is a known constant. This should ordinarily
2580     // be legalized out, but handle it as a register just in case.
2581     assert(Offset == 0);
2582     IdxBaseReg = IdxReg;
2583   }
2584 
2585   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);
2586 
2587   // Skip out of bounds offsets, or else we would end up using an undefined
2588   // register.
2589   if (static_cast<unsigned>(Offset) >= SubRegs.size())
2590     return std::make_pair(IdxReg, SubRegs[0]);
2591   return std::make_pair(IdxBaseReg, SubRegs[Offset]);
2592 }
2593 
2594 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
2595   MachineInstr &MI) const {
2596   Register DstReg = MI.getOperand(0).getReg();
2597   Register SrcReg = MI.getOperand(1).getReg();
2598   Register IdxReg = MI.getOperand(2).getReg();
2599 
2600   LLT DstTy = MRI->getType(DstReg);
2601   LLT SrcTy = MRI->getType(SrcReg);
2602 
2603   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2604   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2605   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2606 
2607   // The index must be scalar. If it wasn't RegBankSelect should have moved this
2608   // into a waterfall loop.
2609   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2610     return false;
2611 
2612   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB,
2613                                                                   *MRI);
2614   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(DstTy, *DstRB,
2615                                                                   *MRI);
2616   if (!SrcRC || !DstRC)
2617     return false;
2618   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2619       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2620       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2621     return false;
2622 
2623   MachineBasicBlock *BB = MI.getParent();
2624   const DebugLoc &DL = MI.getDebugLoc();
2625   const bool Is64 = DstTy.getSizeInBits() == 64;
2626 
2627   unsigned SubReg;
2628   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg,
2629                                                      DstTy.getSizeInBits() / 8);
2630 
2631   if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
2632     if (DstTy.getSizeInBits() != 32 && !Is64)
2633       return false;
2634 
2635     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2636       .addReg(IdxReg);
2637 
2638     unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
2639     BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
2640       .addReg(SrcReg, 0, SubReg)
2641       .addReg(SrcReg, RegState::Implicit);
2642     MI.eraseFromParent();
2643     return true;
2644   }
2645 
2646   if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
2647     return false;
2648 
2649   if (!STI.useVGPRIndexMode()) {
2650     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2651       .addReg(IdxReg);
2652     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
2653       .addReg(SrcReg, 0, SubReg)
2654       .addReg(SrcReg, RegState::Implicit);
2655     MI.eraseFromParent();
2656     return true;
2657   }
2658 
2659   BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_ON))
2660     .addReg(IdxReg)
2661     .addImm(AMDGPU::VGPRIndexMode::SRC0_ENABLE);
2662   BuildMI(*BB, MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), DstReg)
2663     .addReg(SrcReg, 0, SubReg)
2664     .addReg(SrcReg, RegState::Implicit)
2665     .addReg(AMDGPU::M0, RegState::Implicit);
2666   BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_OFF));
2667 
2668   MI.eraseFromParent();
2669   return true;
2670 }
2671 
2672 // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
2673 bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
2674   MachineInstr &MI) const {
2675   Register DstReg = MI.getOperand(0).getReg();
2676   Register VecReg = MI.getOperand(1).getReg();
2677   Register ValReg = MI.getOperand(2).getReg();
2678   Register IdxReg = MI.getOperand(3).getReg();
2679 
2680   LLT VecTy = MRI->getType(DstReg);
2681   LLT ValTy = MRI->getType(ValReg);
2682   unsigned VecSize = VecTy.getSizeInBits();
2683   unsigned ValSize = ValTy.getSizeInBits();
2684 
2685   const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
2686   const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
2687   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2688 
2689   assert(VecTy.getElementType() == ValTy);
2690 
2691   // The index must be scalar. If it wasn't RegBankSelect should have moved this
2692   // into a waterfall loop.
2693   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2694     return false;
2695 
2696   const TargetRegisterClass *VecRC = TRI.getRegClassForTypeOnBank(VecTy, *VecRB,
2697                                                                   *MRI);
2698   const TargetRegisterClass *ValRC = TRI.getRegClassForTypeOnBank(ValTy, *ValRB,
2699                                                                   *MRI);
2700 
2701   if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
2702       !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
2703       !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
2704       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2705     return false;
2706 
2707   if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
2708     return false;
2709 
2710   unsigned SubReg;
2711   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg,
2712                                                      ValSize / 8);
2713 
2714   const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
2715                          STI.useVGPRIndexMode();
2716 
2717   MachineBasicBlock *BB = MI.getParent();
2718   const DebugLoc &DL = MI.getDebugLoc();
2719 
2720   if (IndexMode) {
2721     BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_ON))
2722       .addReg(IdxReg)
2723       .addImm(AMDGPU::VGPRIndexMode::DST_ENABLE);
2724   } else {
2725     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2726       .addReg(IdxReg);
2727   }
2728 
2729   const MCInstrDesc &RegWriteOp
2730     = TII.getIndirectRegWritePseudo(VecSize, ValSize,
2731                                     VecRB->getID() == AMDGPU::SGPRRegBankID);
2732   BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
2733     .addReg(VecReg)
2734     .addReg(ValReg)
2735     .addImm(SubReg);
2736 
2737   if (IndexMode)
2738     BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_OFF));
2739 
2740   MI.eraseFromParent();
2741   return true;
2742 }
2743 
2744 static bool isZeroOrUndef(int X) {
2745   return X == 0 || X == -1;
2746 }
2747 
2748 static bool isOneOrUndef(int X) {
2749   return X == 1 || X == -1;
2750 }
2751 
2752 static bool isZeroOrOneOrUndef(int X) {
2753   return X == 0 || X == 1 || X == -1;
2754 }
2755 
2756 // Normalize a VOP3P shuffle mask to refer to the low/high half of a single
2757 // 32-bit register.
2758 static Register normalizeVOP3PMask(int NewMask[2], Register Src0, Register Src1,
2759                                    ArrayRef<int> Mask) {
2760   NewMask[0] = Mask[0];
2761   NewMask[1] = Mask[1];
2762   if (isZeroOrOneOrUndef(Mask[0]) && isZeroOrOneOrUndef(Mask[1]))
2763     return Src0;
2764 
2765   assert(NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1);
2766   assert(NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1);
2767 
2768   // Shift the mask inputs to be 0/1;
2769   NewMask[0] = NewMask[0] == -1 ? -1 : NewMask[0] - 2;
2770   NewMask[1] = NewMask[1] == -1 ? -1 : NewMask[1] - 2;
2771   return Src1;
2772 }
2773 
2774 // This is only legal with VOP3P instructions as an aid to op_sel matching.
2775 bool AMDGPUInstructionSelector::selectG_SHUFFLE_VECTOR(
2776   MachineInstr &MI) const {
2777   Register DstReg = MI.getOperand(0).getReg();
2778   Register Src0Reg = MI.getOperand(1).getReg();
2779   Register Src1Reg = MI.getOperand(2).getReg();
2780   ArrayRef<int> ShufMask = MI.getOperand(3).getShuffleMask();
2781 
2782   const LLT V2S16 = LLT::vector(2, 16);
2783   if (MRI->getType(DstReg) != V2S16 || MRI->getType(Src0Reg) != V2S16)
2784     return false;
2785 
2786   if (!AMDGPU::isLegalVOP3PShuffleMask(ShufMask))
2787     return false;
2788 
2789   assert(ShufMask.size() == 2);
2790   assert(STI.hasSDWA() && "no target has VOP3P but not SDWA");
2791 
2792   MachineBasicBlock *MBB = MI.getParent();
2793   const DebugLoc &DL = MI.getDebugLoc();
2794 
2795   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2796   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
2797   const TargetRegisterClass &RC = IsVALU ?
2798     AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2799 
2800   // Handle the degenerate case which should have folded out.
2801   if (ShufMask[0] == -1 && ShufMask[1] == -1) {
2802     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::IMPLICIT_DEF), DstReg);
2803 
2804     MI.eraseFromParent();
2805     return RBI.constrainGenericRegister(DstReg, RC, *MRI);
2806   }
2807 
2808   // A legal VOP3P mask only reads one of the sources.
2809   int Mask[2];
2810   Register SrcVec = normalizeVOP3PMask(Mask, Src0Reg, Src1Reg, ShufMask);
2811 
2812   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI) ||
2813       !RBI.constrainGenericRegister(SrcVec, RC, *MRI))
2814     return false;
2815 
2816   // TODO: This also should have been folded out
2817   if (isZeroOrUndef(Mask[0]) && isOneOrUndef(Mask[1])) {
2818     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::COPY), DstReg)
2819       .addReg(SrcVec);
2820 
2821     MI.eraseFromParent();
2822     return true;
2823   }
2824 
2825   if (Mask[0] == 1 && Mask[1] == -1) {
2826     if (IsVALU) {
2827       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
2828         .addImm(16)
2829         .addReg(SrcVec);
2830     } else {
2831       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
2832         .addReg(SrcVec)
2833         .addImm(16);
2834     }
2835   } else if (Mask[0] == -1 && Mask[1] == 0) {
2836     if (IsVALU) {
2837       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), DstReg)
2838         .addImm(16)
2839         .addReg(SrcVec);
2840     } else {
2841       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHL_B32), DstReg)
2842         .addReg(SrcVec)
2843         .addImm(16);
2844     }
2845   } else if (Mask[0] == 0 && Mask[1] == 0) {
2846     if (IsVALU) {
2847       // Write low half of the register into the high half.
2848       MachineInstr *MovSDWA =
2849         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2850         .addImm(0)                             // $src0_modifiers
2851         .addReg(SrcVec)                        // $src0
2852         .addImm(0)                             // $clamp
2853         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
2854         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2855         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
2856         .addReg(SrcVec, RegState::Implicit);
2857       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2858     } else {
2859       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2860         .addReg(SrcVec)
2861         .addReg(SrcVec);
2862     }
2863   } else if (Mask[0] == 1 && Mask[1] == 1) {
2864     if (IsVALU) {
2865       // Write high half of the register into the low half.
2866       MachineInstr *MovSDWA =
2867         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2868         .addImm(0)                             // $src0_modifiers
2869         .addReg(SrcVec)                        // $src0
2870         .addImm(0)                             // $clamp
2871         .addImm(AMDGPU::SDWA::WORD_0)          // $dst_sel
2872         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2873         .addImm(AMDGPU::SDWA::WORD_1)          // $src0_sel
2874         .addReg(SrcVec, RegState::Implicit);
2875       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2876     } else {
2877       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HH_B32_B16), DstReg)
2878         .addReg(SrcVec)
2879         .addReg(SrcVec);
2880     }
2881   } else if (Mask[0] == 1 && Mask[1] == 0) {
2882     if (IsVALU) {
2883       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_ALIGNBIT_B32), DstReg)
2884         .addReg(SrcVec)
2885         .addReg(SrcVec)
2886         .addImm(16);
2887     } else {
2888       Register TmpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2889       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), TmpReg)
2890         .addReg(SrcVec)
2891         .addImm(16);
2892       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2893         .addReg(TmpReg)
2894         .addReg(SrcVec);
2895     }
2896   } else
2897     llvm_unreachable("all shuffle masks should be handled");
2898 
2899   MI.eraseFromParent();
2900   return true;
2901 }
2902 
2903 bool AMDGPUInstructionSelector::selectAMDGPU_BUFFER_ATOMIC_FADD(
2904   MachineInstr &MI) const {
2905 
2906   MachineBasicBlock *MBB = MI.getParent();
2907   const DebugLoc &DL = MI.getDebugLoc();
2908 
2909   if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
2910     Function &F = MBB->getParent()->getFunction();
2911     DiagnosticInfoUnsupported
2912       NoFpRet(F, "return versions of fp atomics not supported",
2913               MI.getDebugLoc(), DS_Error);
2914     F.getContext().diagnose(NoFpRet);
2915     return false;
2916   }
2917 
2918   // FIXME: This is only needed because tablegen requires number of dst operands
2919   // in match and replace pattern to be the same. Otherwise patterns can be
2920   // exported from SDag path.
2921   MachineOperand &VDataIn = MI.getOperand(1);
2922   MachineOperand &VIndex = MI.getOperand(3);
2923   MachineOperand &VOffset = MI.getOperand(4);
2924   MachineOperand &SOffset = MI.getOperand(5);
2925   int16_t Offset = MI.getOperand(6).getImm();
2926 
2927   bool HasVOffset = !isOperandImmEqual(VOffset, 0, *MRI);
2928   bool HasVIndex = !isOperandImmEqual(VIndex, 0, *MRI);
2929 
2930   unsigned Opcode;
2931   if (HasVOffset) {
2932     Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN
2933                        : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN;
2934   } else {
2935     Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN
2936                        : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET;
2937   }
2938 
2939   if (MRI->getType(VDataIn.getReg()).isVector()) {
2940     switch (Opcode) {
2941     case AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN:
2942       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN;
2943       break;
2944     case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN:
2945       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFEN;
2946       break;
2947     case AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN:
2948       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_IDXEN;
2949       break;
2950     case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET:
2951       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFSET;
2952       break;
2953     }
2954   }
2955 
2956   auto I = BuildMI(*MBB, MI, DL, TII.get(Opcode));
2957   I.add(VDataIn);
2958 
2959   if (Opcode == AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN ||
2960       Opcode == AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN) {
2961     Register IdxReg = MRI->createVirtualRegister(&AMDGPU::VReg_64RegClass);
2962     BuildMI(*MBB, &*I, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
2963       .addReg(VIndex.getReg())
2964       .addImm(AMDGPU::sub0)
2965       .addReg(VOffset.getReg())
2966       .addImm(AMDGPU::sub1);
2967 
2968     I.addReg(IdxReg);
2969   } else if (HasVIndex) {
2970     I.add(VIndex);
2971   } else if (HasVOffset) {
2972     I.add(VOffset);
2973   }
2974 
2975   I.add(MI.getOperand(2)); // rsrc
2976   I.add(SOffset);
2977   I.addImm(Offset);
2978   renderExtractSLC(I, MI, 7);
2979   I.cloneMemRefs(MI);
2980 
2981   MI.eraseFromParent();
2982 
2983   return true;
2984 }
2985 
2986 bool AMDGPUInstructionSelector::selectGlobalAtomicFaddIntrinsic(
2987   MachineInstr &MI) const{
2988 
2989   MachineBasicBlock *MBB = MI.getParent();
2990   const DebugLoc &DL = MI.getDebugLoc();
2991 
2992   if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
2993     Function &F = MBB->getParent()->getFunction();
2994     DiagnosticInfoUnsupported
2995       NoFpRet(F, "return versions of fp atomics not supported",
2996               MI.getDebugLoc(), DS_Error);
2997     F.getContext().diagnose(NoFpRet);
2998     return false;
2999   }
3000 
3001   // FIXME: This is only needed because tablegen requires number of dst operands
3002   // in match and replace pattern to be the same. Otherwise patterns can be
3003   // exported from SDag path.
3004   auto Addr = selectFlatOffsetImpl<true>(MI.getOperand(2));
3005 
3006   Register Data = MI.getOperand(3).getReg();
3007   const unsigned Opc = MRI->getType(Data).isVector() ?
3008     AMDGPU::GLOBAL_ATOMIC_PK_ADD_F16 : AMDGPU::GLOBAL_ATOMIC_ADD_F32;
3009   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
3010     .addReg(Addr.first)
3011     .addReg(Data)
3012     .addImm(Addr.second)
3013     .addImm(0) // SLC
3014     .cloneMemRefs(MI);
3015 
3016   MI.eraseFromParent();
3017   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3018 }
3019 
3020 bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const{
3021   MI.setDesc(TII.get(MI.getOperand(1).getImm()));
3022   MI.RemoveOperand(1);
3023   MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3024   return true;
3025 }
3026 
3027 bool AMDGPUInstructionSelector::select(MachineInstr &I) {
3028   if (I.isPHI())
3029     return selectPHI(I);
3030 
3031   if (!I.isPreISelOpcode()) {
3032     if (I.isCopy())
3033       return selectCOPY(I);
3034     return true;
3035   }
3036 
3037   switch (I.getOpcode()) {
3038   case TargetOpcode::G_AND:
3039   case TargetOpcode::G_OR:
3040   case TargetOpcode::G_XOR:
3041     if (selectImpl(I, *CoverageInfo))
3042       return true;
3043     return selectG_AND_OR_XOR(I);
3044   case TargetOpcode::G_ADD:
3045   case TargetOpcode::G_SUB:
3046     if (selectImpl(I, *CoverageInfo))
3047       return true;
3048     return selectG_ADD_SUB(I);
3049   case TargetOpcode::G_UADDO:
3050   case TargetOpcode::G_USUBO:
3051   case TargetOpcode::G_UADDE:
3052   case TargetOpcode::G_USUBE:
3053     return selectG_UADDO_USUBO_UADDE_USUBE(I);
3054   case TargetOpcode::G_INTTOPTR:
3055   case TargetOpcode::G_BITCAST:
3056   case TargetOpcode::G_PTRTOINT:
3057     return selectCOPY(I);
3058   case TargetOpcode::G_CONSTANT:
3059   case TargetOpcode::G_FCONSTANT:
3060     return selectG_CONSTANT(I);
3061   case TargetOpcode::G_FNEG:
3062     if (selectImpl(I, *CoverageInfo))
3063       return true;
3064     return selectG_FNEG(I);
3065   case TargetOpcode::G_FABS:
3066     if (selectImpl(I, *CoverageInfo))
3067       return true;
3068     return selectG_FABS(I);
3069   case TargetOpcode::G_EXTRACT:
3070     return selectG_EXTRACT(I);
3071   case TargetOpcode::G_MERGE_VALUES:
3072   case TargetOpcode::G_BUILD_VECTOR:
3073   case TargetOpcode::G_CONCAT_VECTORS:
3074     return selectG_MERGE_VALUES(I);
3075   case TargetOpcode::G_UNMERGE_VALUES:
3076     return selectG_UNMERGE_VALUES(I);
3077   case TargetOpcode::G_BUILD_VECTOR_TRUNC:
3078     return selectG_BUILD_VECTOR_TRUNC(I);
3079   case TargetOpcode::G_PTR_ADD:
3080     return selectG_PTR_ADD(I);
3081   case TargetOpcode::G_IMPLICIT_DEF:
3082     return selectG_IMPLICIT_DEF(I);
3083   case TargetOpcode::G_FREEZE:
3084     return selectCOPY(I);
3085   case TargetOpcode::G_INSERT:
3086     return selectG_INSERT(I);
3087   case TargetOpcode::G_INTRINSIC:
3088     return selectG_INTRINSIC(I);
3089   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
3090     return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
3091   case TargetOpcode::G_ICMP:
3092     if (selectG_ICMP(I))
3093       return true;
3094     return selectImpl(I, *CoverageInfo);
3095   case TargetOpcode::G_LOAD:
3096   case TargetOpcode::G_STORE:
3097   case TargetOpcode::G_ATOMIC_CMPXCHG:
3098   case TargetOpcode::G_ATOMICRMW_XCHG:
3099   case TargetOpcode::G_ATOMICRMW_ADD:
3100   case TargetOpcode::G_ATOMICRMW_SUB:
3101   case TargetOpcode::G_ATOMICRMW_AND:
3102   case TargetOpcode::G_ATOMICRMW_OR:
3103   case TargetOpcode::G_ATOMICRMW_XOR:
3104   case TargetOpcode::G_ATOMICRMW_MIN:
3105   case TargetOpcode::G_ATOMICRMW_MAX:
3106   case TargetOpcode::G_ATOMICRMW_UMIN:
3107   case TargetOpcode::G_ATOMICRMW_UMAX:
3108   case TargetOpcode::G_ATOMICRMW_FADD:
3109   case AMDGPU::G_AMDGPU_ATOMIC_INC:
3110   case AMDGPU::G_AMDGPU_ATOMIC_DEC:
3111   case AMDGPU::G_AMDGPU_ATOMIC_FMIN:
3112   case AMDGPU::G_AMDGPU_ATOMIC_FMAX:
3113     return selectG_LOAD_STORE_ATOMICRMW(I);
3114   case AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG:
3115     return selectG_AMDGPU_ATOMIC_CMPXCHG(I);
3116   case TargetOpcode::G_SELECT:
3117     return selectG_SELECT(I);
3118   case TargetOpcode::G_TRUNC:
3119     return selectG_TRUNC(I);
3120   case TargetOpcode::G_SEXT:
3121   case TargetOpcode::G_ZEXT:
3122   case TargetOpcode::G_ANYEXT:
3123   case TargetOpcode::G_SEXT_INREG:
3124     if (selectImpl(I, *CoverageInfo))
3125       return true;
3126     return selectG_SZA_EXT(I);
3127   case TargetOpcode::G_BRCOND:
3128     return selectG_BRCOND(I);
3129   case TargetOpcode::G_GLOBAL_VALUE:
3130     return selectG_GLOBAL_VALUE(I);
3131   case TargetOpcode::G_PTRMASK:
3132     return selectG_PTRMASK(I);
3133   case TargetOpcode::G_EXTRACT_VECTOR_ELT:
3134     return selectG_EXTRACT_VECTOR_ELT(I);
3135   case TargetOpcode::G_INSERT_VECTOR_ELT:
3136     return selectG_INSERT_VECTOR_ELT(I);
3137   case TargetOpcode::G_SHUFFLE_VECTOR:
3138     return selectG_SHUFFLE_VECTOR(I);
3139   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
3140   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE: {
3141     const AMDGPU::ImageDimIntrinsicInfo *Intr
3142       = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID());
3143     assert(Intr && "not an image intrinsic with image pseudo");
3144     return selectImageIntrinsic(I, Intr);
3145   }
3146   case AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY:
3147     return selectBVHIntrinsic(I);
3148   case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD:
3149     return selectAMDGPU_BUFFER_ATOMIC_FADD(I);
3150   default:
3151     return selectImpl(I, *CoverageInfo);
3152   }
3153   return false;
3154 }
3155 
3156 InstructionSelector::ComplexRendererFns
3157 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
3158   return {{
3159       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3160   }};
3161 
3162 }
3163 
3164 std::pair<Register, unsigned>
3165 AMDGPUInstructionSelector::selectVOP3ModsImpl(MachineOperand &Root,
3166                                               bool AllowAbs) const {
3167   Register Src = Root.getReg();
3168   Register OrigSrc = Src;
3169   unsigned Mods = 0;
3170   MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
3171 
3172   if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
3173     Src = MI->getOperand(1).getReg();
3174     Mods |= SISrcMods::NEG;
3175     MI = getDefIgnoringCopies(Src, *MRI);
3176   }
3177 
3178   if (AllowAbs && MI && MI->getOpcode() == AMDGPU::G_FABS) {
3179     Src = MI->getOperand(1).getReg();
3180     Mods |= SISrcMods::ABS;
3181   }
3182 
3183   if (Mods != 0 &&
3184       RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) {
3185     MachineInstr *UseMI = Root.getParent();
3186 
3187     // If we looked through copies to find source modifiers on an SGPR operand,
3188     // we now have an SGPR register source. To avoid potentially violating the
3189     // constant bus restriction, we need to insert a copy to a VGPR.
3190     Register VGPRSrc = MRI->cloneVirtualRegister(OrigSrc);
3191     BuildMI(*UseMI->getParent(), UseMI, UseMI->getDebugLoc(),
3192             TII.get(AMDGPU::COPY), VGPRSrc)
3193       .addReg(Src);
3194     Src = VGPRSrc;
3195   }
3196 
3197   return std::make_pair(Src, Mods);
3198 }
3199 
3200 ///
3201 /// This will select either an SGPR or VGPR operand and will save us from
3202 /// having to write an extra tablegen pattern.
3203 InstructionSelector::ComplexRendererFns
3204 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
3205   return {{
3206       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3207   }};
3208 }
3209 
3210 InstructionSelector::ComplexRendererFns
3211 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
3212   Register Src;
3213   unsigned Mods;
3214   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3215 
3216   return {{
3217       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3218       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3219       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3220       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3221   }};
3222 }
3223 
3224 InstructionSelector::ComplexRendererFns
3225 AMDGPUInstructionSelector::selectVOP3BMods0(MachineOperand &Root) const {
3226   Register Src;
3227   unsigned Mods;
3228   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3229 
3230   return {{
3231       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3232       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3233       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3234       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3235   }};
3236 }
3237 
3238 InstructionSelector::ComplexRendererFns
3239 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
3240   return {{
3241       [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
3242       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3243       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
3244   }};
3245 }
3246 
3247 InstructionSelector::ComplexRendererFns
3248 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
3249   Register Src;
3250   unsigned Mods;
3251   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3252 
3253   return {{
3254       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3255       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3256   }};
3257 }
3258 
3259 InstructionSelector::ComplexRendererFns
3260 AMDGPUInstructionSelector::selectVOP3BMods(MachineOperand &Root) const {
3261   Register Src;
3262   unsigned Mods;
3263   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3264 
3265   return {{
3266       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3267       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3268   }};
3269 }
3270 
3271 InstructionSelector::ComplexRendererFns
3272 AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
3273   Register Reg = Root.getReg();
3274   const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI);
3275   if (Def && (Def->getOpcode() == AMDGPU::G_FNEG ||
3276               Def->getOpcode() == AMDGPU::G_FABS))
3277     return {};
3278   return {{
3279       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3280   }};
3281 }
3282 
3283 std::pair<Register, unsigned>
3284 AMDGPUInstructionSelector::selectVOP3PModsImpl(
3285   Register Src, const MachineRegisterInfo &MRI) const {
3286   unsigned Mods = 0;
3287   MachineInstr *MI = MRI.getVRegDef(Src);
3288 
3289   if (MI && MI->getOpcode() == AMDGPU::G_FNEG &&
3290       // It's possible to see an f32 fneg here, but unlikely.
3291       // TODO: Treat f32 fneg as only high bit.
3292       MRI.getType(Src) == LLT::vector(2, 16)) {
3293     Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
3294     Src = MI->getOperand(1).getReg();
3295     MI = MRI.getVRegDef(Src);
3296   }
3297 
3298   // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.
3299 
3300   // Packed instructions do not have abs modifiers.
3301   Mods |= SISrcMods::OP_SEL_1;
3302 
3303   return std::make_pair(Src, Mods);
3304 }
3305 
3306 InstructionSelector::ComplexRendererFns
3307 AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
3308   MachineRegisterInfo &MRI
3309     = Root.getParent()->getParent()->getParent()->getRegInfo();
3310 
3311   Register Src;
3312   unsigned Mods;
3313   std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);
3314 
3315   return {{
3316       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3317       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3318   }};
3319 }
3320 
3321 InstructionSelector::ComplexRendererFns
3322 AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
3323   Register Src;
3324   unsigned Mods;
3325   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3326   if (!isKnownNeverNaN(Src, *MRI))
3327     return None;
3328 
3329   return {{
3330       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3331       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3332   }};
3333 }
3334 
3335 InstructionSelector::ComplexRendererFns
3336 AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
3337   // FIXME: Handle op_sel
3338   return {{
3339       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
3340       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
3341   }};
3342 }
3343 
3344 InstructionSelector::ComplexRendererFns
3345 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
3346   SmallVector<GEPInfo, 4> AddrInfo;
3347   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3348 
3349   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3350     return None;
3351 
3352   const GEPInfo &GEPInfo = AddrInfo[0];
3353   Optional<int64_t> EncodedImm =
3354       AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm, false);
3355   if (!EncodedImm)
3356     return None;
3357 
3358   unsigned PtrReg = GEPInfo.SgprParts[0];
3359   return {{
3360     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3361     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3362   }};
3363 }
3364 
3365 InstructionSelector::ComplexRendererFns
3366 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
3367   SmallVector<GEPInfo, 4> AddrInfo;
3368   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3369 
3370   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3371     return None;
3372 
3373   const GEPInfo &GEPInfo = AddrInfo[0];
3374   Register PtrReg = GEPInfo.SgprParts[0];
3375   Optional<int64_t> EncodedImm =
3376       AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
3377   if (!EncodedImm)
3378     return None;
3379 
3380   return {{
3381     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3382     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3383   }};
3384 }
3385 
3386 InstructionSelector::ComplexRendererFns
3387 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
3388   MachineInstr *MI = Root.getParent();
3389   MachineBasicBlock *MBB = MI->getParent();
3390 
3391   SmallVector<GEPInfo, 4> AddrInfo;
3392   getAddrModeInfo(*MI, *MRI, AddrInfo);
3393 
3394   // FIXME: We should shrink the GEP if the offset is known to be <= 32-bits,
3395   // then we can select all ptr + 32-bit offsets not just immediate offsets.
3396   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3397     return None;
3398 
3399   const GEPInfo &GEPInfo = AddrInfo[0];
3400   // SGPR offset is unsigned.
3401   if (!GEPInfo.Imm || GEPInfo.Imm < 0 || !isUInt<32>(GEPInfo.Imm))
3402     return None;
3403 
3404   // If we make it this far we have a load with an 32-bit immediate offset.
3405   // It is OK to select this using a sgpr offset, because we have already
3406   // failed trying to select this load into one of the _IMM variants since
3407   // the _IMM Patterns are considered before the _SGPR patterns.
3408   Register PtrReg = GEPInfo.SgprParts[0];
3409   Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3410   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
3411           .addImm(GEPInfo.Imm);
3412   return {{
3413     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3414     [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
3415   }};
3416 }
3417 
3418 template <bool Signed>
3419 std::pair<Register, int>
3420 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root) const {
3421   MachineInstr *MI = Root.getParent();
3422 
3423   auto Default = std::make_pair(Root.getReg(), 0);
3424 
3425   if (!STI.hasFlatInstOffsets())
3426     return Default;
3427 
3428   const MachineInstr *OpDef = MRI->getVRegDef(Root.getReg());
3429   if (!OpDef || OpDef->getOpcode() != AMDGPU::G_PTR_ADD)
3430     return Default;
3431 
3432   Optional<int64_t> Offset =
3433     getConstantVRegVal(OpDef->getOperand(2).getReg(), *MRI);
3434   if (!Offset.hasValue())
3435     return Default;
3436 
3437   unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
3438   if (!TII.isLegalFLATOffset(Offset.getValue(), AddrSpace, Signed))
3439     return Default;
3440 
3441   Register BasePtr = OpDef->getOperand(1).getReg();
3442 
3443   return std::make_pair(BasePtr, Offset.getValue());
3444 }
3445 
3446 InstructionSelector::ComplexRendererFns
3447 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
3448   auto PtrWithOffset = selectFlatOffsetImpl<false>(Root);
3449 
3450   return {{
3451       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3452       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3453     }};
3454 }
3455 
3456 InstructionSelector::ComplexRendererFns
3457 AMDGPUInstructionSelector::selectFlatOffsetSigned(MachineOperand &Root) const {
3458   auto PtrWithOffset = selectFlatOffsetImpl<true>(Root);
3459 
3460   return {{
3461       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3462       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3463     }};
3464 }
3465 
3466 /// Match a zero extend from a 32-bit value to 64-bits.
3467 static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) {
3468   Register ZExtSrc;
3469   if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))
3470     return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register();
3471 
3472   // Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0)
3473   const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
3474   if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES)
3475     return false;
3476 
3477   if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt())) {
3478     return Def->getOperand(1).getReg();
3479   }
3480 
3481   return Register();
3482 }
3483 
3484 // Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
3485 InstructionSelector::ComplexRendererFns
3486 AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
3487   Register Addr = Root.getReg();
3488   Register PtrBase;
3489   int64_t ConstOffset;
3490   int64_t ImmOffset = 0;
3491 
3492   // Match the immediate offset first, which canonically is moved as low as
3493   // possible.
3494   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3495 
3496   if (ConstOffset != 0) {
3497     if (TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, true)) {
3498       Addr = PtrBase;
3499       ImmOffset = ConstOffset;
3500     } else if (ConstOffset > 0) {
3501       auto PtrBaseDef = getDefSrcRegIgnoringCopies(PtrBase, *MRI);
3502       if (!PtrBaseDef)
3503         return None;
3504 
3505       if (isSGPR(PtrBaseDef->Reg)) {
3506         // Offset is too large.
3507         //
3508         // saddr + large_offset -> saddr + (voffset = large_offset & ~MaxOffset)
3509         //                         + (large_offset & MaxOffset);
3510         int64_t SplitImmOffset, RemainderOffset;
3511         std::tie(SplitImmOffset, RemainderOffset)
3512           = TII.splitFlatOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, true);
3513 
3514         if (isUInt<32>(RemainderOffset)) {
3515           MachineInstr *MI = Root.getParent();
3516           MachineBasicBlock *MBB = MI->getParent();
3517           Register HighBits
3518             = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3519 
3520           BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3521                   HighBits)
3522             .addImm(RemainderOffset);
3523 
3524           return {{
3525             [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrBase); },  // saddr
3526             [=](MachineInstrBuilder &MIB) { MIB.addReg(HighBits); }, // voffset
3527             [=](MachineInstrBuilder &MIB) { MIB.addImm(SplitImmOffset); },
3528           }};
3529         }
3530       }
3531     }
3532   }
3533 
3534   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3535   if (!AddrDef)
3536     return None;
3537 
3538   // Match the variable offset.
3539   if (AddrDef->MI->getOpcode() != AMDGPU::G_PTR_ADD) {
3540     // FIXME: We should probably have folded COPY (G_IMPLICIT_DEF) earlier, and
3541     // drop this.
3542     if (AddrDef->MI->getOpcode() == AMDGPU::G_IMPLICIT_DEF ||
3543         AddrDef->MI->getOpcode() == AMDGPU::G_CONSTANT)
3544       return None;
3545 
3546     // It's cheaper to materialize a single 32-bit zero for vaddr than the two
3547     // moves required to copy a 64-bit SGPR to VGPR.
3548     const Register SAddr = AddrDef->Reg;
3549     if (!isSGPR(SAddr))
3550       return None;
3551 
3552     MachineInstr *MI = Root.getParent();
3553     MachineBasicBlock *MBB = MI->getParent();
3554     Register VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3555 
3556     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3557             VOffset)
3558       .addImm(0);
3559 
3560     return {{
3561         [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); },    // saddr
3562         [=](MachineInstrBuilder &MIB) { MIB.addReg(VOffset); },  // voffset
3563         [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
3564     }};
3565   }
3566 
3567   // Look through the SGPR->VGPR copy.
3568   Register SAddr =
3569     getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
3570   if (!SAddr || !isSGPR(SAddr))
3571     return None;
3572 
3573   Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
3574 
3575   // It's possible voffset is an SGPR here, but the copy to VGPR will be
3576   // inserted later.
3577   Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset);
3578   if (!VOffset)
3579     return None;
3580 
3581   return {{[=](MachineInstrBuilder &MIB) { // saddr
3582              MIB.addReg(SAddr);
3583            },
3584            [=](MachineInstrBuilder &MIB) { // voffset
3585              MIB.addReg(VOffset);
3586            },
3587            [=](MachineInstrBuilder &MIB) { // offset
3588              MIB.addImm(ImmOffset);
3589            }}};
3590 }
3591 
3592 static bool isStackPtrRelative(const MachinePointerInfo &PtrInfo) {
3593   auto PSV = PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
3594   return PSV && PSV->isStack();
3595 }
3596 
3597 InstructionSelector::ComplexRendererFns
3598 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
3599   MachineInstr *MI = Root.getParent();
3600   MachineBasicBlock *MBB = MI->getParent();
3601   MachineFunction *MF = MBB->getParent();
3602   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3603 
3604   int64_t Offset = 0;
3605   if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) &&
3606       Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) {
3607     Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3608 
3609     // TODO: Should this be inside the render function? The iterator seems to
3610     // move.
3611     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3612             HighBits)
3613       .addImm(Offset & ~4095);
3614 
3615     return {{[=](MachineInstrBuilder &MIB) { // rsrc
3616                MIB.addReg(Info->getScratchRSrcReg());
3617              },
3618              [=](MachineInstrBuilder &MIB) { // vaddr
3619                MIB.addReg(HighBits);
3620              },
3621              [=](MachineInstrBuilder &MIB) { // soffset
3622                const MachineMemOperand *MMO = *MI->memoperands_begin();
3623                const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
3624 
3625                if (isStackPtrRelative(PtrInfo))
3626                  MIB.addReg(Info->getStackPtrOffsetReg());
3627                else
3628                  MIB.addImm(0);
3629              },
3630              [=](MachineInstrBuilder &MIB) { // offset
3631                MIB.addImm(Offset & 4095);
3632              }}};
3633   }
3634 
3635   assert(Offset == 0 || Offset == -1);
3636 
3637   // Try to fold a frame index directly into the MUBUF vaddr field, and any
3638   // offsets.
3639   Optional<int> FI;
3640   Register VAddr = Root.getReg();
3641   if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
3642     if (isBaseWithConstantOffset(Root, *MRI)) {
3643       const MachineOperand &LHS = RootDef->getOperand(1);
3644       const MachineOperand &RHS = RootDef->getOperand(2);
3645       const MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
3646       const MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());
3647       if (LHSDef && RHSDef) {
3648         int64_t PossibleOffset =
3649             RHSDef->getOperand(1).getCImm()->getSExtValue();
3650         if (SIInstrInfo::isLegalMUBUFImmOffset(PossibleOffset) &&
3651             (!STI.privateMemoryResourceIsRangeChecked() ||
3652              KnownBits->signBitIsZero(LHS.getReg()))) {
3653           if (LHSDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
3654             FI = LHSDef->getOperand(1).getIndex();
3655           else
3656             VAddr = LHS.getReg();
3657           Offset = PossibleOffset;
3658         }
3659       }
3660     } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
3661       FI = RootDef->getOperand(1).getIndex();
3662     }
3663   }
3664 
3665   return {{[=](MachineInstrBuilder &MIB) { // rsrc
3666              MIB.addReg(Info->getScratchRSrcReg());
3667            },
3668            [=](MachineInstrBuilder &MIB) { // vaddr
3669              if (FI.hasValue())
3670                MIB.addFrameIndex(FI.getValue());
3671              else
3672                MIB.addReg(VAddr);
3673            },
3674            [=](MachineInstrBuilder &MIB) { // soffset
3675              // If we don't know this private access is a local stack object, it
3676              // needs to be relative to the entry point's scratch wave offset.
3677              // TODO: Should split large offsets that don't fit like above.
3678              // TODO: Don't use scratch wave offset just because the offset
3679              // didn't fit.
3680              if (!Info->isEntryFunction() && FI.hasValue())
3681                MIB.addReg(Info->getStackPtrOffsetReg());
3682              else
3683                MIB.addImm(0);
3684            },
3685            [=](MachineInstrBuilder &MIB) { // offset
3686              MIB.addImm(Offset);
3687            }}};
3688 }
3689 
3690 bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
3691                                                 int64_t Offset) const {
3692   if (!isUInt<16>(Offset))
3693     return false;
3694 
3695   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
3696     return true;
3697 
3698   // On Southern Islands instruction with a negative base value and an offset
3699   // don't seem to work.
3700   return KnownBits->signBitIsZero(Base);
3701 }
3702 
3703 bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0,
3704                                                  int64_t Offset1,
3705                                                  unsigned Size) const {
3706   if (Offset0 % Size != 0 || Offset1 % Size != 0)
3707     return false;
3708   if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
3709     return false;
3710 
3711   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
3712     return true;
3713 
3714   // On Southern Islands instruction with a negative base value and an offset
3715   // don't seem to work.
3716   return KnownBits->signBitIsZero(Base);
3717 }
3718 
3719 InstructionSelector::ComplexRendererFns
3720 AMDGPUInstructionSelector::selectMUBUFScratchOffset(
3721     MachineOperand &Root) const {
3722   MachineInstr *MI = Root.getParent();
3723   MachineBasicBlock *MBB = MI->getParent();
3724 
3725   int64_t Offset = 0;
3726   if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
3727       !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
3728     return {};
3729 
3730   const MachineFunction *MF = MBB->getParent();
3731   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3732   const MachineMemOperand *MMO = *MI->memoperands_begin();
3733   const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
3734 
3735   return {{
3736       [=](MachineInstrBuilder &MIB) { // rsrc
3737         MIB.addReg(Info->getScratchRSrcReg());
3738       },
3739       [=](MachineInstrBuilder &MIB) { // soffset
3740         if (isStackPtrRelative(PtrInfo))
3741           MIB.addReg(Info->getStackPtrOffsetReg());
3742         else
3743           MIB.addImm(0);
3744       },
3745       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
3746   }};
3747 }
3748 
3749 std::pair<Register, unsigned>
3750 AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
3751   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
3752   if (!RootDef)
3753     return std::make_pair(Root.getReg(), 0);
3754 
3755   int64_t ConstAddr = 0;
3756 
3757   Register PtrBase;
3758   int64_t Offset;
3759   std::tie(PtrBase, Offset) =
3760     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3761 
3762   if (Offset) {
3763     if (isDSOffsetLegal(PtrBase, Offset)) {
3764       // (add n0, c0)
3765       return std::make_pair(PtrBase, Offset);
3766     }
3767   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
3768     // TODO
3769 
3770 
3771   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
3772     // TODO
3773 
3774   }
3775 
3776   return std::make_pair(Root.getReg(), 0);
3777 }
3778 
3779 InstructionSelector::ComplexRendererFns
3780 AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
3781   Register Reg;
3782   unsigned Offset;
3783   std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
3784   return {{
3785       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3786       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
3787     }};
3788 }
3789 
3790 InstructionSelector::ComplexRendererFns
3791 AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
3792   return selectDSReadWrite2(Root, 4);
3793 }
3794 
3795 InstructionSelector::ComplexRendererFns
3796 AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const {
3797   return selectDSReadWrite2(Root, 8);
3798 }
3799 
3800 InstructionSelector::ComplexRendererFns
3801 AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root,
3802                                               unsigned Size) const {
3803   Register Reg;
3804   unsigned Offset;
3805   std::tie(Reg, Offset) = selectDSReadWrite2Impl(Root, Size);
3806   return {{
3807       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3808       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
3809       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); }
3810     }};
3811 }
3812 
3813 std::pair<Register, unsigned>
3814 AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root,
3815                                                   unsigned Size) const {
3816   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
3817   if (!RootDef)
3818     return std::make_pair(Root.getReg(), 0);
3819 
3820   int64_t ConstAddr = 0;
3821 
3822   Register PtrBase;
3823   int64_t Offset;
3824   std::tie(PtrBase, Offset) =
3825     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3826 
3827   if (Offset) {
3828     int64_t OffsetValue0 = Offset;
3829     int64_t OffsetValue1 = Offset + Size;
3830     if (isDSOffset2Legal(PtrBase, OffsetValue0, OffsetValue1, Size)) {
3831       // (add n0, c0)
3832       return std::make_pair(PtrBase, OffsetValue0 / Size);
3833     }
3834   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
3835     // TODO
3836 
3837   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
3838     // TODO
3839 
3840   }
3841 
3842   return std::make_pair(Root.getReg(), 0);
3843 }
3844 
3845 /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
3846 /// the base value with the constant offset. There may be intervening copies
3847 /// between \p Root and the identified constant. Returns \p Root, 0 if this does
3848 /// not match the pattern.
3849 std::pair<Register, int64_t>
3850 AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
3851   Register Root, const MachineRegisterInfo &MRI) const {
3852   MachineInstr *RootI = getDefIgnoringCopies(Root, MRI);
3853   if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
3854     return {Root, 0};
3855 
3856   MachineOperand &RHS = RootI->getOperand(2);
3857   Optional<ValueAndVReg> MaybeOffset
3858     = getConstantVRegValWithLookThrough(RHS.getReg(), MRI, true);
3859   if (!MaybeOffset)
3860     return {Root, 0};
3861   return {RootI->getOperand(1).getReg(), MaybeOffset->Value};
3862 }
3863 
3864 static void addZeroImm(MachineInstrBuilder &MIB) {
3865   MIB.addImm(0);
3866 }
3867 
3868 /// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p
3869 /// BasePtr is not valid, a null base pointer will be used.
3870 static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
3871                           uint32_t FormatLo, uint32_t FormatHi,
3872                           Register BasePtr) {
3873   Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
3874   Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
3875   Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
3876   Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
3877 
3878   B.buildInstr(AMDGPU::S_MOV_B32)
3879     .addDef(RSrc2)
3880     .addImm(FormatLo);
3881   B.buildInstr(AMDGPU::S_MOV_B32)
3882     .addDef(RSrc3)
3883     .addImm(FormatHi);
3884 
3885   // Build the half of the subregister with the constants before building the
3886   // full 128-bit register. If we are building multiple resource descriptors,
3887   // this will allow CSEing of the 2-component register.
3888   B.buildInstr(AMDGPU::REG_SEQUENCE)
3889     .addDef(RSrcHi)
3890     .addReg(RSrc2)
3891     .addImm(AMDGPU::sub0)
3892     .addReg(RSrc3)
3893     .addImm(AMDGPU::sub1);
3894 
3895   Register RSrcLo = BasePtr;
3896   if (!BasePtr) {
3897     RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
3898     B.buildInstr(AMDGPU::S_MOV_B64)
3899       .addDef(RSrcLo)
3900       .addImm(0);
3901   }
3902 
3903   B.buildInstr(AMDGPU::REG_SEQUENCE)
3904     .addDef(RSrc)
3905     .addReg(RSrcLo)
3906     .addImm(AMDGPU::sub0_sub1)
3907     .addReg(RSrcHi)
3908     .addImm(AMDGPU::sub2_sub3);
3909 
3910   return RSrc;
3911 }
3912 
3913 static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
3914                                 const SIInstrInfo &TII, Register BasePtr) {
3915   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
3916 
3917   // FIXME: Why are half the "default" bits ignored based on the addressing
3918   // mode?
3919   return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
3920 }
3921 
3922 static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
3923                                const SIInstrInfo &TII, Register BasePtr) {
3924   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
3925 
3926   // FIXME: Why are half the "default" bits ignored based on the addressing
3927   // mode?
3928   return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
3929 }
3930 
3931 AMDGPUInstructionSelector::MUBUFAddressData
3932 AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
3933   MUBUFAddressData Data;
3934   Data.N0 = Src;
3935 
3936   Register PtrBase;
3937   int64_t Offset;
3938 
3939   std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
3940   if (isUInt<32>(Offset)) {
3941     Data.N0 = PtrBase;
3942     Data.Offset = Offset;
3943   }
3944 
3945   if (MachineInstr *InputAdd
3946       = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
3947     Data.N2 = InputAdd->getOperand(1).getReg();
3948     Data.N3 = InputAdd->getOperand(2).getReg();
3949 
3950     // FIXME: Need to fix extra SGPR->VGPRcopies inserted
3951     // FIXME: Don't know this was defined by operand 0
3952     //
3953     // TODO: Remove this when we have copy folding optimizations after
3954     // RegBankSelect.
3955     Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
3956     Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
3957   }
3958 
3959   return Data;
3960 }
3961 
3962 /// Return if the addr64 mubuf mode should be used for the given address.
3963 bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
3964   // (ptr_add N2, N3) -> addr64, or
3965   // (ptr_add (ptr_add N2, N3), C1) -> addr64
3966   if (Addr.N2)
3967     return true;
3968 
3969   const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
3970   return N0Bank->getID() == AMDGPU::VGPRRegBankID;
3971 }
3972 
3973 /// Split an immediate offset \p ImmOffset depending on whether it fits in the
3974 /// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
3975 /// component.
3976 void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
3977   MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
3978   if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
3979     return;
3980 
3981   // Illegal offset, store it in soffset.
3982   SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3983   B.buildInstr(AMDGPU::S_MOV_B32)
3984     .addDef(SOffset)
3985     .addImm(ImmOffset);
3986   ImmOffset = 0;
3987 }
3988 
3989 bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
3990   MachineOperand &Root, Register &VAddr, Register &RSrcReg,
3991   Register &SOffset, int64_t &Offset) const {
3992   // FIXME: Predicates should stop this from reaching here.
3993   // addr64 bit was removed for volcanic islands.
3994   if (!STI.hasAddr64() || STI.useFlatForGlobal())
3995     return false;
3996 
3997   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
3998   if (!shouldUseAddr64(AddrData))
3999     return false;
4000 
4001   Register N0 = AddrData.N0;
4002   Register N2 = AddrData.N2;
4003   Register N3 = AddrData.N3;
4004   Offset = AddrData.Offset;
4005 
4006   // Base pointer for the SRD.
4007   Register SRDPtr;
4008 
4009   if (N2) {
4010     if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4011       assert(N3);
4012       if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4013         // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
4014         // addr64, and construct the default resource from a 0 address.
4015         VAddr = N0;
4016       } else {
4017         SRDPtr = N3;
4018         VAddr = N2;
4019       }
4020     } else {
4021       // N2 is not divergent.
4022       SRDPtr = N2;
4023       VAddr = N3;
4024     }
4025   } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4026     // Use the default null pointer in the resource
4027     VAddr = N0;
4028   } else {
4029     // N0 -> offset, or
4030     // (N0 + C1) -> offset
4031     SRDPtr = N0;
4032   }
4033 
4034   MachineIRBuilder B(*Root.getParent());
4035   RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
4036   splitIllegalMUBUFOffset(B, SOffset, Offset);
4037   return true;
4038 }
4039 
4040 bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
4041   MachineOperand &Root, Register &RSrcReg, Register &SOffset,
4042   int64_t &Offset) const {
4043 
4044   // FIXME: Pattern should not reach here.
4045   if (STI.useFlatForGlobal())
4046     return false;
4047 
4048   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4049   if (shouldUseAddr64(AddrData))
4050     return false;
4051 
4052   // N0 -> offset, or
4053   // (N0 + C1) -> offset
4054   Register SRDPtr = AddrData.N0;
4055   Offset = AddrData.Offset;
4056 
4057   // TODO: Look through extensions for 32-bit soffset.
4058   MachineIRBuilder B(*Root.getParent());
4059 
4060   RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
4061   splitIllegalMUBUFOffset(B, SOffset, Offset);
4062   return true;
4063 }
4064 
4065 InstructionSelector::ComplexRendererFns
4066 AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
4067   Register VAddr;
4068   Register RSrcReg;
4069   Register SOffset;
4070   int64_t Offset = 0;
4071 
4072   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
4073     return {};
4074 
4075   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
4076   // pattern.
4077   return {{
4078       [=](MachineInstrBuilder &MIB) {  // rsrc
4079         MIB.addReg(RSrcReg);
4080       },
4081       [=](MachineInstrBuilder &MIB) { // vaddr
4082         MIB.addReg(VAddr);
4083       },
4084       [=](MachineInstrBuilder &MIB) { // soffset
4085         if (SOffset)
4086           MIB.addReg(SOffset);
4087         else
4088           MIB.addImm(0);
4089       },
4090       [=](MachineInstrBuilder &MIB) { // offset
4091         MIB.addImm(Offset);
4092       },
4093       addZeroImm, //  glc
4094       addZeroImm, //  slc
4095       addZeroImm, //  tfe
4096       addZeroImm, //  dlc
4097       addZeroImm  //  swz
4098     }};
4099 }
4100 
4101 InstructionSelector::ComplexRendererFns
4102 AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
4103   Register RSrcReg;
4104   Register SOffset;
4105   int64_t Offset = 0;
4106 
4107   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4108     return {};
4109 
4110   return {{
4111       [=](MachineInstrBuilder &MIB) {  // rsrc
4112         MIB.addReg(RSrcReg);
4113       },
4114       [=](MachineInstrBuilder &MIB) { // soffset
4115         if (SOffset)
4116           MIB.addReg(SOffset);
4117         else
4118           MIB.addImm(0);
4119       },
4120       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4121       addZeroImm, //  glc
4122       addZeroImm, //  slc
4123       addZeroImm, //  tfe
4124       addZeroImm, //  dlc
4125       addZeroImm  //  swz
4126     }};
4127 }
4128 
4129 InstructionSelector::ComplexRendererFns
4130 AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const {
4131   Register VAddr;
4132   Register RSrcReg;
4133   Register SOffset;
4134   int64_t Offset = 0;
4135 
4136   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
4137     return {};
4138 
4139   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
4140   // pattern.
4141   return {{
4142       [=](MachineInstrBuilder &MIB) {  // rsrc
4143         MIB.addReg(RSrcReg);
4144       },
4145       [=](MachineInstrBuilder &MIB) { // vaddr
4146         MIB.addReg(VAddr);
4147       },
4148       [=](MachineInstrBuilder &MIB) { // soffset
4149         if (SOffset)
4150           MIB.addReg(SOffset);
4151         else
4152           MIB.addImm(0);
4153       },
4154       [=](MachineInstrBuilder &MIB) { // offset
4155         MIB.addImm(Offset);
4156       },
4157       addZeroImm //  slc
4158     }};
4159 }
4160 
4161 InstructionSelector::ComplexRendererFns
4162 AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
4163   Register RSrcReg;
4164   Register SOffset;
4165   int64_t Offset = 0;
4166 
4167   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4168     return {};
4169 
4170   return {{
4171       [=](MachineInstrBuilder &MIB) {  // rsrc
4172         MIB.addReg(RSrcReg);
4173       },
4174       [=](MachineInstrBuilder &MIB) { // soffset
4175         if (SOffset)
4176           MIB.addReg(SOffset);
4177         else
4178           MIB.addImm(0);
4179       },
4180       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4181       addZeroImm //  slc
4182     }};
4183 }
4184 
4185 /// Get an immediate that must be 32-bits, and treated as zero extended.
4186 static Optional<uint64_t> getConstantZext32Val(Register Reg,
4187                                                const MachineRegisterInfo &MRI) {
4188   // getConstantVRegVal sexts any values, so see if that matters.
4189   Optional<int64_t> OffsetVal = getConstantVRegVal(Reg, MRI);
4190   if (!OffsetVal || !isInt<32>(*OffsetVal))
4191     return None;
4192   return Lo_32(*OffsetVal);
4193 }
4194 
4195 InstructionSelector::ComplexRendererFns
4196 AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
4197   Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
4198   if (!OffsetVal)
4199     return {};
4200 
4201   Optional<int64_t> EncodedImm =
4202       AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
4203   if (!EncodedImm)
4204     return {};
4205 
4206   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
4207 }
4208 
4209 InstructionSelector::ComplexRendererFns
4210 AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
4211   assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
4212 
4213   Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
4214   if (!OffsetVal)
4215     return {};
4216 
4217   Optional<int64_t> EncodedImm
4218     = AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
4219   if (!EncodedImm)
4220     return {};
4221 
4222   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
4223 }
4224 
4225 void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
4226                                                  const MachineInstr &MI,
4227                                                  int OpIdx) const {
4228   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4229          "Expected G_CONSTANT");
4230   MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
4231 }
4232 
4233 void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
4234                                                 const MachineInstr &MI,
4235                                                 int OpIdx) const {
4236   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4237          "Expected G_CONSTANT");
4238   MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
4239 }
4240 
4241 void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
4242                                                  const MachineInstr &MI,
4243                                                  int OpIdx) const {
4244   assert(OpIdx == -1);
4245 
4246   const MachineOperand &Op = MI.getOperand(1);
4247   if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
4248     MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
4249   else {
4250     assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
4251     MIB.addImm(Op.getCImm()->getSExtValue());
4252   }
4253 }
4254 
4255 void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
4256                                                 const MachineInstr &MI,
4257                                                 int OpIdx) const {
4258   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4259          "Expected G_CONSTANT");
4260   MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
4261 }
4262 
4263 /// This only really exists to satisfy DAG type checking machinery, so is a
4264 /// no-op here.
4265 void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
4266                                                 const MachineInstr &MI,
4267                                                 int OpIdx) const {
4268   MIB.addImm(MI.getOperand(OpIdx).getImm());
4269 }
4270 
4271 void AMDGPUInstructionSelector::renderExtractGLC(MachineInstrBuilder &MIB,
4272                                                  const MachineInstr &MI,
4273                                                  int OpIdx) const {
4274   assert(OpIdx >= 0 && "expected to match an immediate operand");
4275   MIB.addImm(MI.getOperand(OpIdx).getImm() & 1);
4276 }
4277 
4278 void AMDGPUInstructionSelector::renderExtractSLC(MachineInstrBuilder &MIB,
4279                                                  const MachineInstr &MI,
4280                                                  int OpIdx) const {
4281   assert(OpIdx >= 0 && "expected to match an immediate operand");
4282   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 1) & 1);
4283 }
4284 
4285 void AMDGPUInstructionSelector::renderExtractDLC(MachineInstrBuilder &MIB,
4286                                                  const MachineInstr &MI,
4287                                                  int OpIdx) const {
4288   assert(OpIdx >= 0 && "expected to match an immediate operand");
4289   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 2) & 1);
4290 }
4291 
4292 void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
4293                                                  const MachineInstr &MI,
4294                                                  int OpIdx) const {
4295   assert(OpIdx >= 0 && "expected to match an immediate operand");
4296   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1);
4297 }
4298 
4299 void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
4300                                                  const MachineInstr &MI,
4301                                                  int OpIdx) const {
4302   MIB.addFrameIndex((MI.getOperand(1).getIndex()));
4303 }
4304 
4305 bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
4306   return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
4307 }
4308 
4309 bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
4310   return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
4311 }
4312 
4313 bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
4314   return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
4315 }
4316 
4317 bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
4318   return TII.isInlineConstant(Imm);
4319 }
4320