1 //===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the InstructionSelector class for
10 /// AMDGPU.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13 
14 #include "AMDGPUInstructionSelector.h"
15 #include "AMDGPU.h"
16 #include "AMDGPUGlobalISelUtils.h"
17 #include "AMDGPUInstrInfo.h"
18 #include "AMDGPURegisterBankInfo.h"
19 #include "AMDGPUTargetMachine.h"
20 #include "SIMachineFunctionInfo.h"
21 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
22 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
23 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
24 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
25 #include "llvm/IR/DiagnosticInfo.h"
26 
27 #define DEBUG_TYPE "amdgpu-isel"
28 
29 using namespace llvm;
30 using namespace MIPatternMatch;
31 
32 static cl::opt<bool> AllowRiskySelect(
33   "amdgpu-global-isel-risky-select",
34   cl::desc("Allow GlobalISel to select cases that are likely to not work yet"),
35   cl::init(false),
36   cl::ReallyHidden);
37 
38 #define GET_GLOBALISEL_IMPL
39 #define AMDGPUSubtarget GCNSubtarget
40 #include "AMDGPUGenGlobalISel.inc"
41 #undef GET_GLOBALISEL_IMPL
42 #undef AMDGPUSubtarget
43 
44 AMDGPUInstructionSelector::AMDGPUInstructionSelector(
45     const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
46     const AMDGPUTargetMachine &TM)
47     : InstructionSelector(), TII(*STI.getInstrInfo()),
48       TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
49       STI(STI),
50       EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
51 #define GET_GLOBALISEL_PREDICATES_INIT
52 #include "AMDGPUGenGlobalISel.inc"
53 #undef GET_GLOBALISEL_PREDICATES_INIT
54 #define GET_GLOBALISEL_TEMPORARIES_INIT
55 #include "AMDGPUGenGlobalISel.inc"
56 #undef GET_GLOBALISEL_TEMPORARIES_INIT
57 {
58 }
59 
60 const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
61 
62 void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
63                                         CodeGenCoverage &CoverageInfo,
64                                         ProfileSummaryInfo *PSI,
65                                         BlockFrequencyInfo *BFI) {
66   MRI = &MF.getRegInfo();
67   Subtarget = &MF.getSubtarget<GCNSubtarget>();
68   InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
69 }
70 
71 bool AMDGPUInstructionSelector::isVCC(Register Reg,
72                                       const MachineRegisterInfo &MRI) const {
73   // The verifier is oblivious to s1 being a valid value for wavesize registers.
74   if (Reg.isPhysical())
75     return false;
76 
77   auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
78   const TargetRegisterClass *RC =
79       RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
80   if (RC) {
81     const LLT Ty = MRI.getType(Reg);
82     return RC->hasSuperClassEq(TRI.getBoolRC()) &&
83            Ty.isValid() && Ty.getSizeInBits() == 1;
84   }
85 
86   const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
87   return RB->getID() == AMDGPU::VCCRegBankID;
88 }
89 
90 bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
91                                                         unsigned NewOpc) const {
92   MI.setDesc(TII.get(NewOpc));
93   MI.RemoveOperand(1); // Remove intrinsic ID.
94   MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
95 
96   MachineOperand &Dst = MI.getOperand(0);
97   MachineOperand &Src = MI.getOperand(1);
98 
99   // TODO: This should be legalized to s32 if needed
100   if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
101     return false;
102 
103   const TargetRegisterClass *DstRC
104     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
105   const TargetRegisterClass *SrcRC
106     = TRI.getConstrainedRegClassForOperand(Src, *MRI);
107   if (!DstRC || DstRC != SrcRC)
108     return false;
109 
110   return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
111          RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
112 }
113 
114 bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
115   const DebugLoc &DL = I.getDebugLoc();
116   MachineBasicBlock *BB = I.getParent();
117   I.setDesc(TII.get(TargetOpcode::COPY));
118 
119   const MachineOperand &Src = I.getOperand(1);
120   MachineOperand &Dst = I.getOperand(0);
121   Register DstReg = Dst.getReg();
122   Register SrcReg = Src.getReg();
123 
124   if (isVCC(DstReg, *MRI)) {
125     if (SrcReg == AMDGPU::SCC) {
126       const TargetRegisterClass *RC
127         = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
128       if (!RC)
129         return true;
130       return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
131     }
132 
133     if (!isVCC(SrcReg, *MRI)) {
134       // TODO: Should probably leave the copy and let copyPhysReg expand it.
135       if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
136         return false;
137 
138       const TargetRegisterClass *SrcRC
139         = TRI.getConstrainedRegClassForOperand(Src, *MRI);
140 
141       Optional<ValueAndVReg> ConstVal =
142           getConstantVRegValWithLookThrough(SrcReg, *MRI, true, true);
143       if (ConstVal) {
144         unsigned MovOpc =
145             STI.isWave64() ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
146         BuildMI(*BB, &I, DL, TII.get(MovOpc), DstReg)
147             .addImm(ConstVal->Value.getBoolValue() ? -1 : 0);
148       } else {
149         Register MaskedReg = MRI->createVirtualRegister(SrcRC);
150 
151         // We can't trust the high bits at this point, so clear them.
152 
153         // TODO: Skip masking high bits if def is known boolean.
154 
155         unsigned AndOpc =
156             TRI.isSGPRClass(SrcRC) ? AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
157         BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
158             .addImm(1)
159             .addReg(SrcReg);
160         BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
161             .addImm(0)
162             .addReg(MaskedReg);
163       }
164 
165       if (!MRI->getRegClassOrNull(SrcReg))
166         MRI->setRegClass(SrcReg, SrcRC);
167       I.eraseFromParent();
168       return true;
169     }
170 
171     const TargetRegisterClass *RC =
172       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
173     if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
174       return false;
175 
176     return true;
177   }
178 
179   for (const MachineOperand &MO : I.operands()) {
180     if (MO.getReg().isPhysical())
181       continue;
182 
183     const TargetRegisterClass *RC =
184             TRI.getConstrainedRegClassForOperand(MO, *MRI);
185     if (!RC)
186       continue;
187     RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
188   }
189   return true;
190 }
191 
192 bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
193   const Register DefReg = I.getOperand(0).getReg();
194   const LLT DefTy = MRI->getType(DefReg);
195   if (DefTy == LLT::scalar(1)) {
196     if (!AllowRiskySelect) {
197       LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n");
198       return false;
199     }
200 
201     LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n");
202   }
203 
204   // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)
205 
206   const RegClassOrRegBank &RegClassOrBank =
207     MRI->getRegClassOrRegBank(DefReg);
208 
209   const TargetRegisterClass *DefRC
210     = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
211   if (!DefRC) {
212     if (!DefTy.isValid()) {
213       LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
214       return false;
215     }
216 
217     const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
218     DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB, *MRI);
219     if (!DefRC) {
220       LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
221       return false;
222     }
223   }
224 
225   // TODO: Verify that all registers have the same bank
226   I.setDesc(TII.get(TargetOpcode::PHI));
227   return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
228 }
229 
230 MachineOperand
231 AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
232                                            const TargetRegisterClass &SubRC,
233                                            unsigned SubIdx) const {
234 
235   MachineInstr *MI = MO.getParent();
236   MachineBasicBlock *BB = MO.getParent()->getParent();
237   Register DstReg = MRI->createVirtualRegister(&SubRC);
238 
239   if (MO.isReg()) {
240     unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
241     Register Reg = MO.getReg();
242     BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
243             .addReg(Reg, 0, ComposedSubIdx);
244 
245     return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
246                                      MO.isKill(), MO.isDead(), MO.isUndef(),
247                                      MO.isEarlyClobber(), 0, MO.isDebug(),
248                                      MO.isInternalRead());
249   }
250 
251   assert(MO.isImm());
252 
253   APInt Imm(64, MO.getImm());
254 
255   switch (SubIdx) {
256   default:
257     llvm_unreachable("do not know to split immediate with this sub index.");
258   case AMDGPU::sub0:
259     return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
260   case AMDGPU::sub1:
261     return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
262   }
263 }
264 
265 static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
266   switch (Opc) {
267   case AMDGPU::G_AND:
268     return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
269   case AMDGPU::G_OR:
270     return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
271   case AMDGPU::G_XOR:
272     return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
273   default:
274     llvm_unreachable("not a bit op");
275   }
276 }
277 
278 bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
279   Register DstReg = I.getOperand(0).getReg();
280   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
281 
282   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
283   if (DstRB->getID() != AMDGPU::SGPRRegBankID &&
284       DstRB->getID() != AMDGPU::VCCRegBankID)
285     return false;
286 
287   bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID &&
288                             STI.isWave64());
289   I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64)));
290 
291   // Dead implicit-def of scc
292   I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
293                                          true, // isImp
294                                          false, // isKill
295                                          true)); // isDead
296   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
297 }
298 
299 bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
300   MachineBasicBlock *BB = I.getParent();
301   MachineFunction *MF = BB->getParent();
302   Register DstReg = I.getOperand(0).getReg();
303   const DebugLoc &DL = I.getDebugLoc();
304   LLT Ty = MRI->getType(DstReg);
305   if (Ty.isVector())
306     return false;
307 
308   unsigned Size = Ty.getSizeInBits();
309   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
310   const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
311   const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;
312 
313   if (Size == 32) {
314     if (IsSALU) {
315       const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
316       MachineInstr *Add =
317         BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
318         .add(I.getOperand(1))
319         .add(I.getOperand(2));
320       I.eraseFromParent();
321       return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
322     }
323 
324     if (STI.hasAddNoCarry()) {
325       const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
326       I.setDesc(TII.get(Opc));
327       I.addOperand(*MF, MachineOperand::CreateImm(0));
328       I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
329       return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
330     }
331 
332     const unsigned Opc = Sub ? AMDGPU::V_SUB_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e64;
333 
334     Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
335     MachineInstr *Add
336       = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
337       .addDef(UnusedCarry, RegState::Dead)
338       .add(I.getOperand(1))
339       .add(I.getOperand(2))
340       .addImm(0);
341     I.eraseFromParent();
342     return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
343   }
344 
345   assert(!Sub && "illegal sub should not reach here");
346 
347   const TargetRegisterClass &RC
348     = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
349   const TargetRegisterClass &HalfRC
350     = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;
351 
352   MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
353   MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
354   MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
355   MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));
356 
357   Register DstLo = MRI->createVirtualRegister(&HalfRC);
358   Register DstHi = MRI->createVirtualRegister(&HalfRC);
359 
360   if (IsSALU) {
361     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
362       .add(Lo1)
363       .add(Lo2);
364     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
365       .add(Hi1)
366       .add(Hi2);
367   } else {
368     const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
369     Register CarryReg = MRI->createVirtualRegister(CarryRC);
370     BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo)
371       .addDef(CarryReg)
372       .add(Lo1)
373       .add(Lo2)
374       .addImm(0);
375     MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
376       .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
377       .add(Hi1)
378       .add(Hi2)
379       .addReg(CarryReg, RegState::Kill)
380       .addImm(0);
381 
382     if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
383       return false;
384   }
385 
386   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
387     .addReg(DstLo)
388     .addImm(AMDGPU::sub0)
389     .addReg(DstHi)
390     .addImm(AMDGPU::sub1);
391 
392 
393   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
394     return false;
395 
396   I.eraseFromParent();
397   return true;
398 }
399 
400 bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
401   MachineInstr &I) const {
402   MachineBasicBlock *BB = I.getParent();
403   MachineFunction *MF = BB->getParent();
404   const DebugLoc &DL = I.getDebugLoc();
405   Register Dst0Reg = I.getOperand(0).getReg();
406   Register Dst1Reg = I.getOperand(1).getReg();
407   const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
408                      I.getOpcode() == AMDGPU::G_UADDE;
409   const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
410                           I.getOpcode() == AMDGPU::G_USUBE;
411 
412   if (isVCC(Dst1Reg, *MRI)) {
413     unsigned NoCarryOpc =
414         IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
415     unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
416     I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
417     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
418     I.addOperand(*MF, MachineOperand::CreateImm(0));
419     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
420   }
421 
422   Register Src0Reg = I.getOperand(2).getReg();
423   Register Src1Reg = I.getOperand(3).getReg();
424 
425   if (HasCarryIn) {
426     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
427       .addReg(I.getOperand(4).getReg());
428   }
429 
430   unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
431   unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
432 
433   BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
434     .add(I.getOperand(2))
435     .add(I.getOperand(3));
436   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
437     .addReg(AMDGPU::SCC);
438 
439   if (!MRI->getRegClassOrNull(Dst1Reg))
440     MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);
441 
442   if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
443       !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
444       !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
445     return false;
446 
447   if (HasCarryIn &&
448       !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
449                                     AMDGPU::SReg_32RegClass, *MRI))
450     return false;
451 
452   I.eraseFromParent();
453   return true;
454 }
455 
456 // TODO: We should probably legalize these to only using 32-bit results.
457 bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
458   MachineBasicBlock *BB = I.getParent();
459   Register DstReg = I.getOperand(0).getReg();
460   Register SrcReg = I.getOperand(1).getReg();
461   LLT DstTy = MRI->getType(DstReg);
462   LLT SrcTy = MRI->getType(SrcReg);
463   const unsigned SrcSize = SrcTy.getSizeInBits();
464   unsigned DstSize = DstTy.getSizeInBits();
465 
466   // TODO: Should handle any multiple of 32 offset.
467   unsigned Offset = I.getOperand(2).getImm();
468   if (Offset % 32 != 0 || DstSize > 128)
469     return false;
470 
471   // 16-bit operations really use 32-bit registers.
472   // FIXME: Probably should not allow 16-bit G_EXTRACT results.
473   if (DstSize == 16)
474     DstSize = 32;
475 
476   const TargetRegisterClass *DstRC =
477     TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
478   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
479     return false;
480 
481   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
482   const TargetRegisterClass *SrcRC =
483     TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
484   if (!SrcRC)
485     return false;
486   unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
487                                                          DstSize / 32);
488   SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
489   if (!SrcRC)
490     return false;
491 
492   SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
493                                     *SrcRC, I.getOperand(1));
494   const DebugLoc &DL = I.getDebugLoc();
495   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
496     .addReg(SrcReg, 0, SubReg);
497 
498   I.eraseFromParent();
499   return true;
500 }
501 
502 bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
503   MachineBasicBlock *BB = MI.getParent();
504   Register DstReg = MI.getOperand(0).getReg();
505   LLT DstTy = MRI->getType(DstReg);
506   LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());
507 
508   const unsigned SrcSize = SrcTy.getSizeInBits();
509   if (SrcSize < 32)
510     return selectImpl(MI, *CoverageInfo);
511 
512   const DebugLoc &DL = MI.getDebugLoc();
513   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
514   const unsigned DstSize = DstTy.getSizeInBits();
515   const TargetRegisterClass *DstRC =
516     TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
517   if (!DstRC)
518     return false;
519 
520   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
521   MachineInstrBuilder MIB =
522     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
523   for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
524     MachineOperand &Src = MI.getOperand(I + 1);
525     MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
526     MIB.addImm(SubRegs[I]);
527 
528     const TargetRegisterClass *SrcRC
529       = TRI.getConstrainedRegClassForOperand(Src, *MRI);
530     if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
531       return false;
532   }
533 
534   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
535     return false;
536 
537   MI.eraseFromParent();
538   return true;
539 }
540 
541 bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
542   MachineBasicBlock *BB = MI.getParent();
543   const int NumDst = MI.getNumOperands() - 1;
544 
545   MachineOperand &Src = MI.getOperand(NumDst);
546 
547   Register SrcReg = Src.getReg();
548   Register DstReg0 = MI.getOperand(0).getReg();
549   LLT DstTy = MRI->getType(DstReg0);
550   LLT SrcTy = MRI->getType(SrcReg);
551 
552   const unsigned DstSize = DstTy.getSizeInBits();
553   const unsigned SrcSize = SrcTy.getSizeInBits();
554   const DebugLoc &DL = MI.getDebugLoc();
555   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
556 
557   const TargetRegisterClass *SrcRC =
558     TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
559   if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
560     return false;
561 
562   // Note we could have mixed SGPR and VGPR destination banks for an SGPR
563   // source, and this relies on the fact that the same subregister indices are
564   // used for both.
565   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
566   for (int I = 0, E = NumDst; I != E; ++I) {
567     MachineOperand &Dst = MI.getOperand(I);
568     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
569       .addReg(SrcReg, 0, SubRegs[I]);
570 
571     // Make sure the subregister index is valid for the source register.
572     SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]);
573     if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
574       return false;
575 
576     const TargetRegisterClass *DstRC =
577       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
578     if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
579       return false;
580   }
581 
582   MI.eraseFromParent();
583   return true;
584 }
585 
586 bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC(
587   MachineInstr &MI) const {
588   if (selectImpl(MI, *CoverageInfo))
589     return true;
590 
591   const LLT S32 = LLT::scalar(32);
592   const LLT V2S16 = LLT::vector(2, 16);
593 
594   Register Dst = MI.getOperand(0).getReg();
595   if (MRI->getType(Dst) != V2S16)
596     return false;
597 
598   const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
599   if (DstBank->getID() != AMDGPU::SGPRRegBankID)
600     return false;
601 
602   Register Src0 = MI.getOperand(1).getReg();
603   Register Src1 = MI.getOperand(2).getReg();
604   if (MRI->getType(Src0) != S32)
605     return false;
606 
607   const DebugLoc &DL = MI.getDebugLoc();
608   MachineBasicBlock *BB = MI.getParent();
609 
610   auto ConstSrc1 =
611       getConstantVRegValWithLookThrough(Src1, *MRI, true, true, true);
612   if (ConstSrc1) {
613     auto ConstSrc0 =
614         getConstantVRegValWithLookThrough(Src0, *MRI, true, true, true);
615     if (ConstSrc0) {
616       const int64_t K0 = ConstSrc0->Value.getSExtValue();
617       const int64_t K1 = ConstSrc1->Value.getSExtValue();
618       uint32_t Lo16 = static_cast<uint32_t>(K0) & 0xffff;
619       uint32_t Hi16 = static_cast<uint32_t>(K1) & 0xffff;
620 
621       BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst)
622         .addImm(Lo16 | (Hi16 << 16));
623       MI.eraseFromParent();
624       return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI);
625     }
626   }
627 
628   // TODO: This should probably be a combine somewhere
629   // (build_vector_trunc $src0, undef -> copy $src0
630   MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
631   if (Src1Def && Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
632     MI.setDesc(TII.get(AMDGPU::COPY));
633     MI.RemoveOperand(2);
634     return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI) &&
635            RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI);
636   }
637 
638   Register ShiftSrc0;
639   Register ShiftSrc1;
640 
641   // With multiple uses of the shift, this will duplicate the shift and
642   // increase register pressure.
643   //
644   // (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16)
645   //  => (S_PACK_HH_B32_B16 $src0, $src1)
646   // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16))
647   //  => (S_PACK_LH_B32_B16 $src0, $src1)
648   // (build_vector_trunc $src0, $src1)
649   //  => (S_PACK_LL_B32_B16 $src0, $src1)
650 
651   bool Shift0 = mi_match(
652       Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_SpecificICst(16))));
653 
654   bool Shift1 = mi_match(
655       Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_SpecificICst(16))));
656 
657   unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
658   if (Shift0 && Shift1) {
659     Opc = AMDGPU::S_PACK_HH_B32_B16;
660     MI.getOperand(1).setReg(ShiftSrc0);
661     MI.getOperand(2).setReg(ShiftSrc1);
662   } else if (Shift1) {
663     Opc = AMDGPU::S_PACK_LH_B32_B16;
664     MI.getOperand(2).setReg(ShiftSrc1);
665   } else if (Shift0 && ConstSrc1 && ConstSrc1->Value == 0) {
666     // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16
667     auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)
668       .addReg(ShiftSrc0)
669       .addImm(16);
670 
671     MI.eraseFromParent();
672     return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
673   }
674 
675   MI.setDesc(TII.get(Opc));
676   return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
677 }
678 
679 bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
680   return selectG_ADD_SUB(I);
681 }
682 
683 bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
684   const MachineOperand &MO = I.getOperand(0);
685 
686   // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
687   // regbank check here is to know why getConstrainedRegClassForOperand failed.
688   const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
689   if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
690       (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
691     I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
692     return true;
693   }
694 
695   return false;
696 }
697 
698 bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
699   MachineBasicBlock *BB = I.getParent();
700 
701   Register DstReg = I.getOperand(0).getReg();
702   Register Src0Reg = I.getOperand(1).getReg();
703   Register Src1Reg = I.getOperand(2).getReg();
704   LLT Src1Ty = MRI->getType(Src1Reg);
705 
706   unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
707   unsigned InsSize = Src1Ty.getSizeInBits();
708 
709   int64_t Offset = I.getOperand(3).getImm();
710 
711   // FIXME: These cases should have been illegal and unnecessary to check here.
712   if (Offset % 32 != 0 || InsSize % 32 != 0)
713     return false;
714 
715   // Currently not handled by getSubRegFromChannel.
716   if (InsSize > 128)
717     return false;
718 
719   unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
720   if (SubReg == AMDGPU::NoSubRegister)
721     return false;
722 
723   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
724   const TargetRegisterClass *DstRC =
725     TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
726   if (!DstRC)
727     return false;
728 
729   const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
730   const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
731   const TargetRegisterClass *Src0RC =
732     TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank, *MRI);
733   const TargetRegisterClass *Src1RC =
734     TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank, *MRI);
735 
736   // Deal with weird cases where the class only partially supports the subreg
737   // index.
738   Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
739   if (!Src0RC || !Src1RC)
740     return false;
741 
742   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
743       !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
744       !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
745     return false;
746 
747   const DebugLoc &DL = I.getDebugLoc();
748   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
749     .addReg(Src0Reg)
750     .addReg(Src1Reg)
751     .addImm(SubReg);
752 
753   I.eraseFromParent();
754   return true;
755 }
756 
757 bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
758   if (STI.getLDSBankCount() != 16)
759     return selectImpl(MI, *CoverageInfo);
760 
761   Register Dst = MI.getOperand(0).getReg();
762   Register Src0 = MI.getOperand(2).getReg();
763   Register M0Val = MI.getOperand(6).getReg();
764   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
765       !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
766       !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
767     return false;
768 
769   // This requires 2 instructions. It is possible to write a pattern to support
770   // this, but the generated isel emitter doesn't correctly deal with multiple
771   // output instructions using the same physical register input. The copy to m0
772   // is incorrectly placed before the second instruction.
773   //
774   // TODO: Match source modifiers.
775 
776   Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
777   const DebugLoc &DL = MI.getDebugLoc();
778   MachineBasicBlock *MBB = MI.getParent();
779 
780   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
781     .addReg(M0Val);
782   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
783     .addImm(2)
784     .addImm(MI.getOperand(4).getImm())  // $attr
785     .addImm(MI.getOperand(3).getImm()); // $attrchan
786 
787   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
788     .addImm(0)                          // $src0_modifiers
789     .addReg(Src0)                       // $src0
790     .addImm(MI.getOperand(4).getImm())  // $attr
791     .addImm(MI.getOperand(3).getImm())  // $attrchan
792     .addImm(0)                          // $src2_modifiers
793     .addReg(InterpMov)                  // $src2 - 2 f16 values selected by high
794     .addImm(MI.getOperand(5).getImm())  // $high
795     .addImm(0)                          // $clamp
796     .addImm(0);                         // $omod
797 
798   MI.eraseFromParent();
799   return true;
800 }
801 
802 // Writelane is special in that it can use SGPR and M0 (which would normally
803 // count as using the constant bus twice - but in this case it is allowed since
804 // the lane selector doesn't count as a use of the constant bus). However, it is
805 // still required to abide by the 1 SGPR rule. Fix this up if we might have
806 // multiple SGPRs.
807 bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const {
808   // With a constant bus limit of at least 2, there's no issue.
809   if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1)
810     return selectImpl(MI, *CoverageInfo);
811 
812   MachineBasicBlock *MBB = MI.getParent();
813   const DebugLoc &DL = MI.getDebugLoc();
814   Register VDst = MI.getOperand(0).getReg();
815   Register Val = MI.getOperand(2).getReg();
816   Register LaneSelect = MI.getOperand(3).getReg();
817   Register VDstIn = MI.getOperand(4).getReg();
818 
819   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst);
820 
821   Optional<ValueAndVReg> ConstSelect =
822     getConstantVRegValWithLookThrough(LaneSelect, *MRI, true, true);
823   if (ConstSelect) {
824     // The selector has to be an inline immediate, so we can use whatever for
825     // the other operands.
826     MIB.addReg(Val);
827     MIB.addImm(ConstSelect->Value.getSExtValue() &
828                maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
829   } else {
830     Optional<ValueAndVReg> ConstVal =
831       getConstantVRegValWithLookThrough(Val, *MRI, true, true);
832 
833     // If the value written is an inline immediate, we can get away without a
834     // copy to m0.
835     if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value.getSExtValue(),
836                                                  STI.hasInv2PiInlineImm())) {
837       MIB.addImm(ConstVal->Value.getSExtValue());
838       MIB.addReg(LaneSelect);
839     } else {
840       MIB.addReg(Val);
841 
842       // If the lane selector was originally in a VGPR and copied with
843       // readfirstlane, there's a hazard to read the same SGPR from the
844       // VALU. Constrain to a different SGPR to help avoid needing a nop later.
845       RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI);
846 
847       BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
848         .addReg(LaneSelect);
849       MIB.addReg(AMDGPU::M0);
850     }
851   }
852 
853   MIB.addReg(VDstIn);
854 
855   MI.eraseFromParent();
856   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
857 }
858 
859 // We need to handle this here because tablegen doesn't support matching
860 // instructions with multiple outputs.
861 bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
862   Register Dst0 = MI.getOperand(0).getReg();
863   Register Dst1 = MI.getOperand(1).getReg();
864 
865   LLT Ty = MRI->getType(Dst0);
866   unsigned Opc;
867   if (Ty == LLT::scalar(32))
868     Opc = AMDGPU::V_DIV_SCALE_F32_e64;
869   else if (Ty == LLT::scalar(64))
870     Opc = AMDGPU::V_DIV_SCALE_F64_e64;
871   else
872     return false;
873 
874   // TODO: Match source modifiers.
875 
876   const DebugLoc &DL = MI.getDebugLoc();
877   MachineBasicBlock *MBB = MI.getParent();
878 
879   Register Numer = MI.getOperand(3).getReg();
880   Register Denom = MI.getOperand(4).getReg();
881   unsigned ChooseDenom = MI.getOperand(5).getImm();
882 
883   Register Src0 = ChooseDenom != 0 ? Numer : Denom;
884 
885   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0)
886     .addDef(Dst1)
887     .addImm(0)     // $src0_modifiers
888     .addUse(Src0)  // $src0
889     .addImm(0)     // $src1_modifiers
890     .addUse(Denom) // $src1
891     .addImm(0)     // $src2_modifiers
892     .addUse(Numer) // $src2
893     .addImm(0)     // $clamp
894     .addImm(0);    // $omod
895 
896   MI.eraseFromParent();
897   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
898 }
899 
900 bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
901   unsigned IntrinsicID = I.getIntrinsicID();
902   switch (IntrinsicID) {
903   case Intrinsic::amdgcn_if_break: {
904     MachineBasicBlock *BB = I.getParent();
905 
906     // FIXME: Manually selecting to avoid dealiing with the SReg_1 trick
907     // SelectionDAG uses for wave32 vs wave64.
908     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
909       .add(I.getOperand(0))
910       .add(I.getOperand(2))
911       .add(I.getOperand(3));
912 
913     Register DstReg = I.getOperand(0).getReg();
914     Register Src0Reg = I.getOperand(2).getReg();
915     Register Src1Reg = I.getOperand(3).getReg();
916 
917     I.eraseFromParent();
918 
919     for (Register Reg : { DstReg, Src0Reg, Src1Reg })
920       MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
921 
922     return true;
923   }
924   case Intrinsic::amdgcn_interp_p1_f16:
925     return selectInterpP1F16(I);
926   case Intrinsic::amdgcn_wqm:
927     return constrainCopyLikeIntrin(I, AMDGPU::WQM);
928   case Intrinsic::amdgcn_softwqm:
929     return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
930   case Intrinsic::amdgcn_strict_wwm:
931   case Intrinsic::amdgcn_wwm:
932     return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WWM);
933   case Intrinsic::amdgcn_strict_wqm:
934     return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WQM);
935   case Intrinsic::amdgcn_writelane:
936     return selectWritelane(I);
937   case Intrinsic::amdgcn_div_scale:
938     return selectDivScale(I);
939   case Intrinsic::amdgcn_icmp:
940     return selectIntrinsicIcmp(I);
941   case Intrinsic::amdgcn_ballot:
942     return selectBallot(I);
943   case Intrinsic::amdgcn_reloc_constant:
944     return selectRelocConstant(I);
945   case Intrinsic::amdgcn_groupstaticsize:
946     return selectGroupStaticSize(I);
947   case Intrinsic::returnaddress:
948     return selectReturnAddress(I);
949   default:
950     return selectImpl(I, *CoverageInfo);
951   }
952 }
953 
954 static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
955   if (Size != 32 && Size != 64)
956     return -1;
957   switch (P) {
958   default:
959     llvm_unreachable("Unknown condition code!");
960   case CmpInst::ICMP_NE:
961     return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
962   case CmpInst::ICMP_EQ:
963     return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
964   case CmpInst::ICMP_SGT:
965     return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
966   case CmpInst::ICMP_SGE:
967     return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
968   case CmpInst::ICMP_SLT:
969     return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
970   case CmpInst::ICMP_SLE:
971     return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
972   case CmpInst::ICMP_UGT:
973     return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
974   case CmpInst::ICMP_UGE:
975     return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
976   case CmpInst::ICMP_ULT:
977     return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
978   case CmpInst::ICMP_ULE:
979     return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
980   }
981 }
982 
983 int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
984                                               unsigned Size) const {
985   if (Size == 64) {
986     if (!STI.hasScalarCompareEq64())
987       return -1;
988 
989     switch (P) {
990     case CmpInst::ICMP_NE:
991       return AMDGPU::S_CMP_LG_U64;
992     case CmpInst::ICMP_EQ:
993       return AMDGPU::S_CMP_EQ_U64;
994     default:
995       return -1;
996     }
997   }
998 
999   if (Size != 32)
1000     return -1;
1001 
1002   switch (P) {
1003   case CmpInst::ICMP_NE:
1004     return AMDGPU::S_CMP_LG_U32;
1005   case CmpInst::ICMP_EQ:
1006     return AMDGPU::S_CMP_EQ_U32;
1007   case CmpInst::ICMP_SGT:
1008     return AMDGPU::S_CMP_GT_I32;
1009   case CmpInst::ICMP_SGE:
1010     return AMDGPU::S_CMP_GE_I32;
1011   case CmpInst::ICMP_SLT:
1012     return AMDGPU::S_CMP_LT_I32;
1013   case CmpInst::ICMP_SLE:
1014     return AMDGPU::S_CMP_LE_I32;
1015   case CmpInst::ICMP_UGT:
1016     return AMDGPU::S_CMP_GT_U32;
1017   case CmpInst::ICMP_UGE:
1018     return AMDGPU::S_CMP_GE_U32;
1019   case CmpInst::ICMP_ULT:
1020     return AMDGPU::S_CMP_LT_U32;
1021   case CmpInst::ICMP_ULE:
1022     return AMDGPU::S_CMP_LE_U32;
1023   default:
1024     llvm_unreachable("Unknown condition code!");
1025   }
1026 }
1027 
1028 bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
1029   MachineBasicBlock *BB = I.getParent();
1030   const DebugLoc &DL = I.getDebugLoc();
1031 
1032   Register SrcReg = I.getOperand(2).getReg();
1033   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1034 
1035   auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
1036 
1037   Register CCReg = I.getOperand(0).getReg();
1038   if (!isVCC(CCReg, *MRI)) {
1039     int Opcode = getS_CMPOpcode(Pred, Size);
1040     if (Opcode == -1)
1041       return false;
1042     MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
1043             .add(I.getOperand(2))
1044             .add(I.getOperand(3));
1045     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
1046       .addReg(AMDGPU::SCC);
1047     bool Ret =
1048         constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
1049         RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
1050     I.eraseFromParent();
1051     return Ret;
1052   }
1053 
1054   int Opcode = getV_CMPOpcode(Pred, Size);
1055   if (Opcode == -1)
1056     return false;
1057 
1058   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
1059             I.getOperand(0).getReg())
1060             .add(I.getOperand(2))
1061             .add(I.getOperand(3));
1062   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
1063                                *TRI.getBoolRC(), *MRI);
1064   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1065   I.eraseFromParent();
1066   return Ret;
1067 }
1068 
1069 bool AMDGPUInstructionSelector::selectIntrinsicIcmp(MachineInstr &I) const {
1070   Register Dst = I.getOperand(0).getReg();
1071   if (isVCC(Dst, *MRI))
1072     return false;
1073 
1074   if (MRI->getType(Dst).getSizeInBits() != STI.getWavefrontSize())
1075     return false;
1076 
1077   MachineBasicBlock *BB = I.getParent();
1078   const DebugLoc &DL = I.getDebugLoc();
1079   Register SrcReg = I.getOperand(2).getReg();
1080   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1081   auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm());
1082 
1083   int Opcode = getV_CMPOpcode(Pred, Size);
1084   if (Opcode == -1)
1085     return false;
1086 
1087   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst)
1088                            .add(I.getOperand(2))
1089                            .add(I.getOperand(3));
1090   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), *TRI.getBoolRC(),
1091                                *MRI);
1092   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1093   I.eraseFromParent();
1094   return Ret;
1095 }
1096 
1097 bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
1098   MachineBasicBlock *BB = I.getParent();
1099   const DebugLoc &DL = I.getDebugLoc();
1100   Register DstReg = I.getOperand(0).getReg();
1101   const unsigned Size = MRI->getType(DstReg).getSizeInBits();
1102   const bool Is64 = Size == 64;
1103 
1104   if (Size != STI.getWavefrontSize())
1105     return false;
1106 
1107   Optional<ValueAndVReg> Arg =
1108       getConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI, true);
1109 
1110   if (Arg.hasValue()) {
1111     const int64_t Value = Arg.getValue().Value.getSExtValue();
1112     if (Value == 0) {
1113       unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
1114       BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
1115     } else if (Value == -1) { // all ones
1116       Register SrcReg = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
1117       BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1118     } else
1119       return false;
1120   } else {
1121     Register SrcReg = I.getOperand(2).getReg();
1122     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1123   }
1124 
1125   I.eraseFromParent();
1126   return true;
1127 }
1128 
1129 bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const {
1130   Register DstReg = I.getOperand(0).getReg();
1131   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
1132   const TargetRegisterClass *DstRC =
1133     TRI.getRegClassForSizeOnBank(32, *DstBank, *MRI);
1134   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
1135     return false;
1136 
1137   const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID;
1138 
1139   Module *M = MF->getFunction().getParent();
1140   const MDNode *Metadata = I.getOperand(2).getMetadata();
1141   auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString();
1142   auto RelocSymbol = cast<GlobalVariable>(
1143     M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext())));
1144 
1145   MachineBasicBlock *BB = I.getParent();
1146   BuildMI(*BB, &I, I.getDebugLoc(),
1147           TII.get(IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg)
1148     .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO);
1149 
1150   I.eraseFromParent();
1151   return true;
1152 }
1153 
1154 bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const {
1155   Triple::OSType OS = MF->getTarget().getTargetTriple().getOS();
1156 
1157   Register DstReg = I.getOperand(0).getReg();
1158   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1159   unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ?
1160     AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1161 
1162   MachineBasicBlock *MBB = I.getParent();
1163   const DebugLoc &DL = I.getDebugLoc();
1164 
1165   auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg);
1166 
1167   if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) {
1168     const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1169     MIB.addImm(MFI->getLDSSize());
1170   } else {
1171     Module *M = MF->getFunction().getParent();
1172     const GlobalValue *GV
1173       = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize);
1174     MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO);
1175   }
1176 
1177   I.eraseFromParent();
1178   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1179 }
1180 
1181 bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const {
1182   MachineBasicBlock *MBB = I.getParent();
1183   MachineFunction &MF = *MBB->getParent();
1184   const DebugLoc &DL = I.getDebugLoc();
1185 
1186   MachineOperand &Dst = I.getOperand(0);
1187   Register DstReg = Dst.getReg();
1188   unsigned Depth = I.getOperand(2).getImm();
1189 
1190   const TargetRegisterClass *RC
1191     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
1192   if (!RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) ||
1193       !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
1194     return false;
1195 
1196   // Check for kernel and shader functions
1197   if (Depth != 0 ||
1198       MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
1199     BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
1200       .addImm(0);
1201     I.eraseFromParent();
1202     return true;
1203   }
1204 
1205   MachineFrameInfo &MFI = MF.getFrameInfo();
1206   // There is a call to @llvm.returnaddress in this function
1207   MFI.setReturnAddressIsTaken(true);
1208 
1209   // Get the return address reg and mark it as an implicit live-in
1210   Register ReturnAddrReg = TRI.getReturnAddressReg(MF);
1211   Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg,
1212                                              AMDGPU::SReg_64RegClass);
1213   BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
1214     .addReg(LiveIn);
1215   I.eraseFromParent();
1216   return true;
1217 }
1218 
1219 bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
1220   // FIXME: Manually selecting to avoid dealiing with the SReg_1 trick
1221   // SelectionDAG uses for wave32 vs wave64.
1222   MachineBasicBlock *BB = MI.getParent();
1223   BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
1224       .add(MI.getOperand(1));
1225 
1226   Register Reg = MI.getOperand(1).getReg();
1227   MI.eraseFromParent();
1228 
1229   if (!MRI->getRegClassOrNull(Reg))
1230     MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
1231   return true;
1232 }
1233 
1234 bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
1235   MachineInstr &MI, Intrinsic::ID IntrID) const {
1236   MachineBasicBlock *MBB = MI.getParent();
1237   MachineFunction *MF = MBB->getParent();
1238   const DebugLoc &DL = MI.getDebugLoc();
1239 
1240   unsigned IndexOperand = MI.getOperand(7).getImm();
1241   bool WaveRelease = MI.getOperand(8).getImm() != 0;
1242   bool WaveDone = MI.getOperand(9).getImm() != 0;
1243 
1244   if (WaveDone && !WaveRelease)
1245     report_fatal_error("ds_ordered_count: wave_done requires wave_release");
1246 
1247   unsigned OrderedCountIndex = IndexOperand & 0x3f;
1248   IndexOperand &= ~0x3f;
1249   unsigned CountDw = 0;
1250 
1251   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
1252     CountDw = (IndexOperand >> 24) & 0xf;
1253     IndexOperand &= ~(0xf << 24);
1254 
1255     if (CountDw < 1 || CountDw > 4) {
1256       report_fatal_error(
1257         "ds_ordered_count: dword count must be between 1 and 4");
1258     }
1259   }
1260 
1261   if (IndexOperand)
1262     report_fatal_error("ds_ordered_count: bad index operand");
1263 
1264   unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
1265   unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF);
1266 
1267   unsigned Offset0 = OrderedCountIndex << 2;
1268   unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
1269                      (Instruction << 4);
1270 
1271   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
1272     Offset1 |= (CountDw - 1) << 6;
1273 
1274   unsigned Offset = Offset0 | (Offset1 << 8);
1275 
1276   Register M0Val = MI.getOperand(2).getReg();
1277   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1278     .addReg(M0Val);
1279 
1280   Register DstReg = MI.getOperand(0).getReg();
1281   Register ValReg = MI.getOperand(3).getReg();
1282   MachineInstrBuilder DS =
1283     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
1284       .addReg(ValReg)
1285       .addImm(Offset)
1286       .cloneMemRefs(MI);
1287 
1288   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
1289     return false;
1290 
1291   bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
1292   MI.eraseFromParent();
1293   return Ret;
1294 }
1295 
1296 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
1297   switch (IntrID) {
1298   case Intrinsic::amdgcn_ds_gws_init:
1299     return AMDGPU::DS_GWS_INIT;
1300   case Intrinsic::amdgcn_ds_gws_barrier:
1301     return AMDGPU::DS_GWS_BARRIER;
1302   case Intrinsic::amdgcn_ds_gws_sema_v:
1303     return AMDGPU::DS_GWS_SEMA_V;
1304   case Intrinsic::amdgcn_ds_gws_sema_br:
1305     return AMDGPU::DS_GWS_SEMA_BR;
1306   case Intrinsic::amdgcn_ds_gws_sema_p:
1307     return AMDGPU::DS_GWS_SEMA_P;
1308   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1309     return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
1310   default:
1311     llvm_unreachable("not a gws intrinsic");
1312   }
1313 }
1314 
1315 bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
1316                                                      Intrinsic::ID IID) const {
1317   if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
1318       !STI.hasGWSSemaReleaseAll())
1319     return false;
1320 
1321   // intrinsic ID, vsrc, offset
1322   const bool HasVSrc = MI.getNumOperands() == 3;
1323   assert(HasVSrc || MI.getNumOperands() == 2);
1324 
1325   Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
1326   const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
1327   if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
1328     return false;
1329 
1330   MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1331   assert(OffsetDef);
1332 
1333   unsigned ImmOffset;
1334 
1335   MachineBasicBlock *MBB = MI.getParent();
1336   const DebugLoc &DL = MI.getDebugLoc();
1337 
1338   MachineInstr *Readfirstlane = nullptr;
1339 
1340   // If we legalized the VGPR input, strip out the readfirstlane to analyze the
1341   // incoming offset, in case there's an add of a constant. We'll have to put it
1342   // back later.
1343   if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
1344     Readfirstlane = OffsetDef;
1345     BaseOffset = OffsetDef->getOperand(1).getReg();
1346     OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1347   }
1348 
1349   if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
1350     // If we have a constant offset, try to use the 0 in m0 as the base.
1351     // TODO: Look into changing the default m0 initialization value. If the
1352     // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
1353     // the immediate offset.
1354 
1355     ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
1356     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1357       .addImm(0);
1358   } else {
1359     std::tie(BaseOffset, ImmOffset) =
1360         AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset);
1361 
1362     if (Readfirstlane) {
1363       // We have the constant offset now, so put the readfirstlane back on the
1364       // variable component.
1365       if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
1366         return false;
1367 
1368       Readfirstlane->getOperand(1).setReg(BaseOffset);
1369       BaseOffset = Readfirstlane->getOperand(0).getReg();
1370     } else {
1371       if (!RBI.constrainGenericRegister(BaseOffset,
1372                                         AMDGPU::SReg_32RegClass, *MRI))
1373         return false;
1374     }
1375 
1376     Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1377     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
1378       .addReg(BaseOffset)
1379       .addImm(16);
1380 
1381     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1382       .addReg(M0Base);
1383   }
1384 
1385   // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
1386   // offset field) % 64. Some versions of the programming guide omit the m0
1387   // part, or claim it's from offset 0.
1388   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));
1389 
1390   if (HasVSrc) {
1391     Register VSrc = MI.getOperand(1).getReg();
1392     MIB.addReg(VSrc);
1393     if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
1394       return false;
1395   }
1396 
1397   MIB.addImm(ImmOffset)
1398      .cloneMemRefs(MI);
1399 
1400   MI.eraseFromParent();
1401   return true;
1402 }
1403 
1404 bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
1405                                                       bool IsAppend) const {
1406   Register PtrBase = MI.getOperand(2).getReg();
1407   LLT PtrTy = MRI->getType(PtrBase);
1408   bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
1409 
1410   unsigned Offset;
1411   std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));
1412 
1413   // TODO: Should this try to look through readfirstlane like GWS?
1414   if (!isDSOffsetLegal(PtrBase, Offset)) {
1415     PtrBase = MI.getOperand(2).getReg();
1416     Offset = 0;
1417   }
1418 
1419   MachineBasicBlock *MBB = MI.getParent();
1420   const DebugLoc &DL = MI.getDebugLoc();
1421   const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
1422 
1423   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1424     .addReg(PtrBase);
1425   if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI))
1426     return false;
1427 
1428   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
1429     .addImm(Offset)
1430     .addImm(IsGDS ? -1 : 0)
1431     .cloneMemRefs(MI);
1432   MI.eraseFromParent();
1433   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1434 }
1435 
1436 bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const {
1437   if (TM.getOptLevel() > CodeGenOpt::None) {
1438     unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second;
1439     if (WGSize <= STI.getWavefrontSize()) {
1440       MachineBasicBlock *MBB = MI.getParent();
1441       const DebugLoc &DL = MI.getDebugLoc();
1442       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER));
1443       MI.eraseFromParent();
1444       return true;
1445     }
1446   }
1447   return selectImpl(MI, *CoverageInfo);
1448 }
1449 
1450 static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
1451                          bool &IsTexFail) {
1452   if (TexFailCtrl)
1453     IsTexFail = true;
1454 
1455   TFE = (TexFailCtrl & 0x1) ? 1 : 0;
1456   TexFailCtrl &= ~(uint64_t)0x1;
1457   LWE = (TexFailCtrl & 0x2) ? 1 : 0;
1458   TexFailCtrl &= ~(uint64_t)0x2;
1459 
1460   return TexFailCtrl == 0;
1461 }
1462 
1463 bool AMDGPUInstructionSelector::selectImageIntrinsic(
1464   MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
1465   MachineBasicBlock *MBB = MI.getParent();
1466   const DebugLoc &DL = MI.getDebugLoc();
1467 
1468   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
1469     AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
1470 
1471   const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
1472   const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
1473       AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
1474   const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo =
1475       AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode);
1476   unsigned IntrOpcode = Intr->BaseOpcode;
1477   const bool IsGFX10Plus = AMDGPU::isGFX10Plus(STI);
1478 
1479   const unsigned ArgOffset = MI.getNumExplicitDefs() + 1;
1480 
1481   Register VDataIn, VDataOut;
1482   LLT VDataTy;
1483   int NumVDataDwords = -1;
1484   bool IsD16 = false;
1485 
1486   bool Unorm;
1487   if (!BaseOpcode->Sampler)
1488     Unorm = true;
1489   else
1490     Unorm = MI.getOperand(ArgOffset + Intr->UnormIndex).getImm() != 0;
1491 
1492   bool TFE;
1493   bool LWE;
1494   bool IsTexFail = false;
1495   if (!parseTexFail(MI.getOperand(ArgOffset + Intr->TexFailCtrlIndex).getImm(),
1496                     TFE, LWE, IsTexFail))
1497     return false;
1498 
1499   const int Flags = MI.getOperand(ArgOffset + Intr->NumArgs).getImm();
1500   const bool IsA16 = (Flags & 1) != 0;
1501   const bool IsG16 = (Flags & 2) != 0;
1502 
1503   // A16 implies 16 bit gradients
1504   if (IsA16 && !IsG16)
1505     return false;
1506 
1507   unsigned DMask = 0;
1508   unsigned DMaskLanes = 0;
1509 
1510   if (BaseOpcode->Atomic) {
1511     VDataOut = MI.getOperand(0).getReg();
1512     VDataIn = MI.getOperand(2).getReg();
1513     LLT Ty = MRI->getType(VDataIn);
1514 
1515     // Be careful to allow atomic swap on 16-bit element vectors.
1516     const bool Is64Bit = BaseOpcode->AtomicX2 ?
1517       Ty.getSizeInBits() == 128 :
1518       Ty.getSizeInBits() == 64;
1519 
1520     if (BaseOpcode->AtomicX2) {
1521       assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister);
1522 
1523       DMask = Is64Bit ? 0xf : 0x3;
1524       NumVDataDwords = Is64Bit ? 4 : 2;
1525     } else {
1526       DMask = Is64Bit ? 0x3 : 0x1;
1527       NumVDataDwords = Is64Bit ? 2 : 1;
1528     }
1529   } else {
1530     DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm();
1531     DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
1532 
1533     // One memoperand is mandatory, except for getresinfo.
1534     // FIXME: Check this in verifier.
1535     if (!MI.memoperands_empty()) {
1536       const MachineMemOperand *MMO = *MI.memoperands_begin();
1537 
1538       // Infer d16 from the memory size, as the register type will be mangled by
1539       // unpacked subtargets, or by TFE.
1540       IsD16 = ((8 * MMO->getSize()) / DMaskLanes) < 32;
1541     }
1542 
1543     if (BaseOpcode->Store) {
1544       VDataIn = MI.getOperand(1).getReg();
1545       VDataTy = MRI->getType(VDataIn);
1546       NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
1547     } else {
1548       VDataOut = MI.getOperand(0).getReg();
1549       VDataTy = MRI->getType(VDataOut);
1550       NumVDataDwords = DMaskLanes;
1551 
1552       if (IsD16 && !STI.hasUnpackedD16VMem())
1553         NumVDataDwords = (DMaskLanes + 1) / 2;
1554     }
1555   }
1556 
1557   // Optimize _L to _LZ when _L is zero
1558   if (LZMappingInfo) {
1559     // The legalizer replaced the register with an immediate 0 if we need to
1560     // change the opcode.
1561     const MachineOperand &Lod = MI.getOperand(ArgOffset + Intr->LodIndex);
1562     if (Lod.isImm()) {
1563       assert(Lod.getImm() == 0);
1564       IntrOpcode = LZMappingInfo->LZ;  // set new opcode to _lz variant of _l
1565     }
1566   }
1567 
1568   // Optimize _mip away, when 'lod' is zero
1569   if (MIPMappingInfo) {
1570     const MachineOperand &Lod = MI.getOperand(ArgOffset + Intr->MipIndex);
1571     if (Lod.isImm()) {
1572       assert(Lod.getImm() == 0);
1573       IntrOpcode = MIPMappingInfo->NONMIP;  // set new opcode to variant without _mip
1574     }
1575   }
1576 
1577   // Set G16 opcode
1578   if (IsG16 && !IsA16) {
1579     const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
1580         AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode);
1581     assert(G16MappingInfo);
1582     IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16
1583   }
1584 
1585   // TODO: Check this in verifier.
1586   assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");
1587 
1588   unsigned CPol = MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm();
1589   if (BaseOpcode->Atomic)
1590     CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization
1591   if (CPol & ~AMDGPU::CPol::ALL)
1592     return false;
1593 
1594   int NumVAddrRegs = 0;
1595   int NumVAddrDwords = 0;
1596   for (unsigned I = Intr->VAddrStart; I < Intr->VAddrEnd; I++) {
1597     // Skip the $noregs and 0s inserted during legalization.
1598     MachineOperand &AddrOp = MI.getOperand(ArgOffset + I);
1599     if (!AddrOp.isReg())
1600       continue; // XXX - Break?
1601 
1602     Register Addr = AddrOp.getReg();
1603     if (!Addr)
1604       break;
1605 
1606     ++NumVAddrRegs;
1607     NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32;
1608   }
1609 
1610   // The legalizer preprocessed the intrinsic arguments. If we aren't using
1611   // NSA, these should have beeen packed into a single value in the first
1612   // address register
1613   const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs;
1614   if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) {
1615     LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n");
1616     return false;
1617   }
1618 
1619   if (IsTexFail)
1620     ++NumVDataDwords;
1621 
1622   int Opcode = -1;
1623   if (IsGFX10Plus) {
1624     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1625                                    UseNSA ? AMDGPU::MIMGEncGfx10NSA
1626                                           : AMDGPU::MIMGEncGfx10Default,
1627                                    NumVDataDwords, NumVAddrDwords);
1628   } else {
1629     if (STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
1630       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
1631                                      NumVDataDwords, NumVAddrDwords);
1632     if (Opcode == -1)
1633       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
1634                                      NumVDataDwords, NumVAddrDwords);
1635   }
1636   assert(Opcode != -1);
1637 
1638   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode))
1639     .cloneMemRefs(MI);
1640 
1641   if (VDataOut) {
1642     if (BaseOpcode->AtomicX2) {
1643       const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64;
1644 
1645       Register TmpReg = MRI->createVirtualRegister(
1646         Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
1647       unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
1648 
1649       MIB.addDef(TmpReg);
1650       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut)
1651         .addReg(TmpReg, RegState::Kill, SubReg);
1652 
1653     } else {
1654       MIB.addDef(VDataOut); // vdata output
1655     }
1656   }
1657 
1658   if (VDataIn)
1659     MIB.addReg(VDataIn); // vdata input
1660 
1661   for (int I = 0; I != NumVAddrRegs; ++I) {
1662     MachineOperand &SrcOp = MI.getOperand(ArgOffset + Intr->VAddrStart + I);
1663     if (SrcOp.isReg()) {
1664       assert(SrcOp.getReg() != 0);
1665       MIB.addReg(SrcOp.getReg());
1666     }
1667   }
1668 
1669   MIB.addReg(MI.getOperand(ArgOffset + Intr->RsrcIndex).getReg());
1670   if (BaseOpcode->Sampler)
1671     MIB.addReg(MI.getOperand(ArgOffset + Intr->SampIndex).getReg());
1672 
1673   MIB.addImm(DMask); // dmask
1674 
1675   if (IsGFX10Plus)
1676     MIB.addImm(DimInfo->Encoding);
1677   MIB.addImm(Unorm);
1678 
1679   MIB.addImm(CPol);
1680   MIB.addImm(IsA16 &&  // a16 or r128
1681              STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0);
1682   if (IsGFX10Plus)
1683     MIB.addImm(IsA16 ? -1 : 0);
1684 
1685   MIB.addImm(TFE); // tfe
1686   MIB.addImm(LWE); // lwe
1687   if (!IsGFX10Plus)
1688     MIB.addImm(DimInfo->DA ? -1 : 0);
1689   if (BaseOpcode->HasD16)
1690     MIB.addImm(IsD16 ? -1 : 0);
1691 
1692   MI.eraseFromParent();
1693   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1694 }
1695 
1696 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
1697     MachineInstr &I) const {
1698   unsigned IntrinsicID = I.getIntrinsicID();
1699   switch (IntrinsicID) {
1700   case Intrinsic::amdgcn_end_cf:
1701     return selectEndCfIntrinsic(I);
1702   case Intrinsic::amdgcn_ds_ordered_add:
1703   case Intrinsic::amdgcn_ds_ordered_swap:
1704     return selectDSOrderedIntrinsic(I, IntrinsicID);
1705   case Intrinsic::amdgcn_ds_gws_init:
1706   case Intrinsic::amdgcn_ds_gws_barrier:
1707   case Intrinsic::amdgcn_ds_gws_sema_v:
1708   case Intrinsic::amdgcn_ds_gws_sema_br:
1709   case Intrinsic::amdgcn_ds_gws_sema_p:
1710   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1711     return selectDSGWSIntrinsic(I, IntrinsicID);
1712   case Intrinsic::amdgcn_ds_append:
1713     return selectDSAppendConsume(I, true);
1714   case Intrinsic::amdgcn_ds_consume:
1715     return selectDSAppendConsume(I, false);
1716   case Intrinsic::amdgcn_s_barrier:
1717     return selectSBarrier(I);
1718   case Intrinsic::amdgcn_global_atomic_fadd:
1719     return selectGlobalAtomicFaddIntrinsic(I);
1720   default: {
1721     return selectImpl(I, *CoverageInfo);
1722   }
1723   }
1724 }
1725 
1726 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
1727   if (selectImpl(I, *CoverageInfo))
1728     return true;
1729 
1730   MachineBasicBlock *BB = I.getParent();
1731   const DebugLoc &DL = I.getDebugLoc();
1732 
1733   Register DstReg = I.getOperand(0).getReg();
1734   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
1735   assert(Size <= 32 || Size == 64);
1736   const MachineOperand &CCOp = I.getOperand(1);
1737   Register CCReg = CCOp.getReg();
1738   if (!isVCC(CCReg, *MRI)) {
1739     unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
1740                                          AMDGPU::S_CSELECT_B32;
1741     MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
1742             .addReg(CCReg);
1743 
1744     // The generic constrainSelectedInstRegOperands doesn't work for the scc register
1745     // bank, because it does not cover the register class that we used to represent
1746     // for it.  So we need to manually set the register class here.
1747     if (!MRI->getRegClassOrNull(CCReg))
1748         MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
1749     MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
1750             .add(I.getOperand(2))
1751             .add(I.getOperand(3));
1752 
1753     bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI) |
1754                constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
1755     I.eraseFromParent();
1756     return Ret;
1757   }
1758 
1759   // Wide VGPR select should have been split in RegBankSelect.
1760   if (Size > 32)
1761     return false;
1762 
1763   MachineInstr *Select =
1764       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1765               .addImm(0)
1766               .add(I.getOperand(3))
1767               .addImm(0)
1768               .add(I.getOperand(2))
1769               .add(I.getOperand(1));
1770 
1771   bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1772   I.eraseFromParent();
1773   return Ret;
1774 }
1775 
1776 static int sizeToSubRegIndex(unsigned Size) {
1777   switch (Size) {
1778   case 32:
1779     return AMDGPU::sub0;
1780   case 64:
1781     return AMDGPU::sub0_sub1;
1782   case 96:
1783     return AMDGPU::sub0_sub1_sub2;
1784   case 128:
1785     return AMDGPU::sub0_sub1_sub2_sub3;
1786   case 256:
1787     return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
1788   default:
1789     if (Size < 32)
1790       return AMDGPU::sub0;
1791     if (Size > 256)
1792       return -1;
1793     return sizeToSubRegIndex(PowerOf2Ceil(Size));
1794   }
1795 }
1796 
1797 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
1798   Register DstReg = I.getOperand(0).getReg();
1799   Register SrcReg = I.getOperand(1).getReg();
1800   const LLT DstTy = MRI->getType(DstReg);
1801   const LLT SrcTy = MRI->getType(SrcReg);
1802   const LLT S1 = LLT::scalar(1);
1803 
1804   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1805   const RegisterBank *DstRB;
1806   if (DstTy == S1) {
1807     // This is a special case. We don't treat s1 for legalization artifacts as
1808     // vcc booleans.
1809     DstRB = SrcRB;
1810   } else {
1811     DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1812     if (SrcRB != DstRB)
1813       return false;
1814   }
1815 
1816   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
1817 
1818   unsigned DstSize = DstTy.getSizeInBits();
1819   unsigned SrcSize = SrcTy.getSizeInBits();
1820 
1821   const TargetRegisterClass *SrcRC
1822     = TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB, *MRI);
1823   const TargetRegisterClass *DstRC
1824     = TRI.getRegClassForSizeOnBank(DstSize, *DstRB, *MRI);
1825   if (!SrcRC || !DstRC)
1826     return false;
1827 
1828   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1829       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1830     LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
1831     return false;
1832   }
1833 
1834   if (DstTy == LLT::vector(2, 16) && SrcTy == LLT::vector(2, 32)) {
1835     MachineBasicBlock *MBB = I.getParent();
1836     const DebugLoc &DL = I.getDebugLoc();
1837 
1838     Register LoReg = MRI->createVirtualRegister(DstRC);
1839     Register HiReg = MRI->createVirtualRegister(DstRC);
1840     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg)
1841       .addReg(SrcReg, 0, AMDGPU::sub0);
1842     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg)
1843       .addReg(SrcReg, 0, AMDGPU::sub1);
1844 
1845     if (IsVALU && STI.hasSDWA()) {
1846       // Write the low 16-bits of the high element into the high 16-bits of the
1847       // low element.
1848       MachineInstr *MovSDWA =
1849         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
1850         .addImm(0)                             // $src0_modifiers
1851         .addReg(HiReg)                         // $src0
1852         .addImm(0)                             // $clamp
1853         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
1854         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
1855         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
1856         .addReg(LoReg, RegState::Implicit);
1857       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
1858     } else {
1859       Register TmpReg0 = MRI->createVirtualRegister(DstRC);
1860       Register TmpReg1 = MRI->createVirtualRegister(DstRC);
1861       Register ImmReg = MRI->createVirtualRegister(DstRC);
1862       if (IsVALU) {
1863         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0)
1864           .addImm(16)
1865           .addReg(HiReg);
1866       } else {
1867         BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0)
1868           .addReg(HiReg)
1869           .addImm(16);
1870       }
1871 
1872       unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1873       unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
1874       unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32;
1875 
1876       BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg)
1877         .addImm(0xffff);
1878       BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1)
1879         .addReg(LoReg)
1880         .addReg(ImmReg);
1881       BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg)
1882         .addReg(TmpReg0)
1883         .addReg(TmpReg1);
1884     }
1885 
1886     I.eraseFromParent();
1887     return true;
1888   }
1889 
1890   if (!DstTy.isScalar())
1891     return false;
1892 
1893   if (SrcSize > 32) {
1894     int SubRegIdx = sizeToSubRegIndex(DstSize);
1895     if (SubRegIdx == -1)
1896       return false;
1897 
1898     // Deal with weird cases where the class only partially supports the subreg
1899     // index.
1900     const TargetRegisterClass *SrcWithSubRC
1901       = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
1902     if (!SrcWithSubRC)
1903       return false;
1904 
1905     if (SrcWithSubRC != SrcRC) {
1906       if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI))
1907         return false;
1908     }
1909 
1910     I.getOperand(1).setSubReg(SubRegIdx);
1911   }
1912 
1913   I.setDesc(TII.get(TargetOpcode::COPY));
1914   return true;
1915 }
1916 
1917 /// \returns true if a bitmask for \p Size bits will be an inline immediate.
1918 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
1919   Mask = maskTrailingOnes<unsigned>(Size);
1920   int SignedMask = static_cast<int>(Mask);
1921   return SignedMask >= -16 && SignedMask <= 64;
1922 }
1923 
1924 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
1925 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
1926   Register Reg, const MachineRegisterInfo &MRI,
1927   const TargetRegisterInfo &TRI) const {
1928   const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
1929   if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
1930     return RB;
1931 
1932   // Ignore the type, since we don't use vcc in artifacts.
1933   if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
1934     return &RBI.getRegBankFromRegClass(*RC, LLT());
1935   return nullptr;
1936 }
1937 
1938 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
1939   bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG;
1940   bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg;
1941   const DebugLoc &DL = I.getDebugLoc();
1942   MachineBasicBlock &MBB = *I.getParent();
1943   const Register DstReg = I.getOperand(0).getReg();
1944   const Register SrcReg = I.getOperand(1).getReg();
1945 
1946   const LLT DstTy = MRI->getType(DstReg);
1947   const LLT SrcTy = MRI->getType(SrcReg);
1948   const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ?
1949     I.getOperand(2).getImm() : SrcTy.getSizeInBits();
1950   const unsigned DstSize = DstTy.getSizeInBits();
1951   if (!DstTy.isScalar())
1952     return false;
1953 
1954   // Artifact casts should never use vcc.
1955   const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
1956 
1957   // FIXME: This should probably be illegal and split earlier.
1958   if (I.getOpcode() == AMDGPU::G_ANYEXT) {
1959     if (DstSize <= 32)
1960       return selectCOPY(I);
1961 
1962     const TargetRegisterClass *SrcRC =
1963         TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank, *MRI);
1964     const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
1965     const TargetRegisterClass *DstRC =
1966         TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
1967 
1968     Register UndefReg = MRI->createVirtualRegister(SrcRC);
1969     BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
1970     BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
1971       .addReg(SrcReg)
1972       .addImm(AMDGPU::sub0)
1973       .addReg(UndefReg)
1974       .addImm(AMDGPU::sub1);
1975     I.eraseFromParent();
1976 
1977     return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) &&
1978            RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI);
1979   }
1980 
1981   if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
1982     // 64-bit should have been split up in RegBankSelect
1983 
1984     // Try to use an and with a mask if it will save code size.
1985     unsigned Mask;
1986     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
1987       MachineInstr *ExtI =
1988       BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
1989         .addImm(Mask)
1990         .addReg(SrcReg);
1991       I.eraseFromParent();
1992       return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
1993     }
1994 
1995     const unsigned BFE = Signed ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
1996     MachineInstr *ExtI =
1997       BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
1998       .addReg(SrcReg)
1999       .addImm(0) // Offset
2000       .addImm(SrcSize); // Width
2001     I.eraseFromParent();
2002     return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2003   }
2004 
2005   if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
2006     const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ?
2007       AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass;
2008     if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI))
2009       return false;
2010 
2011     if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
2012       const unsigned SextOpc = SrcSize == 8 ?
2013         AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
2014       BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
2015         .addReg(SrcReg);
2016       I.eraseFromParent();
2017       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2018     }
2019 
2020     const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
2021     const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
2022 
2023     // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16]= width.
2024     if (DstSize > 32 && (SrcSize <= 32 || InReg)) {
2025       // We need a 64-bit register source, but the high bits don't matter.
2026       Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
2027       Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2028       unsigned SubReg = InReg ? AMDGPU::sub0 : 0;
2029 
2030       BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2031       BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
2032         .addReg(SrcReg, 0, SubReg)
2033         .addImm(AMDGPU::sub0)
2034         .addReg(UndefReg)
2035         .addImm(AMDGPU::sub1);
2036 
2037       BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
2038         .addReg(ExtReg)
2039         .addImm(SrcSize << 16);
2040 
2041       I.eraseFromParent();
2042       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
2043     }
2044 
2045     unsigned Mask;
2046     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2047       BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
2048         .addReg(SrcReg)
2049         .addImm(Mask);
2050     } else {
2051       BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
2052         .addReg(SrcReg)
2053         .addImm(SrcSize << 16);
2054     }
2055 
2056     I.eraseFromParent();
2057     return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2058   }
2059 
2060   return false;
2061 }
2062 
2063 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
2064   MachineBasicBlock *BB = I.getParent();
2065   MachineOperand &ImmOp = I.getOperand(1);
2066   Register DstReg = I.getOperand(0).getReg();
2067   unsigned Size = MRI->getType(DstReg).getSizeInBits();
2068 
2069   // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
2070   if (ImmOp.isFPImm()) {
2071     const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
2072     ImmOp.ChangeToImmediate(Imm.getZExtValue());
2073   } else if (ImmOp.isCImm()) {
2074     ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue());
2075   } else {
2076     llvm_unreachable("Not supported by g_constants");
2077   }
2078 
2079   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2080   const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID;
2081 
2082   unsigned Opcode;
2083   if (DstRB->getID() == AMDGPU::VCCRegBankID) {
2084     Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
2085   } else {
2086     Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
2087 
2088     // We should never produce s1 values on banks other than VCC. If the user of
2089     // this already constrained the register, we may incorrectly think it's VCC
2090     // if it wasn't originally.
2091     if (Size == 1)
2092       return false;
2093   }
2094 
2095   if (Size != 64) {
2096     I.setDesc(TII.get(Opcode));
2097     I.addImplicitDefUseOperands(*MF);
2098     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2099   }
2100 
2101   const DebugLoc &DL = I.getDebugLoc();
2102 
2103   APInt Imm(Size, I.getOperand(1).getImm());
2104 
2105   MachineInstr *ResInst;
2106   if (IsSgpr && TII.isInlineConstant(Imm)) {
2107     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
2108       .addImm(I.getOperand(1).getImm());
2109   } else {
2110     const TargetRegisterClass *RC = IsSgpr ?
2111       &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
2112     Register LoReg = MRI->createVirtualRegister(RC);
2113     Register HiReg = MRI->createVirtualRegister(RC);
2114 
2115     BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
2116       .addImm(Imm.trunc(32).getZExtValue());
2117 
2118     BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
2119       .addImm(Imm.ashr(32).getZExtValue());
2120 
2121     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2122       .addReg(LoReg)
2123       .addImm(AMDGPU::sub0)
2124       .addReg(HiReg)
2125       .addImm(AMDGPU::sub1);
2126   }
2127 
2128   // We can't call constrainSelectedInstRegOperands here, because it doesn't
2129   // work for target independent opcodes
2130   I.eraseFromParent();
2131   const TargetRegisterClass *DstRC =
2132     TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
2133   if (!DstRC)
2134     return true;
2135   return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
2136 }
2137 
2138 bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
2139   // Only manually handle the f64 SGPR case.
2140   //
2141   // FIXME: This is a workaround for 2.5 different tablegen problems. Because
2142   // the bit ops theoretically have a second result due to the implicit def of
2143   // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing
2144   // that is easy by disabling the check. The result works, but uses a
2145   // nonsensical sreg32orlds_and_sreg_1 regclass.
2146   //
2147   // The DAG emitter is more problematic, and incorrectly adds both S_XOR_B32 to
2148   // the variadic REG_SEQUENCE operands.
2149 
2150   Register Dst = MI.getOperand(0).getReg();
2151   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2152   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2153       MRI->getType(Dst) != LLT::scalar(64))
2154     return false;
2155 
2156   Register Src = MI.getOperand(1).getReg();
2157   MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI);
2158   if (Fabs)
2159     Src = Fabs->getOperand(1).getReg();
2160 
2161   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2162       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2163     return false;
2164 
2165   MachineBasicBlock *BB = MI.getParent();
2166   const DebugLoc &DL = MI.getDebugLoc();
2167   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2168   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2169   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2170   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2171 
2172   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2173     .addReg(Src, 0, AMDGPU::sub0);
2174   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2175     .addReg(Src, 0, AMDGPU::sub1);
2176   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2177     .addImm(0x80000000);
2178 
2179   // Set or toggle sign bit.
2180   unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
2181   BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
2182     .addReg(HiReg)
2183     .addReg(ConstReg);
2184   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2185     .addReg(LoReg)
2186     .addImm(AMDGPU::sub0)
2187     .addReg(OpReg)
2188     .addImm(AMDGPU::sub1);
2189   MI.eraseFromParent();
2190   return true;
2191 }
2192 
2193 // FIXME: This is a workaround for the same tablegen problems as G_FNEG
2194 bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {
2195   Register Dst = MI.getOperand(0).getReg();
2196   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2197   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2198       MRI->getType(Dst) != LLT::scalar(64))
2199     return false;
2200 
2201   Register Src = MI.getOperand(1).getReg();
2202   MachineBasicBlock *BB = MI.getParent();
2203   const DebugLoc &DL = MI.getDebugLoc();
2204   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2205   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2206   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2207   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2208 
2209   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2210       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2211     return false;
2212 
2213   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2214     .addReg(Src, 0, AMDGPU::sub0);
2215   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2216     .addReg(Src, 0, AMDGPU::sub1);
2217   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2218     .addImm(0x7fffffff);
2219 
2220   // Clear sign bit.
2221   // TODO: Should this used S_BITSET0_*?
2222   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg)
2223     .addReg(HiReg)
2224     .addReg(ConstReg);
2225   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2226     .addReg(LoReg)
2227     .addImm(AMDGPU::sub0)
2228     .addReg(OpReg)
2229     .addImm(AMDGPU::sub1);
2230 
2231   MI.eraseFromParent();
2232   return true;
2233 }
2234 
2235 static bool isConstant(const MachineInstr &MI) {
2236   return MI.getOpcode() == TargetOpcode::G_CONSTANT;
2237 }
2238 
2239 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
2240     const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
2241 
2242   const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
2243 
2244   assert(PtrMI);
2245 
2246   if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
2247     return;
2248 
2249   GEPInfo GEPInfo(*PtrMI);
2250 
2251   for (unsigned i = 1; i != 3; ++i) {
2252     const MachineOperand &GEPOp = PtrMI->getOperand(i);
2253     const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
2254     assert(OpDef);
2255     if (i == 2 && isConstant(*OpDef)) {
2256       // TODO: Could handle constant base + variable offset, but a combine
2257       // probably should have commuted it.
2258       assert(GEPInfo.Imm == 0);
2259       GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
2260       continue;
2261     }
2262     const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
2263     if (OpBank->getID() == AMDGPU::SGPRRegBankID)
2264       GEPInfo.SgprParts.push_back(GEPOp.getReg());
2265     else
2266       GEPInfo.VgprParts.push_back(GEPOp.getReg());
2267   }
2268 
2269   AddrInfo.push_back(GEPInfo);
2270   getAddrModeInfo(*PtrMI, MRI, AddrInfo);
2271 }
2272 
2273 bool AMDGPUInstructionSelector::isSGPR(Register Reg) const {
2274   return RBI.getRegBank(Reg, *MRI, TRI)->getID() == AMDGPU::SGPRRegBankID;
2275 }
2276 
2277 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
2278   if (!MI.hasOneMemOperand())
2279     return false;
2280 
2281   const MachineMemOperand *MMO = *MI.memoperands_begin();
2282   const Value *Ptr = MMO->getValue();
2283 
2284   // UndefValue means this is a load of a kernel input.  These are uniform.
2285   // Sometimes LDS instructions have constant pointers.
2286   // If Ptr is null, then that means this mem operand contains a
2287   // PseudoSourceValue like GOT.
2288   if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
2289       isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
2290     return true;
2291 
2292   if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
2293     return true;
2294 
2295   const Instruction *I = dyn_cast<Instruction>(Ptr);
2296   return I && I->getMetadata("amdgpu.uniform");
2297 }
2298 
2299 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
2300   for (const GEPInfo &GEPInfo : AddrInfo) {
2301     if (!GEPInfo.VgprParts.empty())
2302       return true;
2303   }
2304   return false;
2305 }
2306 
2307 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
2308   const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2309   unsigned AS = PtrTy.getAddressSpace();
2310   if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
2311       STI.ldsRequiresM0Init()) {
2312     MachineBasicBlock *BB = I.getParent();
2313 
2314     // If DS instructions require M0 initializtion, insert it before selecting.
2315     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2316       .addImm(-1);
2317   }
2318 }
2319 
2320 bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW(
2321   MachineInstr &I) const {
2322   initM0(I);
2323   return selectImpl(I, *CoverageInfo);
2324 }
2325 
2326 // TODO: No rtn optimization.
2327 bool AMDGPUInstructionSelector::selectG_AMDGPU_ATOMIC_CMPXCHG(
2328   MachineInstr &MI) const {
2329   Register PtrReg = MI.getOperand(1).getReg();
2330   const LLT PtrTy = MRI->getType(PtrReg);
2331   if (PtrTy.getAddressSpace() == AMDGPUAS::FLAT_ADDRESS ||
2332       STI.useFlatForGlobal())
2333     return selectImpl(MI, *CoverageInfo);
2334 
2335   Register DstReg = MI.getOperand(0).getReg();
2336   const LLT Ty = MRI->getType(DstReg);
2337   const bool Is64 = Ty.getSizeInBits() == 64;
2338   const unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
2339   Register TmpReg = MRI->createVirtualRegister(
2340     Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
2341 
2342   const DebugLoc &DL = MI.getDebugLoc();
2343   MachineBasicBlock *BB = MI.getParent();
2344 
2345   Register VAddr, RSrcReg, SOffset;
2346   int64_t Offset = 0;
2347 
2348   unsigned Opcode;
2349   if (selectMUBUFOffsetImpl(MI.getOperand(1), RSrcReg, SOffset, Offset)) {
2350     Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_OFFSET_RTN :
2351                              AMDGPU::BUFFER_ATOMIC_CMPSWAP_OFFSET_RTN;
2352   } else if (selectMUBUFAddr64Impl(MI.getOperand(1), VAddr,
2353                                    RSrcReg, SOffset, Offset)) {
2354     Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_ADDR64_RTN :
2355                     AMDGPU::BUFFER_ATOMIC_CMPSWAP_ADDR64_RTN;
2356   } else
2357     return selectImpl(MI, *CoverageInfo);
2358 
2359   auto MIB = BuildMI(*BB, &MI, DL, TII.get(Opcode), TmpReg)
2360     .addReg(MI.getOperand(2).getReg());
2361 
2362   if (VAddr)
2363     MIB.addReg(VAddr);
2364 
2365   MIB.addReg(RSrcReg);
2366   if (SOffset)
2367     MIB.addReg(SOffset);
2368   else
2369     MIB.addImm(0);
2370 
2371   MIB.addImm(Offset);
2372   MIB.addImm(AMDGPU::CPol::GLC);
2373   MIB.cloneMemRefs(MI);
2374 
2375   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), DstReg)
2376     .addReg(TmpReg, RegState::Kill, SubReg);
2377 
2378   MI.eraseFromParent();
2379 
2380   MRI->setRegClass(
2381     DstReg, Is64 ? &AMDGPU::VReg_64RegClass : &AMDGPU::VGPR_32RegClass);
2382   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2383 }
2384 
2385 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
2386   MachineBasicBlock *BB = I.getParent();
2387   MachineOperand &CondOp = I.getOperand(0);
2388   Register CondReg = CondOp.getReg();
2389   const DebugLoc &DL = I.getDebugLoc();
2390 
2391   unsigned BrOpcode;
2392   Register CondPhysReg;
2393   const TargetRegisterClass *ConstrainRC;
2394 
2395   // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
2396   // whether the branch is uniform when selecting the instruction. In
2397   // GlobalISel, we should push that decision into RegBankSelect. Assume for now
2398   // RegBankSelect knows what it's doing if the branch condition is scc, even
2399   // though it currently does not.
2400   if (!isVCC(CondReg, *MRI)) {
2401     if (MRI->getType(CondReg) != LLT::scalar(32))
2402       return false;
2403 
2404     CondPhysReg = AMDGPU::SCC;
2405     BrOpcode = AMDGPU::S_CBRANCH_SCC1;
2406     ConstrainRC = &AMDGPU::SReg_32RegClass;
2407   } else {
2408     // FIXME: Do we have to insert an and with exec here, like in SelectionDAG?
2409     // We sort of know that a VCC producer based on the register bank, that ands
2410     // inactive lanes with 0. What if there was a logical operation with vcc
2411     // producers in different blocks/with different exec masks?
2412     // FIXME: Should scc->vcc copies and with exec?
2413     CondPhysReg = TRI.getVCC();
2414     BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
2415     ConstrainRC = TRI.getBoolRC();
2416   }
2417 
2418   if (!MRI->getRegClassOrNull(CondReg))
2419     MRI->setRegClass(CondReg, ConstrainRC);
2420 
2421   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
2422     .addReg(CondReg);
2423   BuildMI(*BB, &I, DL, TII.get(BrOpcode))
2424     .addMBB(I.getOperand(1).getMBB());
2425 
2426   I.eraseFromParent();
2427   return true;
2428 }
2429 
2430 bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE(
2431   MachineInstr &I) const {
2432   Register DstReg = I.getOperand(0).getReg();
2433   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2434   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2435   I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
2436   if (IsVGPR)
2437     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
2438 
2439   return RBI.constrainGenericRegister(
2440     DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
2441 }
2442 
2443 bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
2444   Register DstReg = I.getOperand(0).getReg();
2445   Register SrcReg = I.getOperand(1).getReg();
2446   Register MaskReg = I.getOperand(2).getReg();
2447   LLT Ty = MRI->getType(DstReg);
2448   LLT MaskTy = MRI->getType(MaskReg);
2449 
2450   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2451   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2452   const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI);
2453   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2454   if (DstRB != SrcRB) // Should only happen for hand written MIR.
2455     return false;
2456 
2457   unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
2458   const TargetRegisterClass &RegRC
2459     = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2460 
2461   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB,
2462                                                                   *MRI);
2463   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB,
2464                                                                   *MRI);
2465   const TargetRegisterClass *MaskRC =
2466       TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB, *MRI);
2467 
2468   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2469       !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2470       !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI))
2471     return false;
2472 
2473   MachineBasicBlock *BB = I.getParent();
2474   const DebugLoc &DL = I.getDebugLoc();
2475   if (Ty.getSizeInBits() == 32) {
2476     assert(MaskTy.getSizeInBits() == 32 &&
2477            "ptrmask should have been narrowed during legalize");
2478 
2479     BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
2480       .addReg(SrcReg)
2481       .addReg(MaskReg);
2482     I.eraseFromParent();
2483     return true;
2484   }
2485 
2486   Register HiReg = MRI->createVirtualRegister(&RegRC);
2487   Register LoReg = MRI->createVirtualRegister(&RegRC);
2488 
2489   // Extract the subregisters from the source pointer.
2490   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
2491     .addReg(SrcReg, 0, AMDGPU::sub0);
2492   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
2493     .addReg(SrcReg, 0, AMDGPU::sub1);
2494 
2495   Register MaskedLo, MaskedHi;
2496 
2497   // Try to avoid emitting a bit operation when we only need to touch half of
2498   // the 64-bit pointer.
2499   APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zextOrSelf(64);
2500 
2501   const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
2502   const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
2503   if ((MaskOnes & MaskLo32) == MaskLo32) {
2504     // If all the bits in the low half are 1, we only need a copy for it.
2505     MaskedLo = LoReg;
2506   } else {
2507     // Extract the mask subregister and apply the and.
2508     Register MaskLo = MRI->createVirtualRegister(&RegRC);
2509     MaskedLo = MRI->createVirtualRegister(&RegRC);
2510 
2511     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo)
2512       .addReg(MaskReg, 0, AMDGPU::sub0);
2513     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo)
2514       .addReg(LoReg)
2515       .addReg(MaskLo);
2516   }
2517 
2518   if ((MaskOnes & MaskHi32) == MaskHi32) {
2519     // If all the bits in the high half are 1, we only need a copy for it.
2520     MaskedHi = HiReg;
2521   } else {
2522     Register MaskHi = MRI->createVirtualRegister(&RegRC);
2523     MaskedHi = MRI->createVirtualRegister(&RegRC);
2524 
2525     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi)
2526       .addReg(MaskReg, 0, AMDGPU::sub1);
2527     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi)
2528       .addReg(HiReg)
2529       .addReg(MaskHi);
2530   }
2531 
2532   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2533     .addReg(MaskedLo)
2534     .addImm(AMDGPU::sub0)
2535     .addReg(MaskedHi)
2536     .addImm(AMDGPU::sub1);
2537   I.eraseFromParent();
2538   return true;
2539 }
2540 
2541 /// Return the register to use for the index value, and the subregister to use
2542 /// for the indirectly accessed register.
2543 static std::pair<Register, unsigned>
2544 computeIndirectRegIndex(MachineRegisterInfo &MRI,
2545                         const SIRegisterInfo &TRI,
2546                         const TargetRegisterClass *SuperRC,
2547                         Register IdxReg,
2548                         unsigned EltSize) {
2549   Register IdxBaseReg;
2550   int Offset;
2551 
2552   std::tie(IdxBaseReg, Offset) = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg);
2553   if (IdxBaseReg == AMDGPU::NoRegister) {
2554     // This will happen if the index is a known constant. This should ordinarily
2555     // be legalized out, but handle it as a register just in case.
2556     assert(Offset == 0);
2557     IdxBaseReg = IdxReg;
2558   }
2559 
2560   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);
2561 
2562   // Skip out of bounds offsets, or else we would end up using an undefined
2563   // register.
2564   if (static_cast<unsigned>(Offset) >= SubRegs.size())
2565     return std::make_pair(IdxReg, SubRegs[0]);
2566   return std::make_pair(IdxBaseReg, SubRegs[Offset]);
2567 }
2568 
2569 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
2570   MachineInstr &MI) const {
2571   Register DstReg = MI.getOperand(0).getReg();
2572   Register SrcReg = MI.getOperand(1).getReg();
2573   Register IdxReg = MI.getOperand(2).getReg();
2574 
2575   LLT DstTy = MRI->getType(DstReg);
2576   LLT SrcTy = MRI->getType(SrcReg);
2577 
2578   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2579   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2580   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2581 
2582   // The index must be scalar. If it wasn't RegBankSelect should have moved this
2583   // into a waterfall loop.
2584   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2585     return false;
2586 
2587   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB,
2588                                                                   *MRI);
2589   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(DstTy, *DstRB,
2590                                                                   *MRI);
2591   if (!SrcRC || !DstRC)
2592     return false;
2593   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2594       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2595       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2596     return false;
2597 
2598   MachineBasicBlock *BB = MI.getParent();
2599   const DebugLoc &DL = MI.getDebugLoc();
2600   const bool Is64 = DstTy.getSizeInBits() == 64;
2601 
2602   unsigned SubReg;
2603   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg,
2604                                                      DstTy.getSizeInBits() / 8);
2605 
2606   if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
2607     if (DstTy.getSizeInBits() != 32 && !Is64)
2608       return false;
2609 
2610     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2611       .addReg(IdxReg);
2612 
2613     unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
2614     BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
2615       .addReg(SrcReg, 0, SubReg)
2616       .addReg(SrcReg, RegState::Implicit);
2617     MI.eraseFromParent();
2618     return true;
2619   }
2620 
2621   if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
2622     return false;
2623 
2624   if (!STI.useVGPRIndexMode()) {
2625     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2626       .addReg(IdxReg);
2627     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
2628       .addReg(SrcReg, 0, SubReg)
2629       .addReg(SrcReg, RegState::Implicit);
2630     MI.eraseFromParent();
2631     return true;
2632   }
2633 
2634   const MCInstrDesc &GPRIDXDesc =
2635       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*SrcRC), true);
2636   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2637       .addReg(SrcReg)
2638       .addReg(IdxReg)
2639       .addImm(SubReg);
2640 
2641   MI.eraseFromParent();
2642   return true;
2643 }
2644 
2645 // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
2646 bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
2647   MachineInstr &MI) const {
2648   Register DstReg = MI.getOperand(0).getReg();
2649   Register VecReg = MI.getOperand(1).getReg();
2650   Register ValReg = MI.getOperand(2).getReg();
2651   Register IdxReg = MI.getOperand(3).getReg();
2652 
2653   LLT VecTy = MRI->getType(DstReg);
2654   LLT ValTy = MRI->getType(ValReg);
2655   unsigned VecSize = VecTy.getSizeInBits();
2656   unsigned ValSize = ValTy.getSizeInBits();
2657 
2658   const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
2659   const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
2660   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2661 
2662   assert(VecTy.getElementType() == ValTy);
2663 
2664   // The index must be scalar. If it wasn't RegBankSelect should have moved this
2665   // into a waterfall loop.
2666   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2667     return false;
2668 
2669   const TargetRegisterClass *VecRC = TRI.getRegClassForTypeOnBank(VecTy, *VecRB,
2670                                                                   *MRI);
2671   const TargetRegisterClass *ValRC = TRI.getRegClassForTypeOnBank(ValTy, *ValRB,
2672                                                                   *MRI);
2673 
2674   if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
2675       !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
2676       !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
2677       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2678     return false;
2679 
2680   if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
2681     return false;
2682 
2683   unsigned SubReg;
2684   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg,
2685                                                      ValSize / 8);
2686 
2687   const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
2688                          STI.useVGPRIndexMode();
2689 
2690   MachineBasicBlock *BB = MI.getParent();
2691   const DebugLoc &DL = MI.getDebugLoc();
2692 
2693   if (!IndexMode) {
2694     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2695       .addReg(IdxReg);
2696 
2697     const MCInstrDesc &RegWriteOp = TII.getIndirectRegWriteMovRelPseudo(
2698         VecSize, ValSize, VecRB->getID() == AMDGPU::SGPRRegBankID);
2699     BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
2700         .addReg(VecReg)
2701         .addReg(ValReg)
2702         .addImm(SubReg);
2703     MI.eraseFromParent();
2704     return true;
2705   }
2706 
2707   const MCInstrDesc &GPRIDXDesc =
2708       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false);
2709   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2710       .addReg(VecReg)
2711       .addReg(ValReg)
2712       .addReg(IdxReg)
2713       .addImm(SubReg);
2714 
2715   MI.eraseFromParent();
2716   return true;
2717 }
2718 
2719 static bool isZeroOrUndef(int X) {
2720   return X == 0 || X == -1;
2721 }
2722 
2723 static bool isOneOrUndef(int X) {
2724   return X == 1 || X == -1;
2725 }
2726 
2727 static bool isZeroOrOneOrUndef(int X) {
2728   return X == 0 || X == 1 || X == -1;
2729 }
2730 
2731 // Normalize a VOP3P shuffle mask to refer to the low/high half of a single
2732 // 32-bit register.
2733 static Register normalizeVOP3PMask(int NewMask[2], Register Src0, Register Src1,
2734                                    ArrayRef<int> Mask) {
2735   NewMask[0] = Mask[0];
2736   NewMask[1] = Mask[1];
2737   if (isZeroOrOneOrUndef(Mask[0]) && isZeroOrOneOrUndef(Mask[1]))
2738     return Src0;
2739 
2740   assert(NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1);
2741   assert(NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1);
2742 
2743   // Shift the mask inputs to be 0/1;
2744   NewMask[0] = NewMask[0] == -1 ? -1 : NewMask[0] - 2;
2745   NewMask[1] = NewMask[1] == -1 ? -1 : NewMask[1] - 2;
2746   return Src1;
2747 }
2748 
2749 // This is only legal with VOP3P instructions as an aid to op_sel matching.
2750 bool AMDGPUInstructionSelector::selectG_SHUFFLE_VECTOR(
2751   MachineInstr &MI) const {
2752   Register DstReg = MI.getOperand(0).getReg();
2753   Register Src0Reg = MI.getOperand(1).getReg();
2754   Register Src1Reg = MI.getOperand(2).getReg();
2755   ArrayRef<int> ShufMask = MI.getOperand(3).getShuffleMask();
2756 
2757   const LLT V2S16 = LLT::vector(2, 16);
2758   if (MRI->getType(DstReg) != V2S16 || MRI->getType(Src0Reg) != V2S16)
2759     return false;
2760 
2761   if (!AMDGPU::isLegalVOP3PShuffleMask(ShufMask))
2762     return false;
2763 
2764   assert(ShufMask.size() == 2);
2765   assert(STI.hasSDWA() && "no target has VOP3P but not SDWA");
2766 
2767   MachineBasicBlock *MBB = MI.getParent();
2768   const DebugLoc &DL = MI.getDebugLoc();
2769 
2770   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2771   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
2772   const TargetRegisterClass &RC = IsVALU ?
2773     AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2774 
2775   // Handle the degenerate case which should have folded out.
2776   if (ShufMask[0] == -1 && ShufMask[1] == -1) {
2777     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::IMPLICIT_DEF), DstReg);
2778 
2779     MI.eraseFromParent();
2780     return RBI.constrainGenericRegister(DstReg, RC, *MRI);
2781   }
2782 
2783   // A legal VOP3P mask only reads one of the sources.
2784   int Mask[2];
2785   Register SrcVec = normalizeVOP3PMask(Mask, Src0Reg, Src1Reg, ShufMask);
2786 
2787   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI) ||
2788       !RBI.constrainGenericRegister(SrcVec, RC, *MRI))
2789     return false;
2790 
2791   // TODO: This also should have been folded out
2792   if (isZeroOrUndef(Mask[0]) && isOneOrUndef(Mask[1])) {
2793     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::COPY), DstReg)
2794       .addReg(SrcVec);
2795 
2796     MI.eraseFromParent();
2797     return true;
2798   }
2799 
2800   if (Mask[0] == 1 && Mask[1] == -1) {
2801     if (IsVALU) {
2802       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
2803         .addImm(16)
2804         .addReg(SrcVec);
2805     } else {
2806       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
2807         .addReg(SrcVec)
2808         .addImm(16);
2809     }
2810   } else if (Mask[0] == -1 && Mask[1] == 0) {
2811     if (IsVALU) {
2812       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), DstReg)
2813         .addImm(16)
2814         .addReg(SrcVec);
2815     } else {
2816       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHL_B32), DstReg)
2817         .addReg(SrcVec)
2818         .addImm(16);
2819     }
2820   } else if (Mask[0] == 0 && Mask[1] == 0) {
2821     if (IsVALU) {
2822       // Write low half of the register into the high half.
2823       MachineInstr *MovSDWA =
2824         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2825         .addImm(0)                             // $src0_modifiers
2826         .addReg(SrcVec)                        // $src0
2827         .addImm(0)                             // $clamp
2828         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
2829         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2830         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
2831         .addReg(SrcVec, RegState::Implicit);
2832       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2833     } else {
2834       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2835         .addReg(SrcVec)
2836         .addReg(SrcVec);
2837     }
2838   } else if (Mask[0] == 1 && Mask[1] == 1) {
2839     if (IsVALU) {
2840       // Write high half of the register into the low half.
2841       MachineInstr *MovSDWA =
2842         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2843         .addImm(0)                             // $src0_modifiers
2844         .addReg(SrcVec)                        // $src0
2845         .addImm(0)                             // $clamp
2846         .addImm(AMDGPU::SDWA::WORD_0)          // $dst_sel
2847         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2848         .addImm(AMDGPU::SDWA::WORD_1)          // $src0_sel
2849         .addReg(SrcVec, RegState::Implicit);
2850       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2851     } else {
2852       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HH_B32_B16), DstReg)
2853         .addReg(SrcVec)
2854         .addReg(SrcVec);
2855     }
2856   } else if (Mask[0] == 1 && Mask[1] == 0) {
2857     if (IsVALU) {
2858       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_ALIGNBIT_B32_e64), DstReg)
2859         .addReg(SrcVec)
2860         .addReg(SrcVec)
2861         .addImm(16);
2862     } else {
2863       Register TmpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2864       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), TmpReg)
2865         .addReg(SrcVec)
2866         .addImm(16);
2867       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2868         .addReg(TmpReg)
2869         .addReg(SrcVec);
2870     }
2871   } else
2872     llvm_unreachable("all shuffle masks should be handled");
2873 
2874   MI.eraseFromParent();
2875   return true;
2876 }
2877 
2878 bool AMDGPUInstructionSelector::selectAMDGPU_BUFFER_ATOMIC_FADD(
2879   MachineInstr &MI) const {
2880   if (STI.hasGFX90AInsts())
2881     return selectImpl(MI, *CoverageInfo);
2882 
2883   MachineBasicBlock *MBB = MI.getParent();
2884   const DebugLoc &DL = MI.getDebugLoc();
2885 
2886   if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
2887     Function &F = MBB->getParent()->getFunction();
2888     DiagnosticInfoUnsupported
2889       NoFpRet(F, "return versions of fp atomics not supported",
2890               MI.getDebugLoc(), DS_Error);
2891     F.getContext().diagnose(NoFpRet);
2892     return false;
2893   }
2894 
2895   // FIXME: This is only needed because tablegen requires number of dst operands
2896   // in match and replace pattern to be the same. Otherwise patterns can be
2897   // exported from SDag path.
2898   MachineOperand &VDataIn = MI.getOperand(1);
2899   MachineOperand &VIndex = MI.getOperand(3);
2900   MachineOperand &VOffset = MI.getOperand(4);
2901   MachineOperand &SOffset = MI.getOperand(5);
2902   int16_t Offset = MI.getOperand(6).getImm();
2903 
2904   bool HasVOffset = !isOperandImmEqual(VOffset, 0, *MRI);
2905   bool HasVIndex = !isOperandImmEqual(VIndex, 0, *MRI);
2906 
2907   unsigned Opcode;
2908   if (HasVOffset) {
2909     Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN
2910                        : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN;
2911   } else {
2912     Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN
2913                        : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET;
2914   }
2915 
2916   if (MRI->getType(VDataIn.getReg()).isVector()) {
2917     switch (Opcode) {
2918     case AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN:
2919       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN;
2920       break;
2921     case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN:
2922       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFEN;
2923       break;
2924     case AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN:
2925       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_IDXEN;
2926       break;
2927     case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET:
2928       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFSET;
2929       break;
2930     }
2931   }
2932 
2933   auto I = BuildMI(*MBB, MI, DL, TII.get(Opcode));
2934   I.add(VDataIn);
2935 
2936   if (Opcode == AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN ||
2937       Opcode == AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN) {
2938     Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class());
2939     BuildMI(*MBB, &*I, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
2940       .addReg(VIndex.getReg())
2941       .addImm(AMDGPU::sub0)
2942       .addReg(VOffset.getReg())
2943       .addImm(AMDGPU::sub1);
2944 
2945     I.addReg(IdxReg);
2946   } else if (HasVIndex) {
2947     I.add(VIndex);
2948   } else if (HasVOffset) {
2949     I.add(VOffset);
2950   }
2951 
2952   I.add(MI.getOperand(2)); // rsrc
2953   I.add(SOffset);
2954   I.addImm(Offset);
2955   I.addImm(MI.getOperand(7).getImm()); // cpol
2956   I.cloneMemRefs(MI);
2957 
2958   MI.eraseFromParent();
2959 
2960   return true;
2961 }
2962 
2963 bool AMDGPUInstructionSelector::selectGlobalAtomicFaddIntrinsic(
2964   MachineInstr &MI) const{
2965 
2966   if (STI.hasGFX90AInsts())
2967     return selectImpl(MI, *CoverageInfo);
2968 
2969   MachineBasicBlock *MBB = MI.getParent();
2970   const DebugLoc &DL = MI.getDebugLoc();
2971 
2972   if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
2973     Function &F = MBB->getParent()->getFunction();
2974     DiagnosticInfoUnsupported
2975       NoFpRet(F, "return versions of fp atomics not supported",
2976               MI.getDebugLoc(), DS_Error);
2977     F.getContext().diagnose(NoFpRet);
2978     return false;
2979   }
2980 
2981   // FIXME: This is only needed because tablegen requires number of dst operands
2982   // in match and replace pattern to be the same. Otherwise patterns can be
2983   // exported from SDag path.
2984   auto Addr = selectFlatOffsetImpl<true>(MI.getOperand(2));
2985 
2986   Register Data = MI.getOperand(3).getReg();
2987   const unsigned Opc = MRI->getType(Data).isVector() ?
2988     AMDGPU::GLOBAL_ATOMIC_PK_ADD_F16 : AMDGPU::GLOBAL_ATOMIC_ADD_F32;
2989   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
2990     .addReg(Addr.first)
2991     .addReg(Data)
2992     .addImm(Addr.second)
2993     .addImm(0) // cpol
2994     .cloneMemRefs(MI);
2995 
2996   MI.eraseFromParent();
2997   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2998 }
2999 
3000 bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const{
3001   MI.setDesc(TII.get(MI.getOperand(1).getImm()));
3002   MI.RemoveOperand(1);
3003   MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3004   return true;
3005 }
3006 
3007 bool AMDGPUInstructionSelector::select(MachineInstr &I) {
3008   if (I.isPHI())
3009     return selectPHI(I);
3010 
3011   if (!I.isPreISelOpcode()) {
3012     if (I.isCopy())
3013       return selectCOPY(I);
3014     return true;
3015   }
3016 
3017   switch (I.getOpcode()) {
3018   case TargetOpcode::G_AND:
3019   case TargetOpcode::G_OR:
3020   case TargetOpcode::G_XOR:
3021     if (selectImpl(I, *CoverageInfo))
3022       return true;
3023     return selectG_AND_OR_XOR(I);
3024   case TargetOpcode::G_ADD:
3025   case TargetOpcode::G_SUB:
3026     if (selectImpl(I, *CoverageInfo))
3027       return true;
3028     return selectG_ADD_SUB(I);
3029   case TargetOpcode::G_UADDO:
3030   case TargetOpcode::G_USUBO:
3031   case TargetOpcode::G_UADDE:
3032   case TargetOpcode::G_USUBE:
3033     return selectG_UADDO_USUBO_UADDE_USUBE(I);
3034   case TargetOpcode::G_INTTOPTR:
3035   case TargetOpcode::G_BITCAST:
3036   case TargetOpcode::G_PTRTOINT:
3037     return selectCOPY(I);
3038   case TargetOpcode::G_CONSTANT:
3039   case TargetOpcode::G_FCONSTANT:
3040     return selectG_CONSTANT(I);
3041   case TargetOpcode::G_FNEG:
3042     if (selectImpl(I, *CoverageInfo))
3043       return true;
3044     return selectG_FNEG(I);
3045   case TargetOpcode::G_FABS:
3046     if (selectImpl(I, *CoverageInfo))
3047       return true;
3048     return selectG_FABS(I);
3049   case TargetOpcode::G_EXTRACT:
3050     return selectG_EXTRACT(I);
3051   case TargetOpcode::G_MERGE_VALUES:
3052   case TargetOpcode::G_BUILD_VECTOR:
3053   case TargetOpcode::G_CONCAT_VECTORS:
3054     return selectG_MERGE_VALUES(I);
3055   case TargetOpcode::G_UNMERGE_VALUES:
3056     return selectG_UNMERGE_VALUES(I);
3057   case TargetOpcode::G_BUILD_VECTOR_TRUNC:
3058     return selectG_BUILD_VECTOR_TRUNC(I);
3059   case TargetOpcode::G_PTR_ADD:
3060     return selectG_PTR_ADD(I);
3061   case TargetOpcode::G_IMPLICIT_DEF:
3062     return selectG_IMPLICIT_DEF(I);
3063   case TargetOpcode::G_FREEZE:
3064     return selectCOPY(I);
3065   case TargetOpcode::G_INSERT:
3066     return selectG_INSERT(I);
3067   case TargetOpcode::G_INTRINSIC:
3068     return selectG_INTRINSIC(I);
3069   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
3070     return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
3071   case TargetOpcode::G_ICMP:
3072     if (selectG_ICMP(I))
3073       return true;
3074     return selectImpl(I, *CoverageInfo);
3075   case TargetOpcode::G_LOAD:
3076   case TargetOpcode::G_STORE:
3077   case TargetOpcode::G_ATOMIC_CMPXCHG:
3078   case TargetOpcode::G_ATOMICRMW_XCHG:
3079   case TargetOpcode::G_ATOMICRMW_ADD:
3080   case TargetOpcode::G_ATOMICRMW_SUB:
3081   case TargetOpcode::G_ATOMICRMW_AND:
3082   case TargetOpcode::G_ATOMICRMW_OR:
3083   case TargetOpcode::G_ATOMICRMW_XOR:
3084   case TargetOpcode::G_ATOMICRMW_MIN:
3085   case TargetOpcode::G_ATOMICRMW_MAX:
3086   case TargetOpcode::G_ATOMICRMW_UMIN:
3087   case TargetOpcode::G_ATOMICRMW_UMAX:
3088   case TargetOpcode::G_ATOMICRMW_FADD:
3089   case AMDGPU::G_AMDGPU_ATOMIC_INC:
3090   case AMDGPU::G_AMDGPU_ATOMIC_DEC:
3091   case AMDGPU::G_AMDGPU_ATOMIC_FMIN:
3092   case AMDGPU::G_AMDGPU_ATOMIC_FMAX:
3093     return selectG_LOAD_STORE_ATOMICRMW(I);
3094   case AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG:
3095     return selectG_AMDGPU_ATOMIC_CMPXCHG(I);
3096   case TargetOpcode::G_SELECT:
3097     return selectG_SELECT(I);
3098   case TargetOpcode::G_TRUNC:
3099     return selectG_TRUNC(I);
3100   case TargetOpcode::G_SEXT:
3101   case TargetOpcode::G_ZEXT:
3102   case TargetOpcode::G_ANYEXT:
3103   case TargetOpcode::G_SEXT_INREG:
3104     if (selectImpl(I, *CoverageInfo))
3105       return true;
3106     return selectG_SZA_EXT(I);
3107   case TargetOpcode::G_BRCOND:
3108     return selectG_BRCOND(I);
3109   case TargetOpcode::G_GLOBAL_VALUE:
3110     return selectG_GLOBAL_VALUE(I);
3111   case TargetOpcode::G_PTRMASK:
3112     return selectG_PTRMASK(I);
3113   case TargetOpcode::G_EXTRACT_VECTOR_ELT:
3114     return selectG_EXTRACT_VECTOR_ELT(I);
3115   case TargetOpcode::G_INSERT_VECTOR_ELT:
3116     return selectG_INSERT_VECTOR_ELT(I);
3117   case TargetOpcode::G_SHUFFLE_VECTOR:
3118     return selectG_SHUFFLE_VECTOR(I);
3119   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
3120   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE: {
3121     const AMDGPU::ImageDimIntrinsicInfo *Intr
3122       = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID());
3123     assert(Intr && "not an image intrinsic with image pseudo");
3124     return selectImageIntrinsic(I, Intr);
3125   }
3126   case AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY:
3127     return selectBVHIntrinsic(I);
3128   case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD:
3129     return selectAMDGPU_BUFFER_ATOMIC_FADD(I);
3130   default:
3131     return selectImpl(I, *CoverageInfo);
3132   }
3133   return false;
3134 }
3135 
3136 InstructionSelector::ComplexRendererFns
3137 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
3138   return {{
3139       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3140   }};
3141 
3142 }
3143 
3144 std::pair<Register, unsigned>
3145 AMDGPUInstructionSelector::selectVOP3ModsImpl(MachineOperand &Root,
3146                                               bool AllowAbs) const {
3147   Register Src = Root.getReg();
3148   Register OrigSrc = Src;
3149   unsigned Mods = 0;
3150   MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
3151 
3152   if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
3153     Src = MI->getOperand(1).getReg();
3154     Mods |= SISrcMods::NEG;
3155     MI = getDefIgnoringCopies(Src, *MRI);
3156   }
3157 
3158   if (AllowAbs && MI && MI->getOpcode() == AMDGPU::G_FABS) {
3159     Src = MI->getOperand(1).getReg();
3160     Mods |= SISrcMods::ABS;
3161   }
3162 
3163   if (Mods != 0 &&
3164       RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) {
3165     MachineInstr *UseMI = Root.getParent();
3166 
3167     // If we looked through copies to find source modifiers on an SGPR operand,
3168     // we now have an SGPR register source. To avoid potentially violating the
3169     // constant bus restriction, we need to insert a copy to a VGPR.
3170     Register VGPRSrc = MRI->cloneVirtualRegister(OrigSrc);
3171     BuildMI(*UseMI->getParent(), UseMI, UseMI->getDebugLoc(),
3172             TII.get(AMDGPU::COPY), VGPRSrc)
3173       .addReg(Src);
3174     Src = VGPRSrc;
3175   }
3176 
3177   return std::make_pair(Src, Mods);
3178 }
3179 
3180 ///
3181 /// This will select either an SGPR or VGPR operand and will save us from
3182 /// having to write an extra tablegen pattern.
3183 InstructionSelector::ComplexRendererFns
3184 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
3185   return {{
3186       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3187   }};
3188 }
3189 
3190 InstructionSelector::ComplexRendererFns
3191 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
3192   Register Src;
3193   unsigned Mods;
3194   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3195 
3196   return {{
3197       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3198       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3199       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3200       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3201   }};
3202 }
3203 
3204 InstructionSelector::ComplexRendererFns
3205 AMDGPUInstructionSelector::selectVOP3BMods0(MachineOperand &Root) const {
3206   Register Src;
3207   unsigned Mods;
3208   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3209 
3210   return {{
3211       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3212       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3213       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3214       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3215   }};
3216 }
3217 
3218 InstructionSelector::ComplexRendererFns
3219 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
3220   return {{
3221       [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
3222       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3223       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
3224   }};
3225 }
3226 
3227 InstructionSelector::ComplexRendererFns
3228 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
3229   Register Src;
3230   unsigned Mods;
3231   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3232 
3233   return {{
3234       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3235       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3236   }};
3237 }
3238 
3239 InstructionSelector::ComplexRendererFns
3240 AMDGPUInstructionSelector::selectVOP3BMods(MachineOperand &Root) const {
3241   Register Src;
3242   unsigned Mods;
3243   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3244 
3245   return {{
3246       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3247       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3248   }};
3249 }
3250 
3251 InstructionSelector::ComplexRendererFns
3252 AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
3253   Register Reg = Root.getReg();
3254   const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI);
3255   if (Def && (Def->getOpcode() == AMDGPU::G_FNEG ||
3256               Def->getOpcode() == AMDGPU::G_FABS))
3257     return {};
3258   return {{
3259       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3260   }};
3261 }
3262 
3263 std::pair<Register, unsigned>
3264 AMDGPUInstructionSelector::selectVOP3PModsImpl(
3265   Register Src, const MachineRegisterInfo &MRI) const {
3266   unsigned Mods = 0;
3267   MachineInstr *MI = MRI.getVRegDef(Src);
3268 
3269   if (MI && MI->getOpcode() == AMDGPU::G_FNEG &&
3270       // It's possible to see an f32 fneg here, but unlikely.
3271       // TODO: Treat f32 fneg as only high bit.
3272       MRI.getType(Src) == LLT::vector(2, 16)) {
3273     Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
3274     Src = MI->getOperand(1).getReg();
3275     MI = MRI.getVRegDef(Src);
3276   }
3277 
3278   // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.
3279 
3280   // Packed instructions do not have abs modifiers.
3281   Mods |= SISrcMods::OP_SEL_1;
3282 
3283   return std::make_pair(Src, Mods);
3284 }
3285 
3286 InstructionSelector::ComplexRendererFns
3287 AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
3288   MachineRegisterInfo &MRI
3289     = Root.getParent()->getParent()->getParent()->getRegInfo();
3290 
3291   Register Src;
3292   unsigned Mods;
3293   std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);
3294 
3295   return {{
3296       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3297       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3298   }};
3299 }
3300 
3301 InstructionSelector::ComplexRendererFns
3302 AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
3303   Register Src;
3304   unsigned Mods;
3305   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3306   if (!isKnownNeverNaN(Src, *MRI))
3307     return None;
3308 
3309   return {{
3310       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3311       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3312   }};
3313 }
3314 
3315 InstructionSelector::ComplexRendererFns
3316 AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
3317   // FIXME: Handle op_sel
3318   return {{
3319       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
3320       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
3321   }};
3322 }
3323 
3324 InstructionSelector::ComplexRendererFns
3325 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
3326   SmallVector<GEPInfo, 4> AddrInfo;
3327   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3328 
3329   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3330     return None;
3331 
3332   const GEPInfo &GEPInfo = AddrInfo[0];
3333   Optional<int64_t> EncodedImm =
3334       AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm, false);
3335   if (!EncodedImm)
3336     return None;
3337 
3338   unsigned PtrReg = GEPInfo.SgprParts[0];
3339   return {{
3340     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3341     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3342   }};
3343 }
3344 
3345 InstructionSelector::ComplexRendererFns
3346 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
3347   SmallVector<GEPInfo, 4> AddrInfo;
3348   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3349 
3350   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3351     return None;
3352 
3353   const GEPInfo &GEPInfo = AddrInfo[0];
3354   Register PtrReg = GEPInfo.SgprParts[0];
3355   Optional<int64_t> EncodedImm =
3356       AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
3357   if (!EncodedImm)
3358     return None;
3359 
3360   return {{
3361     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3362     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3363   }};
3364 }
3365 
3366 InstructionSelector::ComplexRendererFns
3367 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
3368   MachineInstr *MI = Root.getParent();
3369   MachineBasicBlock *MBB = MI->getParent();
3370 
3371   SmallVector<GEPInfo, 4> AddrInfo;
3372   getAddrModeInfo(*MI, *MRI, AddrInfo);
3373 
3374   // FIXME: We should shrink the GEP if the offset is known to be <= 32-bits,
3375   // then we can select all ptr + 32-bit offsets not just immediate offsets.
3376   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3377     return None;
3378 
3379   const GEPInfo &GEPInfo = AddrInfo[0];
3380   // SGPR offset is unsigned.
3381   if (!GEPInfo.Imm || GEPInfo.Imm < 0 || !isUInt<32>(GEPInfo.Imm))
3382     return None;
3383 
3384   // If we make it this far we have a load with an 32-bit immediate offset.
3385   // It is OK to select this using a sgpr offset, because we have already
3386   // failed trying to select this load into one of the _IMM variants since
3387   // the _IMM Patterns are considered before the _SGPR patterns.
3388   Register PtrReg = GEPInfo.SgprParts[0];
3389   Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3390   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
3391           .addImm(GEPInfo.Imm);
3392   return {{
3393     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3394     [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
3395   }};
3396 }
3397 
3398 template <bool Signed>
3399 std::pair<Register, int>
3400 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root) const {
3401   MachineInstr *MI = Root.getParent();
3402 
3403   auto Default = std::make_pair(Root.getReg(), 0);
3404 
3405   if (!STI.hasFlatInstOffsets())
3406     return Default;
3407 
3408   Register PtrBase;
3409   int64_t ConstOffset;
3410   std::tie(PtrBase, ConstOffset) =
3411       getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3412   if (ConstOffset == 0)
3413     return Default;
3414 
3415   unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
3416   if (!TII.isLegalFLATOffset(ConstOffset, AddrSpace, Signed))
3417     return Default;
3418 
3419   return std::make_pair(PtrBase, ConstOffset);
3420 }
3421 
3422 InstructionSelector::ComplexRendererFns
3423 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
3424   auto PtrWithOffset = selectFlatOffsetImpl<false>(Root);
3425 
3426   return {{
3427       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3428       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3429     }};
3430 }
3431 
3432 InstructionSelector::ComplexRendererFns
3433 AMDGPUInstructionSelector::selectFlatOffsetSigned(MachineOperand &Root) const {
3434   auto PtrWithOffset = selectFlatOffsetImpl<true>(Root);
3435 
3436   return {{
3437       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3438       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3439     }};
3440 }
3441 
3442 /// Match a zero extend from a 32-bit value to 64-bits.
3443 static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) {
3444   Register ZExtSrc;
3445   if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))
3446     return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register();
3447 
3448   // Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0)
3449   const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
3450   if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES)
3451     return false;
3452 
3453   if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt())) {
3454     return Def->getOperand(1).getReg();
3455   }
3456 
3457   return Register();
3458 }
3459 
3460 // Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
3461 InstructionSelector::ComplexRendererFns
3462 AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
3463   Register Addr = Root.getReg();
3464   Register PtrBase;
3465   int64_t ConstOffset;
3466   int64_t ImmOffset = 0;
3467 
3468   // Match the immediate offset first, which canonically is moved as low as
3469   // possible.
3470   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3471 
3472   if (ConstOffset != 0) {
3473     if (TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, true)) {
3474       Addr = PtrBase;
3475       ImmOffset = ConstOffset;
3476     } else if (ConstOffset > 0) {
3477       auto PtrBaseDef = getDefSrcRegIgnoringCopies(PtrBase, *MRI);
3478       if (!PtrBaseDef)
3479         return None;
3480 
3481       if (isSGPR(PtrBaseDef->Reg)) {
3482         // Offset is too large.
3483         //
3484         // saddr + large_offset -> saddr + (voffset = large_offset & ~MaxOffset)
3485         //                         + (large_offset & MaxOffset);
3486         int64_t SplitImmOffset, RemainderOffset;
3487         std::tie(SplitImmOffset, RemainderOffset)
3488           = TII.splitFlatOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, true);
3489 
3490         if (isUInt<32>(RemainderOffset)) {
3491           MachineInstr *MI = Root.getParent();
3492           MachineBasicBlock *MBB = MI->getParent();
3493           Register HighBits
3494             = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3495 
3496           BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3497                   HighBits)
3498             .addImm(RemainderOffset);
3499 
3500           return {{
3501             [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrBase); },  // saddr
3502             [=](MachineInstrBuilder &MIB) { MIB.addReg(HighBits); }, // voffset
3503             [=](MachineInstrBuilder &MIB) { MIB.addImm(SplitImmOffset); },
3504           }};
3505         }
3506       }
3507     }
3508   }
3509 
3510   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3511   if (!AddrDef)
3512     return None;
3513 
3514   // Match the variable offset.
3515   if (AddrDef->MI->getOpcode() != AMDGPU::G_PTR_ADD) {
3516     // FIXME: We should probably have folded COPY (G_IMPLICIT_DEF) earlier, and
3517     // drop this.
3518     if (AddrDef->MI->getOpcode() == AMDGPU::G_IMPLICIT_DEF ||
3519         AddrDef->MI->getOpcode() == AMDGPU::G_CONSTANT)
3520       return None;
3521 
3522     // It's cheaper to materialize a single 32-bit zero for vaddr than the two
3523     // moves required to copy a 64-bit SGPR to VGPR.
3524     const Register SAddr = AddrDef->Reg;
3525     if (!isSGPR(SAddr))
3526       return None;
3527 
3528     MachineInstr *MI = Root.getParent();
3529     MachineBasicBlock *MBB = MI->getParent();
3530     Register VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3531 
3532     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3533             VOffset)
3534       .addImm(0);
3535 
3536     return {{
3537         [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); },    // saddr
3538         [=](MachineInstrBuilder &MIB) { MIB.addReg(VOffset); },  // voffset
3539         [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
3540     }};
3541   }
3542 
3543   // Look through the SGPR->VGPR copy.
3544   Register SAddr =
3545     getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
3546   if (!SAddr || !isSGPR(SAddr))
3547     return None;
3548 
3549   Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
3550 
3551   // It's possible voffset is an SGPR here, but the copy to VGPR will be
3552   // inserted later.
3553   Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset);
3554   if (!VOffset)
3555     return None;
3556 
3557   return {{[=](MachineInstrBuilder &MIB) { // saddr
3558              MIB.addReg(SAddr);
3559            },
3560            [=](MachineInstrBuilder &MIB) { // voffset
3561              MIB.addReg(VOffset);
3562            },
3563            [=](MachineInstrBuilder &MIB) { // offset
3564              MIB.addImm(ImmOffset);
3565            }}};
3566 }
3567 
3568 InstructionSelector::ComplexRendererFns
3569 AMDGPUInstructionSelector::selectScratchSAddr(MachineOperand &Root) const {
3570   Register Addr = Root.getReg();
3571   Register PtrBase;
3572   int64_t ConstOffset;
3573   int64_t ImmOffset = 0;
3574 
3575   // Match the immediate offset first, which canonically is moved as low as
3576   // possible.
3577   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3578 
3579   if (ConstOffset != 0 &&
3580       TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS, true)) {
3581     Addr = PtrBase;
3582     ImmOffset = ConstOffset;
3583   }
3584 
3585   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3586   if (!AddrDef)
3587     return None;
3588 
3589   if (AddrDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
3590     int FI = AddrDef->MI->getOperand(1).getIndex();
3591     return {{
3592         [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
3593         [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
3594     }};
3595   }
3596 
3597   Register SAddr = AddrDef->Reg;
3598 
3599   if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3600     Register LHS = AddrDef->MI->getOperand(1).getReg();
3601     Register RHS = AddrDef->MI->getOperand(2).getReg();
3602     auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
3603     auto RHSDef = getDefSrcRegIgnoringCopies(RHS, *MRI);
3604 
3605     if (LHSDef && RHSDef &&
3606         LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX &&
3607         isSGPR(RHSDef->Reg)) {
3608       int FI = LHSDef->MI->getOperand(1).getIndex();
3609       MachineInstr &I = *Root.getParent();
3610       MachineBasicBlock *BB = I.getParent();
3611       const DebugLoc &DL = I.getDebugLoc();
3612       SAddr = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3613 
3614       BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), SAddr)
3615         .addFrameIndex(FI)
3616         .addReg(RHSDef->Reg);
3617     }
3618   }
3619 
3620   if (!isSGPR(SAddr))
3621     return None;
3622 
3623   return {{
3624       [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); }, // saddr
3625       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
3626   }};
3627 }
3628 
3629 static bool isStackPtrRelative(const MachinePointerInfo &PtrInfo) {
3630   auto PSV = PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
3631   return PSV && PSV->isStack();
3632 }
3633 
3634 InstructionSelector::ComplexRendererFns
3635 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
3636   MachineInstr *MI = Root.getParent();
3637   MachineBasicBlock *MBB = MI->getParent();
3638   MachineFunction *MF = MBB->getParent();
3639   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3640 
3641   int64_t Offset = 0;
3642   if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) &&
3643       Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) {
3644     Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3645 
3646     // TODO: Should this be inside the render function? The iterator seems to
3647     // move.
3648     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3649             HighBits)
3650       .addImm(Offset & ~4095);
3651 
3652     return {{[=](MachineInstrBuilder &MIB) { // rsrc
3653                MIB.addReg(Info->getScratchRSrcReg());
3654              },
3655              [=](MachineInstrBuilder &MIB) { // vaddr
3656                MIB.addReg(HighBits);
3657              },
3658              [=](MachineInstrBuilder &MIB) { // soffset
3659                // Use constant zero for soffset and rely on eliminateFrameIndex
3660                // to choose the appropriate frame register if need be.
3661                MIB.addImm(0);
3662              },
3663              [=](MachineInstrBuilder &MIB) { // offset
3664                MIB.addImm(Offset & 4095);
3665              }}};
3666   }
3667 
3668   assert(Offset == 0 || Offset == -1);
3669 
3670   // Try to fold a frame index directly into the MUBUF vaddr field, and any
3671   // offsets.
3672   Optional<int> FI;
3673   Register VAddr = Root.getReg();
3674   if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
3675     Register PtrBase;
3676     int64_t ConstOffset;
3677     std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(VAddr, *MRI);
3678     if (ConstOffset != 0) {
3679       if (SIInstrInfo::isLegalMUBUFImmOffset(ConstOffset) &&
3680           (!STI.privateMemoryResourceIsRangeChecked() ||
3681            KnownBits->signBitIsZero(PtrBase))) {
3682         const MachineInstr *PtrBaseDef = MRI->getVRegDef(PtrBase);
3683         if (PtrBaseDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
3684           FI = PtrBaseDef->getOperand(1).getIndex();
3685         else
3686           VAddr = PtrBase;
3687         Offset = ConstOffset;
3688       }
3689     } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
3690       FI = RootDef->getOperand(1).getIndex();
3691     }
3692   }
3693 
3694   return {{[=](MachineInstrBuilder &MIB) { // rsrc
3695              MIB.addReg(Info->getScratchRSrcReg());
3696            },
3697            [=](MachineInstrBuilder &MIB) { // vaddr
3698              if (FI.hasValue())
3699                MIB.addFrameIndex(FI.getValue());
3700              else
3701                MIB.addReg(VAddr);
3702            },
3703            [=](MachineInstrBuilder &MIB) { // soffset
3704              // Use constant zero for soffset and rely on eliminateFrameIndex
3705              // to choose the appropriate frame register if need be.
3706              MIB.addImm(0);
3707            },
3708            [=](MachineInstrBuilder &MIB) { // offset
3709              MIB.addImm(Offset);
3710            }}};
3711 }
3712 
3713 bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
3714                                                 int64_t Offset) const {
3715   if (!isUInt<16>(Offset))
3716     return false;
3717 
3718   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
3719     return true;
3720 
3721   // On Southern Islands instruction with a negative base value and an offset
3722   // don't seem to work.
3723   return KnownBits->signBitIsZero(Base);
3724 }
3725 
3726 bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0,
3727                                                  int64_t Offset1,
3728                                                  unsigned Size) const {
3729   if (Offset0 % Size != 0 || Offset1 % Size != 0)
3730     return false;
3731   if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
3732     return false;
3733 
3734   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
3735     return true;
3736 
3737   // On Southern Islands instruction with a negative base value and an offset
3738   // don't seem to work.
3739   return KnownBits->signBitIsZero(Base);
3740 }
3741 
3742 InstructionSelector::ComplexRendererFns
3743 AMDGPUInstructionSelector::selectMUBUFScratchOffset(
3744     MachineOperand &Root) const {
3745   MachineInstr *MI = Root.getParent();
3746   MachineBasicBlock *MBB = MI->getParent();
3747 
3748   int64_t Offset = 0;
3749   if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
3750       !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
3751     return {};
3752 
3753   const MachineFunction *MF = MBB->getParent();
3754   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3755   const MachineMemOperand *MMO = *MI->memoperands_begin();
3756   const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
3757 
3758   return {{
3759       [=](MachineInstrBuilder &MIB) { // rsrc
3760         MIB.addReg(Info->getScratchRSrcReg());
3761       },
3762       [=](MachineInstrBuilder &MIB) { // soffset
3763         if (isStackPtrRelative(PtrInfo))
3764           MIB.addReg(Info->getStackPtrOffsetReg());
3765         else
3766           MIB.addImm(0);
3767       },
3768       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
3769   }};
3770 }
3771 
3772 std::pair<Register, unsigned>
3773 AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
3774   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
3775   if (!RootDef)
3776     return std::make_pair(Root.getReg(), 0);
3777 
3778   int64_t ConstAddr = 0;
3779 
3780   Register PtrBase;
3781   int64_t Offset;
3782   std::tie(PtrBase, Offset) =
3783     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3784 
3785   if (Offset) {
3786     if (isDSOffsetLegal(PtrBase, Offset)) {
3787       // (add n0, c0)
3788       return std::make_pair(PtrBase, Offset);
3789     }
3790   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
3791     // TODO
3792 
3793 
3794   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
3795     // TODO
3796 
3797   }
3798 
3799   return std::make_pair(Root.getReg(), 0);
3800 }
3801 
3802 InstructionSelector::ComplexRendererFns
3803 AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
3804   Register Reg;
3805   unsigned Offset;
3806   std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
3807   return {{
3808       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3809       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
3810     }};
3811 }
3812 
3813 InstructionSelector::ComplexRendererFns
3814 AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
3815   return selectDSReadWrite2(Root, 4);
3816 }
3817 
3818 InstructionSelector::ComplexRendererFns
3819 AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const {
3820   return selectDSReadWrite2(Root, 8);
3821 }
3822 
3823 InstructionSelector::ComplexRendererFns
3824 AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root,
3825                                               unsigned Size) const {
3826   Register Reg;
3827   unsigned Offset;
3828   std::tie(Reg, Offset) = selectDSReadWrite2Impl(Root, Size);
3829   return {{
3830       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3831       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
3832       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); }
3833     }};
3834 }
3835 
3836 std::pair<Register, unsigned>
3837 AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root,
3838                                                   unsigned Size) const {
3839   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
3840   if (!RootDef)
3841     return std::make_pair(Root.getReg(), 0);
3842 
3843   int64_t ConstAddr = 0;
3844 
3845   Register PtrBase;
3846   int64_t Offset;
3847   std::tie(PtrBase, Offset) =
3848     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3849 
3850   if (Offset) {
3851     int64_t OffsetValue0 = Offset;
3852     int64_t OffsetValue1 = Offset + Size;
3853     if (isDSOffset2Legal(PtrBase, OffsetValue0, OffsetValue1, Size)) {
3854       // (add n0, c0)
3855       return std::make_pair(PtrBase, OffsetValue0 / Size);
3856     }
3857   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
3858     // TODO
3859 
3860   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
3861     // TODO
3862 
3863   }
3864 
3865   return std::make_pair(Root.getReg(), 0);
3866 }
3867 
3868 /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
3869 /// the base value with the constant offset. There may be intervening copies
3870 /// between \p Root and the identified constant. Returns \p Root, 0 if this does
3871 /// not match the pattern.
3872 std::pair<Register, int64_t>
3873 AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
3874   Register Root, const MachineRegisterInfo &MRI) const {
3875   MachineInstr *RootI = getDefIgnoringCopies(Root, MRI);
3876   if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
3877     return {Root, 0};
3878 
3879   MachineOperand &RHS = RootI->getOperand(2);
3880   Optional<ValueAndVReg> MaybeOffset
3881     = getConstantVRegValWithLookThrough(RHS.getReg(), MRI, true);
3882   if (!MaybeOffset)
3883     return {Root, 0};
3884   return {RootI->getOperand(1).getReg(), MaybeOffset->Value.getSExtValue()};
3885 }
3886 
3887 static void addZeroImm(MachineInstrBuilder &MIB) {
3888   MIB.addImm(0);
3889 }
3890 
3891 /// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p
3892 /// BasePtr is not valid, a null base pointer will be used.
3893 static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
3894                           uint32_t FormatLo, uint32_t FormatHi,
3895                           Register BasePtr) {
3896   Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
3897   Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
3898   Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
3899   Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
3900 
3901   B.buildInstr(AMDGPU::S_MOV_B32)
3902     .addDef(RSrc2)
3903     .addImm(FormatLo);
3904   B.buildInstr(AMDGPU::S_MOV_B32)
3905     .addDef(RSrc3)
3906     .addImm(FormatHi);
3907 
3908   // Build the half of the subregister with the constants before building the
3909   // full 128-bit register. If we are building multiple resource descriptors,
3910   // this will allow CSEing of the 2-component register.
3911   B.buildInstr(AMDGPU::REG_SEQUENCE)
3912     .addDef(RSrcHi)
3913     .addReg(RSrc2)
3914     .addImm(AMDGPU::sub0)
3915     .addReg(RSrc3)
3916     .addImm(AMDGPU::sub1);
3917 
3918   Register RSrcLo = BasePtr;
3919   if (!BasePtr) {
3920     RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
3921     B.buildInstr(AMDGPU::S_MOV_B64)
3922       .addDef(RSrcLo)
3923       .addImm(0);
3924   }
3925 
3926   B.buildInstr(AMDGPU::REG_SEQUENCE)
3927     .addDef(RSrc)
3928     .addReg(RSrcLo)
3929     .addImm(AMDGPU::sub0_sub1)
3930     .addReg(RSrcHi)
3931     .addImm(AMDGPU::sub2_sub3);
3932 
3933   return RSrc;
3934 }
3935 
3936 static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
3937                                 const SIInstrInfo &TII, Register BasePtr) {
3938   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
3939 
3940   // FIXME: Why are half the "default" bits ignored based on the addressing
3941   // mode?
3942   return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
3943 }
3944 
3945 static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
3946                                const SIInstrInfo &TII, Register BasePtr) {
3947   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
3948 
3949   // FIXME: Why are half the "default" bits ignored based on the addressing
3950   // mode?
3951   return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
3952 }
3953 
3954 AMDGPUInstructionSelector::MUBUFAddressData
3955 AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
3956   MUBUFAddressData Data;
3957   Data.N0 = Src;
3958 
3959   Register PtrBase;
3960   int64_t Offset;
3961 
3962   std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
3963   if (isUInt<32>(Offset)) {
3964     Data.N0 = PtrBase;
3965     Data.Offset = Offset;
3966   }
3967 
3968   if (MachineInstr *InputAdd
3969       = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
3970     Data.N2 = InputAdd->getOperand(1).getReg();
3971     Data.N3 = InputAdd->getOperand(2).getReg();
3972 
3973     // FIXME: Need to fix extra SGPR->VGPRcopies inserted
3974     // FIXME: Don't know this was defined by operand 0
3975     //
3976     // TODO: Remove this when we have copy folding optimizations after
3977     // RegBankSelect.
3978     Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
3979     Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
3980   }
3981 
3982   return Data;
3983 }
3984 
3985 /// Return if the addr64 mubuf mode should be used for the given address.
3986 bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
3987   // (ptr_add N2, N3) -> addr64, or
3988   // (ptr_add (ptr_add N2, N3), C1) -> addr64
3989   if (Addr.N2)
3990     return true;
3991 
3992   const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
3993   return N0Bank->getID() == AMDGPU::VGPRRegBankID;
3994 }
3995 
3996 /// Split an immediate offset \p ImmOffset depending on whether it fits in the
3997 /// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
3998 /// component.
3999 void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
4000   MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
4001   if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
4002     return;
4003 
4004   // Illegal offset, store it in soffset.
4005   SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
4006   B.buildInstr(AMDGPU::S_MOV_B32)
4007     .addDef(SOffset)
4008     .addImm(ImmOffset);
4009   ImmOffset = 0;
4010 }
4011 
4012 bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
4013   MachineOperand &Root, Register &VAddr, Register &RSrcReg,
4014   Register &SOffset, int64_t &Offset) const {
4015   // FIXME: Predicates should stop this from reaching here.
4016   // addr64 bit was removed for volcanic islands.
4017   if (!STI.hasAddr64() || STI.useFlatForGlobal())
4018     return false;
4019 
4020   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4021   if (!shouldUseAddr64(AddrData))
4022     return false;
4023 
4024   Register N0 = AddrData.N0;
4025   Register N2 = AddrData.N2;
4026   Register N3 = AddrData.N3;
4027   Offset = AddrData.Offset;
4028 
4029   // Base pointer for the SRD.
4030   Register SRDPtr;
4031 
4032   if (N2) {
4033     if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4034       assert(N3);
4035       if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4036         // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
4037         // addr64, and construct the default resource from a 0 address.
4038         VAddr = N0;
4039       } else {
4040         SRDPtr = N3;
4041         VAddr = N2;
4042       }
4043     } else {
4044       // N2 is not divergent.
4045       SRDPtr = N2;
4046       VAddr = N3;
4047     }
4048   } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4049     // Use the default null pointer in the resource
4050     VAddr = N0;
4051   } else {
4052     // N0 -> offset, or
4053     // (N0 + C1) -> offset
4054     SRDPtr = N0;
4055   }
4056 
4057   MachineIRBuilder B(*Root.getParent());
4058   RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
4059   splitIllegalMUBUFOffset(B, SOffset, Offset);
4060   return true;
4061 }
4062 
4063 bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
4064   MachineOperand &Root, Register &RSrcReg, Register &SOffset,
4065   int64_t &Offset) const {
4066 
4067   // FIXME: Pattern should not reach here.
4068   if (STI.useFlatForGlobal())
4069     return false;
4070 
4071   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4072   if (shouldUseAddr64(AddrData))
4073     return false;
4074 
4075   // N0 -> offset, or
4076   // (N0 + C1) -> offset
4077   Register SRDPtr = AddrData.N0;
4078   Offset = AddrData.Offset;
4079 
4080   // TODO: Look through extensions for 32-bit soffset.
4081   MachineIRBuilder B(*Root.getParent());
4082 
4083   RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
4084   splitIllegalMUBUFOffset(B, SOffset, Offset);
4085   return true;
4086 }
4087 
4088 InstructionSelector::ComplexRendererFns
4089 AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
4090   Register VAddr;
4091   Register RSrcReg;
4092   Register SOffset;
4093   int64_t Offset = 0;
4094 
4095   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
4096     return {};
4097 
4098   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
4099   // pattern.
4100   return {{
4101       [=](MachineInstrBuilder &MIB) {  // rsrc
4102         MIB.addReg(RSrcReg);
4103       },
4104       [=](MachineInstrBuilder &MIB) { // vaddr
4105         MIB.addReg(VAddr);
4106       },
4107       [=](MachineInstrBuilder &MIB) { // soffset
4108         if (SOffset)
4109           MIB.addReg(SOffset);
4110         else
4111           MIB.addImm(0);
4112       },
4113       [=](MachineInstrBuilder &MIB) { // offset
4114         MIB.addImm(Offset);
4115       },
4116       addZeroImm, //  cpol
4117       addZeroImm, //  tfe
4118       addZeroImm  //  swz
4119     }};
4120 }
4121 
4122 InstructionSelector::ComplexRendererFns
4123 AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
4124   Register RSrcReg;
4125   Register SOffset;
4126   int64_t Offset = 0;
4127 
4128   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4129     return {};
4130 
4131   return {{
4132       [=](MachineInstrBuilder &MIB) {  // rsrc
4133         MIB.addReg(RSrcReg);
4134       },
4135       [=](MachineInstrBuilder &MIB) { // soffset
4136         if (SOffset)
4137           MIB.addReg(SOffset);
4138         else
4139           MIB.addImm(0);
4140       },
4141       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4142       addZeroImm, //  cpol
4143       addZeroImm, //  tfe
4144       addZeroImm, //  swz
4145     }};
4146 }
4147 
4148 InstructionSelector::ComplexRendererFns
4149 AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const {
4150   Register VAddr;
4151   Register RSrcReg;
4152   Register SOffset;
4153   int64_t Offset = 0;
4154 
4155   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
4156     return {};
4157 
4158   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
4159   // pattern.
4160   return {{
4161       [=](MachineInstrBuilder &MIB) {  // rsrc
4162         MIB.addReg(RSrcReg);
4163       },
4164       [=](MachineInstrBuilder &MIB) { // vaddr
4165         MIB.addReg(VAddr);
4166       },
4167       [=](MachineInstrBuilder &MIB) { // soffset
4168         if (SOffset)
4169           MIB.addReg(SOffset);
4170         else
4171           MIB.addImm(0);
4172       },
4173       [=](MachineInstrBuilder &MIB) { // offset
4174         MIB.addImm(Offset);
4175       },
4176       [=](MachineInstrBuilder &MIB) {
4177         MIB.addImm(AMDGPU::CPol::GLC); // cpol
4178       }
4179     }};
4180 }
4181 
4182 InstructionSelector::ComplexRendererFns
4183 AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
4184   Register RSrcReg;
4185   Register SOffset;
4186   int64_t Offset = 0;
4187 
4188   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4189     return {};
4190 
4191   return {{
4192       [=](MachineInstrBuilder &MIB) {  // rsrc
4193         MIB.addReg(RSrcReg);
4194       },
4195       [=](MachineInstrBuilder &MIB) { // soffset
4196         if (SOffset)
4197           MIB.addReg(SOffset);
4198         else
4199           MIB.addImm(0);
4200       },
4201       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4202       [=](MachineInstrBuilder &MIB) { MIB.addImm(AMDGPU::CPol::GLC); } // cpol
4203     }};
4204 }
4205 
4206 /// Get an immediate that must be 32-bits, and treated as zero extended.
4207 static Optional<uint64_t> getConstantZext32Val(Register Reg,
4208                                                const MachineRegisterInfo &MRI) {
4209   // getConstantVRegVal sexts any values, so see if that matters.
4210   Optional<int64_t> OffsetVal = getConstantVRegSExtVal(Reg, MRI);
4211   if (!OffsetVal || !isInt<32>(*OffsetVal))
4212     return None;
4213   return Lo_32(*OffsetVal);
4214 }
4215 
4216 InstructionSelector::ComplexRendererFns
4217 AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
4218   Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
4219   if (!OffsetVal)
4220     return {};
4221 
4222   Optional<int64_t> EncodedImm =
4223       AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
4224   if (!EncodedImm)
4225     return {};
4226 
4227   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
4228 }
4229 
4230 InstructionSelector::ComplexRendererFns
4231 AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
4232   assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
4233 
4234   Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
4235   if (!OffsetVal)
4236     return {};
4237 
4238   Optional<int64_t> EncodedImm
4239     = AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
4240   if (!EncodedImm)
4241     return {};
4242 
4243   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
4244 }
4245 
4246 void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
4247                                                  const MachineInstr &MI,
4248                                                  int OpIdx) const {
4249   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4250          "Expected G_CONSTANT");
4251   MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
4252 }
4253 
4254 void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
4255                                                 const MachineInstr &MI,
4256                                                 int OpIdx) const {
4257   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4258          "Expected G_CONSTANT");
4259   MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
4260 }
4261 
4262 void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
4263                                                  const MachineInstr &MI,
4264                                                  int OpIdx) const {
4265   assert(OpIdx == -1);
4266 
4267   const MachineOperand &Op = MI.getOperand(1);
4268   if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
4269     MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
4270   else {
4271     assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
4272     MIB.addImm(Op.getCImm()->getSExtValue());
4273   }
4274 }
4275 
4276 void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
4277                                                 const MachineInstr &MI,
4278                                                 int OpIdx) const {
4279   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4280          "Expected G_CONSTANT");
4281   MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
4282 }
4283 
4284 /// This only really exists to satisfy DAG type checking machinery, so is a
4285 /// no-op here.
4286 void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
4287                                                 const MachineInstr &MI,
4288                                                 int OpIdx) const {
4289   MIB.addImm(MI.getOperand(OpIdx).getImm());
4290 }
4291 
4292 void AMDGPUInstructionSelector::renderExtractCPol(MachineInstrBuilder &MIB,
4293                                                   const MachineInstr &MI,
4294                                                   int OpIdx) const {
4295   assert(OpIdx >= 0 && "expected to match an immediate operand");
4296   MIB.addImm(MI.getOperand(OpIdx).getImm() & AMDGPU::CPol::ALL);
4297 }
4298 
4299 void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
4300                                                  const MachineInstr &MI,
4301                                                  int OpIdx) const {
4302   assert(OpIdx >= 0 && "expected to match an immediate operand");
4303   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1);
4304 }
4305 
4306 void AMDGPUInstructionSelector::renderSetGLC(MachineInstrBuilder &MIB,
4307                                              const MachineInstr &MI,
4308                                              int OpIdx) const {
4309   assert(OpIdx >= 0 && "expected to match an immediate operand");
4310   MIB.addImm(MI.getOperand(OpIdx).getImm() | AMDGPU::CPol::GLC);
4311 }
4312 
4313 void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
4314                                                  const MachineInstr &MI,
4315                                                  int OpIdx) const {
4316   MIB.addFrameIndex((MI.getOperand(1).getIndex()));
4317 }
4318 
4319 bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
4320   return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
4321 }
4322 
4323 bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
4324   return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
4325 }
4326 
4327 bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
4328   return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
4329 }
4330 
4331 bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
4332   return TII.isInlineConstant(Imm);
4333 }
4334