1 //===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the InstructionSelector class for
10 /// AMDGPU.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13 
14 #include "AMDGPUInstructionSelector.h"
15 #include "AMDGPUInstrInfo.h"
16 #include "AMDGPUGlobalISelUtils.h"
17 #include "AMDGPURegisterBankInfo.h"
18 #include "AMDGPUSubtarget.h"
19 #include "AMDGPUTargetMachine.h"
20 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
21 #include "SIMachineFunctionInfo.h"
22 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
23 #include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
24 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
25 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
26 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
27 #include "llvm/CodeGen/GlobalISel/Utils.h"
28 #include "llvm/CodeGen/MachineBasicBlock.h"
29 #include "llvm/CodeGen/MachineFunction.h"
30 #include "llvm/CodeGen/MachineInstr.h"
31 #include "llvm/CodeGen/MachineInstrBuilder.h"
32 #include "llvm/CodeGen/MachineRegisterInfo.h"
33 #include "llvm/IR/Type.h"
34 #include "llvm/Support/Debug.h"
35 #include "llvm/Support/raw_ostream.h"
36 
37 #define DEBUG_TYPE "amdgpu-isel"
38 
39 using namespace llvm;
40 using namespace MIPatternMatch;
41 
42 static cl::opt<bool> AllowRiskySelect(
43   "amdgpu-global-isel-risky-select",
44   cl::desc("Allow GlobalISel to select cases that are likely to not work yet"),
45   cl::init(false),
46   cl::ReallyHidden);
47 
48 #define GET_GLOBALISEL_IMPL
49 #define AMDGPUSubtarget GCNSubtarget
50 #include "AMDGPUGenGlobalISel.inc"
51 #undef GET_GLOBALISEL_IMPL
52 #undef AMDGPUSubtarget
53 
54 AMDGPUInstructionSelector::AMDGPUInstructionSelector(
55     const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
56     const AMDGPUTargetMachine &TM)
57     : InstructionSelector(), TII(*STI.getInstrInfo()),
58       TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
59       STI(STI),
60       EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
61 #define GET_GLOBALISEL_PREDICATES_INIT
62 #include "AMDGPUGenGlobalISel.inc"
63 #undef GET_GLOBALISEL_PREDICATES_INIT
64 #define GET_GLOBALISEL_TEMPORARIES_INIT
65 #include "AMDGPUGenGlobalISel.inc"
66 #undef GET_GLOBALISEL_TEMPORARIES_INIT
67 {
68 }
69 
70 const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
71 
72 void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits &KB,
73                                         CodeGenCoverage &CoverageInfo) {
74   MRI = &MF.getRegInfo();
75   InstructionSelector::setupMF(MF, KB, CoverageInfo);
76 }
77 
78 bool AMDGPUInstructionSelector::isVCC(Register Reg,
79                                       const MachineRegisterInfo &MRI) const {
80   if (Register::isPhysicalRegister(Reg))
81     return Reg == TRI.getVCC();
82 
83   auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
84   const TargetRegisterClass *RC =
85       RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
86   if (RC) {
87     const LLT Ty = MRI.getType(Reg);
88     return RC->hasSuperClassEq(TRI.getBoolRC()) &&
89            Ty.isValid() && Ty.getSizeInBits() == 1;
90   }
91 
92   const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
93   return RB->getID() == AMDGPU::VCCRegBankID;
94 }
95 
96 bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
97                                                         unsigned NewOpc) const {
98   MI.setDesc(TII.get(NewOpc));
99   MI.RemoveOperand(1); // Remove intrinsic ID.
100   MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
101 
102   MachineOperand &Dst = MI.getOperand(0);
103   MachineOperand &Src = MI.getOperand(1);
104 
105   // TODO: This should be legalized to s32 if needed
106   if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
107     return false;
108 
109   const TargetRegisterClass *DstRC
110     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
111   const TargetRegisterClass *SrcRC
112     = TRI.getConstrainedRegClassForOperand(Src, *MRI);
113   if (!DstRC || DstRC != SrcRC)
114     return false;
115 
116   return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
117          RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
118 }
119 
120 bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
121   const DebugLoc &DL = I.getDebugLoc();
122   MachineBasicBlock *BB = I.getParent();
123   I.setDesc(TII.get(TargetOpcode::COPY));
124 
125   const MachineOperand &Src = I.getOperand(1);
126   MachineOperand &Dst = I.getOperand(0);
127   Register DstReg = Dst.getReg();
128   Register SrcReg = Src.getReg();
129 
130   if (isVCC(DstReg, *MRI)) {
131     if (SrcReg == AMDGPU::SCC) {
132       const TargetRegisterClass *RC
133         = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
134       if (!RC)
135         return true;
136       return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
137     }
138 
139     if (!isVCC(SrcReg, *MRI)) {
140       // TODO: Should probably leave the copy and let copyPhysReg expand it.
141       if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
142         return false;
143 
144       const TargetRegisterClass *SrcRC
145         = TRI.getConstrainedRegClassForOperand(Src, *MRI);
146 
147       Register MaskedReg = MRI->createVirtualRegister(SrcRC);
148 
149       // We can't trust the high bits at this point, so clear them.
150 
151       // TODO: Skip masking high bits if def is known boolean.
152 
153       unsigned AndOpc = TRI.isSGPRClass(SrcRC) ?
154         AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
155       BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
156         .addImm(1)
157         .addReg(SrcReg);
158       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
159         .addImm(0)
160         .addReg(MaskedReg);
161 
162       if (!MRI->getRegClassOrNull(SrcReg))
163         MRI->setRegClass(SrcReg, SrcRC);
164       I.eraseFromParent();
165       return true;
166     }
167 
168     const TargetRegisterClass *RC =
169       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
170     if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
171       return false;
172 
173     // Don't constrain the source register to a class so the def instruction
174     // handles it (unless it's undef).
175     //
176     // FIXME: This is a hack. When selecting the def, we neeed to know
177     // specifically know that the result is VCCRegBank, and not just an SGPR
178     // with size 1. An SReg_32 with size 1 is ambiguous with wave32.
179     if (Src.isUndef()) {
180       const TargetRegisterClass *SrcRC =
181         TRI.getConstrainedRegClassForOperand(Src, *MRI);
182       if (SrcRC && !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
183         return false;
184     }
185 
186     return true;
187   }
188 
189   for (const MachineOperand &MO : I.operands()) {
190     if (Register::isPhysicalRegister(MO.getReg()))
191       continue;
192 
193     const TargetRegisterClass *RC =
194             TRI.getConstrainedRegClassForOperand(MO, *MRI);
195     if (!RC)
196       continue;
197     RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
198   }
199   return true;
200 }
201 
202 bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
203   const Register DefReg = I.getOperand(0).getReg();
204   const LLT DefTy = MRI->getType(DefReg);
205   if (DefTy == LLT::scalar(1)) {
206     if (!AllowRiskySelect) {
207       LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n");
208       return false;
209     }
210 
211     LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n");
212   }
213 
214   // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)
215 
216   const RegClassOrRegBank &RegClassOrBank =
217     MRI->getRegClassOrRegBank(DefReg);
218 
219   const TargetRegisterClass *DefRC
220     = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
221   if (!DefRC) {
222     if (!DefTy.isValid()) {
223       LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
224       return false;
225     }
226 
227     const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
228     DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB, *MRI);
229     if (!DefRC) {
230       LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
231       return false;
232     }
233   }
234 
235   // TODO: Verify that all registers have the same bank
236   I.setDesc(TII.get(TargetOpcode::PHI));
237   return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
238 }
239 
240 MachineOperand
241 AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
242                                            const TargetRegisterClass &SubRC,
243                                            unsigned SubIdx) const {
244 
245   MachineInstr *MI = MO.getParent();
246   MachineBasicBlock *BB = MO.getParent()->getParent();
247   Register DstReg = MRI->createVirtualRegister(&SubRC);
248 
249   if (MO.isReg()) {
250     unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
251     Register Reg = MO.getReg();
252     BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
253             .addReg(Reg, 0, ComposedSubIdx);
254 
255     return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
256                                      MO.isKill(), MO.isDead(), MO.isUndef(),
257                                      MO.isEarlyClobber(), 0, MO.isDebug(),
258                                      MO.isInternalRead());
259   }
260 
261   assert(MO.isImm());
262 
263   APInt Imm(64, MO.getImm());
264 
265   switch (SubIdx) {
266   default:
267     llvm_unreachable("do not know to split immediate with this sub index.");
268   case AMDGPU::sub0:
269     return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
270   case AMDGPU::sub1:
271     return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
272   }
273 }
274 
275 static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
276   switch (Opc) {
277   case AMDGPU::G_AND:
278     return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
279   case AMDGPU::G_OR:
280     return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
281   case AMDGPU::G_XOR:
282     return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
283   default:
284     llvm_unreachable("not a bit op");
285   }
286 }
287 
288 bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
289   MachineOperand &Dst = I.getOperand(0);
290   MachineOperand &Src0 = I.getOperand(1);
291   MachineOperand &Src1 = I.getOperand(2);
292   Register DstReg = Dst.getReg();
293   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
294 
295   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
296   if (DstRB->getID() == AMDGPU::VCCRegBankID) {
297     const TargetRegisterClass *RC = TRI.getBoolRC();
298     unsigned InstOpc = getLogicalBitOpcode(I.getOpcode(),
299                                            RC == &AMDGPU::SReg_64RegClass);
300     I.setDesc(TII.get(InstOpc));
301     // Dead implicit-def of scc
302     I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
303                                            true, // isImp
304                                            false, // isKill
305                                            true)); // isDead
306 
307     // FIXME: Hack to avoid turning the register bank into a register class.
308     // The selector for G_ICMP relies on seeing the register bank for the result
309     // is VCC. In wave32 if we constrain the registers to SReg_32 here, it will
310     // be ambiguous whether it's a scalar or vector bool.
311     if (Src0.isUndef() && !MRI->getRegClassOrNull(Src0.getReg()))
312       MRI->setRegClass(Src0.getReg(), RC);
313     if (Src1.isUndef() && !MRI->getRegClassOrNull(Src1.getReg()))
314       MRI->setRegClass(Src1.getReg(), RC);
315 
316     return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
317   }
318 
319   // TODO: Should this allow an SCC bank result, and produce a copy from SCC for
320   // the result?
321   if (DstRB->getID() == AMDGPU::SGPRRegBankID) {
322     unsigned InstOpc = getLogicalBitOpcode(I.getOpcode(), Size > 32);
323     I.setDesc(TII.get(InstOpc));
324     // Dead implicit-def of scc
325     I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
326                                            true, // isImp
327                                            false, // isKill
328                                            true)); // isDead
329     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
330   }
331 
332   return false;
333 }
334 
335 bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
336   MachineBasicBlock *BB = I.getParent();
337   MachineFunction *MF = BB->getParent();
338   Register DstReg = I.getOperand(0).getReg();
339   const DebugLoc &DL = I.getDebugLoc();
340   LLT Ty = MRI->getType(DstReg);
341   if (Ty.isVector())
342     return false;
343 
344   unsigned Size = Ty.getSizeInBits();
345   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
346   const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
347   const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;
348 
349   if (Size == 32) {
350     if (IsSALU) {
351       const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
352       MachineInstr *Add =
353         BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
354         .add(I.getOperand(1))
355         .add(I.getOperand(2));
356       I.eraseFromParent();
357       return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
358     }
359 
360     if (STI.hasAddNoCarry()) {
361       const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
362       I.setDesc(TII.get(Opc));
363       I.addOperand(*MF, MachineOperand::CreateImm(0));
364       I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
365       return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
366     }
367 
368     const unsigned Opc = Sub ? AMDGPU::V_SUB_I32_e64 : AMDGPU::V_ADD_I32_e64;
369 
370     Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
371     MachineInstr *Add
372       = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
373       .addDef(UnusedCarry, RegState::Dead)
374       .add(I.getOperand(1))
375       .add(I.getOperand(2))
376       .addImm(0);
377     I.eraseFromParent();
378     return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
379   }
380 
381   assert(!Sub && "illegal sub should not reach here");
382 
383   const TargetRegisterClass &RC
384     = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
385   const TargetRegisterClass &HalfRC
386     = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;
387 
388   MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
389   MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
390   MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
391   MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));
392 
393   Register DstLo = MRI->createVirtualRegister(&HalfRC);
394   Register DstHi = MRI->createVirtualRegister(&HalfRC);
395 
396   if (IsSALU) {
397     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
398       .add(Lo1)
399       .add(Lo2);
400     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
401       .add(Hi1)
402       .add(Hi2);
403   } else {
404     const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
405     Register CarryReg = MRI->createVirtualRegister(CarryRC);
406     BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_I32_e64), DstLo)
407       .addDef(CarryReg)
408       .add(Lo1)
409       .add(Lo2)
410       .addImm(0);
411     MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
412       .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
413       .add(Hi1)
414       .add(Hi2)
415       .addReg(CarryReg, RegState::Kill)
416       .addImm(0);
417 
418     if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
419       return false;
420   }
421 
422   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
423     .addReg(DstLo)
424     .addImm(AMDGPU::sub0)
425     .addReg(DstHi)
426     .addImm(AMDGPU::sub1);
427 
428 
429   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
430     return false;
431 
432   I.eraseFromParent();
433   return true;
434 }
435 
436 bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
437   MachineInstr &I) const {
438   MachineBasicBlock *BB = I.getParent();
439   MachineFunction *MF = BB->getParent();
440   const DebugLoc &DL = I.getDebugLoc();
441   Register Dst0Reg = I.getOperand(0).getReg();
442   Register Dst1Reg = I.getOperand(1).getReg();
443   const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
444                      I.getOpcode() == AMDGPU::G_UADDE;
445   const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
446                           I.getOpcode() == AMDGPU::G_USUBE;
447 
448   if (isVCC(Dst1Reg, *MRI)) {
449       // The name of the opcodes are misleading. v_add_i32/v_sub_i32 have unsigned
450       // carry out despite the _i32 name. These were renamed in VI to _U32.
451       // FIXME: We should probably rename the opcodes here.
452     unsigned NoCarryOpc = IsAdd ? AMDGPU::V_ADD_I32_e64 : AMDGPU::V_SUB_I32_e64;
453     unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
454     I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
455     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
456     I.addOperand(*MF, MachineOperand::CreateImm(0));
457     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
458   }
459 
460   Register Src0Reg = I.getOperand(2).getReg();
461   Register Src1Reg = I.getOperand(3).getReg();
462 
463   if (HasCarryIn) {
464     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
465       .addReg(I.getOperand(4).getReg());
466   }
467 
468   unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
469   unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
470 
471   BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
472     .add(I.getOperand(2))
473     .add(I.getOperand(3));
474   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
475     .addReg(AMDGPU::SCC);
476 
477   if (!MRI->getRegClassOrNull(Dst1Reg))
478     MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);
479 
480   if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
481       !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
482       !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
483     return false;
484 
485   if (HasCarryIn &&
486       !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
487                                     AMDGPU::SReg_32RegClass, *MRI))
488     return false;
489 
490   I.eraseFromParent();
491   return true;
492 }
493 
494 // TODO: We should probably legalize these to only using 32-bit results.
495 bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
496   MachineBasicBlock *BB = I.getParent();
497   Register DstReg = I.getOperand(0).getReg();
498   Register SrcReg = I.getOperand(1).getReg();
499   LLT DstTy = MRI->getType(DstReg);
500   LLT SrcTy = MRI->getType(SrcReg);
501   const unsigned SrcSize = SrcTy.getSizeInBits();
502   unsigned DstSize = DstTy.getSizeInBits();
503 
504   // TODO: Should handle any multiple of 32 offset.
505   unsigned Offset = I.getOperand(2).getImm();
506   if (Offset % 32 != 0 || DstSize > 128)
507     return false;
508 
509   // 16-bit operations really use 32-bit registers.
510   // FIXME: Probably should not allow 16-bit G_EXTRACT results.
511   if (DstSize == 16)
512     DstSize = 32;
513 
514   const TargetRegisterClass *DstRC =
515     TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
516   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
517     return false;
518 
519   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
520   const TargetRegisterClass *SrcRC =
521     TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
522   if (!SrcRC)
523     return false;
524   unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
525                                                          DstSize / 32);
526   SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
527   if (!SrcRC)
528     return false;
529 
530   SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
531                                     *SrcRC, I.getOperand(1));
532   const DebugLoc &DL = I.getDebugLoc();
533   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
534     .addReg(SrcReg, 0, SubReg);
535 
536   I.eraseFromParent();
537   return true;
538 }
539 
540 bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
541   MachineBasicBlock *BB = MI.getParent();
542   Register DstReg = MI.getOperand(0).getReg();
543   LLT DstTy = MRI->getType(DstReg);
544   LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());
545 
546   const unsigned SrcSize = SrcTy.getSizeInBits();
547   if (SrcSize < 32)
548     return selectImpl(MI, *CoverageInfo);
549 
550   const DebugLoc &DL = MI.getDebugLoc();
551   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
552   const unsigned DstSize = DstTy.getSizeInBits();
553   const TargetRegisterClass *DstRC =
554     TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
555   if (!DstRC)
556     return false;
557 
558   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
559   MachineInstrBuilder MIB =
560     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
561   for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
562     MachineOperand &Src = MI.getOperand(I + 1);
563     MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
564     MIB.addImm(SubRegs[I]);
565 
566     const TargetRegisterClass *SrcRC
567       = TRI.getConstrainedRegClassForOperand(Src, *MRI);
568     if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
569       return false;
570   }
571 
572   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
573     return false;
574 
575   MI.eraseFromParent();
576   return true;
577 }
578 
579 bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
580   MachineBasicBlock *BB = MI.getParent();
581   const int NumDst = MI.getNumOperands() - 1;
582 
583   MachineOperand &Src = MI.getOperand(NumDst);
584 
585   Register SrcReg = Src.getReg();
586   Register DstReg0 = MI.getOperand(0).getReg();
587   LLT DstTy = MRI->getType(DstReg0);
588   LLT SrcTy = MRI->getType(SrcReg);
589 
590   const unsigned DstSize = DstTy.getSizeInBits();
591   const unsigned SrcSize = SrcTy.getSizeInBits();
592   const DebugLoc &DL = MI.getDebugLoc();
593   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
594 
595   const TargetRegisterClass *SrcRC =
596     TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
597   if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
598     return false;
599 
600   const unsigned SrcFlags = getUndefRegState(Src.isUndef());
601 
602   // Note we could have mixed SGPR and VGPR destination banks for an SGPR
603   // source, and this relies on the fact that the same subregister indices are
604   // used for both.
605   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
606   for (int I = 0, E = NumDst; I != E; ++I) {
607     MachineOperand &Dst = MI.getOperand(I);
608     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
609       .addReg(SrcReg, SrcFlags, SubRegs[I]);
610 
611     const TargetRegisterClass *DstRC =
612       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
613     if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
614       return false;
615   }
616 
617   MI.eraseFromParent();
618   return true;
619 }
620 
621 static bool isZero(Register Reg, const MachineRegisterInfo &MRI) {
622   int64_t Val;
623   return mi_match(Reg, MRI, m_ICst(Val)) && Val == 0;
624 }
625 
626 bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC(
627   MachineInstr &MI) const {
628   if (selectImpl(MI, *CoverageInfo))
629     return true;
630 
631   const LLT S32 = LLT::scalar(32);
632   const LLT V2S16 = LLT::vector(2, 16);
633 
634   Register Dst = MI.getOperand(0).getReg();
635   if (MRI->getType(Dst) != V2S16)
636     return false;
637 
638   const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
639   if (DstBank->getID() != AMDGPU::SGPRRegBankID)
640     return false;
641 
642   Register Src0 = MI.getOperand(1).getReg();
643   Register Src1 = MI.getOperand(2).getReg();
644   if (MRI->getType(Src0) != S32)
645     return false;
646 
647   const DebugLoc &DL = MI.getDebugLoc();
648   MachineBasicBlock *BB = MI.getParent();
649 
650   // TODO: This should probably be a combine somewhere
651   // (build_vector_trunc $src0, undef -> copy $src0
652   MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
653   if (Src1Def && Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
654     MI.setDesc(TII.get(AMDGPU::COPY));
655     MI.RemoveOperand(2);
656     return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI) &&
657            RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI);
658   }
659 
660   Register ShiftSrc0;
661   Register ShiftSrc1;
662   int64_t ShiftAmt;
663 
664   // With multiple uses of the shift, this will duplicate the shift and
665   // increase register pressure.
666   //
667   // (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16)
668   //  => (S_PACK_HH_B32_B16 $src0, $src1)
669   // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16))
670   //  => (S_PACK_LH_B32_B16 $src0, $src1)
671   // (build_vector_trunc $src0, $src1)
672   //  => (S_PACK_LL_B32_B16 $src0, $src1)
673 
674   // FIXME: This is an inconvenient way to check a specific value
675   bool Shift0 = mi_match(
676     Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_ICst(ShiftAmt)))) &&
677     ShiftAmt == 16;
678 
679   bool Shift1 = mi_match(
680     Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_ICst(ShiftAmt)))) &&
681     ShiftAmt == 16;
682 
683   unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
684   if (Shift0 && Shift1) {
685     Opc = AMDGPU::S_PACK_HH_B32_B16;
686     MI.getOperand(1).setReg(ShiftSrc0);
687     MI.getOperand(2).setReg(ShiftSrc1);
688   } else if (Shift1) {
689     Opc = AMDGPU::S_PACK_LH_B32_B16;
690     MI.getOperand(2).setReg(ShiftSrc1);
691   } else if (Shift0 && isZero(Src1, *MRI)) {
692     // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16
693     auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)
694       .addReg(ShiftSrc0)
695       .addImm(16);
696 
697     MI.eraseFromParent();
698     return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
699   }
700 
701   MI.setDesc(TII.get(Opc));
702   return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
703 }
704 
705 bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
706   return selectG_ADD_SUB(I);
707 }
708 
709 bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
710   const MachineOperand &MO = I.getOperand(0);
711 
712   // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
713   // regbank check here is to know why getConstrainedRegClassForOperand failed.
714   const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
715   if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
716       (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
717     I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
718     return true;
719   }
720 
721   return false;
722 }
723 
724 bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
725   MachineBasicBlock *BB = I.getParent();
726 
727   Register DstReg = I.getOperand(0).getReg();
728   Register Src0Reg = I.getOperand(1).getReg();
729   Register Src1Reg = I.getOperand(2).getReg();
730   LLT Src1Ty = MRI->getType(Src1Reg);
731 
732   unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
733   unsigned InsSize = Src1Ty.getSizeInBits();
734 
735   int64_t Offset = I.getOperand(3).getImm();
736 
737   // FIXME: These cases should have been illegal and unnecessary to check here.
738   if (Offset % 32 != 0 || InsSize % 32 != 0)
739     return false;
740 
741   unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
742   if (SubReg == AMDGPU::NoSubRegister)
743     return false;
744 
745   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
746   const TargetRegisterClass *DstRC =
747     TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
748   if (!DstRC)
749     return false;
750 
751   const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
752   const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
753   const TargetRegisterClass *Src0RC =
754     TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank, *MRI);
755   const TargetRegisterClass *Src1RC =
756     TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank, *MRI);
757 
758   // Deal with weird cases where the class only partially supports the subreg
759   // index.
760   Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
761   if (!Src0RC || !Src1RC)
762     return false;
763 
764   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
765       !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
766       !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
767     return false;
768 
769   const DebugLoc &DL = I.getDebugLoc();
770   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
771     .addReg(Src0Reg)
772     .addReg(Src1Reg)
773     .addImm(SubReg);
774 
775   I.eraseFromParent();
776   return true;
777 }
778 
779 bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
780   if (STI.getLDSBankCount() != 16)
781     return selectImpl(MI, *CoverageInfo);
782 
783   Register Dst = MI.getOperand(0).getReg();
784   Register Src0 = MI.getOperand(2).getReg();
785   Register M0Val = MI.getOperand(6).getReg();
786   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
787       !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
788       !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
789     return false;
790 
791   // This requires 2 instructions. It is possible to write a pattern to support
792   // this, but the generated isel emitter doesn't correctly deal with multiple
793   // output instructions using the same physical register input. The copy to m0
794   // is incorrectly placed before the second instruction.
795   //
796   // TODO: Match source modifiers.
797 
798   Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
799   const DebugLoc &DL = MI.getDebugLoc();
800   MachineBasicBlock *MBB = MI.getParent();
801 
802   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
803     .addReg(M0Val);
804   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
805     .addImm(2)
806     .addImm(MI.getOperand(4).getImm())  // $attr
807     .addImm(MI.getOperand(3).getImm()); // $attrchan
808 
809   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
810     .addImm(0)                          // $src0_modifiers
811     .addReg(Src0)                       // $src0
812     .addImm(MI.getOperand(4).getImm())  // $attr
813     .addImm(MI.getOperand(3).getImm())  // $attrchan
814     .addImm(0)                          // $src2_modifiers
815     .addReg(InterpMov)                  // $src2 - 2 f16 values selected by high
816     .addImm(MI.getOperand(5).getImm())  // $high
817     .addImm(0)                          // $clamp
818     .addImm(0);                         // $omod
819 
820   MI.eraseFromParent();
821   return true;
822 }
823 
824 // We need to handle this here because tablegen doesn't support matching
825 // instructions with multiple outputs.
826 bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
827   Register Dst0 = MI.getOperand(0).getReg();
828   Register Dst1 = MI.getOperand(1).getReg();
829 
830   LLT Ty = MRI->getType(Dst0);
831   unsigned Opc;
832   if (Ty == LLT::scalar(32))
833     Opc = AMDGPU::V_DIV_SCALE_F32;
834   else if (Ty == LLT::scalar(64))
835     Opc = AMDGPU::V_DIV_SCALE_F64;
836   else
837     return false;
838 
839   const DebugLoc &DL = MI.getDebugLoc();
840   MachineBasicBlock *MBB = MI.getParent();
841 
842   Register Numer = MI.getOperand(3).getReg();
843   Register Denom = MI.getOperand(4).getReg();
844   unsigned ChooseDenom = MI.getOperand(5).getImm();
845 
846   Register Src0 = ChooseDenom != 0 ? Numer : Denom;
847 
848   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0)
849     .addDef(Dst1)
850     .addUse(Src0)
851     .addUse(Denom)
852     .addUse(Numer);
853 
854   MI.eraseFromParent();
855   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
856 }
857 
858 bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
859   unsigned IntrinsicID = I.getIntrinsicID();
860   switch (IntrinsicID) {
861   case Intrinsic::amdgcn_if_break: {
862     MachineBasicBlock *BB = I.getParent();
863 
864     // FIXME: Manually selecting to avoid dealiing with the SReg_1 trick
865     // SelectionDAG uses for wave32 vs wave64.
866     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
867       .add(I.getOperand(0))
868       .add(I.getOperand(2))
869       .add(I.getOperand(3));
870 
871     Register DstReg = I.getOperand(0).getReg();
872     Register Src0Reg = I.getOperand(2).getReg();
873     Register Src1Reg = I.getOperand(3).getReg();
874 
875     I.eraseFromParent();
876 
877     for (Register Reg : { DstReg, Src0Reg, Src1Reg })
878       MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
879 
880     return true;
881   }
882   case Intrinsic::amdgcn_interp_p1_f16:
883     return selectInterpP1F16(I);
884   case Intrinsic::amdgcn_wqm:
885     return constrainCopyLikeIntrin(I, AMDGPU::WQM);
886   case Intrinsic::amdgcn_softwqm:
887     return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
888   case Intrinsic::amdgcn_wwm:
889     return constrainCopyLikeIntrin(I, AMDGPU::WWM);
890   case Intrinsic::amdgcn_div_scale:
891     return selectDivScale(I);
892   default:
893     return selectImpl(I, *CoverageInfo);
894   }
895 }
896 
897 static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
898   if (Size != 32 && Size != 64)
899     return -1;
900   switch (P) {
901   default:
902     llvm_unreachable("Unknown condition code!");
903   case CmpInst::ICMP_NE:
904     return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
905   case CmpInst::ICMP_EQ:
906     return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
907   case CmpInst::ICMP_SGT:
908     return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
909   case CmpInst::ICMP_SGE:
910     return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
911   case CmpInst::ICMP_SLT:
912     return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
913   case CmpInst::ICMP_SLE:
914     return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
915   case CmpInst::ICMP_UGT:
916     return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
917   case CmpInst::ICMP_UGE:
918     return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
919   case CmpInst::ICMP_ULT:
920     return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
921   case CmpInst::ICMP_ULE:
922     return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
923   }
924 }
925 
926 int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
927                                               unsigned Size) const {
928   if (Size == 64) {
929     if (!STI.hasScalarCompareEq64())
930       return -1;
931 
932     switch (P) {
933     case CmpInst::ICMP_NE:
934       return AMDGPU::S_CMP_LG_U64;
935     case CmpInst::ICMP_EQ:
936       return AMDGPU::S_CMP_EQ_U64;
937     default:
938       return -1;
939     }
940   }
941 
942   if (Size != 32)
943     return -1;
944 
945   switch (P) {
946   case CmpInst::ICMP_NE:
947     return AMDGPU::S_CMP_LG_U32;
948   case CmpInst::ICMP_EQ:
949     return AMDGPU::S_CMP_EQ_U32;
950   case CmpInst::ICMP_SGT:
951     return AMDGPU::S_CMP_GT_I32;
952   case CmpInst::ICMP_SGE:
953     return AMDGPU::S_CMP_GE_I32;
954   case CmpInst::ICMP_SLT:
955     return AMDGPU::S_CMP_LT_I32;
956   case CmpInst::ICMP_SLE:
957     return AMDGPU::S_CMP_LE_I32;
958   case CmpInst::ICMP_UGT:
959     return AMDGPU::S_CMP_GT_U32;
960   case CmpInst::ICMP_UGE:
961     return AMDGPU::S_CMP_GE_U32;
962   case CmpInst::ICMP_ULT:
963     return AMDGPU::S_CMP_LT_U32;
964   case CmpInst::ICMP_ULE:
965     return AMDGPU::S_CMP_LE_U32;
966   default:
967     llvm_unreachable("Unknown condition code!");
968   }
969 }
970 
971 bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
972   MachineBasicBlock *BB = I.getParent();
973   const DebugLoc &DL = I.getDebugLoc();
974 
975   Register SrcReg = I.getOperand(2).getReg();
976   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
977 
978   auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
979 
980   Register CCReg = I.getOperand(0).getReg();
981   if (!isVCC(CCReg, *MRI)) {
982     int Opcode = getS_CMPOpcode(Pred, Size);
983     if (Opcode == -1)
984       return false;
985     MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
986             .add(I.getOperand(2))
987             .add(I.getOperand(3));
988     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
989       .addReg(AMDGPU::SCC);
990     bool Ret =
991         constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
992         RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
993     I.eraseFromParent();
994     return Ret;
995   }
996 
997   int Opcode = getV_CMPOpcode(Pred, Size);
998   if (Opcode == -1)
999     return false;
1000 
1001   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
1002             I.getOperand(0).getReg())
1003             .add(I.getOperand(2))
1004             .add(I.getOperand(3));
1005   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
1006                                *TRI.getBoolRC(), *MRI);
1007   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1008   I.eraseFromParent();
1009   return Ret;
1010 }
1011 
1012 bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
1013   // FIXME: Manually selecting to avoid dealiing with the SReg_1 trick
1014   // SelectionDAG uses for wave32 vs wave64.
1015   MachineBasicBlock *BB = MI.getParent();
1016   BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
1017       .add(MI.getOperand(1));
1018 
1019   Register Reg = MI.getOperand(1).getReg();
1020   MI.eraseFromParent();
1021 
1022   if (!MRI->getRegClassOrNull(Reg))
1023     MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
1024   return true;
1025 }
1026 
1027 static unsigned getDSShaderTypeValue(const MachineFunction &MF) {
1028   switch (MF.getFunction().getCallingConv()) {
1029   case CallingConv::AMDGPU_PS:
1030     return 1;
1031   case CallingConv::AMDGPU_VS:
1032     return 2;
1033   case CallingConv::AMDGPU_GS:
1034     return 3;
1035   case CallingConv::AMDGPU_HS:
1036   case CallingConv::AMDGPU_LS:
1037   case CallingConv::AMDGPU_ES:
1038     report_fatal_error("ds_ordered_count unsupported for this calling conv");
1039   case CallingConv::AMDGPU_CS:
1040   case CallingConv::AMDGPU_KERNEL:
1041   case CallingConv::C:
1042   case CallingConv::Fast:
1043   default:
1044     // Assume other calling conventions are various compute callable functions
1045     return 0;
1046   }
1047 }
1048 
1049 bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
1050   MachineInstr &MI, Intrinsic::ID IntrID) const {
1051   MachineBasicBlock *MBB = MI.getParent();
1052   MachineFunction *MF = MBB->getParent();
1053   const DebugLoc &DL = MI.getDebugLoc();
1054 
1055   unsigned IndexOperand = MI.getOperand(7).getImm();
1056   bool WaveRelease = MI.getOperand(8).getImm() != 0;
1057   bool WaveDone = MI.getOperand(9).getImm() != 0;
1058 
1059   if (WaveDone && !WaveRelease)
1060     report_fatal_error("ds_ordered_count: wave_done requires wave_release");
1061 
1062   unsigned OrderedCountIndex = IndexOperand & 0x3f;
1063   IndexOperand &= ~0x3f;
1064   unsigned CountDw = 0;
1065 
1066   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
1067     CountDw = (IndexOperand >> 24) & 0xf;
1068     IndexOperand &= ~(0xf << 24);
1069 
1070     if (CountDw < 1 || CountDw > 4) {
1071       report_fatal_error(
1072         "ds_ordered_count: dword count must be between 1 and 4");
1073     }
1074   }
1075 
1076   if (IndexOperand)
1077     report_fatal_error("ds_ordered_count: bad index operand");
1078 
1079   unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
1080   unsigned ShaderType = getDSShaderTypeValue(*MF);
1081 
1082   unsigned Offset0 = OrderedCountIndex << 2;
1083   unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
1084                      (Instruction << 4);
1085 
1086   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
1087     Offset1 |= (CountDw - 1) << 6;
1088 
1089   unsigned Offset = Offset0 | (Offset1 << 8);
1090 
1091   Register M0Val = MI.getOperand(2).getReg();
1092   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1093     .addReg(M0Val);
1094 
1095   Register DstReg = MI.getOperand(0).getReg();
1096   Register ValReg = MI.getOperand(3).getReg();
1097   MachineInstrBuilder DS =
1098     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
1099       .addReg(ValReg)
1100       .addImm(Offset)
1101       .cloneMemRefs(MI);
1102 
1103   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
1104     return false;
1105 
1106   bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
1107   MI.eraseFromParent();
1108   return Ret;
1109 }
1110 
1111 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
1112   switch (IntrID) {
1113   case Intrinsic::amdgcn_ds_gws_init:
1114     return AMDGPU::DS_GWS_INIT;
1115   case Intrinsic::amdgcn_ds_gws_barrier:
1116     return AMDGPU::DS_GWS_BARRIER;
1117   case Intrinsic::amdgcn_ds_gws_sema_v:
1118     return AMDGPU::DS_GWS_SEMA_V;
1119   case Intrinsic::amdgcn_ds_gws_sema_br:
1120     return AMDGPU::DS_GWS_SEMA_BR;
1121   case Intrinsic::amdgcn_ds_gws_sema_p:
1122     return AMDGPU::DS_GWS_SEMA_P;
1123   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1124     return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
1125   default:
1126     llvm_unreachable("not a gws intrinsic");
1127   }
1128 }
1129 
1130 bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
1131                                                      Intrinsic::ID IID) const {
1132   if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
1133       !STI.hasGWSSemaReleaseAll())
1134     return false;
1135 
1136   // intrinsic ID, vsrc, offset
1137   const bool HasVSrc = MI.getNumOperands() == 3;
1138   assert(HasVSrc || MI.getNumOperands() == 2);
1139 
1140   Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
1141   const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
1142   if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
1143     return false;
1144 
1145   MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1146   assert(OffsetDef);
1147 
1148   unsigned ImmOffset;
1149 
1150   MachineBasicBlock *MBB = MI.getParent();
1151   const DebugLoc &DL = MI.getDebugLoc();
1152 
1153   MachineInstr *Readfirstlane = nullptr;
1154 
1155   // If we legalized the VGPR input, strip out the readfirstlane to analyze the
1156   // incoming offset, in case there's an add of a constant. We'll have to put it
1157   // back later.
1158   if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
1159     Readfirstlane = OffsetDef;
1160     BaseOffset = OffsetDef->getOperand(1).getReg();
1161     OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1162   }
1163 
1164   if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
1165     // If we have a constant offset, try to use the 0 in m0 as the base.
1166     // TODO: Look into changing the default m0 initialization value. If the
1167     // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
1168     // the immediate offset.
1169 
1170     ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
1171     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1172       .addImm(0);
1173   } else {
1174     std::tie(BaseOffset, ImmOffset, OffsetDef)
1175       = AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset);
1176 
1177     if (Readfirstlane) {
1178       // We have the constant offset now, so put the readfirstlane back on the
1179       // variable component.
1180       if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
1181         return false;
1182 
1183       Readfirstlane->getOperand(1).setReg(BaseOffset);
1184       BaseOffset = Readfirstlane->getOperand(0).getReg();
1185     } else {
1186       if (!RBI.constrainGenericRegister(BaseOffset,
1187                                         AMDGPU::SReg_32RegClass, *MRI))
1188         return false;
1189     }
1190 
1191     Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1192     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
1193       .addReg(BaseOffset)
1194       .addImm(16);
1195 
1196     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1197       .addReg(M0Base);
1198   }
1199 
1200   // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
1201   // offset field) % 64. Some versions of the programming guide omit the m0
1202   // part, or claim it's from offset 0.
1203   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));
1204 
1205   if (HasVSrc) {
1206     Register VSrc = MI.getOperand(1).getReg();
1207     MIB.addReg(VSrc);
1208     if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
1209       return false;
1210   }
1211 
1212   MIB.addImm(ImmOffset)
1213      .addImm(-1) // $gds
1214      .cloneMemRefs(MI);
1215 
1216   MI.eraseFromParent();
1217   return true;
1218 }
1219 
1220 bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
1221                                                       bool IsAppend) const {
1222   Register PtrBase = MI.getOperand(2).getReg();
1223   LLT PtrTy = MRI->getType(PtrBase);
1224   bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
1225 
1226   unsigned Offset;
1227   std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));
1228 
1229   // TODO: Should this try to look through readfirstlane like GWS?
1230   if (!isDSOffsetLegal(PtrBase, Offset, 16)) {
1231     PtrBase = MI.getOperand(2).getReg();
1232     Offset = 0;
1233   }
1234 
1235   MachineBasicBlock *MBB = MI.getParent();
1236   const DebugLoc &DL = MI.getDebugLoc();
1237   const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
1238 
1239   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1240     .addReg(PtrBase);
1241   BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
1242     .addImm(Offset)
1243     .addImm(IsGDS ? -1 : 0)
1244     .cloneMemRefs(MI);
1245   MI.eraseFromParent();
1246   return true;
1247 }
1248 
1249 static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
1250                          bool &IsTexFail) {
1251   if (TexFailCtrl)
1252     IsTexFail = true;
1253 
1254   TFE = (TexFailCtrl & 0x1) ? 1 : 0;
1255   TexFailCtrl &= ~(uint64_t)0x1;
1256   LWE = (TexFailCtrl & 0x2) ? 1 : 0;
1257   TexFailCtrl &= ~(uint64_t)0x2;
1258 
1259   return TexFailCtrl == 0;
1260 }
1261 
1262 static bool parseCachePolicy(uint64_t Value,
1263                              bool *GLC, bool *SLC, bool *DLC) {
1264   if (GLC) {
1265     *GLC = (Value & 0x1) ? 1 : 0;
1266     Value &= ~(uint64_t)0x1;
1267   }
1268   if (SLC) {
1269     *SLC = (Value & 0x2) ? 1 : 0;
1270     Value &= ~(uint64_t)0x2;
1271   }
1272   if (DLC) {
1273     *DLC = (Value & 0x4) ? 1 : 0;
1274     Value &= ~(uint64_t)0x4;
1275   }
1276 
1277   return Value == 0;
1278 }
1279 
1280 bool AMDGPUInstructionSelector::selectImageIntrinsic(
1281   MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
1282   MachineBasicBlock *MBB = MI.getParent();
1283   const DebugLoc &DL = MI.getDebugLoc();
1284 
1285   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
1286     AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
1287 
1288   const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
1289   const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
1290       AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
1291   const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo =
1292       AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode);
1293   unsigned IntrOpcode = Intr->BaseOpcode;
1294   const bool IsGFX10 = STI.getGeneration() >= AMDGPUSubtarget::GFX10;
1295 
1296   const LLT S16 = LLT::scalar(16);
1297   const int VAddrIdx = getImageVAddrIdxBegin(BaseOpcode,
1298                                              MI.getNumExplicitDefs());
1299   int NumVAddr, NumGradients;
1300   std::tie(NumVAddr, NumGradients) = getImageNumVAddr(Intr, BaseOpcode);
1301 
1302   const LLT AddrTy = MRI->getType(MI.getOperand(VAddrIdx).getReg());
1303   const bool IsA16 = AddrTy.getScalarType() == S16;
1304 
1305   Register VDataIn, VDataOut;
1306   LLT VDataTy;
1307   int NumVDataDwords = -1;
1308   bool IsD16 = false;
1309 
1310   // XXX - Can we just get the second to last argument for ctrl?
1311   unsigned CtrlIdx; // Index of texfailctrl argument
1312   bool Unorm;
1313   if (!BaseOpcode->Sampler) {
1314     Unorm = true;
1315     CtrlIdx = VAddrIdx + NumVAddr + 1;
1316   } else {
1317     Unorm = MI.getOperand(VAddrIdx + NumVAddr + 2).getImm() != 0;
1318     CtrlIdx = VAddrIdx + NumVAddr + 3;
1319   }
1320 
1321   bool TFE;
1322   bool LWE;
1323   bool IsTexFail = false;
1324   if (!parseTexFail(MI.getOperand(CtrlIdx).getImm(), TFE, LWE, IsTexFail))
1325     return false;
1326 
1327   unsigned DMask = 0;
1328   unsigned DMaskLanes = 0;
1329 
1330   if (BaseOpcode->Atomic) {
1331     VDataOut = MI.getOperand(0).getReg();
1332     VDataIn = MI.getOperand(2).getReg();
1333     LLT Ty = MRI->getType(VDataIn);
1334 
1335     // Be careful to allow atomic swap on 16-bit element vectors.
1336     const bool Is64Bit = BaseOpcode->AtomicX2 ?
1337       Ty.getSizeInBits() == 128 :
1338       Ty.getSizeInBits() == 64;
1339 
1340     if (BaseOpcode->AtomicX2) {
1341       assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister);
1342 
1343       DMask = Is64Bit ? 0xf : 0x3;
1344       NumVDataDwords = Is64Bit ? 4 : 2;
1345     } else {
1346       DMask = Is64Bit ? 0x3 : 0x1;
1347       NumVDataDwords = Is64Bit ? 2 : 1;
1348     }
1349   } else {
1350     const int DMaskIdx = 2; // Input/output + intrinsic ID.
1351 
1352     DMask = MI.getOperand(DMaskIdx).getImm();
1353     DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
1354 
1355     if (BaseOpcode->Store) {
1356       VDataIn = MI.getOperand(1).getReg();
1357       VDataTy = MRI->getType(VDataIn);
1358       NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
1359     } else {
1360       VDataOut = MI.getOperand(0).getReg();
1361       VDataTy = MRI->getType(VDataOut);
1362       NumVDataDwords = DMaskLanes;
1363 
1364       // One memoperand is mandatory, except for getresinfo.
1365       // FIXME: Check this in verifier.
1366       if (!MI.memoperands_empty()) {
1367         const MachineMemOperand *MMO = *MI.memoperands_begin();
1368 
1369         // Infer d16 from the memory size, as the register type will be mangled by
1370         // unpacked subtargets, or by TFE.
1371         IsD16 = ((8 * MMO->getSize()) / DMaskLanes) < 32;
1372 
1373         if (IsD16 && !STI.hasUnpackedD16VMem())
1374           NumVDataDwords = (DMaskLanes + 1) / 2;
1375       }
1376     }
1377   }
1378 
1379   // Optimize _L to _LZ when _L is zero
1380   if (LZMappingInfo) {
1381     // The legalizer replaced the register with an immediate 0 if we need to
1382     // change the opcode.
1383     const MachineOperand &Lod = MI.getOperand(VAddrIdx + NumVAddr - 1);
1384     if (Lod.isImm()) {
1385       assert(Lod.getImm() == 0);
1386       IntrOpcode = LZMappingInfo->LZ;  // set new opcode to _lz variant of _l
1387     }
1388   }
1389 
1390   // Optimize _mip away, when 'lod' is zero
1391   if (MIPMappingInfo) {
1392     const MachineOperand &Lod = MI.getOperand(VAddrIdx + NumVAddr - 1);
1393     if (Lod.isImm()) {
1394       assert(Lod.getImm() == 0);
1395       IntrOpcode = MIPMappingInfo->NONMIP;  // set new opcode to variant without _mip
1396     }
1397   }
1398 
1399   // TODO: Check this in verifier.
1400   assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");
1401 
1402   bool GLC = false;
1403   bool SLC = false;
1404   bool DLC = false;
1405   if (BaseOpcode->Atomic) {
1406     GLC = true; // TODO no-return optimization
1407     if (!parseCachePolicy(MI.getOperand(CtrlIdx + 1).getImm(), nullptr, &SLC,
1408                           IsGFX10 ? &DLC : nullptr))
1409       return false;
1410   } else {
1411     if (!parseCachePolicy(MI.getOperand(CtrlIdx + 1).getImm(), &GLC, &SLC,
1412                           IsGFX10 ? &DLC : nullptr))
1413       return false;
1414   }
1415 
1416   int NumVAddrRegs = 0;
1417   int NumVAddrDwords = 0;
1418   for (int I = 0; I < NumVAddr; ++I) {
1419     // Skip the $noregs and 0s inserted during legalization.
1420     MachineOperand &AddrOp = MI.getOperand(VAddrIdx + I);
1421     if (!AddrOp.isReg())
1422       continue; // XXX - Break?
1423 
1424     Register Addr = AddrOp.getReg();
1425     if (!Addr)
1426       break;
1427 
1428     ++NumVAddrRegs;
1429     NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32;
1430   }
1431 
1432   // The legalizer preprocessed the intrinsic arguments. If we aren't using
1433   // NSA, these should have beeen packed into a single value in the first
1434   // address register
1435   const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs;
1436   if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) {
1437     LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n");
1438     return false;
1439   }
1440 
1441   if (IsTexFail)
1442     ++NumVDataDwords;
1443 
1444   int Opcode = -1;
1445   if (IsGFX10) {
1446     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1447                                    UseNSA ? AMDGPU::MIMGEncGfx10NSA
1448                                           : AMDGPU::MIMGEncGfx10Default,
1449                                    NumVDataDwords, NumVAddrDwords);
1450   } else {
1451     if (STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
1452       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
1453                                      NumVDataDwords, NumVAddrDwords);
1454     if (Opcode == -1)
1455       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
1456                                      NumVDataDwords, NumVAddrDwords);
1457   }
1458   assert(Opcode != -1);
1459 
1460   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode))
1461     .cloneMemRefs(MI);
1462 
1463   if (VDataOut) {
1464     if (BaseOpcode->AtomicX2) {
1465       const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64;
1466 
1467       Register TmpReg = MRI->createVirtualRegister(
1468         Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
1469       unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
1470 
1471       MIB.addDef(TmpReg);
1472       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut)
1473         .addReg(TmpReg, RegState::Kill, SubReg);
1474 
1475     } else {
1476       MIB.addDef(VDataOut); // vdata output
1477     }
1478   }
1479 
1480   if (VDataIn)
1481     MIB.addReg(VDataIn); // vdata input
1482 
1483   for (int i = 0; i != NumVAddrRegs; ++i) {
1484     MachineOperand &SrcOp = MI.getOperand(VAddrIdx + i);
1485     if (SrcOp.isReg()) {
1486       assert(SrcOp.getReg() != 0);
1487       MIB.addReg(SrcOp.getReg());
1488     }
1489   }
1490 
1491   MIB.addReg(MI.getOperand(VAddrIdx + NumVAddr).getReg()); // rsrc
1492   if (BaseOpcode->Sampler)
1493     MIB.addReg(MI.getOperand(VAddrIdx + NumVAddr + 1).getReg()); // sampler
1494 
1495   MIB.addImm(DMask); // dmask
1496 
1497   if (IsGFX10)
1498     MIB.addImm(DimInfo->Encoding);
1499   MIB.addImm(Unorm);
1500   if (IsGFX10)
1501     MIB.addImm(DLC);
1502 
1503   MIB.addImm(GLC);
1504   MIB.addImm(SLC);
1505   MIB.addImm(IsA16 &&  // a16 or r128
1506              STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0);
1507   if (IsGFX10)
1508     MIB.addImm(IsA16 ? -1 : 0);
1509 
1510   MIB.addImm(TFE); // tfe
1511   MIB.addImm(LWE); // lwe
1512   if (!IsGFX10)
1513     MIB.addImm(DimInfo->DA ? -1 : 0);
1514   if (BaseOpcode->HasD16)
1515     MIB.addImm(IsD16 ? -1 : 0);
1516 
1517   MI.eraseFromParent();
1518   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1519 }
1520 
1521 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
1522     MachineInstr &I) const {
1523   unsigned IntrinsicID = I.getIntrinsicID();
1524   switch (IntrinsicID) {
1525   case Intrinsic::amdgcn_end_cf:
1526     return selectEndCfIntrinsic(I);
1527   case Intrinsic::amdgcn_ds_ordered_add:
1528   case Intrinsic::amdgcn_ds_ordered_swap:
1529     return selectDSOrderedIntrinsic(I, IntrinsicID);
1530   case Intrinsic::amdgcn_ds_gws_init:
1531   case Intrinsic::amdgcn_ds_gws_barrier:
1532   case Intrinsic::amdgcn_ds_gws_sema_v:
1533   case Intrinsic::amdgcn_ds_gws_sema_br:
1534   case Intrinsic::amdgcn_ds_gws_sema_p:
1535   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1536     return selectDSGWSIntrinsic(I, IntrinsicID);
1537   case Intrinsic::amdgcn_ds_append:
1538     return selectDSAppendConsume(I, true);
1539   case Intrinsic::amdgcn_ds_consume:
1540     return selectDSAppendConsume(I, false);
1541   default: {
1542     return selectImpl(I, *CoverageInfo);
1543   }
1544   }
1545 }
1546 
1547 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
1548   if (selectImpl(I, *CoverageInfo))
1549     return true;
1550 
1551   MachineBasicBlock *BB = I.getParent();
1552   const DebugLoc &DL = I.getDebugLoc();
1553 
1554   Register DstReg = I.getOperand(0).getReg();
1555   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
1556   assert(Size <= 32 || Size == 64);
1557   const MachineOperand &CCOp = I.getOperand(1);
1558   Register CCReg = CCOp.getReg();
1559   if (!isVCC(CCReg, *MRI)) {
1560     unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
1561                                          AMDGPU::S_CSELECT_B32;
1562     MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
1563             .addReg(CCReg);
1564 
1565     // The generic constrainSelectedInstRegOperands doesn't work for the scc register
1566     // bank, because it does not cover the register class that we used to represent
1567     // for it.  So we need to manually set the register class here.
1568     if (!MRI->getRegClassOrNull(CCReg))
1569         MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
1570     MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
1571             .add(I.getOperand(2))
1572             .add(I.getOperand(3));
1573 
1574     bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI) |
1575                constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
1576     I.eraseFromParent();
1577     return Ret;
1578   }
1579 
1580   // Wide VGPR select should have been split in RegBankSelect.
1581   if (Size > 32)
1582     return false;
1583 
1584   MachineInstr *Select =
1585       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1586               .addImm(0)
1587               .add(I.getOperand(3))
1588               .addImm(0)
1589               .add(I.getOperand(2))
1590               .add(I.getOperand(1));
1591 
1592   bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1593   I.eraseFromParent();
1594   return Ret;
1595 }
1596 
1597 bool AMDGPUInstructionSelector::selectG_STORE(MachineInstr &I) const {
1598   initM0(I);
1599   return selectImpl(I, *CoverageInfo);
1600 }
1601 
1602 static int sizeToSubRegIndex(unsigned Size) {
1603   switch (Size) {
1604   case 32:
1605     return AMDGPU::sub0;
1606   case 64:
1607     return AMDGPU::sub0_sub1;
1608   case 96:
1609     return AMDGPU::sub0_sub1_sub2;
1610   case 128:
1611     return AMDGPU::sub0_sub1_sub2_sub3;
1612   case 256:
1613     return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
1614   default:
1615     if (Size < 32)
1616       return AMDGPU::sub0;
1617     if (Size > 256)
1618       return -1;
1619     return sizeToSubRegIndex(PowerOf2Ceil(Size));
1620   }
1621 }
1622 
1623 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
1624   Register DstReg = I.getOperand(0).getReg();
1625   Register SrcReg = I.getOperand(1).getReg();
1626   const LLT DstTy = MRI->getType(DstReg);
1627   const LLT SrcTy = MRI->getType(SrcReg);
1628   const LLT S1 = LLT::scalar(1);
1629 
1630   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1631   const RegisterBank *DstRB;
1632   if (DstTy == S1) {
1633     // This is a special case. We don't treat s1 for legalization artifacts as
1634     // vcc booleans.
1635     DstRB = SrcRB;
1636   } else {
1637     DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1638     if (SrcRB != DstRB)
1639       return false;
1640   }
1641 
1642   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
1643 
1644   unsigned DstSize = DstTy.getSizeInBits();
1645   unsigned SrcSize = SrcTy.getSizeInBits();
1646 
1647   const TargetRegisterClass *SrcRC
1648     = TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB, *MRI);
1649   const TargetRegisterClass *DstRC
1650     = TRI.getRegClassForSizeOnBank(DstSize, *DstRB, *MRI);
1651   if (!SrcRC || !DstRC)
1652     return false;
1653 
1654   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1655       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1656     LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
1657     return false;
1658   }
1659 
1660   if (DstTy == LLT::vector(2, 16) && SrcTy == LLT::vector(2, 32)) {
1661     MachineBasicBlock *MBB = I.getParent();
1662     const DebugLoc &DL = I.getDebugLoc();
1663 
1664     Register LoReg = MRI->createVirtualRegister(DstRC);
1665     Register HiReg = MRI->createVirtualRegister(DstRC);
1666     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg)
1667       .addReg(SrcReg, 0, AMDGPU::sub0);
1668     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg)
1669       .addReg(SrcReg, 0, AMDGPU::sub1);
1670 
1671     if (IsVALU && STI.hasSDWA()) {
1672       // Write the low 16-bits of the high element into the high 16-bits of the
1673       // low element.
1674       MachineInstr *MovSDWA =
1675         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
1676         .addImm(0)                             // $src0_modifiers
1677         .addReg(HiReg)                         // $src0
1678         .addImm(0)                             // $clamp
1679         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
1680         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
1681         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
1682         .addReg(LoReg, RegState::Implicit);
1683       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
1684     } else {
1685       Register TmpReg0 = MRI->createVirtualRegister(DstRC);
1686       Register TmpReg1 = MRI->createVirtualRegister(DstRC);
1687       Register ImmReg = MRI->createVirtualRegister(DstRC);
1688       if (IsVALU) {
1689         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0)
1690           .addImm(16)
1691           .addReg(HiReg);
1692       } else {
1693         BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0)
1694           .addReg(HiReg)
1695           .addImm(16);
1696       }
1697 
1698       unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1699       unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
1700       unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32;
1701 
1702       BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg)
1703         .addImm(0xffff);
1704       BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1)
1705         .addReg(LoReg)
1706         .addReg(ImmReg);
1707       BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg)
1708         .addReg(TmpReg0)
1709         .addReg(TmpReg1);
1710     }
1711 
1712     I.eraseFromParent();
1713     return true;
1714   }
1715 
1716   if (!DstTy.isScalar())
1717     return false;
1718 
1719   if (SrcSize > 32) {
1720     int SubRegIdx = sizeToSubRegIndex(DstSize);
1721     if (SubRegIdx == -1)
1722       return false;
1723 
1724     // Deal with weird cases where the class only partially supports the subreg
1725     // index.
1726     const TargetRegisterClass *SrcWithSubRC
1727       = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
1728     if (!SrcWithSubRC)
1729       return false;
1730 
1731     if (SrcWithSubRC != SrcRC) {
1732       if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI))
1733         return false;
1734     }
1735 
1736     I.getOperand(1).setSubReg(SubRegIdx);
1737   }
1738 
1739   I.setDesc(TII.get(TargetOpcode::COPY));
1740   return true;
1741 }
1742 
1743 /// \returns true if a bitmask for \p Size bits will be an inline immediate.
1744 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
1745   Mask = maskTrailingOnes<unsigned>(Size);
1746   int SignedMask = static_cast<int>(Mask);
1747   return SignedMask >= -16 && SignedMask <= 64;
1748 }
1749 
1750 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
1751 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
1752   Register Reg, const MachineRegisterInfo &MRI,
1753   const TargetRegisterInfo &TRI) const {
1754   const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
1755   if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
1756     return RB;
1757 
1758   // Ignore the type, since we don't use vcc in artifacts.
1759   if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
1760     return &RBI.getRegBankFromRegClass(*RC, LLT());
1761   return nullptr;
1762 }
1763 
1764 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
1765   bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG;
1766   bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg;
1767   const DebugLoc &DL = I.getDebugLoc();
1768   MachineBasicBlock &MBB = *I.getParent();
1769   const Register DstReg = I.getOperand(0).getReg();
1770   const Register SrcReg = I.getOperand(1).getReg();
1771 
1772   const LLT DstTy = MRI->getType(DstReg);
1773   const LLT SrcTy = MRI->getType(SrcReg);
1774   const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ?
1775     I.getOperand(2).getImm() : SrcTy.getSizeInBits();
1776   const unsigned DstSize = DstTy.getSizeInBits();
1777   if (!DstTy.isScalar())
1778     return false;
1779 
1780   if (I.getOpcode() == AMDGPU::G_ANYEXT)
1781     return selectCOPY(I);
1782 
1783   // Artifact casts should never use vcc.
1784   const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
1785 
1786   if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
1787     // 64-bit should have been split up in RegBankSelect
1788 
1789     // Try to use an and with a mask if it will save code size.
1790     unsigned Mask;
1791     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
1792       MachineInstr *ExtI =
1793       BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
1794         .addImm(Mask)
1795         .addReg(SrcReg);
1796       I.eraseFromParent();
1797       return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
1798     }
1799 
1800     const unsigned BFE = Signed ? AMDGPU::V_BFE_I32 : AMDGPU::V_BFE_U32;
1801     MachineInstr *ExtI =
1802       BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
1803       .addReg(SrcReg)
1804       .addImm(0) // Offset
1805       .addImm(SrcSize); // Width
1806     I.eraseFromParent();
1807     return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
1808   }
1809 
1810   if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
1811     const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ?
1812       AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass;
1813     if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI))
1814       return false;
1815 
1816     if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
1817       const unsigned SextOpc = SrcSize == 8 ?
1818         AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
1819       BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
1820         .addReg(SrcReg);
1821       I.eraseFromParent();
1822       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
1823     }
1824 
1825     const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
1826     const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
1827 
1828     // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16]= width.
1829     if (DstSize > 32 && (SrcSize <= 32 || InReg)) {
1830       // We need a 64-bit register source, but the high bits don't matter.
1831       Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
1832       Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1833       unsigned SubReg = InReg ? AMDGPU::sub0 : 0;
1834 
1835       BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
1836       BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
1837         .addReg(SrcReg, 0, SubReg)
1838         .addImm(AMDGPU::sub0)
1839         .addReg(UndefReg)
1840         .addImm(AMDGPU::sub1);
1841 
1842       BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
1843         .addReg(ExtReg)
1844         .addImm(SrcSize << 16);
1845 
1846       I.eraseFromParent();
1847       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
1848     }
1849 
1850     unsigned Mask;
1851     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
1852       BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
1853         .addReg(SrcReg)
1854         .addImm(Mask);
1855     } else {
1856       BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
1857         .addReg(SrcReg)
1858         .addImm(SrcSize << 16);
1859     }
1860 
1861     I.eraseFromParent();
1862     return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
1863   }
1864 
1865   return false;
1866 }
1867 
1868 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
1869   MachineBasicBlock *BB = I.getParent();
1870   MachineOperand &ImmOp = I.getOperand(1);
1871 
1872   // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
1873   if (ImmOp.isFPImm()) {
1874     const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
1875     ImmOp.ChangeToImmediate(Imm.getZExtValue());
1876   } else if (ImmOp.isCImm()) {
1877     ImmOp.ChangeToImmediate(ImmOp.getCImm()->getZExtValue());
1878   }
1879 
1880   Register DstReg = I.getOperand(0).getReg();
1881   unsigned Size;
1882   bool IsSgpr;
1883   const RegisterBank *RB = MRI->getRegBankOrNull(I.getOperand(0).getReg());
1884   if (RB) {
1885     IsSgpr = RB->getID() == AMDGPU::SGPRRegBankID;
1886     Size = MRI->getType(DstReg).getSizeInBits();
1887   } else {
1888     const TargetRegisterClass *RC = TRI.getRegClassForReg(*MRI, DstReg);
1889     IsSgpr = TRI.isSGPRClass(RC);
1890     Size = TRI.getRegSizeInBits(*RC);
1891   }
1892 
1893   if (Size != 32 && Size != 64)
1894     return false;
1895 
1896   unsigned Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1897   if (Size == 32) {
1898     I.setDesc(TII.get(Opcode));
1899     I.addImplicitDefUseOperands(*MF);
1900     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1901   }
1902 
1903   const DebugLoc &DL = I.getDebugLoc();
1904 
1905   APInt Imm(Size, I.getOperand(1).getImm());
1906 
1907   MachineInstr *ResInst;
1908   if (IsSgpr && TII.isInlineConstant(Imm)) {
1909     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
1910       .addImm(I.getOperand(1).getImm());
1911   } else {
1912     const TargetRegisterClass *RC = IsSgpr ?
1913       &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
1914     Register LoReg = MRI->createVirtualRegister(RC);
1915     Register HiReg = MRI->createVirtualRegister(RC);
1916 
1917     BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
1918       .addImm(Imm.trunc(32).getZExtValue());
1919 
1920     BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
1921       .addImm(Imm.ashr(32).getZExtValue());
1922 
1923     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
1924       .addReg(LoReg)
1925       .addImm(AMDGPU::sub0)
1926       .addReg(HiReg)
1927       .addImm(AMDGPU::sub1);
1928   }
1929 
1930   // We can't call constrainSelectedInstRegOperands here, because it doesn't
1931   // work for target independent opcodes
1932   I.eraseFromParent();
1933   const TargetRegisterClass *DstRC =
1934     TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
1935   if (!DstRC)
1936     return true;
1937   return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
1938 }
1939 
1940 bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
1941   // Only manually handle the f64 SGPR case.
1942   //
1943   // FIXME: This is a workaround for 2.5 different tablegen problems. Because
1944   // the bit ops theoretically have a second result due to the implicit def of
1945   // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing
1946   // that is easy by disabling the check. The result works, but uses a
1947   // nonsensical sreg32orlds_and_sreg_1 regclass.
1948   //
1949   // The DAG emitter is more problematic, and incorrectly adds both S_XOR_B32 to
1950   // the variadic REG_SEQUENCE operands.
1951 
1952   Register Dst = MI.getOperand(0).getReg();
1953   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
1954   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
1955       MRI->getType(Dst) != LLT::scalar(64))
1956     return false;
1957 
1958   Register Src = MI.getOperand(1).getReg();
1959   MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI);
1960   if (Fabs)
1961     Src = Fabs->getOperand(1).getReg();
1962 
1963   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
1964       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
1965     return false;
1966 
1967   MachineBasicBlock *BB = MI.getParent();
1968   const DebugLoc &DL = MI.getDebugLoc();
1969   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1970   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1971   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1972   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1973 
1974   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
1975     .addReg(Src, 0, AMDGPU::sub0);
1976   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
1977     .addReg(Src, 0, AMDGPU::sub1);
1978   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
1979     .addImm(0x80000000);
1980 
1981   // Set or toggle sign bit.
1982   unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
1983   BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
1984     .addReg(HiReg)
1985     .addReg(ConstReg);
1986   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
1987     .addReg(LoReg)
1988     .addImm(AMDGPU::sub0)
1989     .addReg(OpReg)
1990     .addImm(AMDGPU::sub1);
1991   MI.eraseFromParent();
1992   return true;
1993 }
1994 
1995 // FIXME: This is a workaround for the same tablegen problems as G_FNEG
1996 bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {
1997   Register Dst = MI.getOperand(0).getReg();
1998   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
1999   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2000       MRI->getType(Dst) != LLT::scalar(64))
2001     return false;
2002 
2003   Register Src = MI.getOperand(1).getReg();
2004   MachineBasicBlock *BB = MI.getParent();
2005   const DebugLoc &DL = MI.getDebugLoc();
2006   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2007   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2008   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2009   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2010 
2011   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2012       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2013     return false;
2014 
2015   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2016     .addReg(Src, 0, AMDGPU::sub0);
2017   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2018     .addReg(Src, 0, AMDGPU::sub1);
2019   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2020     .addImm(0x7fffffff);
2021 
2022   // Clear sign bit.
2023   // TODO: Should this used S_BITSET0_*?
2024   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg)
2025     .addReg(HiReg)
2026     .addReg(ConstReg);
2027   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2028     .addReg(LoReg)
2029     .addImm(AMDGPU::sub0)
2030     .addReg(OpReg)
2031     .addImm(AMDGPU::sub1);
2032 
2033   MI.eraseFromParent();
2034   return true;
2035 }
2036 
2037 static bool isConstant(const MachineInstr &MI) {
2038   return MI.getOpcode() == TargetOpcode::G_CONSTANT;
2039 }
2040 
2041 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
2042     const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
2043 
2044   const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
2045 
2046   assert(PtrMI);
2047 
2048   if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
2049     return;
2050 
2051   GEPInfo GEPInfo(*PtrMI);
2052 
2053   for (unsigned i = 1; i != 3; ++i) {
2054     const MachineOperand &GEPOp = PtrMI->getOperand(i);
2055     const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
2056     assert(OpDef);
2057     if (i == 2 && isConstant(*OpDef)) {
2058       // TODO: Could handle constant base + variable offset, but a combine
2059       // probably should have commuted it.
2060       assert(GEPInfo.Imm == 0);
2061       GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
2062       continue;
2063     }
2064     const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
2065     if (OpBank->getID() == AMDGPU::SGPRRegBankID)
2066       GEPInfo.SgprParts.push_back(GEPOp.getReg());
2067     else
2068       GEPInfo.VgprParts.push_back(GEPOp.getReg());
2069   }
2070 
2071   AddrInfo.push_back(GEPInfo);
2072   getAddrModeInfo(*PtrMI, MRI, AddrInfo);
2073 }
2074 
2075 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
2076   if (!MI.hasOneMemOperand())
2077     return false;
2078 
2079   const MachineMemOperand *MMO = *MI.memoperands_begin();
2080   const Value *Ptr = MMO->getValue();
2081 
2082   // UndefValue means this is a load of a kernel input.  These are uniform.
2083   // Sometimes LDS instructions have constant pointers.
2084   // If Ptr is null, then that means this mem operand contains a
2085   // PseudoSourceValue like GOT.
2086   if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
2087       isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
2088     return true;
2089 
2090   if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
2091     return true;
2092 
2093   const Instruction *I = dyn_cast<Instruction>(Ptr);
2094   return I && I->getMetadata("amdgpu.uniform");
2095 }
2096 
2097 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
2098   for (const GEPInfo &GEPInfo : AddrInfo) {
2099     if (!GEPInfo.VgprParts.empty())
2100       return true;
2101   }
2102   return false;
2103 }
2104 
2105 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
2106   MachineBasicBlock *BB = I.getParent();
2107 
2108   const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2109   unsigned AS = PtrTy.getAddressSpace();
2110   if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
2111       STI.ldsRequiresM0Init()) {
2112     // If DS instructions require M0 initializtion, insert it before selecting.
2113     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2114       .addImm(-1);
2115   }
2116 }
2117 
2118 bool AMDGPUInstructionSelector::selectG_LOAD_ATOMICRMW(MachineInstr &I) const {
2119   initM0(I);
2120   return selectImpl(I, *CoverageInfo);
2121 }
2122 
2123 // TODO: No rtn optimization.
2124 bool AMDGPUInstructionSelector::selectG_AMDGPU_ATOMIC_CMPXCHG(
2125   MachineInstr &MI) const {
2126   Register PtrReg = MI.getOperand(1).getReg();
2127   const LLT PtrTy = MRI->getType(PtrReg);
2128   if (PtrTy.getAddressSpace() == AMDGPUAS::FLAT_ADDRESS ||
2129       STI.useFlatForGlobal())
2130     return selectImpl(MI, *CoverageInfo);
2131 
2132   Register DstReg = MI.getOperand(0).getReg();
2133   const LLT Ty = MRI->getType(DstReg);
2134   const bool Is64 = Ty.getSizeInBits() == 64;
2135   const unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
2136   Register TmpReg = MRI->createVirtualRegister(
2137     Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
2138 
2139   const DebugLoc &DL = MI.getDebugLoc();
2140   MachineBasicBlock *BB = MI.getParent();
2141 
2142   Register VAddr, RSrcReg, SOffset;
2143   int64_t Offset = 0;
2144 
2145   unsigned Opcode;
2146   if (selectMUBUFOffsetImpl(MI.getOperand(1), RSrcReg, SOffset, Offset)) {
2147     Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_OFFSET_RTN :
2148                              AMDGPU::BUFFER_ATOMIC_CMPSWAP_OFFSET_RTN;
2149   } else if (selectMUBUFAddr64Impl(MI.getOperand(1), VAddr,
2150                                    RSrcReg, SOffset, Offset)) {
2151     Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_ADDR64_RTN :
2152                     AMDGPU::BUFFER_ATOMIC_CMPSWAP_ADDR64_RTN;
2153   } else
2154     return selectImpl(MI, *CoverageInfo);
2155 
2156   auto MIB = BuildMI(*BB, &MI, DL, TII.get(Opcode), TmpReg)
2157     .addReg(MI.getOperand(2).getReg());
2158 
2159   if (VAddr)
2160     MIB.addReg(VAddr);
2161 
2162   MIB.addReg(RSrcReg);
2163   if (SOffset)
2164     MIB.addReg(SOffset);
2165   else
2166     MIB.addImm(0);
2167 
2168   MIB.addImm(Offset);
2169   MIB.addImm(0); // slc
2170   MIB.cloneMemRefs(MI);
2171 
2172   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), DstReg)
2173     .addReg(TmpReg, RegState::Kill, SubReg);
2174 
2175   MI.eraseFromParent();
2176 
2177   MRI->setRegClass(
2178     DstReg, Is64 ? &AMDGPU::VReg_64RegClass : &AMDGPU::VGPR_32RegClass);
2179   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2180 }
2181 
2182 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
2183   MachineBasicBlock *BB = I.getParent();
2184   MachineOperand &CondOp = I.getOperand(0);
2185   Register CondReg = CondOp.getReg();
2186   const DebugLoc &DL = I.getDebugLoc();
2187 
2188   unsigned BrOpcode;
2189   Register CondPhysReg;
2190   const TargetRegisterClass *ConstrainRC;
2191 
2192   // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
2193   // whether the branch is uniform when selecting the instruction. In
2194   // GlobalISel, we should push that decision into RegBankSelect. Assume for now
2195   // RegBankSelect knows what it's doing if the branch condition is scc, even
2196   // though it currently does not.
2197   if (!isVCC(CondReg, *MRI)) {
2198     if (MRI->getType(CondReg) != LLT::scalar(32))
2199       return false;
2200 
2201     CondPhysReg = AMDGPU::SCC;
2202     BrOpcode = AMDGPU::S_CBRANCH_SCC1;
2203     // FIXME: Hack for isSCC tests
2204     ConstrainRC = &AMDGPU::SGPR_32RegClass;
2205   } else {
2206     // FIXME: Do we have to insert an and with exec here, like in SelectionDAG?
2207     // We sort of know that a VCC producer based on the register bank, that ands
2208     // inactive lanes with 0. What if there was a logical operation with vcc
2209     // producers in different blocks/with different exec masks?
2210     // FIXME: Should scc->vcc copies and with exec?
2211     CondPhysReg = TRI.getVCC();
2212     BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
2213     ConstrainRC = TRI.getBoolRC();
2214   }
2215 
2216   if (!MRI->getRegClassOrNull(CondReg))
2217     MRI->setRegClass(CondReg, ConstrainRC);
2218 
2219   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
2220     .addReg(CondReg);
2221   BuildMI(*BB, &I, DL, TII.get(BrOpcode))
2222     .addMBB(I.getOperand(1).getMBB());
2223 
2224   I.eraseFromParent();
2225   return true;
2226 }
2227 
2228 bool AMDGPUInstructionSelector::selectG_FRAME_INDEX_GLOBAL_VALUE(
2229   MachineInstr &I) const {
2230   Register DstReg = I.getOperand(0).getReg();
2231   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2232   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2233   I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
2234   if (IsVGPR)
2235     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
2236 
2237   return RBI.constrainGenericRegister(
2238     DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
2239 }
2240 
2241 bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
2242   Register MaskReg = I.getOperand(2).getReg();
2243   Optional<int64_t> MaskVal = getConstantVRegVal(MaskReg, *MRI);
2244   // TODO: Implement arbitrary cases
2245   if (!MaskVal || !isShiftedMask_64(*MaskVal))
2246     return false;
2247 
2248   const uint64_t Mask = *MaskVal;
2249 
2250   MachineBasicBlock *BB = I.getParent();
2251 
2252   Register DstReg = I.getOperand(0).getReg();
2253   Register SrcReg = I.getOperand(1).getReg();
2254 
2255   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2256   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2257   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2258   unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
2259   unsigned MovOpc = IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
2260   const TargetRegisterClass &RegRC
2261     = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2262 
2263   LLT Ty = MRI->getType(DstReg);
2264 
2265   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB,
2266                                                                   *MRI);
2267   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB,
2268                                                                   *MRI);
2269   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2270       !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
2271     return false;
2272 
2273   const DebugLoc &DL = I.getDebugLoc();
2274   Register ImmReg = MRI->createVirtualRegister(&RegRC);
2275   BuildMI(*BB, &I, DL, TII.get(MovOpc), ImmReg)
2276     .addImm(Mask);
2277 
2278   if (Ty.getSizeInBits() == 32) {
2279     BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
2280       .addReg(SrcReg)
2281       .addReg(ImmReg);
2282     I.eraseFromParent();
2283     return true;
2284   }
2285 
2286   Register HiReg = MRI->createVirtualRegister(&RegRC);
2287   Register LoReg = MRI->createVirtualRegister(&RegRC);
2288   Register MaskLo = MRI->createVirtualRegister(&RegRC);
2289 
2290   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
2291     .addReg(SrcReg, 0, AMDGPU::sub0);
2292   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
2293     .addReg(SrcReg, 0, AMDGPU::sub1);
2294 
2295   BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskLo)
2296     .addReg(LoReg)
2297     .addReg(ImmReg);
2298   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2299     .addReg(MaskLo)
2300     .addImm(AMDGPU::sub0)
2301     .addReg(HiReg)
2302     .addImm(AMDGPU::sub1);
2303   I.eraseFromParent();
2304   return true;
2305 }
2306 
2307 /// Return the register to use for the index value, and the subregister to use
2308 /// for the indirectly accessed register.
2309 static std::pair<Register, unsigned>
2310 computeIndirectRegIndex(MachineRegisterInfo &MRI,
2311                         const SIRegisterInfo &TRI,
2312                         const TargetRegisterClass *SuperRC,
2313                         Register IdxReg,
2314                         unsigned EltSize) {
2315   Register IdxBaseReg;
2316   int Offset;
2317   MachineInstr *Unused;
2318 
2319   std::tie(IdxBaseReg, Offset, Unused)
2320     = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg);
2321   if (IdxBaseReg == AMDGPU::NoRegister) {
2322     // This will happen if the index is a known constant. This should ordinarily
2323     // be legalized out, but handle it as a register just in case.
2324     assert(Offset == 0);
2325     IdxBaseReg = IdxReg;
2326   }
2327 
2328   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);
2329 
2330   // Skip out of bounds offsets, or else we would end up using an undefined
2331   // register.
2332   if (static_cast<unsigned>(Offset) >= SubRegs.size())
2333     return std::make_pair(IdxReg, SubRegs[0]);
2334   return std::make_pair(IdxBaseReg, SubRegs[Offset]);
2335 }
2336 
2337 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
2338   MachineInstr &MI) const {
2339   Register DstReg = MI.getOperand(0).getReg();
2340   Register SrcReg = MI.getOperand(1).getReg();
2341   Register IdxReg = MI.getOperand(2).getReg();
2342 
2343   LLT DstTy = MRI->getType(DstReg);
2344   LLT SrcTy = MRI->getType(SrcReg);
2345 
2346   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2347   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2348   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2349 
2350   // The index must be scalar. If it wasn't RegBankSelect should have moved this
2351   // into a waterfall loop.
2352   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2353     return false;
2354 
2355   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB,
2356                                                                   *MRI);
2357   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(DstTy, *DstRB,
2358                                                                   *MRI);
2359   if (!SrcRC || !DstRC)
2360     return false;
2361   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2362       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2363       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2364     return false;
2365 
2366   MachineBasicBlock *BB = MI.getParent();
2367   const DebugLoc &DL = MI.getDebugLoc();
2368   const bool Is64 = DstTy.getSizeInBits() == 64;
2369 
2370   unsigned SubReg;
2371   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg,
2372                                                      DstTy.getSizeInBits() / 8);
2373 
2374   if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
2375     if (DstTy.getSizeInBits() != 32 && !Is64)
2376       return false;
2377 
2378     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2379       .addReg(IdxReg);
2380 
2381     unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
2382     BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
2383       .addReg(SrcReg, 0, SubReg)
2384       .addReg(SrcReg, RegState::Implicit);
2385     MI.eraseFromParent();
2386     return true;
2387   }
2388 
2389   if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
2390     return false;
2391 
2392   if (!STI.useVGPRIndexMode()) {
2393     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2394       .addReg(IdxReg);
2395     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
2396       .addReg(SrcReg, RegState::Undef, SubReg)
2397       .addReg(SrcReg, RegState::Implicit);
2398     MI.eraseFromParent();
2399     return true;
2400   }
2401 
2402   BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_ON))
2403     .addReg(IdxReg)
2404     .addImm(AMDGPU::VGPRIndexMode::SRC0_ENABLE);
2405   BuildMI(*BB, MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), DstReg)
2406     .addReg(SrcReg, RegState::Undef, SubReg)
2407     .addReg(SrcReg, RegState::Implicit)
2408     .addReg(AMDGPU::M0, RegState::Implicit);
2409   BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_OFF));
2410 
2411   MI.eraseFromParent();
2412   return true;
2413 }
2414 
2415 // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
2416 bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
2417   MachineInstr &MI) const {
2418   Register DstReg = MI.getOperand(0).getReg();
2419   Register VecReg = MI.getOperand(1).getReg();
2420   Register ValReg = MI.getOperand(2).getReg();
2421   Register IdxReg = MI.getOperand(3).getReg();
2422 
2423   LLT VecTy = MRI->getType(DstReg);
2424   LLT ValTy = MRI->getType(ValReg);
2425   unsigned VecSize = VecTy.getSizeInBits();
2426   unsigned ValSize = ValTy.getSizeInBits();
2427 
2428   const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
2429   const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
2430   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2431 
2432   assert(VecTy.getElementType() == ValTy);
2433 
2434   // The index must be scalar. If it wasn't RegBankSelect should have moved this
2435   // into a waterfall loop.
2436   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2437     return false;
2438 
2439   const TargetRegisterClass *VecRC = TRI.getRegClassForTypeOnBank(VecTy, *VecRB,
2440                                                                   *MRI);
2441   const TargetRegisterClass *ValRC = TRI.getRegClassForTypeOnBank(ValTy, *ValRB,
2442                                                                   *MRI);
2443 
2444   if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
2445       !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
2446       !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
2447       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2448     return false;
2449 
2450   if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
2451     return false;
2452 
2453   unsigned SubReg;
2454   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg,
2455                                                      ValSize / 8);
2456 
2457   const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
2458                          STI.useVGPRIndexMode();
2459 
2460   MachineBasicBlock *BB = MI.getParent();
2461   const DebugLoc &DL = MI.getDebugLoc();
2462 
2463   if (IndexMode) {
2464     BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_ON))
2465       .addReg(IdxReg)
2466       .addImm(AMDGPU::VGPRIndexMode::DST_ENABLE);
2467   } else {
2468     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2469       .addReg(IdxReg);
2470   }
2471 
2472   const MCInstrDesc &RegWriteOp
2473     = TII.getIndirectRegWritePseudo(VecSize, ValSize,
2474                                     VecRB->getID() == AMDGPU::SGPRRegBankID);
2475   BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
2476     .addReg(VecReg)
2477     .addReg(ValReg)
2478     .addImm(SubReg);
2479 
2480   if (IndexMode)
2481     BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_OFF));
2482 
2483   MI.eraseFromParent();
2484   return true;
2485 }
2486 
2487 static bool isZeroOrUndef(int X) {
2488   return X == 0 || X == -1;
2489 }
2490 
2491 static bool isOneOrUndef(int X) {
2492   return X == 1 || X == -1;
2493 }
2494 
2495 static bool isZeroOrOneOrUndef(int X) {
2496   return X == 0 || X == 1 || X == -1;
2497 }
2498 
2499 // Normalize a VOP3P shuffle mask to refer to the low/high half of a single
2500 // 32-bit register.
2501 static Register normalizeVOP3PMask(int NewMask[2], Register Src0, Register Src1,
2502                                    ArrayRef<int> Mask) {
2503   NewMask[0] = Mask[0];
2504   NewMask[1] = Mask[1];
2505   if (isZeroOrOneOrUndef(Mask[0]) && isZeroOrOneOrUndef(Mask[1]))
2506     return Src0;
2507 
2508   assert(NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1);
2509   assert(NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1);
2510 
2511   // Shift the mask inputs to be 0/1;
2512   NewMask[0] = NewMask[0] == -1 ? -1 : NewMask[0] - 2;
2513   NewMask[1] = NewMask[1] == -1 ? -1 : NewMask[1] - 2;
2514   return Src1;
2515 }
2516 
2517 // This is only legal with VOP3P instructions as an aid to op_sel matching.
2518 bool AMDGPUInstructionSelector::selectG_SHUFFLE_VECTOR(
2519   MachineInstr &MI) const {
2520   Register DstReg = MI.getOperand(0).getReg();
2521   Register Src0Reg = MI.getOperand(1).getReg();
2522   Register Src1Reg = MI.getOperand(2).getReg();
2523   ArrayRef<int> ShufMask = MI.getOperand(3).getShuffleMask();
2524 
2525   const LLT V2S16 = LLT::vector(2, 16);
2526   if (MRI->getType(DstReg) != V2S16 || MRI->getType(Src0Reg) != V2S16)
2527     return false;
2528 
2529   if (!AMDGPU::isLegalVOP3PShuffleMask(ShufMask))
2530     return false;
2531 
2532   assert(ShufMask.size() == 2);
2533   assert(STI.hasSDWA() && "no target has VOP3P but not SDWA");
2534 
2535   MachineBasicBlock *MBB = MI.getParent();
2536   const DebugLoc &DL = MI.getDebugLoc();
2537 
2538   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2539   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
2540   const TargetRegisterClass &RC = IsVALU ?
2541     AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2542 
2543   // Handle the degenerate case which should have folded out.
2544   if (ShufMask[0] == -1 && ShufMask[1] == -1) {
2545     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::IMPLICIT_DEF), DstReg);
2546 
2547     MI.eraseFromParent();
2548     return RBI.constrainGenericRegister(DstReg, RC, *MRI);
2549   }
2550 
2551   // A legal VOP3P mask only reads one of the sources.
2552   int Mask[2];
2553   Register SrcVec = normalizeVOP3PMask(Mask, Src0Reg, Src1Reg, ShufMask);
2554 
2555   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI) ||
2556       !RBI.constrainGenericRegister(SrcVec, RC, *MRI))
2557     return false;
2558 
2559   // TODO: This also should have been folded out
2560   if (isZeroOrUndef(Mask[0]) && isOneOrUndef(Mask[1])) {
2561     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::COPY), DstReg)
2562       .addReg(SrcVec);
2563 
2564     MI.eraseFromParent();
2565     return true;
2566   }
2567 
2568   if (Mask[0] == 1 && Mask[1] == -1) {
2569     if (IsVALU) {
2570       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
2571         .addImm(16)
2572         .addReg(SrcVec);
2573     } else {
2574       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
2575         .addReg(SrcVec)
2576         .addImm(16);
2577     }
2578   } else if (Mask[0] == -1 && Mask[1] == 0) {
2579     if (IsVALU) {
2580       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), DstReg)
2581         .addImm(16)
2582         .addReg(SrcVec);
2583     } else {
2584       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHL_B32), DstReg)
2585         .addReg(SrcVec)
2586         .addImm(16);
2587     }
2588   } else if (Mask[0] == 0 && Mask[1] == 0) {
2589     if (IsVALU) {
2590       // Write low half of the register into the high half.
2591       MachineInstr *MovSDWA =
2592         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2593         .addImm(0)                             // $src0_modifiers
2594         .addReg(SrcVec)                        // $src0
2595         .addImm(0)                             // $clamp
2596         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
2597         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2598         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
2599         .addReg(SrcVec, RegState::Implicit);
2600       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2601     } else {
2602       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2603         .addReg(SrcVec)
2604         .addReg(SrcVec);
2605     }
2606   } else if (Mask[0] == 1 && Mask[1] == 1) {
2607     if (IsVALU) {
2608       // Write high half of the register into the low half.
2609       MachineInstr *MovSDWA =
2610         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2611         .addImm(0)                             // $src0_modifiers
2612         .addReg(SrcVec)                        // $src0
2613         .addImm(0)                             // $clamp
2614         .addImm(AMDGPU::SDWA::WORD_0)          // $dst_sel
2615         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2616         .addImm(AMDGPU::SDWA::WORD_1)          // $src0_sel
2617         .addReg(SrcVec, RegState::Implicit);
2618       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2619     } else {
2620       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HH_B32_B16), DstReg)
2621         .addReg(SrcVec)
2622         .addReg(SrcVec);
2623     }
2624   } else if (Mask[0] == 1 && Mask[1] == 0) {
2625     if (IsVALU) {
2626       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_ALIGNBIT_B32), DstReg)
2627         .addReg(SrcVec)
2628         .addReg(SrcVec)
2629         .addImm(16);
2630     } else {
2631       Register TmpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2632       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), TmpReg)
2633         .addReg(SrcVec)
2634         .addImm(16);
2635       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2636         .addReg(TmpReg)
2637         .addReg(SrcVec);
2638     }
2639   } else
2640     llvm_unreachable("all shuffle masks should be handled");
2641 
2642   MI.eraseFromParent();
2643   return true;
2644 }
2645 
2646 bool AMDGPUInstructionSelector::select(MachineInstr &I) {
2647   if (I.isPHI())
2648     return selectPHI(I);
2649 
2650   if (!I.isPreISelOpcode()) {
2651     if (I.isCopy())
2652       return selectCOPY(I);
2653     return true;
2654   }
2655 
2656   switch (I.getOpcode()) {
2657   case TargetOpcode::G_AND:
2658   case TargetOpcode::G_OR:
2659   case TargetOpcode::G_XOR:
2660     if (selectImpl(I, *CoverageInfo))
2661       return true;
2662     return selectG_AND_OR_XOR(I);
2663   case TargetOpcode::G_ADD:
2664   case TargetOpcode::G_SUB:
2665     if (selectImpl(I, *CoverageInfo))
2666       return true;
2667     return selectG_ADD_SUB(I);
2668   case TargetOpcode::G_UADDO:
2669   case TargetOpcode::G_USUBO:
2670   case TargetOpcode::G_UADDE:
2671   case TargetOpcode::G_USUBE:
2672     return selectG_UADDO_USUBO_UADDE_USUBE(I);
2673   case TargetOpcode::G_INTTOPTR:
2674   case TargetOpcode::G_BITCAST:
2675   case TargetOpcode::G_PTRTOINT:
2676     return selectCOPY(I);
2677   case TargetOpcode::G_CONSTANT:
2678   case TargetOpcode::G_FCONSTANT:
2679     return selectG_CONSTANT(I);
2680   case TargetOpcode::G_FNEG:
2681     if (selectImpl(I, *CoverageInfo))
2682       return true;
2683     return selectG_FNEG(I);
2684   case TargetOpcode::G_FABS:
2685     if (selectImpl(I, *CoverageInfo))
2686       return true;
2687     return selectG_FABS(I);
2688   case TargetOpcode::G_EXTRACT:
2689     return selectG_EXTRACT(I);
2690   case TargetOpcode::G_MERGE_VALUES:
2691   case TargetOpcode::G_BUILD_VECTOR:
2692   case TargetOpcode::G_CONCAT_VECTORS:
2693     return selectG_MERGE_VALUES(I);
2694   case TargetOpcode::G_UNMERGE_VALUES:
2695     return selectG_UNMERGE_VALUES(I);
2696   case TargetOpcode::G_BUILD_VECTOR_TRUNC:
2697     return selectG_BUILD_VECTOR_TRUNC(I);
2698   case TargetOpcode::G_PTR_ADD:
2699     return selectG_PTR_ADD(I);
2700   case TargetOpcode::G_IMPLICIT_DEF:
2701     return selectG_IMPLICIT_DEF(I);
2702   case TargetOpcode::G_INSERT:
2703     return selectG_INSERT(I);
2704   case TargetOpcode::G_INTRINSIC:
2705     return selectG_INTRINSIC(I);
2706   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
2707     return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
2708   case TargetOpcode::G_ICMP:
2709     if (selectG_ICMP(I))
2710       return true;
2711     return selectImpl(I, *CoverageInfo);
2712   case TargetOpcode::G_LOAD:
2713   case TargetOpcode::G_ATOMIC_CMPXCHG:
2714   case TargetOpcode::G_ATOMICRMW_XCHG:
2715   case TargetOpcode::G_ATOMICRMW_ADD:
2716   case TargetOpcode::G_ATOMICRMW_SUB:
2717   case TargetOpcode::G_ATOMICRMW_AND:
2718   case TargetOpcode::G_ATOMICRMW_OR:
2719   case TargetOpcode::G_ATOMICRMW_XOR:
2720   case TargetOpcode::G_ATOMICRMW_MIN:
2721   case TargetOpcode::G_ATOMICRMW_MAX:
2722   case TargetOpcode::G_ATOMICRMW_UMIN:
2723   case TargetOpcode::G_ATOMICRMW_UMAX:
2724   case TargetOpcode::G_ATOMICRMW_FADD:
2725     return selectG_LOAD_ATOMICRMW(I);
2726   case AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG:
2727     return selectG_AMDGPU_ATOMIC_CMPXCHG(I);
2728   case TargetOpcode::G_SELECT:
2729     return selectG_SELECT(I);
2730   case TargetOpcode::G_STORE:
2731     return selectG_STORE(I);
2732   case TargetOpcode::G_TRUNC:
2733     return selectG_TRUNC(I);
2734   case TargetOpcode::G_SEXT:
2735   case TargetOpcode::G_ZEXT:
2736   case TargetOpcode::G_ANYEXT:
2737   case TargetOpcode::G_SEXT_INREG:
2738     if (selectImpl(I, *CoverageInfo))
2739       return true;
2740     return selectG_SZA_EXT(I);
2741   case TargetOpcode::G_BRCOND:
2742     return selectG_BRCOND(I);
2743   case TargetOpcode::G_FRAME_INDEX:
2744   case TargetOpcode::G_GLOBAL_VALUE:
2745     return selectG_FRAME_INDEX_GLOBAL_VALUE(I);
2746   case TargetOpcode::G_PTRMASK:
2747     return selectG_PTRMASK(I);
2748   case TargetOpcode::G_EXTRACT_VECTOR_ELT:
2749     return selectG_EXTRACT_VECTOR_ELT(I);
2750   case TargetOpcode::G_INSERT_VECTOR_ELT:
2751     return selectG_INSERT_VECTOR_ELT(I);
2752   case TargetOpcode::G_SHUFFLE_VECTOR:
2753     return selectG_SHUFFLE_VECTOR(I);
2754   case AMDGPU::G_AMDGPU_ATOMIC_INC:
2755   case AMDGPU::G_AMDGPU_ATOMIC_DEC:
2756     initM0(I);
2757     return selectImpl(I, *CoverageInfo);
2758   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
2759   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE: {
2760     const AMDGPU::ImageDimIntrinsicInfo *Intr
2761       = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID());
2762     assert(Intr && "not an image intrinsic with image pseudo");
2763     return selectImageIntrinsic(I, Intr);
2764   }
2765   default:
2766     return selectImpl(I, *CoverageInfo);
2767   }
2768   return false;
2769 }
2770 
2771 InstructionSelector::ComplexRendererFns
2772 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
2773   return {{
2774       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
2775   }};
2776 
2777 }
2778 
2779 std::pair<Register, unsigned>
2780 AMDGPUInstructionSelector::selectVOP3ModsImpl(MachineOperand &Root) const {
2781   Register Src = Root.getReg();
2782   Register OrigSrc = Src;
2783   unsigned Mods = 0;
2784   MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
2785 
2786   if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
2787     Src = MI->getOperand(1).getReg();
2788     Mods |= SISrcMods::NEG;
2789     MI = getDefIgnoringCopies(Src, *MRI);
2790   }
2791 
2792   if (MI && MI->getOpcode() == AMDGPU::G_FABS) {
2793     Src = MI->getOperand(1).getReg();
2794     Mods |= SISrcMods::ABS;
2795   }
2796 
2797   if (Mods != 0 &&
2798       RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) {
2799     MachineInstr *UseMI = Root.getParent();
2800 
2801     // If we looked through copies to find source modifiers on an SGPR operand,
2802     // we now have an SGPR register source. To avoid potentially violating the
2803     // constant bus restriction, we need to insert a copy to a VGPR.
2804     Register VGPRSrc = MRI->cloneVirtualRegister(OrigSrc);
2805     BuildMI(*UseMI->getParent(), UseMI, UseMI->getDebugLoc(),
2806             TII.get(AMDGPU::COPY), VGPRSrc)
2807       .addReg(Src);
2808     Src = VGPRSrc;
2809   }
2810 
2811   return std::make_pair(Src, Mods);
2812 }
2813 
2814 ///
2815 /// This will select either an SGPR or VGPR operand and will save us from
2816 /// having to write an extra tablegen pattern.
2817 InstructionSelector::ComplexRendererFns
2818 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
2819   return {{
2820       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
2821   }};
2822 }
2823 
2824 InstructionSelector::ComplexRendererFns
2825 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
2826   Register Src;
2827   unsigned Mods;
2828   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
2829 
2830   return {{
2831       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
2832       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
2833       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
2834       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
2835   }};
2836 }
2837 
2838 InstructionSelector::ComplexRendererFns
2839 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
2840   return {{
2841       [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
2842       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
2843       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
2844   }};
2845 }
2846 
2847 InstructionSelector::ComplexRendererFns
2848 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
2849   Register Src;
2850   unsigned Mods;
2851   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
2852 
2853   return {{
2854       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
2855       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
2856   }};
2857 }
2858 
2859 InstructionSelector::ComplexRendererFns
2860 AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
2861   Register Reg = Root.getReg();
2862   const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI);
2863   if (Def && (Def->getOpcode() == AMDGPU::G_FNEG ||
2864               Def->getOpcode() == AMDGPU::G_FABS))
2865     return {};
2866   return {{
2867       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
2868   }};
2869 }
2870 
2871 std::pair<Register, unsigned>
2872 AMDGPUInstructionSelector::selectVOP3PModsImpl(
2873   Register Src, const MachineRegisterInfo &MRI) const {
2874   unsigned Mods = 0;
2875   MachineInstr *MI = MRI.getVRegDef(Src);
2876 
2877   if (MI && MI->getOpcode() == AMDGPU::G_FNEG &&
2878       // It's possible to see an f32 fneg here, but unlikely.
2879       // TODO: Treat f32 fneg as only high bit.
2880       MRI.getType(Src) == LLT::vector(2, 16)) {
2881     Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
2882     Src = MI->getOperand(1).getReg();
2883     MI = MRI.getVRegDef(Src);
2884   }
2885 
2886   // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.
2887 
2888   // Packed instructions do not have abs modifiers.
2889   Mods |= SISrcMods::OP_SEL_1;
2890 
2891   return std::make_pair(Src, Mods);
2892 }
2893 
2894 InstructionSelector::ComplexRendererFns
2895 AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
2896   MachineRegisterInfo &MRI
2897     = Root.getParent()->getParent()->getParent()->getRegInfo();
2898 
2899   Register Src;
2900   unsigned Mods;
2901   std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);
2902 
2903   return {{
2904       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
2905       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
2906   }};
2907 }
2908 
2909 InstructionSelector::ComplexRendererFns
2910 AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
2911   Register Src;
2912   unsigned Mods;
2913   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
2914   if (!TM.Options.NoNaNsFPMath && !isKnownNeverNaN(Src, *MRI))
2915     return None;
2916 
2917   return {{
2918       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
2919       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
2920   }};
2921 }
2922 
2923 InstructionSelector::ComplexRendererFns
2924 AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
2925   // FIXME: Handle op_sel
2926   return {{
2927       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
2928       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
2929   }};
2930 }
2931 
2932 InstructionSelector::ComplexRendererFns
2933 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
2934   SmallVector<GEPInfo, 4> AddrInfo;
2935   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
2936 
2937   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
2938     return None;
2939 
2940   const GEPInfo &GEPInfo = AddrInfo[0];
2941   Optional<int64_t> EncodedImm =
2942       AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm, false);
2943   if (!EncodedImm)
2944     return None;
2945 
2946   unsigned PtrReg = GEPInfo.SgprParts[0];
2947   return {{
2948     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
2949     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
2950   }};
2951 }
2952 
2953 InstructionSelector::ComplexRendererFns
2954 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
2955   SmallVector<GEPInfo, 4> AddrInfo;
2956   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
2957 
2958   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
2959     return None;
2960 
2961   const GEPInfo &GEPInfo = AddrInfo[0];
2962   Register PtrReg = GEPInfo.SgprParts[0];
2963   Optional<int64_t> EncodedImm =
2964       AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
2965   if (!EncodedImm)
2966     return None;
2967 
2968   return {{
2969     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
2970     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
2971   }};
2972 }
2973 
2974 InstructionSelector::ComplexRendererFns
2975 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
2976   MachineInstr *MI = Root.getParent();
2977   MachineBasicBlock *MBB = MI->getParent();
2978 
2979   SmallVector<GEPInfo, 4> AddrInfo;
2980   getAddrModeInfo(*MI, *MRI, AddrInfo);
2981 
2982   // FIXME: We should shrink the GEP if the offset is known to be <= 32-bits,
2983   // then we can select all ptr + 32-bit offsets not just immediate offsets.
2984   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
2985     return None;
2986 
2987   const GEPInfo &GEPInfo = AddrInfo[0];
2988   // SGPR offset is unsigned.
2989   if (!GEPInfo.Imm || GEPInfo.Imm < 0 || !isUInt<32>(GEPInfo.Imm))
2990     return None;
2991 
2992   // If we make it this far we have a load with an 32-bit immediate offset.
2993   // It is OK to select this using a sgpr offset, because we have already
2994   // failed trying to select this load into one of the _IMM variants since
2995   // the _IMM Patterns are considered before the _SGPR patterns.
2996   Register PtrReg = GEPInfo.SgprParts[0];
2997   Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2998   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
2999           .addImm(GEPInfo.Imm);
3000   return {{
3001     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3002     [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
3003   }};
3004 }
3005 
3006 template <bool Signed>
3007 InstructionSelector::ComplexRendererFns
3008 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root) const {
3009   MachineInstr *MI = Root.getParent();
3010 
3011   InstructionSelector::ComplexRendererFns Default = {{
3012       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
3013       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },  // offset
3014       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // slc
3015     }};
3016 
3017   if (!STI.hasFlatInstOffsets())
3018     return Default;
3019 
3020   const MachineInstr *OpDef = MRI->getVRegDef(Root.getReg());
3021   if (!OpDef || OpDef->getOpcode() != AMDGPU::G_PTR_ADD)
3022     return Default;
3023 
3024   Optional<int64_t> Offset =
3025     getConstantVRegVal(OpDef->getOperand(2).getReg(), *MRI);
3026   if (!Offset.hasValue())
3027     return Default;
3028 
3029   unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
3030   if (!TII.isLegalFLATOffset(Offset.getValue(), AddrSpace, Signed))
3031     return Default;
3032 
3033   Register BasePtr = OpDef->getOperand(1).getReg();
3034 
3035   return {{
3036       [=](MachineInstrBuilder &MIB) { MIB.addReg(BasePtr); },
3037       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset.getValue()); },
3038       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // slc
3039     }};
3040 }
3041 
3042 InstructionSelector::ComplexRendererFns
3043 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
3044   return selectFlatOffsetImpl<false>(Root);
3045 }
3046 
3047 InstructionSelector::ComplexRendererFns
3048 AMDGPUInstructionSelector::selectFlatOffsetSigned(MachineOperand &Root) const {
3049   return selectFlatOffsetImpl<true>(Root);
3050 }
3051 
3052 static bool isStackPtrRelative(const MachinePointerInfo &PtrInfo) {
3053   auto PSV = PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
3054   return PSV && PSV->isStack();
3055 }
3056 
3057 InstructionSelector::ComplexRendererFns
3058 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
3059   MachineInstr *MI = Root.getParent();
3060   MachineBasicBlock *MBB = MI->getParent();
3061   MachineFunction *MF = MBB->getParent();
3062   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3063 
3064   int64_t Offset = 0;
3065   if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) &&
3066       Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) {
3067     Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3068 
3069     // TODO: Should this be inside the render function? The iterator seems to
3070     // move.
3071     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3072             HighBits)
3073       .addImm(Offset & ~4095);
3074 
3075     return {{[=](MachineInstrBuilder &MIB) { // rsrc
3076                MIB.addReg(Info->getScratchRSrcReg());
3077              },
3078              [=](MachineInstrBuilder &MIB) { // vaddr
3079                MIB.addReg(HighBits);
3080              },
3081              [=](MachineInstrBuilder &MIB) { // soffset
3082                const MachineMemOperand *MMO = *MI->memoperands_begin();
3083                const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
3084 
3085                if (isStackPtrRelative(PtrInfo))
3086                  MIB.addReg(Info->getStackPtrOffsetReg());
3087                else
3088                  MIB.addImm(0);
3089              },
3090              [=](MachineInstrBuilder &MIB) { // offset
3091                MIB.addImm(Offset & 4095);
3092              }}};
3093   }
3094 
3095   assert(Offset == 0 || Offset == -1);
3096 
3097   // Try to fold a frame index directly into the MUBUF vaddr field, and any
3098   // offsets.
3099   Optional<int> FI;
3100   Register VAddr = Root.getReg();
3101   if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
3102     if (isBaseWithConstantOffset(Root, *MRI)) {
3103       const MachineOperand &LHS = RootDef->getOperand(1);
3104       const MachineOperand &RHS = RootDef->getOperand(2);
3105       const MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
3106       const MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());
3107       if (LHSDef && RHSDef) {
3108         int64_t PossibleOffset =
3109             RHSDef->getOperand(1).getCImm()->getSExtValue();
3110         if (SIInstrInfo::isLegalMUBUFImmOffset(PossibleOffset) &&
3111             (!STI.privateMemoryResourceIsRangeChecked() ||
3112              KnownBits->signBitIsZero(LHS.getReg()))) {
3113           if (LHSDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
3114             FI = LHSDef->getOperand(1).getIndex();
3115           else
3116             VAddr = LHS.getReg();
3117           Offset = PossibleOffset;
3118         }
3119       }
3120     } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
3121       FI = RootDef->getOperand(1).getIndex();
3122     }
3123   }
3124 
3125   return {{[=](MachineInstrBuilder &MIB) { // rsrc
3126              MIB.addReg(Info->getScratchRSrcReg());
3127            },
3128            [=](MachineInstrBuilder &MIB) { // vaddr
3129              if (FI.hasValue())
3130                MIB.addFrameIndex(FI.getValue());
3131              else
3132                MIB.addReg(VAddr);
3133            },
3134            [=](MachineInstrBuilder &MIB) { // soffset
3135              // If we don't know this private access is a local stack object, it
3136              // needs to be relative to the entry point's scratch wave offset.
3137              // TODO: Should split large offsets that don't fit like above.
3138              // TODO: Don't use scratch wave offset just because the offset
3139              // didn't fit.
3140              if (!Info->isEntryFunction() && FI.hasValue())
3141                MIB.addReg(Info->getStackPtrOffsetReg());
3142              else
3143                MIB.addImm(0);
3144            },
3145            [=](MachineInstrBuilder &MIB) { // offset
3146              MIB.addImm(Offset);
3147            }}};
3148 }
3149 
3150 bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
3151                                                 int64_t Offset,
3152                                                 unsigned OffsetBits) const {
3153   if ((OffsetBits == 16 && !isUInt<16>(Offset)) ||
3154       (OffsetBits == 8 && !isUInt<8>(Offset)))
3155     return false;
3156 
3157   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
3158     return true;
3159 
3160   // On Southern Islands instruction with a negative base value and an offset
3161   // don't seem to work.
3162   return KnownBits->signBitIsZero(Base);
3163 }
3164 
3165 InstructionSelector::ComplexRendererFns
3166 AMDGPUInstructionSelector::selectMUBUFScratchOffset(
3167     MachineOperand &Root) const {
3168   MachineInstr *MI = Root.getParent();
3169   MachineBasicBlock *MBB = MI->getParent();
3170 
3171   int64_t Offset = 0;
3172   if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
3173       !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
3174     return {};
3175 
3176   const MachineFunction *MF = MBB->getParent();
3177   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3178   const MachineMemOperand *MMO = *MI->memoperands_begin();
3179   const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
3180 
3181   return {{
3182       [=](MachineInstrBuilder &MIB) { // rsrc
3183         MIB.addReg(Info->getScratchRSrcReg());
3184       },
3185       [=](MachineInstrBuilder &MIB) { // soffset
3186         if (isStackPtrRelative(PtrInfo))
3187           MIB.addReg(Info->getStackPtrOffsetReg());
3188         else
3189           MIB.addImm(0);
3190       },
3191       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
3192   }};
3193 }
3194 
3195 std::pair<Register, unsigned>
3196 AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
3197   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
3198   if (!RootDef)
3199     return std::make_pair(Root.getReg(), 0);
3200 
3201   int64_t ConstAddr = 0;
3202 
3203   Register PtrBase;
3204   int64_t Offset;
3205   std::tie(PtrBase, Offset) =
3206     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3207 
3208   if (Offset) {
3209     if (isDSOffsetLegal(PtrBase, Offset, 16)) {
3210       // (add n0, c0)
3211       return std::make_pair(PtrBase, Offset);
3212     }
3213   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
3214     // TODO
3215 
3216 
3217   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
3218     // TODO
3219 
3220   }
3221 
3222   return std::make_pair(Root.getReg(), 0);
3223 }
3224 
3225 InstructionSelector::ComplexRendererFns
3226 AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
3227   Register Reg;
3228   unsigned Offset;
3229   std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
3230   return {{
3231       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3232       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
3233     }};
3234 }
3235 
3236 InstructionSelector::ComplexRendererFns
3237 AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
3238   Register Reg;
3239   unsigned Offset;
3240   std::tie(Reg, Offset) = selectDS64Bit4ByteAlignedImpl(Root);
3241   return {{
3242       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3243       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
3244       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); }
3245     }};
3246 }
3247 
3248 std::pair<Register, unsigned>
3249 AMDGPUInstructionSelector::selectDS64Bit4ByteAlignedImpl(MachineOperand &Root) const {
3250   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
3251   if (!RootDef)
3252     return std::make_pair(Root.getReg(), 0);
3253 
3254   int64_t ConstAddr = 0;
3255 
3256   Register PtrBase;
3257   int64_t Offset;
3258   std::tie(PtrBase, Offset) =
3259     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3260 
3261   if (Offset) {
3262     int64_t DWordOffset0 = Offset / 4;
3263     int64_t DWordOffset1 = DWordOffset0 + 1;
3264     if (isDSOffsetLegal(PtrBase, DWordOffset1, 8)) {
3265       // (add n0, c0)
3266       return std::make_pair(PtrBase, DWordOffset0);
3267     }
3268   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
3269     // TODO
3270 
3271   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
3272     // TODO
3273 
3274   }
3275 
3276   return std::make_pair(Root.getReg(), 0);
3277 }
3278 
3279 /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
3280 /// the base value with the constant offset. There may be intervening copies
3281 /// between \p Root and the identified constant. Returns \p Root, 0 if this does
3282 /// not match the pattern.
3283 std::pair<Register, int64_t>
3284 AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
3285   Register Root, const MachineRegisterInfo &MRI) const {
3286   MachineInstr *RootI = MRI.getVRegDef(Root);
3287   if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
3288     return {Root, 0};
3289 
3290   MachineOperand &RHS = RootI->getOperand(2);
3291   Optional<ValueAndVReg> MaybeOffset
3292     = getConstantVRegValWithLookThrough(RHS.getReg(), MRI, true);
3293   if (!MaybeOffset)
3294     return {Root, 0};
3295   return {RootI->getOperand(1).getReg(), MaybeOffset->Value};
3296 }
3297 
3298 static void addZeroImm(MachineInstrBuilder &MIB) {
3299   MIB.addImm(0);
3300 }
3301 
3302 /// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p
3303 /// BasePtr is not valid, a null base pointer will be used.
3304 static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
3305                           uint32_t FormatLo, uint32_t FormatHi,
3306                           Register BasePtr) {
3307   Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
3308   Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
3309   Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
3310   Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
3311 
3312   B.buildInstr(AMDGPU::S_MOV_B32)
3313     .addDef(RSrc2)
3314     .addImm(FormatLo);
3315   B.buildInstr(AMDGPU::S_MOV_B32)
3316     .addDef(RSrc3)
3317     .addImm(FormatHi);
3318 
3319   // Build the half of the subregister with the constants before building the
3320   // full 128-bit register. If we are building multiple resource descriptors,
3321   // this will allow CSEing of the 2-component register.
3322   B.buildInstr(AMDGPU::REG_SEQUENCE)
3323     .addDef(RSrcHi)
3324     .addReg(RSrc2)
3325     .addImm(AMDGPU::sub0)
3326     .addReg(RSrc3)
3327     .addImm(AMDGPU::sub1);
3328 
3329   Register RSrcLo = BasePtr;
3330   if (!BasePtr) {
3331     RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
3332     B.buildInstr(AMDGPU::S_MOV_B64)
3333       .addDef(RSrcLo)
3334       .addImm(0);
3335   }
3336 
3337   B.buildInstr(AMDGPU::REG_SEQUENCE)
3338     .addDef(RSrc)
3339     .addReg(RSrcLo)
3340     .addImm(AMDGPU::sub0_sub1)
3341     .addReg(RSrcHi)
3342     .addImm(AMDGPU::sub2_sub3);
3343 
3344   return RSrc;
3345 }
3346 
3347 static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
3348                                 const SIInstrInfo &TII, Register BasePtr) {
3349   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
3350 
3351   // FIXME: Why are half the "default" bits ignored based on the addressing
3352   // mode?
3353   return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
3354 }
3355 
3356 static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
3357                                const SIInstrInfo &TII, Register BasePtr) {
3358   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
3359 
3360   // FIXME: Why are half the "default" bits ignored based on the addressing
3361   // mode?
3362   return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
3363 }
3364 
3365 AMDGPUInstructionSelector::MUBUFAddressData
3366 AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
3367   MUBUFAddressData Data;
3368   Data.N0 = Src;
3369 
3370   Register PtrBase;
3371   int64_t Offset;
3372 
3373   std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
3374   if (isUInt<32>(Offset)) {
3375     Data.N0 = PtrBase;
3376     Data.Offset = Offset;
3377   }
3378 
3379   if (MachineInstr *InputAdd
3380       = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
3381     Data.N2 = InputAdd->getOperand(1).getReg();
3382     Data.N3 = InputAdd->getOperand(2).getReg();
3383 
3384     // FIXME: Need to fix extra SGPR->VGPRcopies inserted
3385     // FIXME: Don't know this was defined by operand 0
3386     //
3387     // TODO: Remove this when we have copy folding optimizations after
3388     // RegBankSelect.
3389     Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
3390     Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
3391   }
3392 
3393   return Data;
3394 }
3395 
3396 /// Return if the addr64 mubuf mode should be used for the given address.
3397 bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
3398   // (ptr_add N2, N3) -> addr64, or
3399   // (ptr_add (ptr_add N2, N3), C1) -> addr64
3400   if (Addr.N2)
3401     return true;
3402 
3403   const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
3404   return N0Bank->getID() == AMDGPU::VGPRRegBankID;
3405 }
3406 
3407 /// Split an immediate offset \p ImmOffset depending on whether it fits in the
3408 /// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
3409 /// component.
3410 void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
3411   MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
3412   if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
3413     return;
3414 
3415   // Illegal offset, store it in soffset.
3416   SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3417   B.buildInstr(AMDGPU::S_MOV_B32)
3418     .addDef(SOffset)
3419     .addImm(ImmOffset);
3420   ImmOffset = 0;
3421 }
3422 
3423 bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
3424   MachineOperand &Root, Register &VAddr, Register &RSrcReg,
3425   Register &SOffset, int64_t &Offset) const {
3426   // FIXME: Predicates should stop this from reaching here.
3427   // addr64 bit was removed for volcanic islands.
3428   if (!STI.hasAddr64() || STI.useFlatForGlobal())
3429     return false;
3430 
3431   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
3432   if (!shouldUseAddr64(AddrData))
3433     return false;
3434 
3435   Register N0 = AddrData.N0;
3436   Register N2 = AddrData.N2;
3437   Register N3 = AddrData.N3;
3438   Offset = AddrData.Offset;
3439 
3440   // Base pointer for the SRD.
3441   Register SRDPtr;
3442 
3443   if (N2) {
3444     if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
3445       assert(N3);
3446       if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
3447         // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
3448         // addr64, and construct the default resource from a 0 address.
3449         VAddr = N0;
3450       } else {
3451         SRDPtr = N3;
3452         VAddr = N2;
3453       }
3454     } else {
3455       // N2 is not divergent.
3456       SRDPtr = N2;
3457       VAddr = N3;
3458     }
3459   } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
3460     // Use the default null pointer in the resource
3461     VAddr = N0;
3462   } else {
3463     // N0 -> offset, or
3464     // (N0 + C1) -> offset
3465     SRDPtr = N0;
3466   }
3467 
3468   MachineIRBuilder B(*Root.getParent());
3469   RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
3470   splitIllegalMUBUFOffset(B, SOffset, Offset);
3471   return true;
3472 }
3473 
3474 bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
3475   MachineOperand &Root, Register &RSrcReg, Register &SOffset,
3476   int64_t &Offset) const {
3477   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
3478   if (shouldUseAddr64(AddrData))
3479     return false;
3480 
3481   // N0 -> offset, or
3482   // (N0 + C1) -> offset
3483   Register SRDPtr = AddrData.N0;
3484   Offset = AddrData.Offset;
3485 
3486   // TODO: Look through extensions for 32-bit soffset.
3487   MachineIRBuilder B(*Root.getParent());
3488 
3489   RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
3490   splitIllegalMUBUFOffset(B, SOffset, Offset);
3491   return true;
3492 }
3493 
3494 InstructionSelector::ComplexRendererFns
3495 AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
3496   Register VAddr;
3497   Register RSrcReg;
3498   Register SOffset;
3499   int64_t Offset = 0;
3500 
3501   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
3502     return {};
3503 
3504   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
3505   // pattern.
3506   return {{
3507       [=](MachineInstrBuilder &MIB) {  // rsrc
3508         MIB.addReg(RSrcReg);
3509       },
3510       [=](MachineInstrBuilder &MIB) { // vaddr
3511         MIB.addReg(VAddr);
3512       },
3513       [=](MachineInstrBuilder &MIB) { // soffset
3514         if (SOffset)
3515           MIB.addReg(SOffset);
3516         else
3517           MIB.addImm(0);
3518       },
3519       [=](MachineInstrBuilder &MIB) { // offset
3520         MIB.addImm(Offset);
3521       },
3522       addZeroImm, //  glc
3523       addZeroImm, //  slc
3524       addZeroImm, //  tfe
3525       addZeroImm, //  dlc
3526       addZeroImm  //  swz
3527     }};
3528 }
3529 
3530 InstructionSelector::ComplexRendererFns
3531 AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
3532   Register RSrcReg;
3533   Register SOffset;
3534   int64_t Offset = 0;
3535 
3536   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
3537     return {};
3538 
3539   return {{
3540       [=](MachineInstrBuilder &MIB) {  // rsrc
3541         MIB.addReg(RSrcReg);
3542       },
3543       [=](MachineInstrBuilder &MIB) { // soffset
3544         if (SOffset)
3545           MIB.addReg(SOffset);
3546         else
3547           MIB.addImm(0);
3548       },
3549       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
3550       addZeroImm, //  glc
3551       addZeroImm, //  slc
3552       addZeroImm, //  tfe
3553       addZeroImm, //  dlc
3554       addZeroImm  //  swz
3555     }};
3556 }
3557 
3558 InstructionSelector::ComplexRendererFns
3559 AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const {
3560   Register VAddr;
3561   Register RSrcReg;
3562   Register SOffset;
3563   int64_t Offset = 0;
3564 
3565   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
3566     return {};
3567 
3568   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
3569   // pattern.
3570   return {{
3571       [=](MachineInstrBuilder &MIB) {  // rsrc
3572         MIB.addReg(RSrcReg);
3573       },
3574       [=](MachineInstrBuilder &MIB) { // vaddr
3575         MIB.addReg(VAddr);
3576       },
3577       [=](MachineInstrBuilder &MIB) { // soffset
3578         if (SOffset)
3579           MIB.addReg(SOffset);
3580         else
3581           MIB.addImm(0);
3582       },
3583       [=](MachineInstrBuilder &MIB) { // offset
3584         MIB.addImm(Offset);
3585       },
3586       addZeroImm //  slc
3587     }};
3588 }
3589 
3590 InstructionSelector::ComplexRendererFns
3591 AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
3592   Register RSrcReg;
3593   Register SOffset;
3594   int64_t Offset = 0;
3595 
3596   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
3597     return {};
3598 
3599   return {{
3600       [=](MachineInstrBuilder &MIB) {  // rsrc
3601         MIB.addReg(RSrcReg);
3602       },
3603       [=](MachineInstrBuilder &MIB) { // soffset
3604         if (SOffset)
3605           MIB.addReg(SOffset);
3606         else
3607           MIB.addImm(0);
3608       },
3609       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
3610       addZeroImm //  slc
3611     }};
3612 }
3613 
3614 /// Get an immediate that must be 32-bits, and treated as zero extended.
3615 static Optional<uint64_t> getConstantZext32Val(Register Reg,
3616                                                const MachineRegisterInfo &MRI) {
3617   // getConstantVRegVal sexts any values, so see if that matters.
3618   Optional<int64_t> OffsetVal = getConstantVRegVal(Reg, MRI);
3619   if (!OffsetVal || !isInt<32>(*OffsetVal))
3620     return None;
3621   return Lo_32(*OffsetVal);
3622 }
3623 
3624 InstructionSelector::ComplexRendererFns
3625 AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
3626   Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
3627   if (!OffsetVal)
3628     return {};
3629 
3630   Optional<int64_t> EncodedImm =
3631       AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
3632   if (!EncodedImm)
3633     return {};
3634 
3635   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
3636 }
3637 
3638 InstructionSelector::ComplexRendererFns
3639 AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
3640   assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
3641 
3642   Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
3643   if (!OffsetVal)
3644     return {};
3645 
3646   Optional<int64_t> EncodedImm
3647     = AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
3648   if (!EncodedImm)
3649     return {};
3650 
3651   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
3652 }
3653 
3654 void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
3655                                                  const MachineInstr &MI,
3656                                                  int OpIdx) const {
3657   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
3658          "Expected G_CONSTANT");
3659   MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
3660 }
3661 
3662 void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
3663                                                 const MachineInstr &MI,
3664                                                 int OpIdx) const {
3665   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
3666          "Expected G_CONSTANT");
3667   MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
3668 }
3669 
3670 void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
3671                                                  const MachineInstr &MI,
3672                                                  int OpIdx) const {
3673   assert(OpIdx == -1);
3674 
3675   const MachineOperand &Op = MI.getOperand(1);
3676   if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
3677     MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
3678   else {
3679     assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
3680     MIB.addImm(Op.getCImm()->getSExtValue());
3681   }
3682 }
3683 
3684 void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
3685                                                 const MachineInstr &MI,
3686                                                 int OpIdx) const {
3687   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
3688          "Expected G_CONSTANT");
3689   MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
3690 }
3691 
3692 /// This only really exists to satisfy DAG type checking machinery, so is a
3693 /// no-op here.
3694 void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
3695                                                 const MachineInstr &MI,
3696                                                 int OpIdx) const {
3697   MIB.addImm(MI.getOperand(OpIdx).getImm());
3698 }
3699 
3700 void AMDGPUInstructionSelector::renderExtractGLC(MachineInstrBuilder &MIB,
3701                                                  const MachineInstr &MI,
3702                                                  int OpIdx) const {
3703   assert(OpIdx >= 0 && "expected to match an immediate operand");
3704   MIB.addImm(MI.getOperand(OpIdx).getImm() & 1);
3705 }
3706 
3707 void AMDGPUInstructionSelector::renderExtractSLC(MachineInstrBuilder &MIB,
3708                                                  const MachineInstr &MI,
3709                                                  int OpIdx) const {
3710   assert(OpIdx >= 0 && "expected to match an immediate operand");
3711   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 1) & 1);
3712 }
3713 
3714 void AMDGPUInstructionSelector::renderExtractDLC(MachineInstrBuilder &MIB,
3715                                                  const MachineInstr &MI,
3716                                                  int OpIdx) const {
3717   assert(OpIdx >= 0 && "expected to match an immediate operand");
3718   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 2) & 1);
3719 }
3720 
3721 void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
3722                                                  const MachineInstr &MI,
3723                                                  int OpIdx) const {
3724   assert(OpIdx >= 0 && "expected to match an immediate operand");
3725   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1);
3726 }
3727 
3728 bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
3729   return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
3730 }
3731 
3732 bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
3733   return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
3734 }
3735 
3736 bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
3737   return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
3738 }
3739 
3740 bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
3741   return TII.isInlineConstant(Imm);
3742 }
3743