1 //===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the InstructionSelector class for
10 /// AMDGPU.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13 
14 #include "AMDGPUInstructionSelector.h"
15 #include "AMDGPUInstrInfo.h"
16 #include "AMDGPUGlobalISelUtils.h"
17 #include "AMDGPURegisterBankInfo.h"
18 #include "AMDGPUSubtarget.h"
19 #include "AMDGPUTargetMachine.h"
20 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
21 #include "SIMachineFunctionInfo.h"
22 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
23 #include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
24 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
25 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
26 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
27 #include "llvm/CodeGen/GlobalISel/Utils.h"
28 #include "llvm/CodeGen/MachineBasicBlock.h"
29 #include "llvm/CodeGen/MachineFunction.h"
30 #include "llvm/CodeGen/MachineInstr.h"
31 #include "llvm/CodeGen/MachineInstrBuilder.h"
32 #include "llvm/CodeGen/MachineRegisterInfo.h"
33 #include "llvm/IR/Type.h"
34 #include "llvm/Support/Debug.h"
35 #include "llvm/Support/raw_ostream.h"
36 
37 #define DEBUG_TYPE "amdgpu-isel"
38 
39 using namespace llvm;
40 using namespace MIPatternMatch;
41 
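// Pull in the TableGen'erated pattern-matching implementation (selectImpl and
// its predicate/temporary state). The generated code refers to
// AMDGPUSubtarget, so it is temporarily renamed here to bind against
// GCNSubtarget instead.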
42 #define GET_GLOBALISEL_IMPL
43 #define AMDGPUSubtarget GCNSubtarget
44 #include "AMDGPUGenGlobalISel.inc"
45 #undef GET_GLOBALISEL_IMPL
46 #undef AMDGPUSubtarget
47 
48 AMDGPUInstructionSelector::AMDGPUInstructionSelector(
49     const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
50     const AMDGPUTargetMachine &TM)
51     : InstructionSelector(), TII(*STI.getInstrInfo()),
52       TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
53       STI(STI),
54       EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
55 #define GET_GLOBALISEL_PREDICATES_INIT
56 #include "AMDGPUGenGlobalISel.inc"
57 #undef GET_GLOBALISEL_PREDICATES_INIT
58 #define GET_GLOBALISEL_TEMPORARIES_INIT
59 #include "AMDGPUGenGlobalISel.inc"
60 #undef GET_GLOBALISEL_TEMPORARIES_INIT
61 {
62 }
63 
64 const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
65 
66 void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits &KB,
67                                         CodeGenCoverage &CoverageInfo) {
68   MRI = &MF.getRegInfo();
69   InstructionSelector::setupMF(MF, KB, CoverageInfo);
70 }
71 
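// Return true if \p Reg is (or will be selected as) a wave-wide condition
// value. A physical register must be VCC itself; a virtual register qualifies
// if it is already constrained to the bool register class with an s1 type, or
// is assigned to the VCC register bank.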
72 bool AMDGPUInstructionSelector::isVCC(Register Reg,
73                                       const MachineRegisterInfo &MRI) const {
74   if (Register::isPhysicalRegister(Reg))
75     return Reg == TRI.getVCC();
76 
77   auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
78   const TargetRegisterClass *RC =
79       RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
80   if (RC) {
81     const LLT Ty = MRI.getType(Reg);
82     return RC->hasSuperClassEq(TRI.getBoolRC()) &&
83            Ty.isValid() && Ty.getSizeInBits() == 1;
84   }
85 
86   const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
87   return RB->getID() == AMDGPU::VCCRegBankID;
88 }
89 
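// Rewrite an intrinsic that is effectively a copy of its input (e.g. wqm/wwm)
// in place: swap in \p NewOpc, drop the intrinsic ID operand, add the implicit
// EXEC use, and constrain both operands to a common register class.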
90 bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
91                                                         unsigned NewOpc) const {
92   MI.setDesc(TII.get(NewOpc));
93   MI.RemoveOperand(1); // Remove intrinsic ID.
94   MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
95 
96   MachineOperand &Dst = MI.getOperand(0);
97   MachineOperand &Src = MI.getOperand(1);
98 
99   // TODO: This should be legalized to s32 if needed
100   if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
101     return false;
102 
103   const TargetRegisterClass *DstRC
104     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
105   const TargetRegisterClass *SrcRC
106     = TRI.getConstrainedRegClassForOperand(Src, *MRI);
107   if (!DstRC || DstRC != SrcRC)
108     return false;
109 
110   return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
111          RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
112 }
113 
114 bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
115   const DebugLoc &DL = I.getDebugLoc();
116   MachineBasicBlock *BB = I.getParent();
117   I.setDesc(TII.get(TargetOpcode::COPY));
118 
119   const MachineOperand &Src = I.getOperand(1);
120   MachineOperand &Dst = I.getOperand(0);
121   Register DstReg = Dst.getReg();
122   Register SrcReg = Src.getReg();
123 
124   if (isVCC(DstReg, *MRI)) {
125     if (SrcReg == AMDGPU::SCC) {
126       const TargetRegisterClass *RC
127         = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
128       if (!RC)
129         return true;
130       return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
131     }
132 
133     if (!isVCC(SrcReg, *MRI)) {
134       // TODO: Should probably leave the copy and let copyPhysReg expand it.
135       if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
136         return false;
137 
138       const TargetRegisterClass *SrcRC
139         = TRI.getConstrainedRegClassForOperand(Src, *MRI);
140 
141       Register MaskedReg = MRI->createVirtualRegister(SrcRC);
142 
143       // We can't trust the high bits at this point, so clear them.
144 
145       // TODO: Skip masking high bits if def is known boolean.
146 
147       unsigned AndOpc = TRI.isSGPRClass(SrcRC) ?
148         AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
149       BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
150         .addImm(1)
151         .addReg(SrcReg);
152       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
153         .addImm(0)
154         .addReg(MaskedReg);
155 
156       if (!MRI->getRegClassOrNull(SrcReg))
157         MRI->setRegClass(SrcReg, SrcRC);
158       I.eraseFromParent();
159       return true;
160     }
161 
162     const TargetRegisterClass *RC =
163       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
164     if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
165       return false;
166 
167     // Don't constrain the source register to a class so the def instruction
168     // handles it (unless it's undef).
169     //
    // FIXME: This is a hack. When selecting the def, we need to know
    // specifically that the result is VCCRegBank, and not just an SGPR with
    // size 1. An SReg_32 with size 1 is ambiguous with wave32.
173     if (Src.isUndef()) {
174       const TargetRegisterClass *SrcRC =
175         TRI.getConstrainedRegClassForOperand(Src, *MRI);
176       if (SrcRC && !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
177         return false;
178     }
179 
180     return true;
181   }
182 
183   for (const MachineOperand &MO : I.operands()) {
184     if (Register::isPhysicalRegister(MO.getReg()))
185       continue;
186 
187     const TargetRegisterClass *RC =
188             TRI.getConstrainedRegClassForOperand(MO, *MRI);
189     if (!RC)
190       continue;
191     RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
192   }
193   return true;
194 }
195 
196 bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
197   const Register DefReg = I.getOperand(0).getReg();
198   const LLT DefTy = MRI->getType(DefReg);
199 
  // TODO: Verify this doesn't have insane operands (e.g. a VGPR to SGPR copy)
201 
202   const RegClassOrRegBank &RegClassOrBank =
203     MRI->getRegClassOrRegBank(DefReg);
204 
205   const TargetRegisterClass *DefRC
206     = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
207   if (!DefRC) {
208     if (!DefTy.isValid()) {
209       LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
210       return false;
211     }
212 
213     const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
214     DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB, *MRI);
215     if (!DefRC) {
216       LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
217       return false;
218     }
219   }
220 
221   // TODO: Verify that all registers have the same bank
222   I.setDesc(TII.get(TargetOpcode::PHI));
223   return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
224 }
225 
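// Produce a machine operand for the 32-bit half of a 64-bit operand selected
// by \p SubIdx (sub0 = low, sub1 = high). Register operands are split with a
// COPY into a fresh register of class \p SubRC; immediates are split
// arithmetically.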
226 MachineOperand
227 AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
228                                            const TargetRegisterClass &SubRC,
229                                            unsigned SubIdx) const {
230 
231   MachineInstr *MI = MO.getParent();
232   MachineBasicBlock *BB = MO.getParent()->getParent();
233   Register DstReg = MRI->createVirtualRegister(&SubRC);
234 
235   if (MO.isReg()) {
236     unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
237     Register Reg = MO.getReg();
238     BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
239             .addReg(Reg, 0, ComposedSubIdx);
240 
241     return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
242                                      MO.isKill(), MO.isDead(), MO.isUndef(),
243                                      MO.isEarlyClobber(), 0, MO.isDebug(),
244                                      MO.isInternalRead());
245   }
246 
247   assert(MO.isImm());
248 
249   APInt Imm(64, MO.getImm());
250 
251   switch (SubIdx) {
252   default:
253     llvm_unreachable("do not know to split immediate with this sub index.");
254   case AMDGPU::sub0:
255     return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
256   case AMDGPU::sub1:
257     return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
258   }
259 }
260 
261 static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
262   switch (Opc) {
263   case AMDGPU::G_AND:
264     return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
265   case AMDGPU::G_OR:
266     return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
267   case AMDGPU::G_XOR:
268     return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
269   default:
270     llvm_unreachable("not a bit op");
271   }
272 }
273 
274 bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
275   MachineOperand &Dst = I.getOperand(0);
276   MachineOperand &Src0 = I.getOperand(1);
277   MachineOperand &Src1 = I.getOperand(2);
278   Register DstReg = Dst.getReg();
279   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
280 
281   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
282   if (DstRB->getID() == AMDGPU::VCCRegBankID) {
283     const TargetRegisterClass *RC = TRI.getBoolRC();
284     unsigned InstOpc = getLogicalBitOpcode(I.getOpcode(),
285                                            RC == &AMDGPU::SReg_64RegClass);
286     I.setDesc(TII.get(InstOpc));
287     // Dead implicit-def of scc
288     I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
289                                            true, // isImp
290                                            false, // isKill
291                                            true)); // isDead
292 
    // FIXME: Hack to avoid turning the register bank into a register class.
    // The selector for G_ICMP relies on seeing that the register bank for the
    // result is VCC. In wave32, if we constrain the registers to SReg_32 here,
    // it will be ambiguous whether it's a scalar or vector bool.
297     if (Src0.isUndef() && !MRI->getRegClassOrNull(Src0.getReg()))
298       MRI->setRegClass(Src0.getReg(), RC);
299     if (Src1.isUndef() && !MRI->getRegClassOrNull(Src1.getReg()))
300       MRI->setRegClass(Src1.getReg(), RC);
301 
302     return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
303   }
304 
305   // TODO: Should this allow an SCC bank result, and produce a copy from SCC for
306   // the result?
307   if (DstRB->getID() == AMDGPU::SGPRRegBankID) {
308     unsigned InstOpc = getLogicalBitOpcode(I.getOpcode(), Size > 32);
309     I.setDesc(TII.get(InstOpc));
310     // Dead implicit-def of scc
311     I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
312                                            true, // isImp
313                                            false, // isKill
314                                            true)); // isDead
315     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
316   }
317 
318   return false;
319 }
320 
321 bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
322   MachineBasicBlock *BB = I.getParent();
323   MachineFunction *MF = BB->getParent();
324   Register DstReg = I.getOperand(0).getReg();
325   const DebugLoc &DL = I.getDebugLoc();
326   LLT Ty = MRI->getType(DstReg);
327   if (Ty.isVector())
328     return false;
329 
330   unsigned Size = Ty.getSizeInBits();
331   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
332   const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
333   const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;
334 
335   if (Size == 32) {
336     if (IsSALU) {
337       const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
338       MachineInstr *Add =
339         BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
340         .add(I.getOperand(1))
341         .add(I.getOperand(2));
342       I.eraseFromParent();
343       return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
344     }
345 
346     if (STI.hasAddNoCarry()) {
347       const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
348       I.setDesc(TII.get(Opc));
349       I.addOperand(*MF, MachineOperand::CreateImm(0));
350       I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
351       return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
352     }
353 
354     const unsigned Opc = Sub ? AMDGPU::V_SUB_I32_e64 : AMDGPU::V_ADD_I32_e64;
355 
356     Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
357     MachineInstr *Add
358       = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
359       .addDef(UnusedCarry, RegState::Dead)
360       .add(I.getOperand(1))
361       .add(I.getOperand(2))
362       .addImm(0);
363     I.eraseFromParent();
364     return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
365   }
366 
367   assert(!Sub && "illegal sub should not reach here");
368 
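  // Decompose the 64-bit add into 32-bit halves: add the low halves producing
  // a carry, add the high halves consuming it, then recombine the two results
  // with a REG_SEQUENCE.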
369   const TargetRegisterClass &RC
370     = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
371   const TargetRegisterClass &HalfRC
372     = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;
373 
374   MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
375   MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
376   MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
377   MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));
378 
379   Register DstLo = MRI->createVirtualRegister(&HalfRC);
380   Register DstHi = MRI->createVirtualRegister(&HalfRC);
381 
382   if (IsSALU) {
383     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
384       .add(Lo1)
385       .add(Lo2);
386     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
387       .add(Hi1)
388       .add(Hi2);
389   } else {
390     const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
391     Register CarryReg = MRI->createVirtualRegister(CarryRC);
392     BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_I32_e64), DstLo)
393       .addDef(CarryReg)
394       .add(Lo1)
395       .add(Lo2)
396       .addImm(0);
397     MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
398       .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
399       .add(Hi1)
400       .add(Hi2)
401       .addReg(CarryReg, RegState::Kill)
402       .addImm(0);
403 
404     if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
405       return false;
406   }
407 
408   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
409     .addReg(DstLo)
410     .addImm(AMDGPU::sub0)
411     .addReg(DstHi)
    .addImm(AMDGPU::sub1);

  if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
416     return false;
417 
418   I.eraseFromParent();
419   return true;
420 }
421 
422 bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
423   MachineInstr &I) const {
424   MachineBasicBlock *BB = I.getParent();
425   MachineFunction *MF = BB->getParent();
426   const DebugLoc &DL = I.getDebugLoc();
427   Register Dst0Reg = I.getOperand(0).getReg();
428   Register Dst1Reg = I.getOperand(1).getReg();
429   const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
430                      I.getOpcode() == AMDGPU::G_UADDE;
431   const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
432                           I.getOpcode() == AMDGPU::G_USUBE;
433 
  if (isVCC(Dst1Reg, *MRI)) {
    // The names of the opcodes are misleading. v_add_i32/v_sub_i32 have an
    // unsigned carry out despite the _i32 name. These were renamed in VI to
    // _U32.
    // FIXME: We should probably rename the opcodes here.
438     unsigned NoCarryOpc = IsAdd ? AMDGPU::V_ADD_I32_e64 : AMDGPU::V_SUB_I32_e64;
439     unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
440     I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
441     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
442     I.addOperand(*MF, MachineOperand::CreateImm(0));
443     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
444   }
445 
446   Register Src0Reg = I.getOperand(2).getReg();
447   Register Src1Reg = I.getOperand(3).getReg();
448 
449   if (HasCarryIn) {
450     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
451       .addReg(I.getOperand(4).getReg());
452   }
453 
454   unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
455   unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
456 
457   BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
458     .add(I.getOperand(2))
459     .add(I.getOperand(3));
460   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
461     .addReg(AMDGPU::SCC);
462 
463   if (!MRI->getRegClassOrNull(Dst1Reg))
464     MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);
465 
466   if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
467       !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
468       !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
469     return false;
470 
471   if (HasCarryIn &&
472       !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
473                                     AMDGPU::SReg_32RegClass, *MRI))
474     return false;
475 
476   I.eraseFromParent();
477   return true;
478 }
479 
// TODO: We should probably legalize these to use only 32-bit results.
481 bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
482   MachineBasicBlock *BB = I.getParent();
483   Register DstReg = I.getOperand(0).getReg();
484   Register SrcReg = I.getOperand(1).getReg();
485   LLT DstTy = MRI->getType(DstReg);
486   LLT SrcTy = MRI->getType(SrcReg);
487   const unsigned SrcSize = SrcTy.getSizeInBits();
488   const unsigned DstSize = DstTy.getSizeInBits();
489 
490   // TODO: Should handle any multiple of 32 offset.
491   unsigned Offset = I.getOperand(2).getImm();
492   if (Offset % 32 != 0 || DstSize > 128)
493     return false;
494 
495   const TargetRegisterClass *DstRC =
496     TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
497   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
498     return false;
499 
500   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
501   const TargetRegisterClass *SrcRC =
502     TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
503   if (!SrcRC)
504     return false;
505   unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
506                                                          DstSize / 32);
507   SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
508   if (!SrcRC)
509     return false;
510 
511   SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
512                                     *SrcRC, I.getOperand(1));
513   const DebugLoc &DL = I.getDebugLoc();
514   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
515     .addReg(SrcReg, 0, SubReg);
516 
517   I.eraseFromParent();
518   return true;
519 }
520 
521 bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
522   MachineBasicBlock *BB = MI.getParent();
523   Register DstReg = MI.getOperand(0).getReg();
524   LLT DstTy = MRI->getType(DstReg);
525   LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());
526 
527   const unsigned SrcSize = SrcTy.getSizeInBits();
528   if (SrcSize < 32)
529     return selectImpl(MI, *CoverageInfo);
530 
531   const DebugLoc &DL = MI.getDebugLoc();
532   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
533   const unsigned DstSize = DstTy.getSizeInBits();
534   const TargetRegisterClass *DstRC =
535     TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
536   if (!DstRC)
537     return false;
538 
539   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
540   MachineInstrBuilder MIB =
541     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
542   for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
543     MachineOperand &Src = MI.getOperand(I + 1);
544     MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
545     MIB.addImm(SubRegs[I]);
546 
547     const TargetRegisterClass *SrcRC
548       = TRI.getConstrainedRegClassForOperand(Src, *MRI);
549     if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
550       return false;
551   }
552 
553   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
554     return false;
555 
556   MI.eraseFromParent();
557   return true;
558 }
559 
560 bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
561   MachineBasicBlock *BB = MI.getParent();
562   const int NumDst = MI.getNumOperands() - 1;
563 
564   MachineOperand &Src = MI.getOperand(NumDst);
565 
566   Register SrcReg = Src.getReg();
567   Register DstReg0 = MI.getOperand(0).getReg();
568   LLT DstTy = MRI->getType(DstReg0);
569   LLT SrcTy = MRI->getType(SrcReg);
570 
571   const unsigned DstSize = DstTy.getSizeInBits();
572   const unsigned SrcSize = SrcTy.getSizeInBits();
573   const DebugLoc &DL = MI.getDebugLoc();
574   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
575 
576   const TargetRegisterClass *SrcRC =
577     TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
578   if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
579     return false;
580 
581   const unsigned SrcFlags = getUndefRegState(Src.isUndef());
582 
583   // Note we could have mixed SGPR and VGPR destination banks for an SGPR
584   // source, and this relies on the fact that the same subregister indices are
585   // used for both.
586   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
587   for (int I = 0, E = NumDst; I != E; ++I) {
588     MachineOperand &Dst = MI.getOperand(I);
589     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
590       .addReg(SrcReg, SrcFlags, SubRegs[I]);
591 
592     const TargetRegisterClass *DstRC =
593       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
594     if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
595       return false;
596   }
597 
598   MI.eraseFromParent();
599   return true;
600 }
601 
602 static bool isZero(Register Reg, const MachineRegisterInfo &MRI) {
603   int64_t Val;
604   return mi_match(Reg, MRI, m_ICst(Val)) && Val == 0;
605 }
606 
607 bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC(
608   MachineInstr &MI) const {
609   if (selectImpl(MI, *CoverageInfo))
610     return true;
611 
612   const LLT S32 = LLT::scalar(32);
613   const LLT V2S16 = LLT::vector(2, 16);
614 
615   Register Dst = MI.getOperand(0).getReg();
616   if (MRI->getType(Dst) != V2S16)
617     return false;
618 
619   const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
620   if (DstBank->getID() != AMDGPU::SGPRRegBankID)
621     return false;
622 
623   Register Src0 = MI.getOperand(1).getReg();
624   Register Src1 = MI.getOperand(2).getReg();
625   if (MRI->getType(Src0) != S32)
626     return false;
627 
628   const DebugLoc &DL = MI.getDebugLoc();
629   MachineBasicBlock *BB = MI.getParent();
630 
  // TODO: This should probably be a combine somewhere
  // (build_vector_trunc $src0, undef) -> (copy $src0)
633   MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
634   if (Src1Def && Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
635     MI.setDesc(TII.get(AMDGPU::COPY));
636     MI.RemoveOperand(2);
637     return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI) &&
638            RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI);
639   }
640 
641   Register ShiftSrc0;
642   Register ShiftSrc1;
643   int64_t ShiftAmt;
644 
645   // With multiple uses of the shift, this will duplicate the shift and
646   // increase register pressure.
647   //
  // (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16))
649   //  => (S_PACK_HH_B32_B16 $src0, $src1)
650   // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16))
651   //  => (S_PACK_LH_B32_B16 $src0, $src1)
652   // (build_vector_trunc $src0, $src1)
653   //  => (S_PACK_LL_B32_B16 $src0, $src1)
654 
655   // FIXME: This is an inconvenient way to check a specific value
656   bool Shift0 = mi_match(
657     Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_ICst(ShiftAmt)))) &&
658     ShiftAmt == 16;
659 
660   bool Shift1 = mi_match(
661     Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_ICst(ShiftAmt)))) &&
662     ShiftAmt == 16;
663 
664   unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
665   if (Shift0 && Shift1) {
666     Opc = AMDGPU::S_PACK_HH_B32_B16;
667     MI.getOperand(1).setReg(ShiftSrc0);
668     MI.getOperand(2).setReg(ShiftSrc1);
669   } else if (Shift1) {
670     Opc = AMDGPU::S_PACK_LH_B32_B16;
671     MI.getOperand(2).setReg(ShiftSrc1);
672   } else if (Shift0 && isZero(Src1, *MRI)) {
673     // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16
674     auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)
675       .addReg(ShiftSrc0)
676       .addImm(16);
677 
678     MI.eraseFromParent();
679     return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
680   }
681 
682   MI.setDesc(TII.get(Opc));
683   return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
684 }
685 
686 bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
687   return selectG_ADD_SUB(I);
688 }
689 
690 bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
691   const MachineOperand &MO = I.getOperand(0);
692 
693   // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
694   // regbank check here is to know why getConstrainedRegClassForOperand failed.
695   const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
696   if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
697       (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
698     I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
699     return true;
700   }
701 
702   return false;
703 }
704 
705 bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
706   MachineBasicBlock *BB = I.getParent();
707 
708   Register DstReg = I.getOperand(0).getReg();
709   Register Src0Reg = I.getOperand(1).getReg();
710   Register Src1Reg = I.getOperand(2).getReg();
711   LLT Src1Ty = MRI->getType(Src1Reg);
712 
713   unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
714   unsigned InsSize = Src1Ty.getSizeInBits();
715 
716   int64_t Offset = I.getOperand(3).getImm();
717   if (Offset % 32 != 0)
718     return false;
719 
720   unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
721   if (SubReg == AMDGPU::NoSubRegister)
722     return false;
723 
724   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
725   const TargetRegisterClass *DstRC =
726     TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
727   if (!DstRC)
728     return false;
729 
730   const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
731   const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
732   const TargetRegisterClass *Src0RC =
733     TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank, *MRI);
734   const TargetRegisterClass *Src1RC =
735     TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank, *MRI);
736 
737   // Deal with weird cases where the class only partially supports the subreg
738   // index.
739   Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
740   if (!Src0RC || !Src1RC)
741     return false;
742 
743   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
744       !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
745       !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
746     return false;
747 
748   const DebugLoc &DL = I.getDebugLoc();
749   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
750     .addReg(Src0Reg)
751     .addReg(Src1Reg)
752     .addImm(SubReg);
753 
754   I.eraseFromParent();
755   return true;
756 }
757 
758 bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
759   if (STI.getLDSBankCount() != 16)
760     return selectImpl(MI, *CoverageInfo);
761 
762   Register Dst = MI.getOperand(0).getReg();
763   Register Src0 = MI.getOperand(2).getReg();
764   Register M0Val = MI.getOperand(6).getReg();
765   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
766       !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
767       !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
768     return false;
769 
770   // This requires 2 instructions. It is possible to write a pattern to support
771   // this, but the generated isel emitter doesn't correctly deal with multiple
772   // output instructions using the same physical register input. The copy to m0
773   // is incorrectly placed before the second instruction.
774   //
775   // TODO: Match source modifiers.
776 
777   Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
778   const DebugLoc &DL = MI.getDebugLoc();
779   MachineBasicBlock *MBB = MI.getParent();
780 
781   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
782     .addReg(M0Val);
783   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
784     .addImm(2)
785     .addImm(MI.getOperand(4).getImm())  // $attr
786     .addImm(MI.getOperand(3).getImm()); // $attrchan
787 
788   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
789     .addImm(0)                          // $src0_modifiers
790     .addReg(Src0)                       // $src0
791     .addImm(MI.getOperand(4).getImm())  // $attr
792     .addImm(MI.getOperand(3).getImm())  // $attrchan
793     .addImm(0)                          // $src2_modifiers
794     .addReg(InterpMov)                  // $src2 - 2 f16 values selected by high
795     .addImm(MI.getOperand(5).getImm())  // $high
796     .addImm(0)                          // $clamp
797     .addImm(0);                         // $omod
798 
799   MI.eraseFromParent();
800   return true;
801 }
802 
803 // We need to handle this here because tablegen doesn't support matching
804 // instructions with multiple outputs.
805 bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
806   Register Dst0 = MI.getOperand(0).getReg();
807   Register Dst1 = MI.getOperand(1).getReg();
808 
809   LLT Ty = MRI->getType(Dst0);
810   unsigned Opc;
811   if (Ty == LLT::scalar(32))
812     Opc = AMDGPU::V_DIV_SCALE_F32;
813   else if (Ty == LLT::scalar(64))
814     Opc = AMDGPU::V_DIV_SCALE_F64;
815   else
816     return false;
817 
818   const DebugLoc &DL = MI.getDebugLoc();
819   MachineBasicBlock *MBB = MI.getParent();
820 
821   Register Numer = MI.getOperand(3).getReg();
822   Register Denom = MI.getOperand(4).getReg();
823   unsigned ChooseDenom = MI.getOperand(5).getImm();
824 
825   Register Src0 = ChooseDenom != 0 ? Numer : Denom;
826 
827   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0)
828     .addDef(Dst1)
829     .addUse(Src0)
830     .addUse(Denom)
831     .addUse(Numer);
832 
833   MI.eraseFromParent();
834   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
835 }
836 
837 bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
838   unsigned IntrinsicID = I.getIntrinsicID();
839   switch (IntrinsicID) {
840   case Intrinsic::amdgcn_if_break: {
841     MachineBasicBlock *BB = I.getParent();
842 
    // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
844     // SelectionDAG uses for wave32 vs wave64.
845     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
846       .add(I.getOperand(0))
847       .add(I.getOperand(2))
848       .add(I.getOperand(3));
849 
850     Register DstReg = I.getOperand(0).getReg();
851     Register Src0Reg = I.getOperand(2).getReg();
852     Register Src1Reg = I.getOperand(3).getReg();
853 
854     I.eraseFromParent();
855 
856     for (Register Reg : { DstReg, Src0Reg, Src1Reg })
857       MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
858 
859     return true;
860   }
861   case Intrinsic::amdgcn_interp_p1_f16:
862     return selectInterpP1F16(I);
863   case Intrinsic::amdgcn_wqm:
864     return constrainCopyLikeIntrin(I, AMDGPU::WQM);
865   case Intrinsic::amdgcn_softwqm:
866     return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
867   case Intrinsic::amdgcn_wwm:
868     return constrainCopyLikeIntrin(I, AMDGPU::WWM);
869   case Intrinsic::amdgcn_div_scale:
870     return selectDivScale(I);
871   default:
872     return selectImpl(I, *CoverageInfo);
873   }
874 }
875 
876 static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
877   if (Size != 32 && Size != 64)
878     return -1;
879   switch (P) {
880   default:
881     llvm_unreachable("Unknown condition code!");
882   case CmpInst::ICMP_NE:
883     return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
884   case CmpInst::ICMP_EQ:
885     return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
886   case CmpInst::ICMP_SGT:
887     return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
888   case CmpInst::ICMP_SGE:
889     return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
890   case CmpInst::ICMP_SLT:
891     return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
892   case CmpInst::ICMP_SLE:
893     return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
894   case CmpInst::ICMP_UGT:
895     return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
896   case CmpInst::ICMP_UGE:
897     return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
898   case CmpInst::ICMP_ULT:
899     return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
900   case CmpInst::ICMP_ULE:
901     return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
902   }
903 }
904 
905 int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
906                                               unsigned Size) const {
907   if (Size == 64) {
908     if (!STI.hasScalarCompareEq64())
909       return -1;
910 
911     switch (P) {
912     case CmpInst::ICMP_NE:
913       return AMDGPU::S_CMP_LG_U64;
914     case CmpInst::ICMP_EQ:
915       return AMDGPU::S_CMP_EQ_U64;
916     default:
917       return -1;
918     }
919   }
920 
921   if (Size != 32)
922     return -1;
923 
924   switch (P) {
925   case CmpInst::ICMP_NE:
926     return AMDGPU::S_CMP_LG_U32;
927   case CmpInst::ICMP_EQ:
928     return AMDGPU::S_CMP_EQ_U32;
929   case CmpInst::ICMP_SGT:
930     return AMDGPU::S_CMP_GT_I32;
931   case CmpInst::ICMP_SGE:
932     return AMDGPU::S_CMP_GE_I32;
933   case CmpInst::ICMP_SLT:
934     return AMDGPU::S_CMP_LT_I32;
935   case CmpInst::ICMP_SLE:
936     return AMDGPU::S_CMP_LE_I32;
937   case CmpInst::ICMP_UGT:
938     return AMDGPU::S_CMP_GT_U32;
939   case CmpInst::ICMP_UGE:
940     return AMDGPU::S_CMP_GE_U32;
941   case CmpInst::ICMP_ULT:
942     return AMDGPU::S_CMP_LT_U32;
943   case CmpInst::ICMP_ULE:
944     return AMDGPU::S_CMP_LE_U32;
945   default:
946     llvm_unreachable("Unknown condition code!");
947   }
948 }
949 
950 bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
951   MachineBasicBlock *BB = I.getParent();
952   const DebugLoc &DL = I.getDebugLoc();
953 
954   Register SrcReg = I.getOperand(2).getReg();
955   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
956 
957   auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
958 
959   Register CCReg = I.getOperand(0).getReg();
960   if (!isVCC(CCReg, *MRI)) {
961     int Opcode = getS_CMPOpcode(Pred, Size);
962     if (Opcode == -1)
963       return false;
964     MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
965             .add(I.getOperand(2))
966             .add(I.getOperand(3));
967     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
968       .addReg(AMDGPU::SCC);
969     bool Ret =
970         constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
971         RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
972     I.eraseFromParent();
973     return Ret;
974   }
975 
976   int Opcode = getV_CMPOpcode(Pred, Size);
977   if (Opcode == -1)
978     return false;
979 
980   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
981             I.getOperand(0).getReg())
982             .add(I.getOperand(2))
983             .add(I.getOperand(3));
984   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
985                                *TRI.getBoolRC(), *MRI);
986   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
987   I.eraseFromParent();
988   return Ret;
989 }
990 
991 bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
  // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
993   // SelectionDAG uses for wave32 vs wave64.
994   MachineBasicBlock *BB = MI.getParent();
995   BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
996       .add(MI.getOperand(1));
997 
998   Register Reg = MI.getOperand(1).getReg();
999   MI.eraseFromParent();
1000 
1001   if (!MRI->getRegClassOrNull(Reg))
1002     MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
1003   return true;
1004 }
1005 
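// Map the calling convention to the shader-type field used in the
// ds_ordered_count offset encoding (see selectDSOrderedIntrinsic below).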
1006 static unsigned getDSShaderTypeValue(const MachineFunction &MF) {
1007   switch (MF.getFunction().getCallingConv()) {
1008   case CallingConv::AMDGPU_PS:
1009     return 1;
1010   case CallingConv::AMDGPU_VS:
1011     return 2;
1012   case CallingConv::AMDGPU_GS:
1013     return 3;
1014   case CallingConv::AMDGPU_HS:
1015   case CallingConv::AMDGPU_LS:
1016   case CallingConv::AMDGPU_ES:
1017     report_fatal_error("ds_ordered_count unsupported for this calling conv");
1018   case CallingConv::AMDGPU_CS:
1019   case CallingConv::AMDGPU_KERNEL:
1020   case CallingConv::C:
1021   case CallingConv::Fast:
1022   default:
1023     // Assume other calling conventions are various compute callable functions
1024     return 0;
1025   }
1026 }
1027 
1028 bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
1029   MachineInstr &MI, Intrinsic::ID IntrID) const {
1030   MachineBasicBlock *MBB = MI.getParent();
1031   MachineFunction *MF = MBB->getParent();
1032   const DebugLoc &DL = MI.getDebugLoc();
1033 
1034   unsigned IndexOperand = MI.getOperand(7).getImm();
1035   bool WaveRelease = MI.getOperand(8).getImm() != 0;
1036   bool WaveDone = MI.getOperand(9).getImm() != 0;
1037 
1038   if (WaveDone && !WaveRelease)
1039     report_fatal_error("ds_ordered_count: wave_done requires wave_release");
1040 
1041   unsigned OrderedCountIndex = IndexOperand & 0x3f;
1042   IndexOperand &= ~0x3f;
1043   unsigned CountDw = 0;
1044 
1045   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
1046     CountDw = (IndexOperand >> 24) & 0xf;
1047     IndexOperand &= ~(0xf << 24);
1048 
1049     if (CountDw < 1 || CountDw > 4) {
1050       report_fatal_error(
1051         "ds_ordered_count: dword count must be between 1 and 4");
1052     }
1053   }
1054 
1055   if (IndexOperand)
1056     report_fatal_error("ds_ordered_count: bad index operand");
1057 
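  // Pack the DS_ORDERED_COUNT metadata into the 16-bit immediate offset:
  //   bits [7:0]  (Offset0): ordered count index << 2
  //   bits [15:8] (Offset1): wave_release | wave_done << 1 | shader_type << 2
  //                          | instruction << 4, plus (dword count - 1) << 6
  //                          on GFX10+.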
1058   unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
1059   unsigned ShaderType = getDSShaderTypeValue(*MF);
1060 
1061   unsigned Offset0 = OrderedCountIndex << 2;
1062   unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
1063                      (Instruction << 4);
1064 
1065   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
1066     Offset1 |= (CountDw - 1) << 6;
1067 
1068   unsigned Offset = Offset0 | (Offset1 << 8);
1069 
1070   Register M0Val = MI.getOperand(2).getReg();
1071   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1072     .addReg(M0Val);
1073 
1074   Register DstReg = MI.getOperand(0).getReg();
1075   Register ValReg = MI.getOperand(3).getReg();
1076   MachineInstrBuilder DS =
1077     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
1078       .addReg(ValReg)
1079       .addImm(Offset)
1080       .cloneMemRefs(MI);
1081 
1082   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
1083     return false;
1084 
1085   bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
1086   MI.eraseFromParent();
1087   return Ret;
1088 }
1089 
1090 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
1091   switch (IntrID) {
1092   case Intrinsic::amdgcn_ds_gws_init:
1093     return AMDGPU::DS_GWS_INIT;
1094   case Intrinsic::amdgcn_ds_gws_barrier:
1095     return AMDGPU::DS_GWS_BARRIER;
1096   case Intrinsic::amdgcn_ds_gws_sema_v:
1097     return AMDGPU::DS_GWS_SEMA_V;
1098   case Intrinsic::amdgcn_ds_gws_sema_br:
1099     return AMDGPU::DS_GWS_SEMA_BR;
1100   case Intrinsic::amdgcn_ds_gws_sema_p:
1101     return AMDGPU::DS_GWS_SEMA_P;
1102   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1103     return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
1104   default:
1105     llvm_unreachable("not a gws intrinsic");
1106   }
1107 }
1108 
1109 bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
1110                                                      Intrinsic::ID IID) const {
1111   if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
1112       !STI.hasGWSSemaReleaseAll())
1113     return false;
1114 
1115   // intrinsic ID, vsrc, offset
1116   const bool HasVSrc = MI.getNumOperands() == 3;
1117   assert(HasVSrc || MI.getNumOperands() == 2);
1118 
1119   Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
1120   const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
1121   if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
1122     return false;
1123 
1124   MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1125   assert(OffsetDef);
1126 
1127   unsigned ImmOffset;
1128 
1129   MachineBasicBlock *MBB = MI.getParent();
1130   const DebugLoc &DL = MI.getDebugLoc();
1131 
1132   MachineInstr *Readfirstlane = nullptr;
1133 
1134   // If we legalized the VGPR input, strip out the readfirstlane to analyze the
1135   // incoming offset, in case there's an add of a constant. We'll have to put it
1136   // back later.
1137   if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
1138     Readfirstlane = OffsetDef;
1139     BaseOffset = OffsetDef->getOperand(1).getReg();
1140     OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1141   }
1142 
1143   if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
    // If we have a constant offset, try to use the 0 in m0 as the base.
    // TODO: Look into changing the default m0 initialization value. If the
    // default -1 set only the low 16 bits, we could leave it as-is and add 1
    // to the immediate offset.
1148 
1149     ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
1150     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1151       .addImm(0);
1152   } else {
1153     std::tie(BaseOffset, ImmOffset, OffsetDef)
1154       = AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset);
1155 
1156     if (Readfirstlane) {
1157       // We have the constant offset now, so put the readfirstlane back on the
1158       // variable component.
      if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass,
                                        *MRI))
1160         return false;
1161 
1162       Readfirstlane->getOperand(1).setReg(BaseOffset);
1163       BaseOffset = Readfirstlane->getOperand(0).getReg();
1164     } else {
1165       if (!RBI.constrainGenericRegister(BaseOffset,
1166                                         AMDGPU::SReg_32RegClass, *MRI))
1167         return false;
1168     }
1169 
1170     Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1171     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
1172       .addReg(BaseOffset)
1173       .addImm(16);
1174 
1175     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1176       .addReg(M0Base);
1177   }
1178 
1179   // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
1180   // offset field) % 64. Some versions of the programming guide omit the m0
1181   // part, or claim it's from offset 0.
1182   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));
1183 
1184   if (HasVSrc) {
1185     Register VSrc = MI.getOperand(1).getReg();
1186     MIB.addReg(VSrc);
1187     if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
1188       return false;
1189   }
1190 
1191   MIB.addImm(ImmOffset)
1192      .addImm(-1) // $gds
1193      .cloneMemRefs(MI);
1194 
1195   MI.eraseFromParent();
1196   return true;
1197 }
1198 
1199 bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
1200                                                       bool IsAppend) const {
1201   Register PtrBase = MI.getOperand(2).getReg();
1202   LLT PtrTy = MRI->getType(PtrBase);
1203   bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
1204 
1205   unsigned Offset;
1206   std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));
1207 
1208   // TODO: Should this try to look through readfirstlane like GWS?
1209   if (!isDSOffsetLegal(PtrBase, Offset, 16)) {
1210     PtrBase = MI.getOperand(2).getReg();
1211     Offset = 0;
1212   }
1213 
1214   MachineBasicBlock *MBB = MI.getParent();
1215   const DebugLoc &DL = MI.getDebugLoc();
1216   const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
1217 
1218   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1219     .addReg(PtrBase);
1220   BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
1221     .addImm(Offset)
1222     .addImm(IsGDS ? -1 : 0)
1223     .cloneMemRefs(MI);
1224   MI.eraseFromParent();
1225   return true;
1226 }
1227 
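// Decode the texfailctrl immediate: bit 0 enables TFE, bit 1 enables LWE, and
// any nonzero value marks the operation as a tex-fail variant. Returns false
// if unknown bits remain set.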
1228 static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
1229                          bool &IsTexFail) {
1230   if (TexFailCtrl)
1231     IsTexFail = true;
1232 
1233   TFE = (TexFailCtrl & 0x1) ? 1 : 0;
1234   TexFailCtrl &= ~(uint64_t)0x1;
1235   LWE = (TexFailCtrl & 0x2) ? 1 : 0;
1236   TexFailCtrl &= ~(uint64_t)0x2;
1237 
1238   return TexFailCtrl == 0;
1239 }
1240 
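// Decode the cachepolicy immediate into GLC (bit 0), SLC (bit 1), and DLC
// (bit 2). A bit whose out-pointer is null is not stripped, so it must be
// clear for the policy to be accepted. Returns false if any unhandled bits
// are set.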
1241 static bool parseCachePolicy(uint64_t Value,
1242                              bool *GLC, bool *SLC, bool *DLC) {
1243   if (GLC) {
1244     *GLC = (Value & 0x1) ? 1 : 0;
1245     Value &= ~(uint64_t)0x1;
1246   }
1247   if (SLC) {
1248     *SLC = (Value & 0x2) ? 1 : 0;
1249     Value &= ~(uint64_t)0x2;
1250   }
1251   if (DLC) {
1252     *DLC = (Value & 0x4) ? 1 : 0;
1253     Value &= ~(uint64_t)0x4;
1254   }
1255 
1256   return Value == 0;
1257 }
1258 
1259 bool AMDGPUInstructionSelector::selectImageIntrinsic(
1260   MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
1261   MachineBasicBlock *MBB = MI.getParent();
1262   const DebugLoc &DL = MI.getDebugLoc();
1263 
1264   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
1265     AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
1266 
1267   const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
1268   const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
1269       AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
1270   const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo =
1271       AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode);
1272   unsigned IntrOpcode = Intr->BaseOpcode;
1273   const bool IsGFX10 = STI.getGeneration() >= AMDGPUSubtarget::GFX10;
1274 
1275   const LLT S16 = LLT::scalar(16);
1276   const int VAddrIdx = getImageVAddrIdxBegin(BaseOpcode,
1277                                              MI.getNumExplicitDefs());
1278   int NumVAddr, NumGradients;
1279   std::tie(NumVAddr, NumGradients) = getImageNumVAddr(Intr, BaseOpcode);
1280 
1281   const LLT AddrTy = MRI->getType(MI.getOperand(VAddrIdx).getReg());
1282   const bool IsA16 = AddrTy.getScalarType() == S16;
1283 
1284   Register VDataIn, VDataOut;
1285   LLT VDataTy;
1286   int NumVDataDwords = -1;
1287   bool IsD16 = false;
1288 
1289   // XXX - Can we just get the second to last argument for ctrl?
1290   unsigned CtrlIdx; // Index of texfailctrl argument
1291   bool Unorm;
1292   if (!BaseOpcode->Sampler) {
1293     Unorm = true;
1294     CtrlIdx = VAddrIdx + NumVAddr + 1;
1295   } else {
1296     Unorm = MI.getOperand(VAddrIdx + NumVAddr + 2).getImm() != 0;
1297     CtrlIdx = VAddrIdx + NumVAddr + 3;
1298   }
1299 
1300   bool TFE;
1301   bool LWE;
1302   bool IsTexFail = false;
1303   if (!parseTexFail(MI.getOperand(CtrlIdx).getImm(), TFE, LWE, IsTexFail))
1304     return false;
1305 
1306   unsigned DMask = 0;
1307   unsigned DMaskLanes = 0;
1308 
1309   if (BaseOpcode->Atomic) {
1310     VDataOut = MI.getOperand(0).getReg();
1311     VDataIn = MI.getOperand(2).getReg();
1312     LLT Ty = MRI->getType(VDataIn);
1313 
1314     // Be careful to allow atomic swap on 16-bit element vectors.
1315     const bool Is64Bit = BaseOpcode->AtomicX2 ?
1316       Ty.getSizeInBits() == 128 :
1317       Ty.getSizeInBits() == 64;
1318 
1319     if (BaseOpcode->AtomicX2) {
1320       assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister);
1321 
1322       DMask = Is64Bit ? 0xf : 0x3;
1323       NumVDataDwords = Is64Bit ? 4 : 2;
1324     } else {
1325       DMask = Is64Bit ? 0x3 : 0x1;
1326       NumVDataDwords = Is64Bit ? 2 : 1;
1327     }
1328   } else {
1329     const int DMaskIdx = 2; // Input/output + intrinsic ID.
1330 
1331     DMask = MI.getOperand(DMaskIdx).getImm();
1332     DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
1333 
1334     if (BaseOpcode->Store) {
1335       VDataIn = MI.getOperand(1).getReg();
1336       VDataTy = MRI->getType(VDataIn);
1337       NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
1338     } else {
1339       VDataOut = MI.getOperand(0).getReg();
1340       VDataTy = MRI->getType(VDataOut);
1341       NumVDataDwords = DMaskLanes;
1342 
1343       // One memoperand is mandatory, except for getresinfo.
1344       // FIXME: Check this in verifier.
1345       if (!MI.memoperands_empty()) {
1346         const MachineMemOperand *MMO = *MI.memoperands_begin();
1347 
        // Infer d16 from the memory size, as the register type will be
        // mangled by unpacked subtargets, or by TFE.
1350         IsD16 = ((8 * MMO->getSize()) / DMaskLanes) < 32;
1351 
1352         if (IsD16 && !STI.hasUnpackedD16VMem())
1353           NumVDataDwords = (DMaskLanes + 1) / 2;
1354       }
1355     }
1356   }
1357 
1358   // Optimize _L to _LZ when _L is zero
1359   if (LZMappingInfo) {
1360     // The legalizer replaced the register with an immediate 0 if we need to
1361     // change the opcode.
1362     const MachineOperand &Lod = MI.getOperand(VAddrIdx + NumVAddr - 1);
1363     if (Lod.isImm()) {
1364       assert(Lod.getImm() == 0);
1365       IntrOpcode = LZMappingInfo->LZ;  // set new opcode to _lz variant of _l
1366     }
1367   }
1368 
1369   // Optimize _mip away, when 'lod' is zero
1370   if (MIPMappingInfo) {
1371     const MachineOperand &Lod = MI.getOperand(VAddrIdx + NumVAddr - 1);
1372     if (Lod.isImm()) {
1373       assert(Lod.getImm() == 0);
1374       IntrOpcode = MIPMappingInfo->NONMIP;  // set new opcode to variant without _mip
1375     }
1376   }
1377 
1378   // TODO: Check this in verifier.
1379   assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");
1380 
1381   bool GLC = false;
1382   bool SLC = false;
1383   bool DLC = false;
1384   if (BaseOpcode->Atomic) {
1385     GLC = true; // TODO no-return optimization
1386     if (!parseCachePolicy(MI.getOperand(CtrlIdx + 1).getImm(), nullptr, &SLC,
1387                           IsGFX10 ? &DLC : nullptr))
1388       return false;
1389   } else {
1390     if (!parseCachePolicy(MI.getOperand(CtrlIdx + 1).getImm(), &GLC, &SLC,
1391                           IsGFX10 ? &DLC : nullptr))
1392       return false;
1393   }
1394 
1395   int NumVAddrRegs = 0;
1396   int NumVAddrDwords = 0;
1397   for (int I = 0; I < NumVAddr; ++I) {
1398     // Skip the $noregs and 0s inserted during legalization.
1399     MachineOperand &AddrOp = MI.getOperand(VAddrIdx + I);
1400     if (!AddrOp.isReg())
1401       continue; // XXX - Break?
1402 
1403     Register Addr = AddrOp.getReg();
1404     if (!Addr)
1405       break;
1406 
1407     ++NumVAddrRegs;
1408     NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32;
1409   }
1410 
  // The legalizer preprocessed the intrinsic arguments. If we aren't using
  // NSA, these should have been packed into a single value in the first
  // address register.
1414   const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs;
1415   if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) {
1416     LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n");
1417     return false;
1418   }
1419 
  if (IsTexFail)
    ++NumVDataDwords;

  int Opcode = -1;
  if (IsGFX10) {
    Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
                                   UseNSA ? AMDGPU::MIMGEncGfx10NSA
                                          : AMDGPU::MIMGEncGfx10Default,
                                   NumVDataDwords, NumVAddrDwords);
  } else {
    if (STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
      Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
                                     NumVDataDwords, NumVAddrDwords);
    if (Opcode == -1)
      Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
                                     NumVDataDwords, NumVAddrDwords);
  }
  assert(Opcode != -1);

  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode))
    .cloneMemRefs(MI);

  if (VDataOut) {
    if (BaseOpcode->AtomicX2) {
      const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64;

      Register TmpReg = MRI->createVirtualRegister(
        Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
      unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;

      MIB.addDef(TmpReg);
      BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut)
        .addReg(TmpReg, RegState::Kill, SubReg);

    } else {
      MIB.addDef(VDataOut); // vdata output
    }
  }

  if (VDataIn)
    MIB.addReg(VDataIn); // vdata input

  for (int i = 0; i != NumVAddrRegs; ++i) {
    MachineOperand &SrcOp = MI.getOperand(VAddrIdx + i);
    if (SrcOp.isReg()) {
      assert(SrcOp.getReg() != 0);
      MIB.addReg(SrcOp.getReg());
    }
  }

  MIB.addReg(MI.getOperand(VAddrIdx + NumVAddr).getReg()); // rsrc
  if (BaseOpcode->Sampler)
    MIB.addReg(MI.getOperand(VAddrIdx + NumVAddr + 1).getReg()); // sampler

  MIB.addImm(DMask); // dmask

  if (IsGFX10)
    MIB.addImm(DimInfo->Encoding);
  MIB.addImm(Unorm);
  if (IsGFX10)
    MIB.addImm(DLC);

  MIB.addImm(GLC);
  MIB.addImm(SLC);
  MIB.addImm(IsA16 && // a16 or r128
             STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0);
  if (IsGFX10)
    MIB.addImm(IsA16 ? -1 : 0);

  MIB.addImm(TFE); // tfe
  MIB.addImm(LWE); // lwe
  if (!IsGFX10)
    MIB.addImm(DimInfo->DA ? -1 : 0);
  if (BaseOpcode->HasD16)
    MIB.addImm(IsD16 ? -1 : 0);

  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
    MachineInstr &I) const {
  unsigned IntrinsicID = I.getIntrinsicID();
  switch (IntrinsicID) {
  case Intrinsic::amdgcn_end_cf:
    return selectEndCfIntrinsic(I);
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap:
    return selectDSOrderedIntrinsic(I, IntrinsicID);
  case Intrinsic::amdgcn_ds_gws_init:
  case Intrinsic::amdgcn_ds_gws_barrier:
  case Intrinsic::amdgcn_ds_gws_sema_v:
  case Intrinsic::amdgcn_ds_gws_sema_br:
  case Intrinsic::amdgcn_ds_gws_sema_p:
  case Intrinsic::amdgcn_ds_gws_sema_release_all:
    return selectDSGWSIntrinsic(I, IntrinsicID);
  case Intrinsic::amdgcn_ds_append:
    return selectDSAppendConsume(I, true);
  case Intrinsic::amdgcn_ds_consume:
    return selectDSAppendConsume(I, false);
  default: {
    return selectImpl(I, *CoverageInfo);
  }
  }
}

bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
  if (selectImpl(I, *CoverageInfo))
    return true;

  MachineBasicBlock *BB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();

  Register DstReg = I.getOperand(0).getReg();
  unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
  assert(Size <= 32 || Size == 64);
  const MachineOperand &CCOp = I.getOperand(1);
  Register CCReg = CCOp.getReg();
  if (!isVCC(CCReg, *MRI)) {
    unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
                                         AMDGPU::S_CSELECT_B32;
    MachineInstr *CopySCC =
        BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
            .addReg(CCReg);

    // The generic constrainSelectedInstRegOperands doesn't work for the scc
    // register bank, because it does not cover the register class we use to
    // represent it. So we need to manually set the register class here.
    if (!MRI->getRegClassOrNull(CCReg))
      MRI->setRegClass(CCReg,
                       TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
    MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
            .add(I.getOperand(2))
            .add(I.getOperand(3));

    bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI) |
               constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
    I.eraseFromParent();
    return Ret;
  }

  // Wide VGPR select should have been split in RegBankSelect.
  if (Size > 32)
    return false;

  MachineInstr *Select =
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
              .addImm(0)
              .add(I.getOperand(3))
              .addImm(0)
              .add(I.getOperand(2))
              .add(I.getOperand(1));

  bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

bool AMDGPUInstructionSelector::selectG_STORE(MachineInstr &I) const {
  initM0(I);
  return selectImpl(I, *CoverageInfo);
}

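// Map a value size in bits to the subregister index covering that many low
// bits of a wider register.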
static int sizeToSubRegIndex(unsigned Size) {
  switch (Size) {
  case 32:
    return AMDGPU::sub0;
  case 64:
    return AMDGPU::sub0_sub1;
  case 96:
    return AMDGPU::sub0_sub1_sub2;
  case 128:
    return AMDGPU::sub0_sub1_sub2_sub3;
  case 256:
    return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
  default:
    if (Size < 32)
      return AMDGPU::sub0;
    if (Size > 256)
      return -1;
    return sizeToSubRegIndex(PowerOf2Ceil(Size));
  }
}

bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
  Register DstReg = I.getOperand(0).getReg();
  Register SrcReg = I.getOperand(1).getReg();
  const LLT DstTy = MRI->getType(DstReg);
  const LLT SrcTy = MRI->getType(SrcReg);
  const LLT S1 = LLT::scalar(1);

  const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
  const RegisterBank *DstRB;
  if (DstTy == S1) {
    // This is a special case. We don't treat s1 for legalization artifacts as
    // vcc booleans.
    DstRB = SrcRB;
  } else {
    DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
    if (SrcRB != DstRB)
      return false;
  }

  const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;

  unsigned DstSize = DstTy.getSizeInBits();
  unsigned SrcSize = SrcTy.getSizeInBits();

  const TargetRegisterClass *SrcRC
    = TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB, *MRI);
  const TargetRegisterClass *DstRC
    = TRI.getRegClassForSizeOnBank(DstSize, *DstRB, *MRI);
  if (!SrcRC || !DstRC)
    return false;

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
    return false;
  }

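  // Truncating v2s32 to v2s16: pack the low 16 bits of each source element
  // into a single 32-bit register.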
  if (DstTy == LLT::vector(2, 16) && SrcTy == LLT::vector(2, 32)) {
    MachineBasicBlock *MBB = I.getParent();
    const DebugLoc &DL = I.getDebugLoc();

    Register LoReg = MRI->createVirtualRegister(DstRC);
    Register HiReg = MRI->createVirtualRegister(DstRC);
    BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg)
      .addReg(SrcReg, 0, AMDGPU::sub0);
    BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg)
      .addReg(SrcReg, 0, AMDGPU::sub1);

    if (IsVALU && STI.hasSDWA()) {
      // Write the low 16-bits of the high element into the high 16-bits of the
      // low element.
      MachineInstr *MovSDWA =
        BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
        .addImm(0)                             // $src0_modifiers
        .addReg(HiReg)                         // $src0
        .addImm(0)                             // $clamp
        .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
        .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
        .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
        .addReg(LoReg, RegState::Implicit);
      MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
    } else {
      Register TmpReg0 = MRI->createVirtualRegister(DstRC);
      Register TmpReg1 = MRI->createVirtualRegister(DstRC);
      Register ImmReg = MRI->createVirtualRegister(DstRC);
      if (IsVALU) {
        BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0)
          .addImm(16)
          .addReg(HiReg);
      } else {
        BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0)
          .addReg(HiReg)
          .addImm(16);
      }

      unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
      unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
      unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32;

      BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg)
        .addImm(0xffff);
      BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1)
        .addReg(LoReg)
        .addReg(ImmReg);
      BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg)
        .addReg(TmpReg0)
        .addReg(TmpReg1);
    }

    I.eraseFromParent();
    return true;
  }

  if (!DstTy.isScalar())
    return false;

  if (SrcSize > 32) {
    int SubRegIdx = sizeToSubRegIndex(DstSize);
    if (SubRegIdx == -1)
      return false;

    // Deal with weird cases where the class only partially supports the subreg
    // index.
    const TargetRegisterClass *SrcWithSubRC
      = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
    if (!SrcWithSubRC)
      return false;

    if (SrcWithSubRC != SrcRC) {
      if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI))
        return false;
    }

    I.getOperand(1).setSubReg(SubRegIdx);
  }

  I.setDesc(TII.get(TargetOpcode::COPY));
  return true;
}

/// \returns true if a bitmask for \p Size bits will be an inline immediate.
static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
  Mask = maskTrailingOnes<unsigned>(Size);
  int SignedMask = static_cast<int>(Mask);
  return SignedMask >= -16 && SignedMask <= 64;
}

// Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
  Register Reg, const MachineRegisterInfo &MRI,
  const TargetRegisterInfo &TRI) const {
  const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
  if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
    return RB;

  // Ignore the type, since we don't use vcc in artifacts.
  if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
    return &RBI.getRegBankFromRegClass(*RC, LLT());
  return nullptr;
}

bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
  bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG;
  bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg;
  const DebugLoc &DL = I.getDebugLoc();
  MachineBasicBlock &MBB = *I.getParent();
  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI->getType(DstReg);
  const LLT SrcTy = MRI->getType(SrcReg);
  const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ?
    I.getOperand(2).getImm() : SrcTy.getSizeInBits();
  const unsigned DstSize = DstTy.getSizeInBits();
  if (!DstTy.isScalar())
    return false;

  if (I.getOpcode() == AMDGPU::G_ANYEXT)
    return selectCOPY(I);

  // Artifact casts should never use vcc.
  const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);

  if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
    // 64-bit should have been split up in RegBankSelect.

    // Try to use an and with a mask if it will save code size.
    unsigned Mask;
    if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
      MachineInstr *ExtI =
      BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
        .addImm(Mask)
        .addReg(SrcReg);
      I.eraseFromParent();
      return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
    }

    const unsigned BFE = Signed ? AMDGPU::V_BFE_I32 : AMDGPU::V_BFE_U32;
    MachineInstr *ExtI =
      BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
      .addReg(SrcReg)
      .addImm(0) // Offset
      .addImm(SrcSize); // Width
    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
  }

1788 
1789   if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
1790     const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ?
1791       AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass;
1792     if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI))
1793       return false;
1794 
1795     if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
1796       const unsigned SextOpc = SrcSize == 8 ?
1797         AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
1798       BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
1799         .addReg(SrcReg);
1800       I.eraseFromParent();
1801       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
1802     }
1803 
1804     const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
1805     const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
1806 
1807     // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16]= width.
1808     if (DstSize > 32 && (SrcSize <= 32 || InReg)) {
1809       // We need a 64-bit register source, but the high bits don't matter.
1810       Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
1811       Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1812       unsigned SubReg = InReg ? AMDGPU::sub0 : 0;
1813 
1814       BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
1815       BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
1816         .addReg(SrcReg, 0, SubReg)
1817         .addImm(AMDGPU::sub0)
1818         .addReg(UndefReg)
1819         .addImm(AMDGPU::sub1);
1820 
1821       BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
1822         .addReg(ExtReg)
1823         .addImm(SrcSize << 16);
1824 
1825       I.eraseFromParent();
1826       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
1827     }
1828 
1829     unsigned Mask;
1830     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
1831       BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
1832         .addReg(SrcReg)
1833         .addImm(Mask);
1834     } else {
1835       BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
1836         .addReg(SrcReg)
1837         .addImm(SrcSize << 16);
1838     }
1839 
1840     I.eraseFromParent();
1841     return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
1842   }
1843 
1844   return false;
1845 }

bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineOperand &ImmOp = I.getOperand(1);

  // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
  if (ImmOp.isFPImm()) {
    const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
    ImmOp.ChangeToImmediate(Imm.getZExtValue());
  } else if (ImmOp.isCImm()) {
    ImmOp.ChangeToImmediate(ImmOp.getCImm()->getZExtValue());
  }

  Register DstReg = I.getOperand(0).getReg();
  unsigned Size;
  bool IsSgpr;
  const RegisterBank *RB = MRI->getRegBankOrNull(I.getOperand(0).getReg());
  if (RB) {
    IsSgpr = RB->getID() == AMDGPU::SGPRRegBankID;
    Size = MRI->getType(DstReg).getSizeInBits();
  } else {
    const TargetRegisterClass *RC = TRI.getRegClassForReg(*MRI, DstReg);
    IsSgpr = TRI.isSGPRClass(RC);
    Size = TRI.getRegSizeInBits(*RC);
  }

  if (Size != 32 && Size != 64)
    return false;

  unsigned Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  if (Size == 32) {
    I.setDesc(TII.get(Opcode));
    I.addImplicitDefUseOperands(*MF);
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

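  // 64-bit case: an SGPR inline constant can use S_MOV_B64 directly; anything
  // else is built from two 32-bit moves combined with a REG_SEQUENCE.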
  const DebugLoc &DL = I.getDebugLoc();

  APInt Imm(Size, I.getOperand(1).getImm());

  MachineInstr *ResInst;
  if (IsSgpr && TII.isInlineConstant(Imm)) {
    ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
      .addImm(I.getOperand(1).getImm());
  } else {
    const TargetRegisterClass *RC = IsSgpr ?
      &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
    Register LoReg = MRI->createVirtualRegister(RC);
    Register HiReg = MRI->createVirtualRegister(RC);

    BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
      .addImm(Imm.trunc(32).getZExtValue());

    BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
      .addImm(Imm.ashr(32).getZExtValue());

    ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
      .addReg(LoReg)
      .addImm(AMDGPU::sub0)
      .addReg(HiReg)
      .addImm(AMDGPU::sub1);
  }

  // We can't call constrainSelectedInstRegOperands here, because it doesn't
  // work for target independent opcodes.
  I.eraseFromParent();
  const TargetRegisterClass *DstRC =
    TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
  if (!DstRC)
    return true;
  return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
}

bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
  // Only manually handle the f64 SGPR case.
  //
  // FIXME: This is a workaround for 2.5 different tablegen problems. Because
  // the bit ops theoretically have a second result due to the implicit def of
  // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing
  // that is easy by disabling the check. The result works, but uses a
  // nonsensical sreg32orlds_and_sreg_1 regclass.
  //
  // The DAG emitter is more problematic, and incorrectly adds both S_XOR_B32
  // to the variadic REG_SEQUENCE operands.

  Register Dst = MI.getOperand(0).getReg();
  const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
  if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
      MRI->getType(Dst) != LLT::scalar(64))
    return false;

  Register Src = MI.getOperand(1).getReg();
  MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI);
  if (Fabs)
    Src = Fabs->getOperand(1).getReg();

  if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
    return false;

  MachineBasicBlock *BB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);

  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
    .addReg(Src, 0, AMDGPU::sub0);
  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
    .addReg(Src, 0, AMDGPU::sub1);
  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
    .addImm(0x80000000);

  // Set or toggle sign bit.
  unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
  BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
    .addReg(HiReg)
    .addReg(ConstReg);
  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
    .addReg(LoReg)
    .addImm(AMDGPU::sub0)
    .addReg(OpReg)
    .addImm(AMDGPU::sub1);
  MI.eraseFromParent();
  return true;
}

// FIXME: This is a workaround for the same tablegen problems as G_FNEG.
bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {
  Register Dst = MI.getOperand(0).getReg();
  const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
  if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
      MRI->getType(Dst) != LLT::scalar(64))
    return false;

  Register Src = MI.getOperand(1).getReg();
  MachineBasicBlock *BB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);

  if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
    return false;

  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
    .addReg(Src, 0, AMDGPU::sub0);
  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
    .addReg(Src, 0, AMDGPU::sub1);
  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
    .addImm(0x7fffffff);

  // Clear sign bit.
  // TODO: Should this use S_BITSET0_*?
  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg)
    .addReg(HiReg)
    .addReg(ConstReg);
  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
    .addReg(LoReg)
    .addImm(AMDGPU::sub0)
    .addReg(OpReg)
    .addImm(AMDGPU::sub1);

  MI.eraseFromParent();
  return true;
}

static bool isConstant(const MachineInstr &MI) {
  return MI.getOpcode() == TargetOpcode::G_CONSTANT;
}

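// Walk the chain of G_PTR_ADDs feeding this load, recording the constant
// offset and the SGPR/VGPR address pieces of each step.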
void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
    const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
  const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());

  assert(PtrMI);

  if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
    return;

  GEPInfo GEPInfo(*PtrMI);

  for (unsigned i = 1; i != 3; ++i) {
    const MachineOperand &GEPOp = PtrMI->getOperand(i);
    const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
    assert(OpDef);
    if (i == 2 && isConstant(*OpDef)) {
      // TODO: Could handle constant base + variable offset, but a combine
      // probably should have commuted it.
      assert(GEPInfo.Imm == 0);
      GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
      continue;
    }
    const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
    if (OpBank->getID() == AMDGPU::SGPRRegBankID)
      GEPInfo.SgprParts.push_back(GEPOp.getReg());
    else
      GEPInfo.VgprParts.push_back(GEPOp.getReg());
  }

  AddrInfo.push_back(GEPInfo);
  getAddrModeInfo(*PtrMI, MRI, AddrInfo);
}

bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
  if (!MI.hasOneMemOperand())
    return false;

  const MachineMemOperand *MMO = *MI.memoperands_begin();
  const Value *Ptr = MMO->getValue();

  // UndefValue means this is a load of a kernel input. These are uniform.
  // Sometimes LDS instructions have constant pointers.
  // If Ptr is null, then that means this mem operand contains a
  // PseudoSourceValue like GOT.
  if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
      isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
    return true;

  if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
    return true;

  const Instruction *I = dyn_cast<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.uniform");
}

bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
  for (const GEPInfo &GEPInfo : AddrInfo) {
    if (!GEPInfo.VgprParts.empty())
      return true;
  }
  return false;
}

void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();

  const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
  unsigned AS = PtrTy.getAddressSpace();
  if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
      STI.ldsRequiresM0Init()) {
    // If DS instructions require M0 initialization, insert it before selecting.
    BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .addImm(-1);
  }
}

bool AMDGPUInstructionSelector::selectG_LOAD_ATOMICRMW(MachineInstr &I) const {
  initM0(I);
  return selectImpl(I, *CoverageInfo);
}

// TODO: No rtn optimization.
bool AMDGPUInstructionSelector::selectG_AMDGPU_ATOMIC_CMPXCHG(
  MachineInstr &MI) const {
  Register PtrReg = MI.getOperand(1).getReg();
  const LLT PtrTy = MRI->getType(PtrReg);
  if (PtrTy.getAddressSpace() == AMDGPUAS::FLAT_ADDRESS ||
      STI.useFlatForGlobal())
    return selectImpl(MI, *CoverageInfo);

  Register DstReg = MI.getOperand(0).getReg();
  const LLT Ty = MRI->getType(DstReg);
  const bool Is64 = Ty.getSizeInBits() == 64;
  const unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
  Register TmpReg = MRI->createVirtualRegister(
    Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *BB = MI.getParent();

  Register VAddr, RSrcReg, SOffset;
  int64_t Offset = 0;

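  // Try the offset-only MUBUF form first, then the addr64 form; otherwise
  // fall back to the generated matcher.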
  unsigned Opcode;
  if (selectMUBUFOffsetImpl(MI.getOperand(1), RSrcReg, SOffset, Offset)) {
    Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_OFFSET_RTN :
                    AMDGPU::BUFFER_ATOMIC_CMPSWAP_OFFSET_RTN;
  } else if (selectMUBUFAddr64Impl(MI.getOperand(1), VAddr,
                                   RSrcReg, SOffset, Offset)) {
    Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_ADDR64_RTN :
                    AMDGPU::BUFFER_ATOMIC_CMPSWAP_ADDR64_RTN;
  } else
    return selectImpl(MI, *CoverageInfo);

  auto MIB = BuildMI(*BB, &MI, DL, TII.get(Opcode), TmpReg)
    .addReg(MI.getOperand(2).getReg());

  if (VAddr)
    MIB.addReg(VAddr);

  MIB.addReg(RSrcReg);
  if (SOffset)
    MIB.addReg(SOffset);
  else
    MIB.addImm(0);

  MIB.addImm(Offset);
  MIB.addImm(0); // slc
  MIB.cloneMemRefs(MI);

  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), DstReg)
    .addReg(TmpReg, RegState::Kill, SubReg);

  MI.eraseFromParent();

  MRI->setRegClass(
    DstReg, Is64 ? &AMDGPU::VReg_64RegClass : &AMDGPU::VGPR_32RegClass);
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineOperand &CondOp = I.getOperand(0);
  Register CondReg = CondOp.getReg();
  const DebugLoc &DL = I.getDebugLoc();

  unsigned BrOpcode;
  Register CondPhysReg;
  const TargetRegisterClass *ConstrainRC;

  // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
  // whether the branch is uniform when selecting the instruction. In
  // GlobalISel, we should push that decision into RegBankSelect. Assume for now
  // RegBankSelect knows what it's doing if the branch condition is scc, even
  // though it currently does not.
  if (!isVCC(CondReg, *MRI)) {
    if (MRI->getType(CondReg) != LLT::scalar(32))
      return false;

    CondPhysReg = AMDGPU::SCC;
    BrOpcode = AMDGPU::S_CBRANCH_SCC1;
    // FIXME: Hack for isSCC tests
    ConstrainRC = &AMDGPU::SGPR_32RegClass;
  } else {
    // FIXME: Do we have to insert an and with exec here, like in SelectionDAG?
    // We sort of know, based on the register bank, that a VCC producer ands
    // inactive lanes with 0. What if there was a logical operation with vcc
    // producers in different blocks/with different exec masks?
    // FIXME: Should scc->vcc copies and with exec?
    CondPhysReg = TRI.getVCC();
    BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
    ConstrainRC = TRI.getBoolRC();
  }

  if (!MRI->getRegClassOrNull(CondReg))
    MRI->setRegClass(CondReg, ConstrainRC);

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
    .addReg(CondReg);
  BuildMI(*BB, &I, DL, TII.get(BrOpcode))
    .addMBB(I.getOperand(1).getMBB());

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_FRAME_INDEX_GLOBAL_VALUE(
  MachineInstr &I) const {
  Register DstReg = I.getOperand(0).getReg();
  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
  I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
  if (IsVGPR)
    I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));

  return RBI.constrainGenericRegister(
    DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
}

bool AMDGPUInstructionSelector::selectG_PTR_MASK(MachineInstr &I) const {
  uint64_t Align = I.getOperand(2).getImm();
  const uint64_t Mask = ~((UINT64_C(1) << Align) - 1);

  MachineBasicBlock *BB = I.getParent();

  Register DstReg = I.getOperand(0).getReg();
  Register SrcReg = I.getOperand(1).getReg();

  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
  const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
  unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
  unsigned MovOpc = IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
  const TargetRegisterClass &RegRC
    = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;

  LLT Ty = MRI->getType(DstReg);

  const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB,
                                                                  *MRI);
  const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB,
                                                                  *MRI);
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
      !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
    return false;

  const DebugLoc &DL = I.getDebugLoc();
  Register ImmReg = MRI->createVirtualRegister(&RegRC);
  BuildMI(*BB, &I, DL, TII.get(MovOpc), ImmReg)
    .addImm(Mask);

  if (Ty.getSizeInBits() == 32) {
    BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
      .addReg(SrcReg)
      .addReg(ImmReg);
    I.eraseFromParent();
    return true;
  }

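  // 64-bit pointer: assuming the alignment is below 32 bits, the mask only
  // clears bits in the low half, so AND the low half and forward the high
  // half unchanged.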
  Register HiReg = MRI->createVirtualRegister(&RegRC);
  Register LoReg = MRI->createVirtualRegister(&RegRC);
  Register MaskLo = MRI->createVirtualRegister(&RegRC);

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
    .addReg(SrcReg, 0, AMDGPU::sub0);
  BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
    .addReg(SrcReg, 0, AMDGPU::sub1);

  BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskLo)
    .addReg(LoReg)
    .addReg(ImmReg);
  BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
    .addReg(MaskLo)
    .addImm(AMDGPU::sub0)
    .addReg(HiReg)
    .addImm(AMDGPU::sub1);
  I.eraseFromParent();
  return true;
}

/// Return the register to use for the index value, and the subregister to use
/// for the indirectly accessed register.
static std::pair<Register, unsigned>
computeIndirectRegIndex(MachineRegisterInfo &MRI,
                        const SIRegisterInfo &TRI,
                        const TargetRegisterClass *SuperRC,
                        Register IdxReg,
                        unsigned EltSize) {
  Register IdxBaseReg;
  int Offset;
  MachineInstr *Unused;

  std::tie(IdxBaseReg, Offset, Unused)
    = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg);
  if (IdxBaseReg == AMDGPU::NoRegister) {
    // This will happen if the index is a known constant. This should ordinarily
    // be legalized out, but handle it as a register just in case.
    assert(Offset == 0);
    IdxBaseReg = IdxReg;
  }

  ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);

  // Skip out of bounds offsets, or else we would end up using an undefined
  // register.
  if (static_cast<unsigned>(Offset) >= SubRegs.size())
    return std::make_pair(IdxReg, SubRegs[0]);
  return std::make_pair(IdxBaseReg, SubRegs[Offset]);
}

bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
  MachineInstr &MI) const {
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  Register IdxReg = MI.getOperand(2).getReg();

  LLT DstTy = MRI->getType(DstReg);
  LLT SrcTy = MRI->getType(SrcReg);

  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
  const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);

  // The index must be scalar. If it wasn't, RegBankSelect should have moved
  // this into a waterfall loop.
  if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
    return false;

  const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB,
                                                                  *MRI);
  const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(DstTy, *DstRB,
                                                                  *MRI);
  if (!SrcRC || !DstRC)
    return false;
  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
      !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
    return false;

  MachineBasicBlock *BB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  const bool Is64 = DstTy.getSizeInBits() == 64;

  unsigned SubReg;
  std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg,
                                                     DstTy.getSizeInBits() / 8);

  if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
    if (DstTy.getSizeInBits() != 32 && !Is64)
      return false;

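    // Scalar case: S_MOVRELS reads the source subregister selected by M0.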
    BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
      .addReg(IdxReg);

    unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
    BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
      .addReg(SrcReg, 0, SubReg)
      .addReg(SrcReg, RegState::Implicit);
    MI.eraseFromParent();
    return true;
  }

  if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
    return false;

  if (!STI.useVGPRIndexMode()) {
    BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
      .addReg(IdxReg);
    BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
      .addReg(SrcReg, RegState::Undef, SubReg)
      .addReg(SrcReg, RegState::Implicit);
    MI.eraseFromParent();
    return true;
  }

  BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_ON))
    .addReg(IdxReg)
    .addImm(AMDGPU::VGPRIndexMode::SRC0_ENABLE);
  BuildMI(*BB, MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), DstReg)
    .addReg(SrcReg, RegState::Undef, SubReg)
    .addReg(SrcReg, RegState::Implicit)
    .addReg(AMDGPU::M0, RegState::Implicit);
  BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_OFF));

  MI.eraseFromParent();
  return true;
}

// TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
  MachineInstr &MI) const {
  Register DstReg = MI.getOperand(0).getReg();
  Register VecReg = MI.getOperand(1).getReg();
  Register ValReg = MI.getOperand(2).getReg();
  Register IdxReg = MI.getOperand(3).getReg();

  LLT VecTy = MRI->getType(DstReg);
  LLT ValTy = MRI->getType(ValReg);
  unsigned VecSize = VecTy.getSizeInBits();
  unsigned ValSize = ValTy.getSizeInBits();

  const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
  const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
  const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);

  assert(VecTy.getElementType() == ValTy);

  // The index must be scalar. If it wasn't, RegBankSelect should have moved
  // this into a waterfall loop.
  if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
    return false;

  const TargetRegisterClass *VecRC = TRI.getRegClassForTypeOnBank(VecTy, *VecRB,
                                                                  *MRI);
  const TargetRegisterClass *ValRC = TRI.getRegClassForTypeOnBank(ValTy, *ValRB,
                                                                  *MRI);

  if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
      !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
      !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
      !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
    return false;

  if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
    return false;

  unsigned SubReg;
  std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg,
                                                     ValSize / 8);

  const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
                         STI.useVGPRIndexMode();

  MachineBasicBlock *BB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  if (IndexMode) {
    BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_ON))
      .addReg(IdxReg)
      .addImm(AMDGPU::VGPRIndexMode::DST_ENABLE);
  } else {
    BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
      .addReg(IdxReg);
  }

  const MCInstrDesc &RegWriteOp
    = TII.getIndirectRegWritePseudo(VecSize, ValSize,
                                    VecRB->getID() == AMDGPU::SGPRRegBankID);
  BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
    .addReg(VecReg)
    .addReg(ValReg)
    .addImm(SubReg);

  if (IndexMode)
    BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_OFF));

  MI.eraseFromParent();
  return true;
}

static bool isZeroOrUndef(int X) {
  return X == 0 || X == -1;
}

static bool isOneOrUndef(int X) {
  return X == 1 || X == -1;
}

static bool isZeroOrOneOrUndef(int X) {
  return X == 0 || X == 1 || X == -1;
}

// Normalize a VOP3P shuffle mask to refer to the low/high half of a single
// 32-bit register.
static Register normalizeVOP3PMask(int NewMask[2], Register Src0, Register Src1,
                                   ArrayRef<int> Mask) {
  NewMask[0] = Mask[0];
  NewMask[1] = Mask[1];
  if (isZeroOrOneOrUndef(Mask[0]) && isZeroOrOneOrUndef(Mask[1]))
    return Src0;

  assert(NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1);
  assert(NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1);

  // Shift the mask inputs to be 0/1.
  NewMask[0] = NewMask[0] == -1 ? -1 : NewMask[0] - 2;
  NewMask[1] = NewMask[1] == -1 ? -1 : NewMask[1] - 2;
  return Src1;
}

// This is only legal with VOP3P instructions as an aid to op_sel matching.
bool AMDGPUInstructionSelector::selectG_SHUFFLE_VECTOR(
  MachineInstr &MI) const {
  Register DstReg = MI.getOperand(0).getReg();
  Register Src0Reg = MI.getOperand(1).getReg();
  Register Src1Reg = MI.getOperand(2).getReg();
  ArrayRef<int> ShufMask = MI.getOperand(3).getShuffleMask();

  const LLT V2S16 = LLT::vector(2, 16);
  if (MRI->getType(DstReg) != V2S16 || MRI->getType(Src0Reg) != V2S16)
    return false;

  if (!AMDGPU::isLegalVOP3PShuffleMask(ShufMask))
    return false;

  assert(ShufMask.size() == 2);
  assert(STI.hasSDWA() && "no target has VOP3P but not SDWA");

  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
  const TargetRegisterClass &RC = IsVALU ?
    AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;

  // Handle the degenerate case which should have folded out.
  if (ShufMask[0] == -1 && ShufMask[1] == -1) {
    BuildMI(*MBB, MI, DL, TII.get(AMDGPU::IMPLICIT_DEF), DstReg);

    MI.eraseFromParent();
    return RBI.constrainGenericRegister(DstReg, RC, *MRI);
  }

  // A legal VOP3P mask only reads one of the sources.
  int Mask[2];
  Register SrcVec = normalizeVOP3PMask(Mask, Src0Reg, Src1Reg, ShufMask);

  if (!RBI.constrainGenericRegister(DstReg, RC, *MRI) ||
      !RBI.constrainGenericRegister(SrcVec, RC, *MRI))
    return false;

  // TODO: This also should have been folded out.
  if (isZeroOrUndef(Mask[0]) && isOneOrUndef(Mask[1])) {
    BuildMI(*MBB, MI, DL, TII.get(AMDGPU::COPY), DstReg)
      .addReg(SrcVec);

    MI.eraseFromParent();
    return true;
  }

  if (Mask[0] == 1 && Mask[1] == -1) {
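    // Mask <1, u>: shift the high 16 bits of the source down into the low
    // half.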
    if (IsVALU) {
      BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
        .addImm(16)
        .addReg(SrcVec);
    } else {
      BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
        .addReg(SrcVec)
        .addImm(16);
    }
  } else if (Mask[0] == -1 && Mask[1] == 0) {
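    // Mask <u, 0>: shift the low 16 bits of the source up into the high half.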
    if (IsVALU) {
      BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), DstReg)
        .addImm(16)
        .addReg(SrcVec);
    } else {
      BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHL_B32), DstReg)
        .addReg(SrcVec)
        .addImm(16);
    }
  } else if (Mask[0] == 0 && Mask[1] == 0) {
    if (IsVALU) {
      // Write low half of the register into the high half.
      MachineInstr *MovSDWA =
        BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
        .addImm(0)                             // $src0_modifiers
        .addReg(SrcVec)                        // $src0
        .addImm(0)                             // $clamp
        .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
        .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
        .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
        .addReg(SrcVec, RegState::Implicit);
      MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
    } else {
      BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
        .addReg(SrcVec)
        .addReg(SrcVec);
    }
  } else if (Mask[0] == 1 && Mask[1] == 1) {
    if (IsVALU) {
      // Write high half of the register into the low half.
      MachineInstr *MovSDWA =
        BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
        .addImm(0)                             // $src0_modifiers
        .addReg(SrcVec)                        // $src0
        .addImm(0)                             // $clamp
        .addImm(AMDGPU::SDWA::WORD_0)          // $dst_sel
        .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
        .addImm(AMDGPU::SDWA::WORD_1)          // $src0_sel
        .addReg(SrcVec, RegState::Implicit);
      MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
    } else {
      BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HH_B32_B16), DstReg)
        .addReg(SrcVec)
        .addReg(SrcVec);
    }
  } else if (Mask[0] == 1 && Mask[1] == 0) {
    if (IsVALU) {
      BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_ALIGNBIT_B32), DstReg)
        .addReg(SrcVec)
        .addReg(SrcVec)
        .addImm(16);
    } else {
      Register TmpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
      BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), TmpReg)
        .addReg(SrcVec)
        .addImm(16);
      BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
        .addReg(TmpReg)
        .addReg(SrcVec);
    }
  } else
    llvm_unreachable("all shuffle masks should be handled");

  MI.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::select(MachineInstr &I) {
  if (I.isPHI())
    return selectPHI(I);

  if (!I.isPreISelOpcode()) {
    if (I.isCopy())
      return selectCOPY(I);
    return true;
  }

  switch (I.getOpcode()) {
  case TargetOpcode::G_AND:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_XOR:
    if (selectImpl(I, *CoverageInfo))
      return true;
    return selectG_AND_OR_XOR(I);
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_SUB:
    if (selectImpl(I, *CoverageInfo))
      return true;
    return selectG_ADD_SUB(I);
  case TargetOpcode::G_UADDO:
  case TargetOpcode::G_USUBO:
  case TargetOpcode::G_UADDE:
  case TargetOpcode::G_USUBE:
    return selectG_UADDO_USUBO_UADDE_USUBE(I);
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_BITCAST:
  case TargetOpcode::G_PTRTOINT:
    return selectCOPY(I);
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
    return selectG_CONSTANT(I);
  case TargetOpcode::G_FNEG:
    if (selectImpl(I, *CoverageInfo))
      return true;
    return selectG_FNEG(I);
  case TargetOpcode::G_FABS:
    if (selectImpl(I, *CoverageInfo))
      return true;
    return selectG_FABS(I);
  case TargetOpcode::G_EXTRACT:
    return selectG_EXTRACT(I);
  case TargetOpcode::G_MERGE_VALUES:
  case TargetOpcode::G_BUILD_VECTOR:
  case TargetOpcode::G_CONCAT_VECTORS:
    return selectG_MERGE_VALUES(I);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectG_UNMERGE_VALUES(I);
  case TargetOpcode::G_BUILD_VECTOR_TRUNC:
    return selectG_BUILD_VECTOR_TRUNC(I);
  case TargetOpcode::G_PTR_ADD:
    return selectG_PTR_ADD(I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectG_IMPLICIT_DEF(I);
  case TargetOpcode::G_INSERT:
    return selectG_INSERT(I);
  case TargetOpcode::G_INTRINSIC:
    return selectG_INTRINSIC(I);
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
  case TargetOpcode::G_ICMP:
    if (selectG_ICMP(I))
      return true;
    return selectImpl(I, *CoverageInfo);
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_ATOMIC_CMPXCHG:
  case TargetOpcode::G_ATOMICRMW_XCHG:
  case TargetOpcode::G_ATOMICRMW_ADD:
  case TargetOpcode::G_ATOMICRMW_SUB:
  case TargetOpcode::G_ATOMICRMW_AND:
  case TargetOpcode::G_ATOMICRMW_OR:
  case TargetOpcode::G_ATOMICRMW_XOR:
  case TargetOpcode::G_ATOMICRMW_MIN:
  case TargetOpcode::G_ATOMICRMW_MAX:
  case TargetOpcode::G_ATOMICRMW_UMIN:
  case TargetOpcode::G_ATOMICRMW_UMAX:
  case TargetOpcode::G_ATOMICRMW_FADD:
    return selectG_LOAD_ATOMICRMW(I);
  case AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG:
    return selectG_AMDGPU_ATOMIC_CMPXCHG(I);
  case TargetOpcode::G_SELECT:
    return selectG_SELECT(I);
  case TargetOpcode::G_STORE:
    return selectG_STORE(I);
  case TargetOpcode::G_TRUNC:
    return selectG_TRUNC(I);
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_SEXT_INREG:
    if (selectImpl(I, *CoverageInfo))
      return true;
    return selectG_SZA_EXT(I);
  case TargetOpcode::G_BRCOND:
    return selectG_BRCOND(I);
  case TargetOpcode::G_FRAME_INDEX:
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectG_FRAME_INDEX_GLOBAL_VALUE(I);
  case TargetOpcode::G_PTR_MASK:
    return selectG_PTR_MASK(I);
  case TargetOpcode::G_EXTRACT_VECTOR_ELT:
    return selectG_EXTRACT_VECTOR_ELT(I);
  case TargetOpcode::G_INSERT_VECTOR_ELT:
    return selectG_INSERT_VECTOR_ELT(I);
  case TargetOpcode::G_SHUFFLE_VECTOR:
    return selectG_SHUFFLE_VECTOR(I);
  case AMDGPU::G_AMDGPU_ATOMIC_INC:
  case AMDGPU::G_AMDGPU_ATOMIC_DEC:
    initM0(I);
    return selectImpl(I, *CoverageInfo);
  case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
  case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE: {
    const AMDGPU::ImageDimIntrinsicInfo *Intr
      = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID());
    assert(Intr && "not an image intrinsic with image pseudo");
    return selectImageIntrinsic(I, Intr);
  }
  default:
    return selectImpl(I, *CoverageInfo);
  }
  return false;
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}

std::pair<Register, unsigned>
AMDGPUInstructionSelector::selectVOP3ModsImpl(MachineOperand &Root) const {
  Register Src = Root.getReg();
  Register OrigSrc = Src;
  unsigned Mods = 0;
  MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);

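  // Peel fneg/fabs off the operand so they can be folded into the NEG/ABS
  // source-modifier bits instead of being selected as separate instructions.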
  if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
    Src = MI->getOperand(1).getReg();
    Mods |= SISrcMods::NEG;
    MI = getDefIgnoringCopies(Src, *MRI);
  }

  if (MI && MI->getOpcode() == AMDGPU::G_FABS) {
    Src = MI->getOperand(1).getReg();
    Mods |= SISrcMods::ABS;
  }

  if (Mods != 0 &&
      RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) {
    MachineInstr *UseMI = Root.getParent();

    // If we looked through copies to find source modifiers on an SGPR operand,
    // we now have an SGPR register source. To avoid potentially violating the
    // constant bus restriction, we need to insert a copy to a VGPR.
    Register VGPRSrc = MRI->cloneVirtualRegister(OrigSrc);
    BuildMI(*UseMI->getParent(), UseMI, UseMI->getDebugLoc(),
            TII.get(AMDGPU::COPY), VGPRSrc)
      .addReg(Src);
    Src = VGPRSrc;
  }

  return std::make_pair(Src, Mods);
}

/// This will select either an SGPR or VGPR operand and will save us from
/// having to write an extra tablegen pattern.
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3ModsImpl(Root);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3ModsImpl(Root);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
  Register Reg = Root.getReg();
  const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI);
  if (Def && (Def->getOpcode() == AMDGPU::G_FNEG ||
              Def->getOpcode() == AMDGPU::G_FABS))
    return {};
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
  }};
}

std::pair<Register, unsigned>
AMDGPUInstructionSelector::selectVOP3PModsImpl(
  Register Src, const MachineRegisterInfo &MRI) const {
  unsigned Mods = 0;
  MachineInstr *MI = MRI.getVRegDef(Src);

  if (MI && MI->getOpcode() == AMDGPU::G_FNEG &&
      // It's possible to see an f32 fneg here, but unlikely.
      // TODO: Treat f32 fneg as only high bit.
      MRI.getType(Src) == LLT::vector(2, 16)) {
    Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
    Src = MI->getOperand(1).getReg();
    MI = MRI.getVRegDef(Src);
  }

  // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.

  // Packed instructions do not have abs modifiers.
  Mods |= SISrcMods::OP_SEL_1;

  return std::make_pair(Src, Mods);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
  MachineRegisterInfo &MRI
    = Root.getParent()->getParent()->getParent()->getRegInfo();

  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
  if (!TM.Options.NoNaNsFPMath && !isKnownNeverNaN(Src, *MRI))
    return None;

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
  // FIXME: Handle op_sel
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
  }};
}

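// Match an SMRD address that is an SGPR base plus an immediate offset which
// fits in the instruction's encoded offset field.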
2906 InstructionSelector::ComplexRendererFns
2907 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
2908   SmallVector<GEPInfo, 4> AddrInfo;
2909   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
2910 
2911   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
2912     return None;
2913 
2914   const GEPInfo &GEPInfo = AddrInfo[0];
2915   Optional<int64_t> EncodedImm =
2916       AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm, false);
2917   if (!EncodedImm)
2918     return None;
2919 
2920   unsigned PtrReg = GEPInfo.SgprParts[0];
2921   return {{
2922     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
2923     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
2924   }};
2925 }
2926 
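/// Match an SMRD base pointer plus an offset encodable as a 32-bit literal
/// offset.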
2927 InstructionSelector::ComplexRendererFns
2928 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
2929   SmallVector<GEPInfo, 4> AddrInfo;
2930   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
2931 
2932   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
2933     return None;
2934 
2935   const GEPInfo &GEPInfo = AddrInfo[0];
2936   Register PtrReg = GEPInfo.SgprParts[0];
2937   Optional<int64_t> EncodedImm =
2938       AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
2939   if (!EncodedImm)
2940     return None;
2941 
2942   return {{
2943     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
2944     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
2945   }};
2946 }
2947 
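/// Match an SMRD access with a 32-bit unsigned immediate offset that is
/// materialized into an SGPR, used when the _IMM forms have already failed.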
2948 InstructionSelector::ComplexRendererFns
2949 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
2950   MachineInstr *MI = Root.getParent();
2951   MachineBasicBlock *MBB = MI->getParent();
2952 
2953   SmallVector<GEPInfo, 4> AddrInfo;
2954   getAddrModeInfo(*MI, *MRI, AddrInfo);
2955 
  // FIXME: We should shrink the GEP if the offset is known to fit in 32
  // bits, then we can select all ptr + 32-bit offsets, not just immediate
  // offsets.
2958   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
2959     return None;
2960 
2961   const GEPInfo &GEPInfo = AddrInfo[0];
2962   // SGPR offset is unsigned.
2963   if (!GEPInfo.Imm || GEPInfo.Imm < 0 || !isUInt<32>(GEPInfo.Imm))
2964     return None;
2965 
  // If we make it this far we have a load with a 32-bit immediate offset.
  // It is OK to select this using an SGPR offset, because we have already
  // failed trying to select this load into one of the _IMM variants since
  // the _IMM patterns are considered before the _SGPR patterns.
2970   Register PtrReg = GEPInfo.SgprParts[0];
2971   Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2972   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
2973           .addImm(GEPInfo.Imm);
2974   return {{
2975     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
2976     [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
2977   }};
2978 }
2979 
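/// Try to fold a constant offset from a G_PTR_ADD into the immediate offset
/// field of a FLAT instruction, returning a zero offset when the subtarget
/// lacks instruction offsets or the constant is not a legal offset.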
2980 template <bool Signed>
2981 InstructionSelector::ComplexRendererFns
2982 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root) const {
2983   MachineInstr *MI = Root.getParent();
2984 
2985   InstructionSelector::ComplexRendererFns Default = {{
2986       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
2987       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },  // offset
2988       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // slc
2989     }};
2990 
2991   if (!STI.hasFlatInstOffsets())
2992     return Default;
2993 
2994   const MachineInstr *OpDef = MRI->getVRegDef(Root.getReg());
2995   if (!OpDef || OpDef->getOpcode() != AMDGPU::G_PTR_ADD)
2996     return Default;
2997 
2998   Optional<int64_t> Offset =
2999     getConstantVRegVal(OpDef->getOperand(2).getReg(), *MRI);
3000   if (!Offset.hasValue())
3001     return Default;
3002 
3003   unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
3004   if (!TII.isLegalFLATOffset(Offset.getValue(), AddrSpace, Signed))
3005     return Default;
3006 
3007   Register BasePtr = OpDef->getOperand(1).getReg();
3008 
3009   return {{
3010       [=](MachineInstrBuilder &MIB) { MIB.addReg(BasePtr); },
3011       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset.getValue()); },
3012       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // slc
3013     }};
3014 }
3015 
3016 InstructionSelector::ComplexRendererFns
3017 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
3018   return selectFlatOffsetImpl<false>(Root);
3019 }
3020 
3021 InstructionSelector::ComplexRendererFns
3022 AMDGPUInstructionSelector::selectFlatOffsetSigned(MachineOperand &Root) const {
3023   return selectFlatOffsetImpl<true>(Root);
3024 }
3025 
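// Return true if the memory access is known to address a stack object, i.e.
// it should be selected relative to the stack pointer.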
3026 static bool isStackPtrRelative(const MachinePointerInfo &PtrInfo) {
3027   auto PSV = PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
3028   return PSV && PSV->isStack();
3029 }
3030 
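/// Select the rsrc, vaddr, soffset and offset operands for a MUBUF scratch
/// access in offen mode. A constant address is split into a VGPR holding the
/// high bits and a 12-bit immediate offset; otherwise a frame index or base
/// register is folded into vaddr.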
3031 InstructionSelector::ComplexRendererFns
3032 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
3033   MachineInstr *MI = Root.getParent();
3034   MachineBasicBlock *MBB = MI->getParent();
3035   MachineFunction *MF = MBB->getParent();
3036   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3037 
3038   int64_t Offset = 0;
3039   if (mi_match(Root.getReg(), *MRI, m_ICst(Offset))) {
3040     Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3041 
3042     // TODO: Should this be inside the render function? The iterator seems to
3043     // move.
3044     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3045             HighBits)
3046       .addImm(Offset & ~4095);
3047 
3048     return {{[=](MachineInstrBuilder &MIB) { // rsrc
3049                MIB.addReg(Info->getScratchRSrcReg());
3050              },
3051              [=](MachineInstrBuilder &MIB) { // vaddr
3052                MIB.addReg(HighBits);
3053              },
3054              [=](MachineInstrBuilder &MIB) { // soffset
3055                const MachineMemOperand *MMO = *MI->memoperands_begin();
3056                const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
3057 
3058                if (isStackPtrRelative(PtrInfo))
3059                  MIB.addReg(Info->getStackPtrOffsetReg());
3060                else
3061                  MIB.addImm(0);
3062              },
3063              [=](MachineInstrBuilder &MIB) { // offset
3064                MIB.addImm(Offset & 4095);
3065              }}};
3066   }
3067 
3068   assert(Offset == 0);
3069 
  // Try to fold a frame index directly into the MUBUF vaddr field, along
  // with any constant offset.
3072   Optional<int> FI;
3073   Register VAddr = Root.getReg();
3074   if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
3075     if (isBaseWithConstantOffset(Root, *MRI)) {
3076       const MachineOperand &LHS = RootDef->getOperand(1);
3077       const MachineOperand &RHS = RootDef->getOperand(2);
3078       const MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
3079       const MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());
3080       if (LHSDef && RHSDef) {
3081         int64_t PossibleOffset =
3082             RHSDef->getOperand(1).getCImm()->getSExtValue();
3083         if (SIInstrInfo::isLegalMUBUFImmOffset(PossibleOffset) &&
3084             (!STI.privateMemoryResourceIsRangeChecked() ||
3085              KnownBits->signBitIsZero(LHS.getReg()))) {
3086           if (LHSDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
3087             FI = LHSDef->getOperand(1).getIndex();
3088           else
3089             VAddr = LHS.getReg();
3090           Offset = PossibleOffset;
3091         }
3092       }
3093     } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
3094       FI = RootDef->getOperand(1).getIndex();
3095     }
3096   }
3097 
3098   return {{[=](MachineInstrBuilder &MIB) { // rsrc
3099              MIB.addReg(Info->getScratchRSrcReg());
3100            },
3101            [=](MachineInstrBuilder &MIB) { // vaddr
3102              if (FI.hasValue())
3103                MIB.addFrameIndex(FI.getValue());
3104              else
3105                MIB.addReg(VAddr);
3106            },
3107            [=](MachineInstrBuilder &MIB) { // soffset
3108              // If we don't know this private access is a local stack object, it
3109              // needs to be relative to the entry point's scratch wave offset.
             // TODO: Should split large offsets that don't fit, as above.
3111              // TODO: Don't use scratch wave offset just because the offset
3112              // didn't fit.
3113              if (!Info->isEntryFunction() && FI.hasValue())
3114                MIB.addReg(Info->getStackPtrOffsetReg());
3115              else
3116                MIB.addImm(0);
3117            },
3118            [=](MachineInstrBuilder &MIB) { // offset
3119              MIB.addImm(Offset);
3120            }}};
3121 }
3122 
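/// Check whether \p Offset fits in a DS instruction's \p OffsetBits-wide
/// unsigned offset field, and whether folding it is known to be safe for the
/// given \p Base.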
3123 bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
3124                                                 int64_t Offset,
3125                                                 unsigned OffsetBits) const {
3126   if ((OffsetBits == 16 && !isUInt<16>(Offset)) ||
3127       (OffsetBits == 8 && !isUInt<8>(Offset)))
3128     return false;
3129 
3130   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
3131     return true;
3132 
  // On Southern Islands, instructions with a negative base value and an
  // offset don't seem to work.
3135   return KnownBits->signBitIsZero(Base);
3136 }
3137 
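/// Select a MUBUF scratch access whose address is entirely a legal immediate
/// offset, so no vaddr register is needed.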
3138 InstructionSelector::ComplexRendererFns
3139 AMDGPUInstructionSelector::selectMUBUFScratchOffset(
3140     MachineOperand &Root) const {
3141   MachineInstr *MI = Root.getParent();
3142   MachineBasicBlock *MBB = MI->getParent();
3143 
3144   int64_t Offset = 0;
3145   if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
3146       !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
3147     return {};
3148 
3149   const MachineFunction *MF = MBB->getParent();
3150   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3151   const MachineMemOperand *MMO = *MI->memoperands_begin();
3152   const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
3153 
3154   return {{
3155       [=](MachineInstrBuilder &MIB) { // rsrc
3156         MIB.addReg(Info->getScratchRSrcReg());
3157       },
3158       [=](MachineInstrBuilder &MIB) { // soffset
3159         if (isStackPtrRelative(PtrInfo))
3160           MIB.addReg(Info->getStackPtrOffsetReg());
3161         else
3162           MIB.addImm(0);
3163       },
3164       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
3165   }};
3166 }
3167 
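/// Split a DS address into a base register and a legal 16-bit unsigned byte
/// offset; falls back to the original address with a zero offset.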
3168 std::pair<Register, unsigned>
3169 AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
3170   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
3171   if (!RootDef)
3172     return std::make_pair(Root.getReg(), 0);
3173 
3174   int64_t ConstAddr = 0;
3175 
3176   Register PtrBase;
3177   int64_t Offset;
3178   std::tie(PtrBase, Offset) =
3179     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3180 
3181   if (Offset) {
3182     if (isDSOffsetLegal(PtrBase, Offset, 16)) {
3183       // (add n0, c0)
3184       return std::make_pair(PtrBase, Offset);
3185     }
3186   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
    // TODO

3190   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
3191     // TODO
3192 
3193   }
3194 
3195   return std::make_pair(Root.getReg(), 0);
3196 }
3197 
3198 InstructionSelector::ComplexRendererFns
3199 AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
3200   Register Reg;
3201   unsigned Offset;
3202   std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
3203   return {{
3204       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3205       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
3206     }};
3207 }
3208 
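/// Render the base register and the two consecutive dword offsets for the
/// 64-bit, 4-byte-aligned DS addressing mode (e.g. ds_read2_b32).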
3209 InstructionSelector::ComplexRendererFns
3210 AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
3211   Register Reg;
3212   unsigned Offset;
3213   std::tie(Reg, Offset) = selectDS64Bit4ByteAlignedImpl(Root);
3214   return {{
3215       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3216       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
3217       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); }
3218     }};
3219 }
3220 
3221 std::pair<Register, unsigned>
3222 AMDGPUInstructionSelector::selectDS64Bit4ByteAlignedImpl(MachineOperand &Root) const {
3223   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
3224   if (!RootDef)
3225     return std::make_pair(Root.getReg(), 0);
3226 
3227   int64_t ConstAddr = 0;
3228 
3229   Register PtrBase;
3230   int64_t Offset;
3231   std::tie(PtrBase, Offset) =
3232     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3233 
3234   if (Offset) {
3235     int64_t DWordOffset0 = Offset / 4;
3236     int64_t DWordOffset1 = DWordOffset0 + 1;
3237     if (isDSOffsetLegal(PtrBase, DWordOffset1, 8)) {
3238       // (add n0, c0)
3239       return std::make_pair(PtrBase, DWordOffset0);
3240     }
3241   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
3242     // TODO
3243 
3244   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
3245     // TODO
3246 
3247   }
3248 
3249   return std::make_pair(Root.getReg(), 0);
3250 }
3251 
3252 /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
3253 /// the base value with the constant offset. There may be intervening copies
3254 /// between \p Root and the identified constant. Returns \p Root, 0 if this does
3255 /// not match the pattern.
3256 std::pair<Register, int64_t>
3257 AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
3258   Register Root, const MachineRegisterInfo &MRI) const {
3259   MachineInstr *RootI = MRI.getVRegDef(Root);
3260   if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
3261     return {Root, 0};
3262 
3263   MachineOperand &RHS = RootI->getOperand(2);
3264   Optional<ValueAndVReg> MaybeOffset
3265     = getConstantVRegValWithLookThrough(RHS.getReg(), MRI, true);
3266   if (!MaybeOffset)
3267     return {Root, 0};
3268   return {RootI->getOperand(1).getReg(), MaybeOffset->Value};
3269 }
3270 
3271 static void addZeroImm(MachineInstrBuilder &MIB) {
3272   MIB.addImm(0);
3273 }
3274 
3275 /// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p
3276 /// BasePtr is not valid, a null base pointer will be used.
3277 static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
3278                           uint32_t FormatLo, uint32_t FormatHi,
3279                           Register BasePtr) {
3280   Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
3281   Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
3282   Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
3283   Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
3284 
3285   B.buildInstr(AMDGPU::S_MOV_B32)
3286     .addDef(RSrc2)
3287     .addImm(FormatLo);
3288   B.buildInstr(AMDGPU::S_MOV_B32)
3289     .addDef(RSrc3)
3290     .addImm(FormatHi);
3291 
  // Build the subregister half holding the constants before building the
  // full 128-bit register. If we are building multiple resource descriptors,
  // this will allow CSEing of the 2-component register.
3295   B.buildInstr(AMDGPU::REG_SEQUENCE)
3296     .addDef(RSrcHi)
3297     .addReg(RSrc2)
3298     .addImm(AMDGPU::sub0)
3299     .addReg(RSrc3)
3300     .addImm(AMDGPU::sub1);
3301 
3302   Register RSrcLo = BasePtr;
3303   if (!BasePtr) {
3304     RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
3305     B.buildInstr(AMDGPU::S_MOV_B64)
3306       .addDef(RSrcLo)
3307       .addImm(0);
3308   }
3309 
3310   B.buildInstr(AMDGPU::REG_SEQUENCE)
3311     .addDef(RSrc)
3312     .addReg(RSrcLo)
3313     .addImm(AMDGPU::sub0_sub1)
3314     .addReg(RSrcHi)
3315     .addImm(AMDGPU::sub2_sub3);
3316 
3317   return RSrc;
3318 }
3319 
3320 static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
3321                                 const SIInstrInfo &TII, Register BasePtr) {
3322   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
3323 
3324   // FIXME: Why are half the "default" bits ignored based on the addressing
3325   // mode?
3326   return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
3327 }
3328 
3329 static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
3330                                const SIInstrInfo &TII, Register BasePtr) {
3331   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
3332 
3333   // FIXME: Why are half the "default" bits ignored based on the addressing
3334   // mode?
3335   return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
3336 }
3337 
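/// Decompose \p Src into the components of a MUBUF address: the base (N0), a
/// constant offset that fits in 32 bits, and, if the base is itself a
/// G_PTR_ADD, its two addends (N2, N3).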
3338 AMDGPUInstructionSelector::MUBUFAddressData
3339 AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
3340   MUBUFAddressData Data;
3341   Data.N0 = Src;
3342 
3343   Register PtrBase;
3344   int64_t Offset;
3345 
3346   std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
3347   if (isUInt<32>(Offset)) {
3348     Data.N0 = PtrBase;
3349     Data.Offset = Offset;
3350   }
3351 
3352   if (MachineInstr *InputAdd
3353       = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
3354     Data.N2 = InputAdd->getOperand(1).getReg();
3355     Data.N3 = InputAdd->getOperand(2).getReg();
3356 
    // FIXME: Need to fix extra SGPR->VGPR copies inserted
    // FIXME: We don't actually know the value was defined by operand 0 of
    // the def instruction.
3359     //
3360     // TODO: Remove this when we have copy folding optimizations after
3361     // RegBankSelect.
3362     Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
3363     Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
3364   }
3365 
3366   return Data;
3367 }
3368 
/// Return whether the addr64 MUBUF mode should be used for the given address.
3370 bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
3371   // (ptr_add N2, N3) -> addr64, or
3372   // (ptr_add (ptr_add N2, N3), C1) -> addr64
3373   if (Addr.N2)
3374     return true;
3375 
3376   const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
3377   return N0Bank->getID() == AMDGPU::VGPRRegBankID;
3378 }
3379 
3380 /// Split an immediate offset \p ImmOffset depending on whether it fits in the
3381 /// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
3382 /// component.
3383 void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
3384   MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
3385   if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
3386     return;
3387 
3388   // Illegal offset, store it in soffset.
3389   SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3390   B.buildInstr(AMDGPU::S_MOV_B32)
3391     .addDef(SOffset)
3392     .addImm(ImmOffset);
3393   ImmOffset = 0;
3394 }
3395 
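/// Compute the vaddr, rsrc, soffset and offset operands for a MUBUF ADDR64
/// access: the divergent part of the address is placed in vaddr and the
/// uniform part becomes the base pointer of the resource descriptor.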
3396 bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
3397   MachineOperand &Root, Register &VAddr, Register &RSrcReg,
3398   Register &SOffset, int64_t &Offset) const {
  // FIXME: Predicates should stop this from reaching here.
  // The addr64 bit was removed for Volcanic Islands.
3401   if (!STI.hasAddr64() || STI.useFlatForGlobal())
3402     return false;
3403 
3404   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
3405   if (!shouldUseAddr64(AddrData))
3406     return false;
3407 
3408   Register N0 = AddrData.N0;
3409   Register N2 = AddrData.N2;
3410   Register N3 = AddrData.N3;
3411   Offset = AddrData.Offset;
3412 
3413   // Base pointer for the SRD.
3414   Register SRDPtr;
3415 
3416   if (N2) {
3417     if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
3418       assert(N3);
3419       if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
3420         // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
3421         // addr64, and construct the default resource from a 0 address.
3422         VAddr = N0;
3423       } else {
3424         SRDPtr = N3;
3425         VAddr = N2;
3426       }
3427     } else {
3428       // N2 is not divergent.
3429       SRDPtr = N2;
3430       VAddr = N3;
3431     }
3432   } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
3433     // Use the default null pointer in the resource
3434     VAddr = N0;
3435   } else {
3436     // N0 -> offset, or
3437     // (N0 + C1) -> offset
3438     SRDPtr = N0;
3439   }
3440 
3441   MachineIRBuilder B(*Root.getParent());
3442   RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
3443   splitIllegalMUBUFOffset(B, SOffset, Offset);
3444   return true;
3445 }
3446 
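/// Compute the rsrc, soffset and offset operands for a MUBUF access in
/// offset mode, where the uniform address becomes the resource descriptor's
/// base pointer.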
3447 bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
3448   MachineOperand &Root, Register &RSrcReg, Register &SOffset,
3449   int64_t &Offset) const {
3450   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
3451   if (shouldUseAddr64(AddrData))
3452     return false;
3453 
3454   // N0 -> offset, or
3455   // (N0 + C1) -> offset
3456   Register SRDPtr = AddrData.N0;
3457   Offset = AddrData.Offset;
3458 
3459   // TODO: Look through extensions for 32-bit soffset.
3460   MachineIRBuilder B(*Root.getParent());
3461 
3462   RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
3463   splitIllegalMUBUFOffset(B, SOffset, Offset);
3464   return true;
3465 }
3466 
3467 InstructionSelector::ComplexRendererFns
3468 AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
3469   Register VAddr;
3470   Register RSrcReg;
3471   Register SOffset;
3472   int64_t Offset = 0;
3473 
3474   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
3475     return {};
3476 
3477   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
3478   // pattern.
3479   return {{
3480       [=](MachineInstrBuilder &MIB) {  // rsrc
3481         MIB.addReg(RSrcReg);
3482       },
3483       [=](MachineInstrBuilder &MIB) { // vaddr
3484         MIB.addReg(VAddr);
3485       },
3486       [=](MachineInstrBuilder &MIB) { // soffset
3487         if (SOffset)
3488           MIB.addReg(SOffset);
3489         else
3490           MIB.addImm(0);
3491       },
3492       [=](MachineInstrBuilder &MIB) { // offset
3493         MIB.addImm(Offset);
3494       },
3495       addZeroImm, //  glc
3496       addZeroImm, //  slc
3497       addZeroImm, //  tfe
3498       addZeroImm, //  dlc
3499       addZeroImm  //  swz
3500     }};
3501 }
3502 
3503 InstructionSelector::ComplexRendererFns
3504 AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
3505   Register RSrcReg;
3506   Register SOffset;
3507   int64_t Offset = 0;
3508 
3509   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
3510     return {};
3511 
3512   return {{
3513       [=](MachineInstrBuilder &MIB) {  // rsrc
3514         MIB.addReg(RSrcReg);
3515       },
3516       [=](MachineInstrBuilder &MIB) { // soffset
3517         if (SOffset)
3518           MIB.addReg(SOffset);
3519         else
3520           MIB.addImm(0);
3521       },
3522       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
3523       addZeroImm, //  glc
3524       addZeroImm, //  slc
3525       addZeroImm, //  tfe
3526       addZeroImm, //  dlc
3527       addZeroImm  //  swz
3528     }};
3529 }
3530 
3531 InstructionSelector::ComplexRendererFns
3532 AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const {
3533   Register VAddr;
3534   Register RSrcReg;
3535   Register SOffset;
3536   int64_t Offset = 0;
3537 
3538   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
3539     return {};
3540 
3541   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
3542   // pattern.
3543   return {{
3544       [=](MachineInstrBuilder &MIB) {  // rsrc
3545         MIB.addReg(RSrcReg);
3546       },
3547       [=](MachineInstrBuilder &MIB) { // vaddr
3548         MIB.addReg(VAddr);
3549       },
3550       [=](MachineInstrBuilder &MIB) { // soffset
3551         if (SOffset)
3552           MIB.addReg(SOffset);
3553         else
3554           MIB.addImm(0);
3555       },
3556       [=](MachineInstrBuilder &MIB) { // offset
3557         MIB.addImm(Offset);
3558       },
3559       addZeroImm //  slc
3560     }};
3561 }
3562 
3563 InstructionSelector::ComplexRendererFns
3564 AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
3565   Register RSrcReg;
3566   Register SOffset;
3567   int64_t Offset = 0;
3568 
3569   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
3570     return {};
3571 
3572   return {{
3573       [=](MachineInstrBuilder &MIB) {  // rsrc
3574         MIB.addReg(RSrcReg);
3575       },
3576       [=](MachineInstrBuilder &MIB) { // soffset
3577         if (SOffset)
3578           MIB.addReg(SOffset);
3579         else
3580           MIB.addImm(0);
3581       },
3582       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
3583       addZeroImm //  slc
3584     }};
3585 }
3586 
/// Get an immediate that must fit in 32 bits, treated as zero extended.
3588 static Optional<uint64_t> getConstantZext32Val(Register Reg,
3589                                                const MachineRegisterInfo &MRI) {
  // getConstantVRegVal sign extends values, so verify the value fits in 32
  // bits before truncating.
3591   Optional<int64_t> OffsetVal = getConstantVRegVal(Reg, MRI);
3592   if (!OffsetVal || !isInt<32>(*OffsetVal))
3593     return None;
3594   return Lo_32(*OffsetVal);
3595 }
3596 
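/// Match a constant SMRD buffer load offset encodable in the instruction's
/// immediate offset field.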
3597 InstructionSelector::ComplexRendererFns
3598 AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
3599   Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
3600   if (!OffsetVal)
3601     return {};
3602 
3603   Optional<int64_t> EncodedImm =
3604       AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
3605   if (!EncodedImm)
3606     return {};
3607 
3608   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
3609 }
3610 
3611 InstructionSelector::ComplexRendererFns
3612 AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
3613   assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
3614 
3615   Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
3616   if (!OffsetVal)
3617     return {};
3618 
3619   Optional<int64_t> EncodedImm
3620     = AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
3621   if (!EncodedImm)
3622     return {};
3623 
3624   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
3625 }
3626 
3627 void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
3628                                                  const MachineInstr &MI,
3629                                                  int OpIdx) const {
3630   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
3631          "Expected G_CONSTANT");
3632   MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
3633 }
3634 
3635 void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
3636                                                 const MachineInstr &MI,
3637                                                 int OpIdx) const {
3638   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
3639          "Expected G_CONSTANT");
3640   MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
3641 }
3642 
3643 void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
3644                                                  const MachineInstr &MI,
3645                                                  int OpIdx) const {
3646   assert(OpIdx == -1);
3647 
3648   const MachineOperand &Op = MI.getOperand(1);
3649   if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
3650     MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
3651   else {
3652     assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
3653     MIB.addImm(Op.getCImm()->getSExtValue());
3654   }
3655 }
3656 
3657 void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
3658                                                 const MachineInstr &MI,
3659                                                 int OpIdx) const {
3660   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
3661          "Expected G_CONSTANT");
3662   MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
3663 }
3664 
/// This only really exists to satisfy DAG type checking machinery, so it is a
/// no-op here.
3667 void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
3668                                                 const MachineInstr &MI,
3669                                                 int OpIdx) const {
3670   MIB.addImm(MI.getOperand(OpIdx).getImm());
3671 }
3672 
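// The cachepolicy operand packs the cache bits as glc = bit 0, slc = bit 1,
// dlc = bit 2 and swz = bit 3; the renderers below extract each bit.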
3673 void AMDGPUInstructionSelector::renderExtractGLC(MachineInstrBuilder &MIB,
3674                                                  const MachineInstr &MI,
3675                                                  int OpIdx) const {
3676   assert(OpIdx >= 0 && "expected to match an immediate operand");
3677   MIB.addImm(MI.getOperand(OpIdx).getImm() & 1);
3678 }
3679 
3680 void AMDGPUInstructionSelector::renderExtractSLC(MachineInstrBuilder &MIB,
3681                                                  const MachineInstr &MI,
3682                                                  int OpIdx) const {
3683   assert(OpIdx >= 0 && "expected to match an immediate operand");
3684   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 1) & 1);
3685 }
3686 
3687 void AMDGPUInstructionSelector::renderExtractDLC(MachineInstrBuilder &MIB,
3688                                                  const MachineInstr &MI,
3689                                                  int OpIdx) const {
3690   assert(OpIdx >= 0 && "expected to match an immediate operand");
3691   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 2) & 1);
3692 }
3693 
3694 void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
3695                                                  const MachineInstr &MI,
3696                                                  int OpIdx) const {
3697   assert(OpIdx >= 0 && "expected to match an immediate operand");
3698   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1);
3699 }
3700 
3701 bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
3702   return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
3703 }
3704 
3705 bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
3706   return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
3707 }
3708 
3709 bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
3710   return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
3711 }
3712 
3713 bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
3714   return TII.isInlineConstant(Imm);
3715 }
3716