1 //===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the InstructionSelector class for
10 /// AMDGPU.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13 
14 #include "AMDGPUInstructionSelector.h"
15 #include "AMDGPU.h"
16 #include "AMDGPUGlobalISelUtils.h"
17 #include "AMDGPUInstrInfo.h"
18 #include "AMDGPURegisterBankInfo.h"
19 #include "AMDGPUTargetMachine.h"
20 #include "SIMachineFunctionInfo.h"
21 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
22 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
23 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
24 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
25 #include "llvm/IR/DiagnosticInfo.h"
26 
27 #define DEBUG_TYPE "amdgpu-isel"
28 
29 using namespace llvm;
30 using namespace MIPatternMatch;
31 
32 static cl::opt<bool> AllowRiskySelect(
33   "amdgpu-global-isel-risky-select",
34   cl::desc("Allow GlobalISel to select cases that are likely to not work yet"),
35   cl::init(false),
36   cl::ReallyHidden);
37 
38 #define GET_GLOBALISEL_IMPL
39 #define AMDGPUSubtarget GCNSubtarget
40 #include "AMDGPUGenGlobalISel.inc"
41 #undef GET_GLOBALISEL_IMPL
42 #undef AMDGPUSubtarget
43 
44 AMDGPUInstructionSelector::AMDGPUInstructionSelector(
45     const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
46     const AMDGPUTargetMachine &TM)
47     : InstructionSelector(), TII(*STI.getInstrInfo()),
48       TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
49       STI(STI),
50       EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
51 #define GET_GLOBALISEL_PREDICATES_INIT
52 #include "AMDGPUGenGlobalISel.inc"
53 #undef GET_GLOBALISEL_PREDICATES_INIT
54 #define GET_GLOBALISEL_TEMPORARIES_INIT
55 #include "AMDGPUGenGlobalISel.inc"
56 #undef GET_GLOBALISEL_TEMPORARIES_INIT
57 {
58 }
59 
60 const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
61 
62 void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
63                                         CodeGenCoverage &CoverageInfo,
64                                         ProfileSummaryInfo *PSI,
65                                         BlockFrequencyInfo *BFI) {
66   MRI = &MF.getRegInfo();
67   Subtarget = &MF.getSubtarget<GCNSubtarget>();
68   InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
69 }
70 
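// Return true if Reg holds a wave-wide boolean: an s1 value constrained to
// the subtarget's boolean register class, or any value assigned to the VCC
// register bank.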
71 bool AMDGPUInstructionSelector::isVCC(Register Reg,
72                                       const MachineRegisterInfo &MRI) const {
73   // The verifier is oblivious to s1 being a valid value for wavesize registers.
74   if (Reg.isPhysical())
75     return false;
76 
77   auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
78   const TargetRegisterClass *RC =
79       RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
80   if (RC) {
81     const LLT Ty = MRI.getType(Reg);
82     return RC->hasSuperClassEq(TRI.getBoolRC()) &&
83            Ty.isValid() && Ty.getSizeInBits() == 1;
84   }
85 
86   const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
87   return RB->getID() == AMDGPU::VCCRegBankID;
88 }
89 
90 bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
91                                                         unsigned NewOpc) const {
92   MI.setDesc(TII.get(NewOpc));
93   MI.RemoveOperand(1); // Remove intrinsic ID.
94   MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
95 
96   MachineOperand &Dst = MI.getOperand(0);
97   MachineOperand &Src = MI.getOperand(1);
98 
99   // TODO: This should be legalized to s32 if needed
100   if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
101     return false;
102 
103   const TargetRegisterClass *DstRC
104     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
105   const TargetRegisterClass *SrcRC
106     = TRI.getConstrainedRegClassForOperand(Src, *MRI);
107   if (!DstRC || DstRC != SrcRC)
108     return false;
109 
110   return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
111          RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
112 }
113 
114 bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
115   const DebugLoc &DL = I.getDebugLoc();
116   MachineBasicBlock *BB = I.getParent();
117   I.setDesc(TII.get(TargetOpcode::COPY));
118 
119   const MachineOperand &Src = I.getOperand(1);
120   MachineOperand &Dst = I.getOperand(0);
121   Register DstReg = Dst.getReg();
122   Register SrcReg = Src.getReg();
123 
124   if (isVCC(DstReg, *MRI)) {
125     if (SrcReg == AMDGPU::SCC) {
126       const TargetRegisterClass *RC
127         = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
128       if (!RC)
129         return true;
130       return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
131     }
132 
133     if (!isVCC(SrcReg, *MRI)) {
134       // TODO: Should probably leave the copy and let copyPhysReg expand it.
135       if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
136         return false;
137 
138       const TargetRegisterClass *SrcRC
139         = TRI.getConstrainedRegClassForOperand(Src, *MRI);
140 
141       Optional<ValueAndVReg> ConstVal =
142           getConstantVRegValWithLookThrough(SrcReg, *MRI, true, true);
143       if (ConstVal) {
144         unsigned MovOpc =
145             STI.isWave64() ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
146         BuildMI(*BB, &I, DL, TII.get(MovOpc), DstReg)
147             .addImm(ConstVal->Value.getBoolValue() ? -1 : 0);
148       } else {
149         Register MaskedReg = MRI->createVirtualRegister(SrcRC);
150 
151         // We can't trust the high bits at this point, so clear them.
152 
153         // TODO: Skip masking high bits if def is known boolean.
154 
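        // The expansion is roughly (wave32, VGPR source):
        //   v_and_b32_e32 vTmp, 1, vSrc
        //   v_cmp_ne_u32_e64 sDst, 0, vTmp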
155         unsigned AndOpc =
156             TRI.isSGPRClass(SrcRC) ? AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
157         BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
158             .addImm(1)
159             .addReg(SrcReg);
160         BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
161             .addImm(0)
162             .addReg(MaskedReg);
163       }
164 
165       if (!MRI->getRegClassOrNull(SrcReg))
166         MRI->setRegClass(SrcReg, SrcRC);
167       I.eraseFromParent();
168       return true;
169     }
170 
171     const TargetRegisterClass *RC =
172       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
173     if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
174       return false;
175 
176     return true;
177   }
178 
179   for (const MachineOperand &MO : I.operands()) {
180     if (MO.getReg().isPhysical())
181       continue;
182 
183     const TargetRegisterClass *RC =
184             TRI.getConstrainedRegClassForOperand(MO, *MRI);
185     if (!RC)
186       continue;
187     RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
188   }
189   return true;
190 }
191 
192 bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
193   const Register DefReg = I.getOperand(0).getReg();
194   const LLT DefTy = MRI->getType(DefReg);
195   if (DefTy == LLT::scalar(1)) {
196     if (!AllowRiskySelect) {
197       LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n");
198       return false;
199     }
200 
201     LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n");
202   }
203 
  // TODO: Verify this doesn't have insane operands (e.g. VGPR to SGPR copy)
205 
206   const RegClassOrRegBank &RegClassOrBank =
207     MRI->getRegClassOrRegBank(DefReg);
208 
209   const TargetRegisterClass *DefRC
210     = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
211   if (!DefRC) {
212     if (!DefTy.isValid()) {
213       LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
214       return false;
215     }
216 
217     const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
218     DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB, *MRI);
219     if (!DefRC) {
220       LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
221       return false;
222     }
223   }
224 
225   // TODO: Verify that all registers have the same bank
226   I.setDesc(TII.get(TargetOpcode::PHI));
227   return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
228 }
229 
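// Return the 32-bit half of a 64-bit operand selected by SubIdx. Register
// operands are copied through a subregister COPY into a fresh register of
// SubRC; immediate operands are split into their low/high 32 bits.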
230 MachineOperand
231 AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
232                                            const TargetRegisterClass &SubRC,
233                                            unsigned SubIdx) const {
234 
235   MachineInstr *MI = MO.getParent();
236   MachineBasicBlock *BB = MO.getParent()->getParent();
237   Register DstReg = MRI->createVirtualRegister(&SubRC);
238 
239   if (MO.isReg()) {
240     unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
241     Register Reg = MO.getReg();
242     BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
243             .addReg(Reg, 0, ComposedSubIdx);
244 
245     return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
246                                      MO.isKill(), MO.isDead(), MO.isUndef(),
247                                      MO.isEarlyClobber(), 0, MO.isDebug(),
248                                      MO.isInternalRead());
249   }
250 
251   assert(MO.isImm());
252 
253   APInt Imm(64, MO.getImm());
254 
255   switch (SubIdx) {
256   default:
257     llvm_unreachable("do not know to split immediate with this sub index.");
258   case AMDGPU::sub0:
259     return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
260   case AMDGPU::sub1:
261     return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
262   }
263 }
264 
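// Map a generic bitwise opcode to its 32- or 64-bit SALU form.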
265 static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
266   switch (Opc) {
267   case AMDGPU::G_AND:
268     return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
269   case AMDGPU::G_OR:
270     return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
271   case AMDGPU::G_XOR:
272     return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
273   default:
274     llvm_unreachable("not a bit op");
275   }
276 }
277 
278 bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
279   Register DstReg = I.getOperand(0).getReg();
280   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
281 
282   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
283   if (DstRB->getID() != AMDGPU::SGPRRegBankID &&
284       DstRB->getID() != AMDGPU::VCCRegBankID)
285     return false;
286 
287   bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID &&
288                             STI.isWave64());
289   I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64)));
290 
291   // Dead implicit-def of scc
292   I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
293                                          true, // isImp
294                                          false, // isKill
295                                          true)); // isDead
296   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
297 }
298 
299 bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
300   MachineBasicBlock *BB = I.getParent();
301   MachineFunction *MF = BB->getParent();
302   Register DstReg = I.getOperand(0).getReg();
303   const DebugLoc &DL = I.getDebugLoc();
304   LLT Ty = MRI->getType(DstReg);
305   if (Ty.isVector())
306     return false;
307 
308   unsigned Size = Ty.getSizeInBits();
309   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
310   const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
311   const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;
312 
313   if (Size == 32) {
314     if (IsSALU) {
315       const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
316       MachineInstr *Add =
317         BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
318         .add(I.getOperand(1))
319         .add(I.getOperand(2));
320       I.eraseFromParent();
321       return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
322     }
323 
324     if (STI.hasAddNoCarry()) {
325       const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
326       I.setDesc(TII.get(Opc));
327       I.addOperand(*MF, MachineOperand::CreateImm(0));
328       I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
329       return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
330     }
331 
332     const unsigned Opc = Sub ? AMDGPU::V_SUB_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e64;
333 
334     Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
335     MachineInstr *Add
336       = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
337       .addDef(UnusedCarry, RegState::Dead)
338       .add(I.getOperand(1))
339       .add(I.getOperand(2))
340       .addImm(0);
341     I.eraseFromParent();
342     return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
343   }
344 
345   assert(!Sub && "illegal sub should not reach here");
346 
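  // A 64-bit add is split into two 32-bit halves chained through the carry
  // bit, e.g. on the SALU:
  //   s_add_u32  dst.lo, lo1, lo2
  //   s_addc_u32 dst.hi, hi1, hi2
  // and the halves are recombined with a REG_SEQUENCE.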
347   const TargetRegisterClass &RC
348     = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
349   const TargetRegisterClass &HalfRC
350     = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;
351 
352   MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
353   MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
354   MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
355   MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));
356 
357   Register DstLo = MRI->createVirtualRegister(&HalfRC);
358   Register DstHi = MRI->createVirtualRegister(&HalfRC);
359 
360   if (IsSALU) {
361     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
362       .add(Lo1)
363       .add(Lo2);
364     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
365       .add(Hi1)
366       .add(Hi2);
367   } else {
368     const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
369     Register CarryReg = MRI->createVirtualRegister(CarryRC);
370     BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo)
371       .addDef(CarryReg)
372       .add(Lo1)
373       .add(Lo2)
374       .addImm(0);
375     MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
376       .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
377       .add(Hi1)
378       .add(Hi2)
379       .addReg(CarryReg, RegState::Kill)
380       .addImm(0);
381 
382     if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
383       return false;
384   }
385 
386   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
387     .addReg(DstLo)
388     .addImm(AMDGPU::sub0)
389     .addReg(DstHi)
    .addImm(AMDGPU::sub1);

393   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
394     return false;
395 
396   I.eraseFromParent();
397   return true;
398 }
399 
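// Select unsigned add/sub with a carry-out, and optionally a carry-in. On the
// VALU the carry lives in a VCC-bank register; on the SALU it goes through
// SCC, so the carry values are copied to and from SCC around the arithmetic.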
400 bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
401   MachineInstr &I) const {
402   MachineBasicBlock *BB = I.getParent();
403   MachineFunction *MF = BB->getParent();
404   const DebugLoc &DL = I.getDebugLoc();
405   Register Dst0Reg = I.getOperand(0).getReg();
406   Register Dst1Reg = I.getOperand(1).getReg();
407   const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
408                      I.getOpcode() == AMDGPU::G_UADDE;
409   const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
410                           I.getOpcode() == AMDGPU::G_USUBE;
411 
412   if (isVCC(Dst1Reg, *MRI)) {
413     unsigned NoCarryOpc =
414         IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
415     unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
416     I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
417     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
418     I.addOperand(*MF, MachineOperand::CreateImm(0));
419     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
420   }
421 
422   Register Src0Reg = I.getOperand(2).getReg();
423   Register Src1Reg = I.getOperand(3).getReg();
424 
425   if (HasCarryIn) {
426     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
427       .addReg(I.getOperand(4).getReg());
428   }
429 
430   unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
431   unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
432 
433   BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
434     .add(I.getOperand(2))
435     .add(I.getOperand(3));
436   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
437     .addReg(AMDGPU::SCC);
438 
439   if (!MRI->getRegClassOrNull(Dst1Reg))
440     MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);
441 
442   if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
443       !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
444       !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
445     return false;
446 
447   if (HasCarryIn &&
448       !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
449                                     AMDGPU::SReg_32RegClass, *MRI))
450     return false;
451 
452   I.eraseFromParent();
453   return true;
454 }
455 
// TODO: We should probably legalize these to use only 32-bit results.
457 bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
458   MachineBasicBlock *BB = I.getParent();
459   Register DstReg = I.getOperand(0).getReg();
460   Register SrcReg = I.getOperand(1).getReg();
461   LLT DstTy = MRI->getType(DstReg);
462   LLT SrcTy = MRI->getType(SrcReg);
463   const unsigned SrcSize = SrcTy.getSizeInBits();
464   unsigned DstSize = DstTy.getSizeInBits();
465 
466   // TODO: Should handle any multiple of 32 offset.
467   unsigned Offset = I.getOperand(2).getImm();
468   if (Offset % 32 != 0 || DstSize > 128)
469     return false;
470 
471   // 16-bit operations really use 32-bit registers.
472   // FIXME: Probably should not allow 16-bit G_EXTRACT results.
473   if (DstSize == 16)
474     DstSize = 32;
475 
476   const TargetRegisterClass *DstRC =
477     TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
478   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
479     return false;
480 
481   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
482   const TargetRegisterClass *SrcRC =
483     TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
484   if (!SrcRC)
485     return false;
486   unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
487                                                          DstSize / 32);
488   SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
489   if (!SrcRC)
490     return false;
491 
492   SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
493                                     *SrcRC, I.getOperand(1));
494   const DebugLoc &DL = I.getDebugLoc();
495   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
496     .addReg(SrcReg, 0, SubReg);
497 
498   I.eraseFromParent();
499   return true;
500 }
501 
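// Merges of 32-bit or wider pieces become a single REG_SEQUENCE; narrower
// sources are left to the imported TableGen patterns.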
502 bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
503   MachineBasicBlock *BB = MI.getParent();
504   Register DstReg = MI.getOperand(0).getReg();
505   LLT DstTy = MRI->getType(DstReg);
506   LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());
507 
508   const unsigned SrcSize = SrcTy.getSizeInBits();
509   if (SrcSize < 32)
510     return selectImpl(MI, *CoverageInfo);
511 
512   const DebugLoc &DL = MI.getDebugLoc();
513   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
514   const unsigned DstSize = DstTy.getSizeInBits();
515   const TargetRegisterClass *DstRC =
516     TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
517   if (!DstRC)
518     return false;
519 
520   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
521   MachineInstrBuilder MIB =
522     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
523   for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
524     MachineOperand &Src = MI.getOperand(I + 1);
525     MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
526     MIB.addImm(SubRegs[I]);
527 
528     const TargetRegisterClass *SrcRC
529       = TRI.getConstrainedRegClassForOperand(Src, *MRI);
530     if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
531       return false;
532   }
533 
534   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
535     return false;
536 
537   MI.eraseFromParent();
538   return true;
539 }
540 
541 bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
542   MachineBasicBlock *BB = MI.getParent();
543   const int NumDst = MI.getNumOperands() - 1;
544 
545   MachineOperand &Src = MI.getOperand(NumDst);
546 
547   Register SrcReg = Src.getReg();
548   Register DstReg0 = MI.getOperand(0).getReg();
549   LLT DstTy = MRI->getType(DstReg0);
550   LLT SrcTy = MRI->getType(SrcReg);
551 
552   const unsigned DstSize = DstTy.getSizeInBits();
553   const unsigned SrcSize = SrcTy.getSizeInBits();
554   const DebugLoc &DL = MI.getDebugLoc();
555   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
556 
557   const TargetRegisterClass *SrcRC =
558     TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
559   if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
560     return false;
561 
562   // Note we could have mixed SGPR and VGPR destination banks for an SGPR
563   // source, and this relies on the fact that the same subregister indices are
564   // used for both.
565   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
566   for (int I = 0, E = NumDst; I != E; ++I) {
567     MachineOperand &Dst = MI.getOperand(I);
568     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
569       .addReg(SrcReg, 0, SubRegs[I]);
570 
571     // Make sure the subregister index is valid for the source register.
572     SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]);
573     if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
574       return false;
575 
576     const TargetRegisterClass *DstRC =
577       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
578     if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
579       return false;
580   }
581 
582   MI.eraseFromParent();
583   return true;
584 }
585 
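// For an SGPR v2s16 result, fold two constants into a single s_mov_b32 where
// possible; otherwise select one of the S_PACK_{LL,LH,HH}_B32_B16 forms,
// using the LH/HH variants when a source is a one-use logical shift by 16.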
586 bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC(
587   MachineInstr &MI) const {
588   if (selectImpl(MI, *CoverageInfo))
589     return true;
590 
591   const LLT S32 = LLT::scalar(32);
592   const LLT V2S16 = LLT::vector(2, 16);
593 
594   Register Dst = MI.getOperand(0).getReg();
595   if (MRI->getType(Dst) != V2S16)
596     return false;
597 
598   const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
599   if (DstBank->getID() != AMDGPU::SGPRRegBankID)
600     return false;
601 
602   Register Src0 = MI.getOperand(1).getReg();
603   Register Src1 = MI.getOperand(2).getReg();
604   if (MRI->getType(Src0) != S32)
605     return false;
606 
607   const DebugLoc &DL = MI.getDebugLoc();
608   MachineBasicBlock *BB = MI.getParent();
609 
610   auto ConstSrc1 =
611       getConstantVRegValWithLookThrough(Src1, *MRI, true, true, true);
612   if (ConstSrc1) {
613     auto ConstSrc0 =
614         getConstantVRegValWithLookThrough(Src0, *MRI, true, true, true);
615     if (ConstSrc0) {
616       const int64_t K0 = ConstSrc0->Value.getSExtValue();
617       const int64_t K1 = ConstSrc1->Value.getSExtValue();
618       uint32_t Lo16 = static_cast<uint32_t>(K0) & 0xffff;
619       uint32_t Hi16 = static_cast<uint32_t>(K1) & 0xffff;
620 
621       BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst)
622         .addImm(Lo16 | (Hi16 << 16));
623       MI.eraseFromParent();
624       return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI);
625     }
626   }
627 
628   // TODO: This should probably be a combine somewhere
  // (build_vector_trunc $src0, undef) -> (copy $src0)
630   MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
631   if (Src1Def && Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
632     MI.setDesc(TII.get(AMDGPU::COPY));
633     MI.RemoveOperand(2);
634     return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI) &&
635            RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI);
636   }
637 
638   Register ShiftSrc0;
639   Register ShiftSrc1;
640 
641   // With multiple uses of the shift, this will duplicate the shift and
642   // increase register pressure.
643   //
  // (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16))
645   //  => (S_PACK_HH_B32_B16 $src0, $src1)
646   // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16))
647   //  => (S_PACK_LH_B32_B16 $src0, $src1)
648   // (build_vector_trunc $src0, $src1)
649   //  => (S_PACK_LL_B32_B16 $src0, $src1)
650 
651   bool Shift0 = mi_match(
652       Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_SpecificICst(16))));
653 
654   bool Shift1 = mi_match(
655       Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_SpecificICst(16))));
656 
657   unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
658   if (Shift0 && Shift1) {
659     Opc = AMDGPU::S_PACK_HH_B32_B16;
660     MI.getOperand(1).setReg(ShiftSrc0);
661     MI.getOperand(2).setReg(ShiftSrc1);
662   } else if (Shift1) {
663     Opc = AMDGPU::S_PACK_LH_B32_B16;
664     MI.getOperand(2).setReg(ShiftSrc1);
665   } else if (Shift0 && ConstSrc1 && ConstSrc1->Value == 0) {
666     // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16
667     auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)
668       .addReg(ShiftSrc0)
669       .addImm(16);
670 
671     MI.eraseFromParent();
672     return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
673   }
674 
675   MI.setDesc(TII.get(Opc));
676   return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
677 }
678 
679 bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
680   return selectG_ADD_SUB(I);
681 }
682 
683 bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
684   const MachineOperand &MO = I.getOperand(0);
685 
686   // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
687   // regbank check here is to know why getConstrainedRegClassForOperand failed.
688   const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
689   if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
690       (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
691     I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
692     return true;
693   }
694 
695   return false;
696 }
697 
698 bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
699   MachineBasicBlock *BB = I.getParent();
700 
701   Register DstReg = I.getOperand(0).getReg();
702   Register Src0Reg = I.getOperand(1).getReg();
703   Register Src1Reg = I.getOperand(2).getReg();
704   LLT Src1Ty = MRI->getType(Src1Reg);
705 
706   unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
707   unsigned InsSize = Src1Ty.getSizeInBits();
708 
709   int64_t Offset = I.getOperand(3).getImm();
710 
711   // FIXME: These cases should have been illegal and unnecessary to check here.
712   if (Offset % 32 != 0 || InsSize % 32 != 0)
713     return false;
714 
715   // Currently not handled by getSubRegFromChannel.
716   if (InsSize > 128)
717     return false;
718 
719   unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
720   if (SubReg == AMDGPU::NoSubRegister)
721     return false;
722 
723   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
724   const TargetRegisterClass *DstRC =
725     TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
726   if (!DstRC)
727     return false;
728 
729   const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
730   const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
731   const TargetRegisterClass *Src0RC =
732     TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank, *MRI);
733   const TargetRegisterClass *Src1RC =
734     TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank, *MRI);
735 
736   // Deal with weird cases where the class only partially supports the subreg
737   // index.
738   Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
739   if (!Src0RC || !Src1RC)
740     return false;
741 
742   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
743       !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
744       !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
745     return false;
746 
747   const DebugLoc &DL = I.getDebugLoc();
748   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
749     .addReg(Src0Reg)
750     .addReg(Src1Reg)
751     .addImm(SubReg);
752 
753   I.eraseFromParent();
754   return true;
755 }
756 
757 bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
758   if (STI.getLDSBankCount() != 16)
759     return selectImpl(MI, *CoverageInfo);
760 
761   Register Dst = MI.getOperand(0).getReg();
762   Register Src0 = MI.getOperand(2).getReg();
763   Register M0Val = MI.getOperand(6).getReg();
764   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
765       !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
766       !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
767     return false;
768 
769   // This requires 2 instructions. It is possible to write a pattern to support
770   // this, but the generated isel emitter doesn't correctly deal with multiple
771   // output instructions using the same physical register input. The copy to m0
772   // is incorrectly placed before the second instruction.
773   //
774   // TODO: Match source modifiers.
775 
776   Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
777   const DebugLoc &DL = MI.getDebugLoc();
778   MachineBasicBlock *MBB = MI.getParent();
779 
780   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
781     .addReg(M0Val);
782   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
783     .addImm(2)
784     .addImm(MI.getOperand(4).getImm())  // $attr
785     .addImm(MI.getOperand(3).getImm()); // $attrchan
786 
787   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
788     .addImm(0)                          // $src0_modifiers
789     .addReg(Src0)                       // $src0
790     .addImm(MI.getOperand(4).getImm())  // $attr
791     .addImm(MI.getOperand(3).getImm())  // $attrchan
792     .addImm(0)                          // $src2_modifiers
793     .addReg(InterpMov)                  // $src2 - 2 f16 values selected by high
794     .addImm(MI.getOperand(5).getImm())  // $high
795     .addImm(0)                          // $clamp
796     .addImm(0);                         // $omod
797 
798   MI.eraseFromParent();
799   return true;
800 }
801 
802 // Writelane is special in that it can use SGPR and M0 (which would normally
// count as using the constant bus twice, but in this case it is allowed since
804 // the lane selector doesn't count as a use of the constant bus). However, it is
805 // still required to abide by the 1 SGPR rule. Fix this up if we might have
806 // multiple SGPRs.
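// For example, a rough sketch of the fix:
//   v_writelane_b32 v0, s1, s2    ; reads two SGPRs, one too many
// becomes
//   s_mov_b32 m0, s2
//   v_writelane_b32 v0, s1, m0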
807 bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const {
808   // With a constant bus limit of at least 2, there's no issue.
809   if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1)
810     return selectImpl(MI, *CoverageInfo);
811 
812   MachineBasicBlock *MBB = MI.getParent();
813   const DebugLoc &DL = MI.getDebugLoc();
814   Register VDst = MI.getOperand(0).getReg();
815   Register Val = MI.getOperand(2).getReg();
816   Register LaneSelect = MI.getOperand(3).getReg();
817   Register VDstIn = MI.getOperand(4).getReg();
818 
819   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst);
820 
821   Optional<ValueAndVReg> ConstSelect =
822     getConstantVRegValWithLookThrough(LaneSelect, *MRI, true, true);
823   if (ConstSelect) {
824     // The selector has to be an inline immediate, so we can use whatever for
825     // the other operands.
826     MIB.addReg(Val);
827     MIB.addImm(ConstSelect->Value.getSExtValue() &
828                maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
829   } else {
830     Optional<ValueAndVReg> ConstVal =
831       getConstantVRegValWithLookThrough(Val, *MRI, true, true);
832 
833     // If the value written is an inline immediate, we can get away without a
834     // copy to m0.
835     if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value.getSExtValue(),
836                                                  STI.hasInv2PiInlineImm())) {
837       MIB.addImm(ConstVal->Value.getSExtValue());
838       MIB.addReg(LaneSelect);
839     } else {
840       MIB.addReg(Val);
841 
842       // If the lane selector was originally in a VGPR and copied with
843       // readfirstlane, there's a hazard to read the same SGPR from the
844       // VALU. Constrain to a different SGPR to help avoid needing a nop later.
845       RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI);
846 
847       BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
848         .addReg(LaneSelect);
849       MIB.addReg(AMDGPU::M0);
850     }
851   }
852 
853   MIB.addReg(VDstIn);
854 
855   MI.eraseFromParent();
856   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
857 }
858 
859 // We need to handle this here because tablegen doesn't support matching
860 // instructions with multiple outputs.
861 bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
862   Register Dst0 = MI.getOperand(0).getReg();
863   Register Dst1 = MI.getOperand(1).getReg();
864 
865   LLT Ty = MRI->getType(Dst0);
866   unsigned Opc;
867   if (Ty == LLT::scalar(32))
868     Opc = AMDGPU::V_DIV_SCALE_F32_e64;
869   else if (Ty == LLT::scalar(64))
870     Opc = AMDGPU::V_DIV_SCALE_F64_e64;
871   else
872     return false;
873 
874   // TODO: Match source modifiers.
875 
876   const DebugLoc &DL = MI.getDebugLoc();
877   MachineBasicBlock *MBB = MI.getParent();
878 
879   Register Numer = MI.getOperand(3).getReg();
880   Register Denom = MI.getOperand(4).getReg();
881   unsigned ChooseDenom = MI.getOperand(5).getImm();
882 
883   Register Src0 = ChooseDenom != 0 ? Numer : Denom;
884 
885   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0)
886     .addDef(Dst1)
887     .addImm(0)     // $src0_modifiers
888     .addUse(Src0)  // $src0
889     .addImm(0)     // $src1_modifiers
890     .addUse(Denom) // $src1
891     .addImm(0)     // $src2_modifiers
892     .addUse(Numer) // $src2
893     .addImm(0)     // $clamp
894     .addImm(0);    // $omod
895 
896   MI.eraseFromParent();
897   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
898 }
899 
900 bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
901   unsigned IntrinsicID = I.getIntrinsicID();
902   switch (IntrinsicID) {
903   case Intrinsic::amdgcn_if_break: {
904     MachineBasicBlock *BB = I.getParent();
905 
    // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
907     // SelectionDAG uses for wave32 vs wave64.
908     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
909       .add(I.getOperand(0))
910       .add(I.getOperand(2))
911       .add(I.getOperand(3));
912 
913     Register DstReg = I.getOperand(0).getReg();
914     Register Src0Reg = I.getOperand(2).getReg();
915     Register Src1Reg = I.getOperand(3).getReg();
916 
917     I.eraseFromParent();
918 
919     for (Register Reg : { DstReg, Src0Reg, Src1Reg })
920       MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
921 
922     return true;
923   }
924   case Intrinsic::amdgcn_interp_p1_f16:
925     return selectInterpP1F16(I);
926   case Intrinsic::amdgcn_wqm:
927     return constrainCopyLikeIntrin(I, AMDGPU::WQM);
928   case Intrinsic::amdgcn_softwqm:
929     return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
930   case Intrinsic::amdgcn_strict_wwm:
931   case Intrinsic::amdgcn_wwm:
932     return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WWM);
933   case Intrinsic::amdgcn_strict_wqm:
934     return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WQM);
935   case Intrinsic::amdgcn_writelane:
936     return selectWritelane(I);
937   case Intrinsic::amdgcn_div_scale:
938     return selectDivScale(I);
939   case Intrinsic::amdgcn_icmp:
940     return selectIntrinsicIcmp(I);
941   case Intrinsic::amdgcn_ballot:
942     return selectBallot(I);
943   case Intrinsic::amdgcn_reloc_constant:
944     return selectRelocConstant(I);
945   case Intrinsic::amdgcn_groupstaticsize:
946     return selectGroupStaticSize(I);
947   case Intrinsic::returnaddress:
948     return selectReturnAddress(I);
949   default:
950     return selectImpl(I, *CoverageInfo);
951   }
952 }
953 
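// Map an integer predicate to the VOP3 (_e64) VALU compare opcode for a 32-
// or 64-bit comparison, or return -1 for unsupported sizes.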
954 static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
955   if (Size != 32 && Size != 64)
956     return -1;
957   switch (P) {
958   default:
959     llvm_unreachable("Unknown condition code!");
960   case CmpInst::ICMP_NE:
961     return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
962   case CmpInst::ICMP_EQ:
963     return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
964   case CmpInst::ICMP_SGT:
965     return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
966   case CmpInst::ICMP_SGE:
967     return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
968   case CmpInst::ICMP_SLT:
969     return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
970   case CmpInst::ICMP_SLE:
971     return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
972   case CmpInst::ICMP_UGT:
973     return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
974   case CmpInst::ICMP_UGE:
975     return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
976   case CmpInst::ICMP_ULT:
977     return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
978   case CmpInst::ICMP_ULE:
979     return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
980   }
981 }
982 
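// Scalar compares write SCC. Only 32-bit compares (plus 64-bit eq/ne on
// subtargets with scalar 64-bit compares) have SALU forms; return -1
// otherwise.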
983 int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
984                                               unsigned Size) const {
985   if (Size == 64) {
986     if (!STI.hasScalarCompareEq64())
987       return -1;
988 
989     switch (P) {
990     case CmpInst::ICMP_NE:
991       return AMDGPU::S_CMP_LG_U64;
992     case CmpInst::ICMP_EQ:
993       return AMDGPU::S_CMP_EQ_U64;
994     default:
995       return -1;
996     }
997   }
998 
999   if (Size != 32)
1000     return -1;
1001 
1002   switch (P) {
1003   case CmpInst::ICMP_NE:
1004     return AMDGPU::S_CMP_LG_U32;
1005   case CmpInst::ICMP_EQ:
1006     return AMDGPU::S_CMP_EQ_U32;
1007   case CmpInst::ICMP_SGT:
1008     return AMDGPU::S_CMP_GT_I32;
1009   case CmpInst::ICMP_SGE:
1010     return AMDGPU::S_CMP_GE_I32;
1011   case CmpInst::ICMP_SLT:
1012     return AMDGPU::S_CMP_LT_I32;
1013   case CmpInst::ICMP_SLE:
1014     return AMDGPU::S_CMP_LE_I32;
1015   case CmpInst::ICMP_UGT:
1016     return AMDGPU::S_CMP_GT_U32;
1017   case CmpInst::ICMP_UGE:
1018     return AMDGPU::S_CMP_GE_U32;
1019   case CmpInst::ICMP_ULT:
1020     return AMDGPU::S_CMP_LT_U32;
1021   case CmpInst::ICMP_ULE:
1022     return AMDGPU::S_CMP_LE_U32;
1023   default:
1024     llvm_unreachable("Unknown condition code!");
1025   }
1026 }
1027 
1028 bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
1029   MachineBasicBlock *BB = I.getParent();
1030   const DebugLoc &DL = I.getDebugLoc();
1031 
1032   Register SrcReg = I.getOperand(2).getReg();
1033   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1034 
1035   auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
1036 
1037   Register CCReg = I.getOperand(0).getReg();
1038   if (!isVCC(CCReg, *MRI)) {
1039     int Opcode = getS_CMPOpcode(Pred, Size);
1040     if (Opcode == -1)
1041       return false;
1042     MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
1043             .add(I.getOperand(2))
1044             .add(I.getOperand(3));
1045     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
1046       .addReg(AMDGPU::SCC);
1047     bool Ret =
1048         constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
1049         RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
1050     I.eraseFromParent();
1051     return Ret;
1052   }
1053 
1054   int Opcode = getV_CMPOpcode(Pred, Size);
1055   if (Opcode == -1)
1056     return false;
1057 
1058   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
1059             I.getOperand(0).getReg())
1060             .add(I.getOperand(2))
1061             .add(I.getOperand(3));
1062   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
1063                                *TRI.getBoolRC(), *MRI);
1064   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1065   I.eraseFromParent();
1066   return Ret;
1067 }
1068 
1069 bool AMDGPUInstructionSelector::selectIntrinsicIcmp(MachineInstr &I) const {
1070   Register Dst = I.getOperand(0).getReg();
1071   if (isVCC(Dst, *MRI))
1072     return false;
1073 
1074   if (MRI->getType(Dst).getSizeInBits() != STI.getWavefrontSize())
1075     return false;
1076 
1077   MachineBasicBlock *BB = I.getParent();
1078   const DebugLoc &DL = I.getDebugLoc();
1079   Register SrcReg = I.getOperand(2).getReg();
1080   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1081   auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm());
1082 
1083   int Opcode = getV_CMPOpcode(Pred, Size);
1084   if (Opcode == -1)
1085     return false;
1086 
1087   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst)
1088                            .add(I.getOperand(2))
1089                            .add(I.getOperand(3));
1090   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), *TRI.getBoolRC(),
1091                                *MRI);
1092   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1093   I.eraseFromParent();
1094   return Ret;
1095 }
1096 
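// A ballot of a constant folds away: false becomes a zero mask and true (-1)
// becomes a copy of EXEC; any other source is already a wave mask and is
// copied through.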
1097 bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
1098   MachineBasicBlock *BB = I.getParent();
1099   const DebugLoc &DL = I.getDebugLoc();
1100   Register DstReg = I.getOperand(0).getReg();
1101   const unsigned Size = MRI->getType(DstReg).getSizeInBits();
1102   const bool Is64 = Size == 64;
1103 
1104   if (Size != STI.getWavefrontSize())
1105     return false;
1106 
1107   Optional<ValueAndVReg> Arg =
1108       getConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI, true);
1109 
1110   if (Arg.hasValue()) {
1111     const int64_t Value = Arg.getValue().Value.getSExtValue();
1112     if (Value == 0) {
1113       unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
1114       BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
1115     } else if (Value == -1) { // all ones
1116       Register SrcReg = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
1117       BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1118     } else
1119       return false;
1120   } else {
1121     Register SrcReg = I.getOperand(2).getReg();
1122     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1123   }
1124 
1125   I.eraseFromParent();
1126   return true;
1127 }
1128 
1129 bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const {
1130   Register DstReg = I.getOperand(0).getReg();
1131   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
1132   const TargetRegisterClass *DstRC =
1133     TRI.getRegClassForSizeOnBank(32, *DstBank, *MRI);
1134   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
1135     return false;
1136 
1137   const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID;
1138 
1139   Module *M = MF->getFunction().getParent();
1140   const MDNode *Metadata = I.getOperand(2).getMetadata();
1141   auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString();
1142   auto RelocSymbol = cast<GlobalVariable>(
1143     M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext())));
1144 
1145   MachineBasicBlock *BB = I.getParent();
1146   BuildMI(*BB, &I, I.getDebugLoc(),
1147           TII.get(IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg)
1148     .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO);
1149 
1150   I.eraseFromParent();
1151   return true;
1152 }
1153 
1154 bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const {
1155   Triple::OSType OS = MF->getTarget().getTargetTriple().getOS();
1156 
1157   Register DstReg = I.getOperand(0).getReg();
1158   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1159   unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ?
1160     AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1161 
1162   MachineBasicBlock *MBB = I.getParent();
1163   const DebugLoc &DL = I.getDebugLoc();
1164 
1165   auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg);
1166 
1167   if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) {
1168     const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1169     MIB.addImm(MFI->getLDSSize());
1170   } else {
1171     Module *M = MF->getFunction().getParent();
1172     const GlobalValue *GV
1173       = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize);
1174     MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO);
1175   }
1176 
1177   I.eraseFromParent();
1178   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1179 }
1180 
1181 bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const {
1182   MachineBasicBlock *MBB = I.getParent();
1183   MachineFunction &MF = *MBB->getParent();
1184   const DebugLoc &DL = I.getDebugLoc();
1185 
1186   MachineOperand &Dst = I.getOperand(0);
1187   Register DstReg = Dst.getReg();
1188   unsigned Depth = I.getOperand(2).getImm();
1189 
  const TargetRegisterClass *RC
    = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
  if (!RC || !RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) ||
      !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
    return false;
1195 
1196   // Check for kernel and shader functions
1197   if (Depth != 0 ||
1198       MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
1199     BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
1200       .addImm(0);
1201     I.eraseFromParent();
1202     return true;
1203   }
1204 
1205   MachineFrameInfo &MFI = MF.getFrameInfo();
1206   // There is a call to @llvm.returnaddress in this function
1207   MFI.setReturnAddressIsTaken(true);
1208 
1209   // Get the return address reg and mark it as an implicit live-in
1210   Register ReturnAddrReg = TRI.getReturnAddressReg(MF);
1211   Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg,
1212                                              AMDGPU::SReg_64RegClass);
1213   BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
1214     .addReg(LiveIn);
1215   I.eraseFromParent();
1216   return true;
1217 }
1218 
1219 bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
  // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
1221   // SelectionDAG uses for wave32 vs wave64.
1222   MachineBasicBlock *BB = MI.getParent();
1223   BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
1224       .add(MI.getOperand(1));
1225 
1226   Register Reg = MI.getOperand(1).getReg();
1227   MI.eraseFromParent();
1228 
1229   if (!MRI->getRegClassOrNull(Reg))
1230     MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
1231   return true;
1232 }
1233 
1234 bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
1235   MachineInstr &MI, Intrinsic::ID IntrID) const {
1236   MachineBasicBlock *MBB = MI.getParent();
1237   MachineFunction *MF = MBB->getParent();
1238   const DebugLoc &DL = MI.getDebugLoc();
1239 
1240   unsigned IndexOperand = MI.getOperand(7).getImm();
1241   bool WaveRelease = MI.getOperand(8).getImm() != 0;
1242   bool WaveDone = MI.getOperand(9).getImm() != 0;
1243 
1244   if (WaveDone && !WaveRelease)
1245     report_fatal_error("ds_ordered_count: wave_done requires wave_release");
1246 
1247   unsigned OrderedCountIndex = IndexOperand & 0x3f;
1248   IndexOperand &= ~0x3f;
1249   unsigned CountDw = 0;
1250 
1251   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
1252     CountDw = (IndexOperand >> 24) & 0xf;
1253     IndexOperand &= ~(0xf << 24);
1254 
1255     if (CountDw < 1 || CountDw > 4) {
1256       report_fatal_error(
1257         "ds_ordered_count: dword count must be between 1 and 4");
1258     }
1259   }
1260 
1261   if (IndexOperand)
1262     report_fatal_error("ds_ordered_count: bad index operand");
1263 
1264   unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
1265   unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF);
1266 
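  // Pack the ds_ordered_count offset field: offset0 (bits [7:0]) is the byte
  // offset of the ordered counter, and offset1 (bits [15:8]) packs
  // wave_release at bit 0, wave_done at bit 1, the shader type from bit 2,
  // the instruction select at bit 4, and on GFX10+ the dword count minus one
  // from bit 6.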
1267   unsigned Offset0 = OrderedCountIndex << 2;
1268   unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
1269                      (Instruction << 4);
1270 
1271   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
1272     Offset1 |= (CountDw - 1) << 6;
1273 
1274   unsigned Offset = Offset0 | (Offset1 << 8);
1275 
1276   Register M0Val = MI.getOperand(2).getReg();
1277   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1278     .addReg(M0Val);
1279 
1280   Register DstReg = MI.getOperand(0).getReg();
1281   Register ValReg = MI.getOperand(3).getReg();
1282   MachineInstrBuilder DS =
1283     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
1284       .addReg(ValReg)
1285       .addImm(Offset)
1286       .cloneMemRefs(MI);
1287 
1288   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
1289     return false;
1290 
1291   bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
1292   MI.eraseFromParent();
1293   return Ret;
1294 }
1295 
1296 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
1297   switch (IntrID) {
1298   case Intrinsic::amdgcn_ds_gws_init:
1299     return AMDGPU::DS_GWS_INIT;
1300   case Intrinsic::amdgcn_ds_gws_barrier:
1301     return AMDGPU::DS_GWS_BARRIER;
1302   case Intrinsic::amdgcn_ds_gws_sema_v:
1303     return AMDGPU::DS_GWS_SEMA_V;
1304   case Intrinsic::amdgcn_ds_gws_sema_br:
1305     return AMDGPU::DS_GWS_SEMA_BR;
1306   case Intrinsic::amdgcn_ds_gws_sema_p:
1307     return AMDGPU::DS_GWS_SEMA_P;
1308   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1309     return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
1310   default:
1311     llvm_unreachable("not a gws intrinsic");
1312   }
1313 }
1314 
1315 bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
1316                                                      Intrinsic::ID IID) const {
1317   if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
1318       !STI.hasGWSSemaReleaseAll())
1319     return false;
1320 
1321   // intrinsic ID, vsrc, offset
1322   const bool HasVSrc = MI.getNumOperands() == 3;
1323   assert(HasVSrc || MI.getNumOperands() == 2);
1324 
1325   Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
1326   const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
1327   if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
1328     return false;
1329 
1330   MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1331   assert(OffsetDef);
1332 
1333   unsigned ImmOffset;
1334 
1335   MachineBasicBlock *MBB = MI.getParent();
1336   const DebugLoc &DL = MI.getDebugLoc();
1337 
1338   MachineInstr *Readfirstlane = nullptr;
1339 
1340   // If we legalized the VGPR input, strip out the readfirstlane to analyze the
1341   // incoming offset, in case there's an add of a constant. We'll have to put it
1342   // back later.
1343   if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
1344     Readfirstlane = OffsetDef;
1345     BaseOffset = OffsetDef->getOperand(1).getReg();
1346     OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1347   }
1348 
1349   if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
1350     // If we have a constant offset, try to use the 0 in m0 as the base.
1351     // TODO: Look into changing the default m0 initialization value. If the
    // default -1 only sets the low 16 bits, we could leave it as-is and add 1 to
1353     // the immediate offset.
1354 
1355     ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
1356     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1357       .addImm(0);
1358   } else {
1359     std::tie(BaseOffset, ImmOffset) =
1360         AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset);
1361 
1362     if (Readfirstlane) {
1363       // We have the constant offset now, so put the readfirstlane back on the
1364       // variable component.
1365       if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
1366         return false;
1367 
1368       Readfirstlane->getOperand(1).setReg(BaseOffset);
1369       BaseOffset = Readfirstlane->getOperand(0).getReg();
1370     } else {
1371       if (!RBI.constrainGenericRegister(BaseOffset,
1372                                         AMDGPU::SReg_32RegClass, *MRI))
1373         return false;
1374     }
1375 
1376     Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1377     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
1378       .addReg(BaseOffset)
1379       .addImm(16);
1380 
1381     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1382       .addReg(M0Base);
1383   }
1384 
1385   // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
1386   // offset field) % 64. Some versions of the programming guide omit the m0
1387   // part, or claim it's from offset 0.
1388   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));
1389 
1390   if (HasVSrc) {
1391     Register VSrc = MI.getOperand(1).getReg();
1392     MIB.addReg(VSrc);
1393     if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
1394       return false;
1395   }
1396 
1397   MIB.addImm(ImmOffset)
1398      .cloneMemRefs(MI);
1399 
1400   MI.eraseFromParent();
1401   return true;
1402 }
1403 
1404 bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
1405                                                       bool IsAppend) const {
1406   Register PtrBase = MI.getOperand(2).getReg();
1407   LLT PtrTy = MRI->getType(PtrBase);
1408   bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
1409 
1410   unsigned Offset;
1411   std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));
1412 
1413   // TODO: Should this try to look through readfirstlane like GWS?
1414   if (!isDSOffsetLegal(PtrBase, Offset)) {
1415     PtrBase = MI.getOperand(2).getReg();
1416     Offset = 0;
1417   }
1418 
1419   MachineBasicBlock *MBB = MI.getParent();
1420   const DebugLoc &DL = MI.getDebugLoc();
1421   const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
1422 
1423   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1424     .addReg(PtrBase);
1425   if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI))
1426     return false;
1427 
1428   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
1429     .addImm(Offset)
1430     .addImm(IsGDS ? -1 : 0)
1431     .cloneMemRefs(MI);
1432   MI.eraseFromParent();
1433   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1434 }
1435 
1436 bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const {
1437   if (TM.getOptLevel() > CodeGenOpt::None) {
1438     unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second;
1439     if (WGSize <= STI.getWavefrontSize()) {
1440       MachineBasicBlock *MBB = MI.getParent();
1441       const DebugLoc &DL = MI.getDebugLoc();
1442       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER));
1443       MI.eraseFromParent();
1444       return true;
1445     }
1446   }
1447   return selectImpl(MI, *CoverageInfo);
1448 }
1449 
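// Decode a texfailctrl immediate: bit 0 requests TFE, bit 1 requests LWE, and
// any set bit marks the operation as a tex-fail variant. Returns false if
// unknown bits remain set.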
1450 static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
1451                          bool &IsTexFail) {
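  // TexFailCtrl packs two flag bits: bit 0 requests TFE (texture fail enable)
  // and bit 1 requests LWE (LOD warning enable). Any other set bit is
  // unsupported and is reported to the caller by returning false.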
1452   if (TexFailCtrl)
1453     IsTexFail = true;
1454 
1455   TFE = (TexFailCtrl & 0x1) != 0;
1456   TexFailCtrl &= ~(uint64_t)0x1;
1457   LWE = (TexFailCtrl & 0x2) != 0;
1458   TexFailCtrl &= ~(uint64_t)0x2;
1459 
1460   return TexFailCtrl == 0;
1461 }
1462 
1463 bool AMDGPUInstructionSelector::selectImageIntrinsic(
1464   MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
1465   MachineBasicBlock *MBB = MI.getParent();
1466   const DebugLoc &DL = MI.getDebugLoc();
1467 
1468   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
1469     AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
1470 
1471   const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
1472   const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
1473       AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
1474   const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo =
1475       AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode);
1476   unsigned IntrOpcode = Intr->BaseOpcode;
1477   const bool IsGFX10Plus = AMDGPU::isGFX10Plus(STI);
1478 
1479   const unsigned ArgOffset = MI.getNumExplicitDefs() + 1;
1480 
1481   Register VDataIn, VDataOut;
1482   LLT VDataTy;
1483   int NumVDataDwords = -1;
1484   bool IsD16 = false;
1485 
1486   bool Unorm;
1487   if (!BaseOpcode->Sampler)
1488     Unorm = true;
1489   else
1490     Unorm = MI.getOperand(ArgOffset + Intr->UnormIndex).getImm() != 0;
1491 
1492   bool TFE;
1493   bool LWE;
1494   bool IsTexFail = false;
1495   if (!parseTexFail(MI.getOperand(ArgOffset + Intr->TexFailCtrlIndex).getImm(),
1496                     TFE, LWE, IsTexFail))
1497     return false;
1498 
1499   const int Flags = MI.getOperand(ArgOffset + Intr->NumArgs).getImm();
1500   const bool IsA16 = (Flags & 1) != 0;
1501   const bool IsG16 = (Flags & 2) != 0;
1502 
1503   // A16 implies 16-bit gradients.
1504   if (IsA16 && !IsG16)
1505     return false;
1506 
1507   unsigned DMask = 0;
1508   unsigned DMaskLanes = 0;
1509 
1510   if (BaseOpcode->Atomic) {
1511     VDataOut = MI.getOperand(0).getReg();
1512     VDataIn = MI.getOperand(2).getReg();
1513     LLT Ty = MRI->getType(VDataIn);
1514 
1515     // Be careful to allow atomic swap on 16-bit element vectors.
1516     const bool Is64Bit = BaseOpcode->AtomicX2 ?
1517       Ty.getSizeInBits() == 128 :
1518       Ty.getSizeInBits() == 64;
1519 
1520     if (BaseOpcode->AtomicX2) {
1521       assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister);
1522 
1523       DMask = Is64Bit ? 0xf : 0x3;
1524       NumVDataDwords = Is64Bit ? 4 : 2;
1525     } else {
1526       DMask = Is64Bit ? 0x3 : 0x1;
1527       NumVDataDwords = Is64Bit ? 2 : 1;
1528     }
1529   } else {
1530     DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm();
1531     DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
1532 
1533     // One memoperand is mandatory, except for getresinfo.
1534     // FIXME: Check this in verifier.
1535     if (!MI.memoperands_empty()) {
1536       const MachineMemOperand *MMO = *MI.memoperands_begin();
1537 
1538       // Infer d16 from the memory size, as the register type will be mangled by
1539       // unpacked subtargets, or by TFE.
1540       IsD16 = ((8 * MMO->getSize()) / DMaskLanes) < 32;
1541     }
1542 
1543     if (BaseOpcode->Store) {
1544       VDataIn = MI.getOperand(1).getReg();
1545       VDataTy = MRI->getType(VDataIn);
1546       NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
1547     } else {
1548       VDataOut = MI.getOperand(0).getReg();
1549       VDataTy = MRI->getType(VDataOut);
1550       NumVDataDwords = DMaskLanes;
1551 
1552       if (IsD16 && !STI.hasUnpackedD16VMem())
1553         NumVDataDwords = (DMaskLanes + 1) / 2;
1554     }
1555   }
1556 
1557   // Optimize _L to _LZ when _L is zero
1558   if (LZMappingInfo) {
1559     // The legalizer replaced the register with an immediate 0 if we need to
1560     // change the opcode.
1561     const MachineOperand &Lod = MI.getOperand(ArgOffset + Intr->LodIndex);
1562     if (Lod.isImm()) {
1563       assert(Lod.getImm() == 0);
1564       IntrOpcode = LZMappingInfo->LZ;  // set new opcode to _lz variant of _l
1565     }
1566   }
1567 
1568   // Optimize _mip away when 'lod' is zero.
1569   if (MIPMappingInfo) {
1570     const MachineOperand &Lod = MI.getOperand(ArgOffset + Intr->MipIndex);
1571     if (Lod.isImm()) {
1572       assert(Lod.getImm() == 0);
1573       IntrOpcode = MIPMappingInfo->NONMIP;  // set new opcode to variant without _mip
1574     }
1575   }
1576 
1577   // Set G16 opcode
1578   if (IsG16 && !IsA16) {
1579     const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
1580         AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode);
1581     assert(G16MappingInfo);
1582     IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16
1583   }
1584 
1585   // TODO: Check this in verifier.
1586   assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");
1587 
1588   unsigned CPol = MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm();
1589   if (BaseOpcode->Atomic)
1590     CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization
1591   if (CPol & ~AMDGPU::CPol::ALL)
1592     return false;
1593 
1594   int NumVAddrRegs = 0;
1595   int NumVAddrDwords = 0;
1596   for (unsigned I = Intr->VAddrStart; I < Intr->VAddrEnd; I++) {
1597     // Skip the $noregs and 0s inserted during legalization.
1598     MachineOperand &AddrOp = MI.getOperand(ArgOffset + I);
1599     if (!AddrOp.isReg())
1600       continue; // XXX - Break?
1601 
1602     Register Addr = AddrOp.getReg();
1603     if (!Addr)
1604       break;
1605 
1606     ++NumVAddrRegs;
1607     NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32;
1608   }
1609 
1610   // The legalizer preprocessed the intrinsic arguments. If we aren't using
1611   // NSA, these should have been packed into a single value in the first
1612   // address register.
1613   const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs;
1614   if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) {
1615     LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n");
1616     return false;
1617   }
1618 
1619   if (IsTexFail)
1620     ++NumVDataDwords;
1621 
1622   int Opcode = -1;
1623   if (IsGFX10Plus) {
1624     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1625                                    UseNSA ? AMDGPU::MIMGEncGfx10NSA
1626                                           : AMDGPU::MIMGEncGfx10Default,
1627                                    NumVDataDwords, NumVAddrDwords);
1628   } else {
1629     if (STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
1630       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
1631                                      NumVDataDwords, NumVAddrDwords);
1632     if (Opcode == -1)
1633       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
1634                                      NumVDataDwords, NumVAddrDwords);
1635   }
1636   assert(Opcode != -1);
1637 
1638   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode))
1639     .cloneMemRefs(MI);
1640 
1641   if (VDataOut) {
1642     if (BaseOpcode->AtomicX2) {
1643       const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64;
1644 
1645       Register TmpReg = MRI->createVirtualRegister(
1646         Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
1647       unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
1648 
1649       MIB.addDef(TmpReg);
1650       if (!MRI->use_empty(VDataOut)) {
1651         BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut)
1652             .addReg(TmpReg, RegState::Kill, SubReg);
1653       }
1654 
1655     } else {
1656       MIB.addDef(VDataOut); // vdata output
1657     }
1658   }
1659 
1660   if (VDataIn)
1661     MIB.addReg(VDataIn); // vdata input
1662 
1663   for (int I = 0; I != NumVAddrRegs; ++I) {
1664     MachineOperand &SrcOp = MI.getOperand(ArgOffset + Intr->VAddrStart + I);
1665     if (SrcOp.isReg()) {
1666       assert(SrcOp.getReg() != 0);
1667       MIB.addReg(SrcOp.getReg());
1668     }
1669   }
1670 
1671   MIB.addReg(MI.getOperand(ArgOffset + Intr->RsrcIndex).getReg());
1672   if (BaseOpcode->Sampler)
1673     MIB.addReg(MI.getOperand(ArgOffset + Intr->SampIndex).getReg());
1674 
1675   MIB.addImm(DMask); // dmask
1676 
1677   if (IsGFX10Plus)
1678     MIB.addImm(DimInfo->Encoding);
1679   MIB.addImm(Unorm);
1680 
1681   MIB.addImm(CPol);
1682   MIB.addImm(IsA16 &&  // a16 or r128
1683              STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0);
1684   if (IsGFX10Plus)
1685     MIB.addImm(IsA16 ? -1 : 0);
1686 
1687   MIB.addImm(TFE); // tfe
1688   MIB.addImm(LWE); // lwe
1689   if (!IsGFX10Plus)
1690     MIB.addImm(DimInfo->DA ? -1 : 0);
1691   if (BaseOpcode->HasD16)
1692     MIB.addImm(IsD16 ? -1 : 0);
1693 
1694   if (IsTexFail) {
1695     // An image load instruction with TFE/LWE only conditionally writes to its
1696     // result registers. Initialize them to zero so that we always get well
1697     // defined result values.
1698     assert(VDataOut && !VDataIn);
1699     Register Tied = MRI->cloneVirtualRegister(VDataOut);
1700     Register Zero = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1701     BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::V_MOV_B32_e32), Zero)
1702       .addImm(0);
1703     auto Parts = TRI.getRegSplitParts(MRI->getRegClass(Tied), 4);
1704     if (STI.usePRTStrictNull()) {
1705       // With enable-prt-strict-null enabled, initialize all result registers to
1706       // zero.
1707       auto RegSeq =
1708           BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1709       for (auto Sub : Parts)
1710         RegSeq.addReg(Zero).addImm(Sub);
1711     } else {
1712       // With enable-prt-strict-null disabled, only initialize the extra TFE/LWE
1713       // result register.
1714       Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1715       BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
1716       auto RegSeq =
1717           BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1718       for (auto Sub : Parts.drop_back(1))
1719         RegSeq.addReg(Undef).addImm(Sub);
1720       RegSeq.addReg(Zero).addImm(Parts.back());
1721     }
1722     MIB.addReg(Tied, RegState::Implicit);
1723     MIB->tieOperands(0, MIB->getNumOperands() - 1);
1724   }
1725 
1726   MI.eraseFromParent();
1727   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1728 }
1729 
1730 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
1731     MachineInstr &I) const {
1732   unsigned IntrinsicID = I.getIntrinsicID();
1733   switch (IntrinsicID) {
1734   case Intrinsic::amdgcn_end_cf:
1735     return selectEndCfIntrinsic(I);
1736   case Intrinsic::amdgcn_ds_ordered_add:
1737   case Intrinsic::amdgcn_ds_ordered_swap:
1738     return selectDSOrderedIntrinsic(I, IntrinsicID);
1739   case Intrinsic::amdgcn_ds_gws_init:
1740   case Intrinsic::amdgcn_ds_gws_barrier:
1741   case Intrinsic::amdgcn_ds_gws_sema_v:
1742   case Intrinsic::amdgcn_ds_gws_sema_br:
1743   case Intrinsic::amdgcn_ds_gws_sema_p:
1744   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1745     return selectDSGWSIntrinsic(I, IntrinsicID);
1746   case Intrinsic::amdgcn_ds_append:
1747     return selectDSAppendConsume(I, true);
1748   case Intrinsic::amdgcn_ds_consume:
1749     return selectDSAppendConsume(I, false);
1750   case Intrinsic::amdgcn_s_barrier:
1751     return selectSBarrier(I);
1752   case Intrinsic::amdgcn_global_atomic_fadd:
1753     return selectGlobalAtomicFadd(I, I.getOperand(2), I.getOperand(3));
1754   default: {
1755     return selectImpl(I, *CoverageInfo);
1756   }
1757   }
1758 }
1759 
1760 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
1761   if (selectImpl(I, *CoverageInfo))
1762     return true;
1763 
1764   MachineBasicBlock *BB = I.getParent();
1765   const DebugLoc &DL = I.getDebugLoc();
1766 
1767   Register DstReg = I.getOperand(0).getReg();
1768   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
1769   assert(Size <= 32 || Size == 64);
1770   const MachineOperand &CCOp = I.getOperand(1);
1771   Register CCReg = CCOp.getReg();
1772   if (!isVCC(CCReg, *MRI)) {
1773     unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
1774                                          AMDGPU::S_CSELECT_B32;
1775     MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
1776             .addReg(CCReg);
1777 
1778     // The generic constrainSelectedInstRegOperands doesn't work for the scc
1779     // register bank, because it does not cover the register class that we use
1780     // to represent it. So we need to manually set the register class here.
1781     if (!MRI->getRegClassOrNull(CCReg))
1782       MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
1783     MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
1784             .add(I.getOperand(2))
1785             .add(I.getOperand(3));
1786 
1787     bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI) |
1788                constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
1789     I.eraseFromParent();
1790     return Ret;
1791   }
1792 
1793   // Wide VGPR select should have been split in RegBankSelect.
1794   if (Size > 32)
1795     return false;
1796 
1797   MachineInstr *Select =
1798       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1799               .addImm(0)
1800               .add(I.getOperand(3))
1801               .addImm(0)
1802               .add(I.getOperand(2))
1803               .add(I.getOperand(1));
1804 
1805   bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1806   I.eraseFromParent();
1807   return Ret;
1808 }
1809 
1810 static int sizeToSubRegIndex(unsigned Size) {
1811   switch (Size) {
1812   case 32:
1813     return AMDGPU::sub0;
1814   case 64:
1815     return AMDGPU::sub0_sub1;
1816   case 96:
1817     return AMDGPU::sub0_sub1_sub2;
1818   case 128:
1819     return AMDGPU::sub0_sub1_sub2_sub3;
1820   case 256:
1821     return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
1822   default:
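    // Sizes that fit in a register but have no exact subregister index are
    // rounded up to the next power of two, e.g. 48 bits uses sub0_sub1.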
1823     if (Size < 32)
1824       return AMDGPU::sub0;
1825     if (Size > 256)
1826       return -1;
1827     return sizeToSubRegIndex(PowerOf2Ceil(Size));
1828   }
1829 }
1830 
1831 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
1832   Register DstReg = I.getOperand(0).getReg();
1833   Register SrcReg = I.getOperand(1).getReg();
1834   const LLT DstTy = MRI->getType(DstReg);
1835   const LLT SrcTy = MRI->getType(SrcReg);
1836   const LLT S1 = LLT::scalar(1);
1837 
1838   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1839   const RegisterBank *DstRB;
1840   if (DstTy == S1) {
1841     // This is a special case. We don't treat s1 for legalization artifacts as
1842     // vcc booleans.
1843     DstRB = SrcRB;
1844   } else {
1845     DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1846     if (SrcRB != DstRB)
1847       return false;
1848   }
1849 
1850   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
1851 
1852   unsigned DstSize = DstTy.getSizeInBits();
1853   unsigned SrcSize = SrcTy.getSizeInBits();
1854 
1855   const TargetRegisterClass *SrcRC
1856     = TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB, *MRI);
1857   const TargetRegisterClass *DstRC
1858     = TRI.getRegClassForSizeOnBank(DstSize, *DstRB, *MRI);
1859   if (!SrcRC || !DstRC)
1860     return false;
1861 
1862   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1863       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1864     LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
1865     return false;
1866   }
1867 
1868   if (DstTy == LLT::vector(2, 16) && SrcTy == LLT::vector(2, 32)) {
1869     MachineBasicBlock *MBB = I.getParent();
1870     const DebugLoc &DL = I.getDebugLoc();
1871 
1872     Register LoReg = MRI->createVirtualRegister(DstRC);
1873     Register HiReg = MRI->createVirtualRegister(DstRC);
1874     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg)
1875       .addReg(SrcReg, 0, AMDGPU::sub0);
1876     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg)
1877       .addReg(SrcReg, 0, AMDGPU::sub1);
1878 
1879     if (IsVALU && STI.hasSDWA()) {
1880       // Write the low 16-bits of the high element into the high 16-bits of the
1881       // low element.
1882       MachineInstr *MovSDWA =
1883         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
1884         .addImm(0)                             // $src0_modifiers
1885         .addReg(HiReg)                         // $src0
1886         .addImm(0)                             // $clamp
1887         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
1888         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
1889         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
1890         .addReg(LoReg, RegState::Implicit);
1891       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
1892     } else {
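      // Without SDWA (or for SALU), build the result manually as
      // (Hi << 16) | (Lo & 0xffff).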
1893       Register TmpReg0 = MRI->createVirtualRegister(DstRC);
1894       Register TmpReg1 = MRI->createVirtualRegister(DstRC);
1895       Register ImmReg = MRI->createVirtualRegister(DstRC);
1896       if (IsVALU) {
1897         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0)
1898           .addImm(16)
1899           .addReg(HiReg);
1900       } else {
1901         BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0)
1902           .addReg(HiReg)
1903           .addImm(16);
1904       }
1905 
1906       unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1907       unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
1908       unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32;
1909 
1910       BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg)
1911         .addImm(0xffff);
1912       BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1)
1913         .addReg(LoReg)
1914         .addReg(ImmReg);
1915       BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg)
1916         .addReg(TmpReg0)
1917         .addReg(TmpReg1);
1918     }
1919 
1920     I.eraseFromParent();
1921     return true;
1922   }
1923 
1924   if (!DstTy.isScalar())
1925     return false;
1926 
1927   if (SrcSize > 32) {
1928     int SubRegIdx = sizeToSubRegIndex(DstSize);
1929     if (SubRegIdx == -1)
1930       return false;
1931 
1932     // Deal with weird cases where the class only partially supports the subreg
1933     // index.
1934     const TargetRegisterClass *SrcWithSubRC
1935       = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
1936     if (!SrcWithSubRC)
1937       return false;
1938 
1939     if (SrcWithSubRC != SrcRC) {
1940       if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI))
1941         return false;
1942     }
1943 
1944     I.getOperand(1).setSubReg(SubRegIdx);
1945   }
1946 
1947   I.setDesc(TII.get(TargetOpcode::COPY));
1948   return true;
1949 }
1950 
1951 /// \returns true if a bitmask for \p Size bits will be an inline immediate.
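/// For example, Size == 6 yields a mask of 63, which is an inline immediate,
/// while Size == 16 yields 0xffff, which is not.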
1952 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
1953   Mask = maskTrailingOnes<unsigned>(Size);
1954   int SignedMask = static_cast<int>(Mask);
1955   return SignedMask >= -16 && SignedMask <= 64;
1956 }
1957 
1958 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
1959 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
1960   Register Reg, const MachineRegisterInfo &MRI,
1961   const TargetRegisterInfo &TRI) const {
1962   const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
1963   if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
1964     return RB;
1965 
1966   // Ignore the type, since we don't use vcc in artifacts.
1967   if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
1968     return &RBI.getRegBankFromRegClass(*RC, LLT());
1969   return nullptr;
1970 }
1971 
1972 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
1973   bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG;
1974   bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg;
1975   const DebugLoc &DL = I.getDebugLoc();
1976   MachineBasicBlock &MBB = *I.getParent();
1977   const Register DstReg = I.getOperand(0).getReg();
1978   const Register SrcReg = I.getOperand(1).getReg();
1979 
1980   const LLT DstTy = MRI->getType(DstReg);
1981   const LLT SrcTy = MRI->getType(SrcReg);
1982   const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ?
1983     I.getOperand(2).getImm() : SrcTy.getSizeInBits();
1984   const unsigned DstSize = DstTy.getSizeInBits();
1985   if (!DstTy.isScalar())
1986     return false;
1987 
1988   // Artifact casts should never use vcc.
1989   const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
1990 
1991   // FIXME: This should probably be illegal and split earlier.
1992   if (I.getOpcode() == AMDGPU::G_ANYEXT) {
1993     if (DstSize <= 32)
1994       return selectCOPY(I);
1995 
1996     const TargetRegisterClass *SrcRC =
1997         TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank, *MRI);
1998     const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
1999     const TargetRegisterClass *DstRC =
2000         TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
2001 
2002     Register UndefReg = MRI->createVirtualRegister(SrcRC);
2003     BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2004     BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2005       .addReg(SrcReg)
2006       .addImm(AMDGPU::sub0)
2007       .addReg(UndefReg)
2008       .addImm(AMDGPU::sub1);
2009     I.eraseFromParent();
2010 
2011     return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) &&
2012            RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI);
2013   }
2014 
2015   if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
2016     // 64-bit should have been split up in RegBankSelect
2017 
2018     // Try to use an and with a mask if it will save code size.
2019     unsigned Mask;
2020     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2021       MachineInstr *ExtI =
2022       BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
2023         .addImm(Mask)
2024         .addReg(SrcReg);
2025       I.eraseFromParent();
2026       return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2027     }
2028 
2029     const unsigned BFE = Signed ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
2030     MachineInstr *ExtI =
2031       BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
2032       .addReg(SrcReg)
2033       .addImm(0) // Offset
2034       .addImm(SrcSize); // Width
2035     I.eraseFromParent();
2036     return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2037   }
2038 
2039   if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
2040     const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ?
2041       AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass;
2042     if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI))
2043       return false;
2044 
2045     if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
2046       const unsigned SextOpc = SrcSize == 8 ?
2047         AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
2048       BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
2049         .addReg(SrcReg);
2050       I.eraseFromParent();
2051       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2052     }
2053 
2054     const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
2055     const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
2056 
2057     // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16] = width.
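    // For example, SrcSize == 8 is encoded as (8 << 16) == 0x80000, i.e.
    // offset 0, width 8.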
2058     if (DstSize > 32 && (SrcSize <= 32 || InReg)) {
2059       // We need a 64-bit register source, but the high bits don't matter.
2060       Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
2061       Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2062       unsigned SubReg = InReg ? AMDGPU::sub0 : 0;
2063 
2064       BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2065       BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
2066         .addReg(SrcReg, 0, SubReg)
2067         .addImm(AMDGPU::sub0)
2068         .addReg(UndefReg)
2069         .addImm(AMDGPU::sub1);
2070 
2071       BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
2072         .addReg(ExtReg)
2073         .addImm(SrcSize << 16);
2074 
2075       I.eraseFromParent();
2076       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
2077     }
2078 
2079     unsigned Mask;
2080     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2081       BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
2082         .addReg(SrcReg)
2083         .addImm(Mask);
2084     } else {
2085       BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
2086         .addReg(SrcReg)
2087         .addImm(SrcSize << 16);
2088     }
2089 
2090     I.eraseFromParent();
2091     return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2092   }
2093 
2094   return false;
2095 }
2096 
2097 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
2098   MachineBasicBlock *BB = I.getParent();
2099   MachineOperand &ImmOp = I.getOperand(1);
2100   Register DstReg = I.getOperand(0).getReg();
2101   unsigned Size = MRI->getType(DstReg).getSizeInBits();
2102 
2103   // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
2104   if (ImmOp.isFPImm()) {
2105     const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
2106     ImmOp.ChangeToImmediate(Imm.getZExtValue());
2107   } else if (ImmOp.isCImm()) {
2108     ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue());
2109   } else {
2110     llvm_unreachable("Not supported by g_constants");
2111   }
2112 
2113   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2114   const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID;
2115 
2116   unsigned Opcode;
2117   if (DstRB->getID() == AMDGPU::VCCRegBankID) {
2118     Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
2119   } else {
2120     Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
2121 
2122     // We should never produce s1 values on banks other than VCC. If the user of
2123     // this already constrained the register, we may incorrectly think it's VCC
2124     // if it wasn't originally.
2125     if (Size == 1)
2126       return false;
2127   }
2128 
2129   if (Size != 64) {
2130     I.setDesc(TII.get(Opcode));
2131     I.addImplicitDefUseOperands(*MF);
2132     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2133   }
2134 
2135   const DebugLoc &DL = I.getDebugLoc();
2136 
2137   APInt Imm(Size, I.getOperand(1).getImm());
2138 
2139   MachineInstr *ResInst;
2140   if (IsSgpr && TII.isInlineConstant(Imm)) {
2141     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
2142       .addImm(I.getOperand(1).getImm());
2143   } else {
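    // Materialize the 64-bit immediate as two 32-bit moves and recombine the
    // halves with a REG_SEQUENCE.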
2144     const TargetRegisterClass *RC = IsSgpr ?
2145       &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
2146     Register LoReg = MRI->createVirtualRegister(RC);
2147     Register HiReg = MRI->createVirtualRegister(RC);
2148 
2149     BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
2150       .addImm(Imm.trunc(32).getZExtValue());
2151 
2152     BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
2153       .addImm(Imm.ashr(32).getZExtValue());
2154 
2155     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2156       .addReg(LoReg)
2157       .addImm(AMDGPU::sub0)
2158       .addReg(HiReg)
2159       .addImm(AMDGPU::sub1);
2160   }
2161 
2162   // We can't call constrainSelectedInstRegOperands here, because it doesn't
2163   // work for target-independent opcodes.
2164   I.eraseFromParent();
2165   const TargetRegisterClass *DstRC =
2166     TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
2167   if (!DstRC)
2168     return true;
2169   return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
2170 }
2171 
2172 bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
2173   // Only manually handle the f64 SGPR case.
2174   //
2175   // FIXME: This is a workaround for 2.5 different tablegen problems. Because
2176   // the bit ops theoretically have a second result due to the implicit def of
2177   // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing
2178   // that is easy by disabling the check. The result works, but uses a
2179   // nonsensical sreg32orlds_and_sreg_1 regclass.
2180   //
2181   // The DAG emitter is more problematic, and incorrectly adds both results of
2182   // the S_XOR_B32 to the variadic REG_SEQUENCE operands.
2183 
2184   Register Dst = MI.getOperand(0).getReg();
2185   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2186   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2187       MRI->getType(Dst) != LLT::scalar(64))
2188     return false;
2189 
2190   Register Src = MI.getOperand(1).getReg();
2191   MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI);
2192   if (Fabs)
2193     Src = Fabs->getOperand(1).getReg();
2194 
2195   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2196       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2197     return false;
2198 
2199   MachineBasicBlock *BB = MI.getParent();
2200   const DebugLoc &DL = MI.getDebugLoc();
2201   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2202   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2203   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2204   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2205 
2206   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2207     .addReg(Src, 0, AMDGPU::sub0);
2208   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2209     .addReg(Src, 0, AMDGPU::sub1);
2210   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2211     .addImm(0x80000000);
2212 
2213   // Set or toggle sign bit.
2214   unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
2215   BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
2216     .addReg(HiReg)
2217     .addReg(ConstReg);
2218   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2219     .addReg(LoReg)
2220     .addImm(AMDGPU::sub0)
2221     .addReg(OpReg)
2222     .addImm(AMDGPU::sub1);
2223   MI.eraseFromParent();
2224   return true;
2225 }
2226 
2227 // FIXME: This is a workaround for the same tablegen problems as G_FNEG
2228 bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {
2229   Register Dst = MI.getOperand(0).getReg();
2230   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2231   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2232       MRI->getType(Dst) != LLT::scalar(64))
2233     return false;
2234 
2235   Register Src = MI.getOperand(1).getReg();
2236   MachineBasicBlock *BB = MI.getParent();
2237   const DebugLoc &DL = MI.getDebugLoc();
2238   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2239   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2240   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2241   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2242 
2243   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2244       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2245     return false;
2246 
2247   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2248     .addReg(Src, 0, AMDGPU::sub0);
2249   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2250     .addReg(Src, 0, AMDGPU::sub1);
2251   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2252     .addImm(0x7fffffff);
2253 
2254   // Clear sign bit.
2255   // TODO: Should this use S_BITSET0_*?
2256   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg)
2257     .addReg(HiReg)
2258     .addReg(ConstReg);
2259   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2260     .addReg(LoReg)
2261     .addImm(AMDGPU::sub0)
2262     .addReg(OpReg)
2263     .addImm(AMDGPU::sub1);
2264 
2265   MI.eraseFromParent();
2266   return true;
2267 }
2268 
2269 static bool isConstant(const MachineInstr &MI) {
2270   return MI.getOpcode() == TargetOpcode::G_CONSTANT;
2271 }
2272 
2273 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
2274     const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
2275 
2276   const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
2277 
2278   assert(PtrMI);
2279 
2280   if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
2281     return;
2282 
2283   GEPInfo GEPInfo(*PtrMI);
2284 
2285   for (unsigned i = 1; i != 3; ++i) {
2286     const MachineOperand &GEPOp = PtrMI->getOperand(i);
2287     const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
2288     assert(OpDef);
2289     if (i == 2 && isConstant(*OpDef)) {
2290       // TODO: Could handle constant base + variable offset, but a combine
2291       // probably should have commuted it.
2292       assert(GEPInfo.Imm == 0);
2293       GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
2294       continue;
2295     }
2296     const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
2297     if (OpBank->getID() == AMDGPU::SGPRRegBankID)
2298       GEPInfo.SgprParts.push_back(GEPOp.getReg());
2299     else
2300       GEPInfo.VgprParts.push_back(GEPOp.getReg());
2301   }
2302 
2303   AddrInfo.push_back(GEPInfo);
2304   getAddrModeInfo(*PtrMI, MRI, AddrInfo);
2305 }
2306 
2307 bool AMDGPUInstructionSelector::isSGPR(Register Reg) const {
2308   return RBI.getRegBank(Reg, *MRI, TRI)->getID() == AMDGPU::SGPRRegBankID;
2309 }
2310 
2311 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
2312   if (!MI.hasOneMemOperand())
2313     return false;
2314 
2315   const MachineMemOperand *MMO = *MI.memoperands_begin();
2316   const Value *Ptr = MMO->getValue();
2317 
2318   // UndefValue means this is a load of a kernel input.  These are uniform.
2319   // Sometimes LDS instructions have constant pointers.
2320   // If Ptr is null, then that means this mem operand contains a
2321   // PseudoSourceValue like GOT.
2322   if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
2323       isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
2324     return true;
2325 
2326   if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
2327     return true;
2328 
2329   const Instruction *I = dyn_cast<Instruction>(Ptr);
2330   return I && I->getMetadata("amdgpu.uniform");
2331 }
2332 
2333 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
2334   for (const GEPInfo &GEPInfo : AddrInfo) {
2335     if (!GEPInfo.VgprParts.empty())
2336       return true;
2337   }
2338   return false;
2339 }
2340 
2341 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
2342   const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2343   unsigned AS = PtrTy.getAddressSpace();
2344   if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
2345       STI.ldsRequiresM0Init()) {
2346     MachineBasicBlock *BB = I.getParent();
2347 
2348     // If DS instructions require M0 initialization, insert it before selecting.
2349     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2350       .addImm(-1);
2351   }
2352 }
2353 
2354 bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW(
2355   MachineInstr &I) const {
2356   if (I.getOpcode() == TargetOpcode::G_ATOMICRMW_FADD) {
2357     const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2358     unsigned AS = PtrTy.getAddressSpace();
2359     if (AS == AMDGPUAS::GLOBAL_ADDRESS)
2360       return selectGlobalAtomicFadd(I, I.getOperand(1), I.getOperand(2));
2361   }
2362 
2363   initM0(I);
2364   return selectImpl(I, *CoverageInfo);
2365 }
2366 
2367 // TODO: No rtn optimization.
2368 bool AMDGPUInstructionSelector::selectG_AMDGPU_ATOMIC_CMPXCHG(
2369   MachineInstr &MI) const {
2370   Register PtrReg = MI.getOperand(1).getReg();
2371   const LLT PtrTy = MRI->getType(PtrReg);
2372   if (PtrTy.getAddressSpace() == AMDGPUAS::FLAT_ADDRESS ||
2373       STI.useFlatForGlobal())
2374     return selectImpl(MI, *CoverageInfo);
2375 
2376   Register DstReg = MI.getOperand(0).getReg();
2377   const LLT Ty = MRI->getType(DstReg);
2378   const bool Is64 = Ty.getSizeInBits() == 64;
2379   const unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
2380   Register TmpReg = MRI->createVirtualRegister(
2381     Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
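  // The cmpswap source packs the new value and the compare value into a single
  // double-width register, so the raw result is also double width; the original
  // memory value is extracted from the low half via SubReg below.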
2382 
2383   const DebugLoc &DL = MI.getDebugLoc();
2384   MachineBasicBlock *BB = MI.getParent();
2385 
2386   Register VAddr, RSrcReg, SOffset;
2387   int64_t Offset = 0;
2388 
2389   unsigned Opcode;
2390   if (selectMUBUFOffsetImpl(MI.getOperand(1), RSrcReg, SOffset, Offset)) {
2391     Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_OFFSET_RTN :
2392                     AMDGPU::BUFFER_ATOMIC_CMPSWAP_OFFSET_RTN;
2393   } else if (selectMUBUFAddr64Impl(MI.getOperand(1), VAddr,
2394                                    RSrcReg, SOffset, Offset)) {
2395     Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_ADDR64_RTN :
2396                     AMDGPU::BUFFER_ATOMIC_CMPSWAP_ADDR64_RTN;
2397   } else
2398     return selectImpl(MI, *CoverageInfo);
2399 
2400   auto MIB = BuildMI(*BB, &MI, DL, TII.get(Opcode), TmpReg)
2401     .addReg(MI.getOperand(2).getReg());
2402 
2403   if (VAddr)
2404     MIB.addReg(VAddr);
2405 
2406   MIB.addReg(RSrcReg);
2407   if (SOffset)
2408     MIB.addReg(SOffset);
2409   else
2410     MIB.addImm(0);
2411 
2412   MIB.addImm(Offset);
2413   MIB.addImm(AMDGPU::CPol::GLC);
2414   MIB.cloneMemRefs(MI);
2415 
2416   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), DstReg)
2417     .addReg(TmpReg, RegState::Kill, SubReg);
2418 
2419   MI.eraseFromParent();
2420 
2421   MRI->setRegClass(
2422     DstReg, Is64 ? &AMDGPU::VReg_64RegClass : &AMDGPU::VGPR_32RegClass);
2423   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2424 }
2425 
2426 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
2427   MachineBasicBlock *BB = I.getParent();
2428   MachineOperand &CondOp = I.getOperand(0);
2429   Register CondReg = CondOp.getReg();
2430   const DebugLoc &DL = I.getDebugLoc();
2431 
2432   unsigned BrOpcode;
2433   Register CondPhysReg;
2434   const TargetRegisterClass *ConstrainRC;
2435 
2436   // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
2437   // whether the branch is uniform when selecting the instruction. In
2438   // GlobalISel, we should push that decision into RegBankSelect. Assume for now
2439   // RegBankSelect knows what it's doing if the branch condition is scc, even
2440   // though it currently does not.
2441   if (!isVCC(CondReg, *MRI)) {
2442     if (MRI->getType(CondReg) != LLT::scalar(32))
2443       return false;
2444 
2445     CondPhysReg = AMDGPU::SCC;
2446     BrOpcode = AMDGPU::S_CBRANCH_SCC1;
2447     ConstrainRC = &AMDGPU::SReg_32RegClass;
2448   } else {
2449     // FIXME: Do we have to insert an and with exec here, like in SelectionDAG?
2450     // We sort of know that a VCC producer based on the register bank, that ands
2451     // inactive lanes with 0. What if there was a logical operation with vcc
2452     // producers in different blocks/with different exec masks?
2453     // FIXME: Should scc->vcc copies and with exec?
2454     CondPhysReg = TRI.getVCC();
2455     BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
2456     ConstrainRC = TRI.getBoolRC();
2457   }
2458 
2459   if (!MRI->getRegClassOrNull(CondReg))
2460     MRI->setRegClass(CondReg, ConstrainRC);
2461 
2462   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
2463     .addReg(CondReg);
2464   BuildMI(*BB, &I, DL, TII.get(BrOpcode))
2465     .addMBB(I.getOperand(1).getMBB());
2466 
2467   I.eraseFromParent();
2468   return true;
2469 }
2470 
2471 bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE(
2472   MachineInstr &I) const {
2473   Register DstReg = I.getOperand(0).getReg();
2474   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2475   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2476   I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
2477   if (IsVGPR)
2478     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
2479 
2480   return RBI.constrainGenericRegister(
2481     DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
2482 }
2483 
2484 bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
2485   Register DstReg = I.getOperand(0).getReg();
2486   Register SrcReg = I.getOperand(1).getReg();
2487   Register MaskReg = I.getOperand(2).getReg();
2488   LLT Ty = MRI->getType(DstReg);
2489   LLT MaskTy = MRI->getType(MaskReg);
2490 
2491   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2492   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2493   const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI);
2494   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2495   if (DstRB != SrcRB) // Should only happen for hand-written MIR.
2496     return false;
2497 
2498   unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
2499   const TargetRegisterClass &RegRC
2500     = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2501 
2502   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB,
2503                                                                   *MRI);
2504   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB,
2505                                                                   *MRI);
2506   const TargetRegisterClass *MaskRC =
2507       TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB, *MRI);
2508 
2509   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2510       !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2511       !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI))
2512     return false;
2513 
2514   MachineBasicBlock *BB = I.getParent();
2515   const DebugLoc &DL = I.getDebugLoc();
2516   if (Ty.getSizeInBits() == 32) {
2517     assert(MaskTy.getSizeInBits() == 32 &&
2518            "ptrmask should have been narrowed during legalize");
2519 
2520     BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
2521       .addReg(SrcReg)
2522       .addReg(MaskReg);
2523     I.eraseFromParent();
2524     return true;
2525   }
2526 
2527   Register HiReg = MRI->createVirtualRegister(&RegRC);
2528   Register LoReg = MRI->createVirtualRegister(&RegRC);
2529 
2530   // Extract the subregisters from the source pointer.
2531   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
2532     .addReg(SrcReg, 0, AMDGPU::sub0);
2533   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
2534     .addReg(SrcReg, 0, AMDGPU::sub1);
2535 
2536   Register MaskedLo, MaskedHi;
2537 
2538   // Try to avoid emitting a bit operation when we only need to touch half of
2539   // the 64-bit pointer.
2540   APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zextOrSelf(64);
2541 
2542   const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
2543   const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
2544   if ((MaskOnes & MaskLo32) == MaskLo32) {
2545     // If all the bits in the low half are 1, we only need a copy for it.
2546     MaskedLo = LoReg;
2547   } else {
2548     // Extract the mask subregister and apply the and.
2549     Register MaskLo = MRI->createVirtualRegister(&RegRC);
2550     MaskedLo = MRI->createVirtualRegister(&RegRC);
2551 
2552     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo)
2553       .addReg(MaskReg, 0, AMDGPU::sub0);
2554     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo)
2555       .addReg(LoReg)
2556       .addReg(MaskLo);
2557   }
2558 
2559   if ((MaskOnes & MaskHi32) == MaskHi32) {
2560     // If all the bits in the high half are 1, we only need a copy for it.
2561     MaskedHi = HiReg;
2562   } else {
2563     Register MaskHi = MRI->createVirtualRegister(&RegRC);
2564     MaskedHi = MRI->createVirtualRegister(&RegRC);
2565 
2566     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi)
2567       .addReg(MaskReg, 0, AMDGPU::sub1);
2568     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi)
2569       .addReg(HiReg)
2570       .addReg(MaskHi);
2571   }
2572 
2573   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2574     .addReg(MaskedLo)
2575     .addImm(AMDGPU::sub0)
2576     .addReg(MaskedHi)
2577     .addImm(AMDGPU::sub1);
2578   I.eraseFromParent();
2579   return true;
2580 }
2581 
2582 /// Return the register to use for the index value, and the subregister to use
2583 /// for the indirectly accessed register.
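/// For example, with a 128-bit superclass, EltSize == 4 and an index defined
/// as (base + 2), this returns {base, sub2} so the access uses the third
/// 32-bit element.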
2584 static std::pair<Register, unsigned>
2585 computeIndirectRegIndex(MachineRegisterInfo &MRI,
2586                         const SIRegisterInfo &TRI,
2587                         const TargetRegisterClass *SuperRC,
2588                         Register IdxReg,
2589                         unsigned EltSize) {
2590   Register IdxBaseReg;
2591   int Offset;
2592 
2593   std::tie(IdxBaseReg, Offset) = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg);
2594   if (IdxBaseReg == AMDGPU::NoRegister) {
2595     // This will happen if the index is a known constant. This should ordinarily
2596     // be legalized out, but handle it as a register just in case.
2597     assert(Offset == 0);
2598     IdxBaseReg = IdxReg;
2599   }
2600 
2601   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);
2602 
2603   // Skip out of bounds offsets, or else we would end up using an undefined
2604   // register.
2605   if (static_cast<unsigned>(Offset) >= SubRegs.size())
2606     return std::make_pair(IdxReg, SubRegs[0]);
2607   return std::make_pair(IdxBaseReg, SubRegs[Offset]);
2608 }
2609 
2610 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
2611   MachineInstr &MI) const {
2612   Register DstReg = MI.getOperand(0).getReg();
2613   Register SrcReg = MI.getOperand(1).getReg();
2614   Register IdxReg = MI.getOperand(2).getReg();
2615 
2616   LLT DstTy = MRI->getType(DstReg);
2617   LLT SrcTy = MRI->getType(SrcReg);
2618 
2619   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2620   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2621   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2622 
2623   // The index must be scalar. If it wasn't, RegBankSelect should have moved
2624   // this into a waterfall loop.
2625   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2626     return false;
2627 
2628   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB,
2629                                                                   *MRI);
2630   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(DstTy, *DstRB,
2631                                                                   *MRI);
2632   if (!SrcRC || !DstRC)
2633     return false;
2634   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2635       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2636       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2637     return false;
2638 
2639   MachineBasicBlock *BB = MI.getParent();
2640   const DebugLoc &DL = MI.getDebugLoc();
2641   const bool Is64 = DstTy.getSizeInBits() == 64;
2642 
2643   unsigned SubReg;
2644   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg,
2645                                                      DstTy.getSizeInBits() / 8);
2646 
2647   if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
2648     if (DstTy.getSizeInBits() != 32 && !Is64)
2649       return false;
2650 
2651     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2652       .addReg(IdxReg);
2653 
2654     unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
2655     BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
2656       .addReg(SrcReg, 0, SubReg)
2657       .addReg(SrcReg, RegState::Implicit);
2658     MI.eraseFromParent();
2659     return true;
2660   }
2661 
2662   if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
2663     return false;
2664 
2665   if (!STI.useVGPRIndexMode()) {
2666     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2667       .addReg(IdxReg);
2668     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
2669       .addReg(SrcReg, 0, SubReg)
2670       .addReg(SrcReg, RegState::Implicit);
2671     MI.eraseFromParent();
2672     return true;
2673   }
2674 
2675   const MCInstrDesc &GPRIDXDesc =
2676       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*SrcRC), true);
2677   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2678       .addReg(SrcReg)
2679       .addReg(IdxReg)
2680       .addImm(SubReg);
2681 
2682   MI.eraseFromParent();
2683   return true;
2684 }
2685 
2686 // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
2687 bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
2688   MachineInstr &MI) const {
2689   Register DstReg = MI.getOperand(0).getReg();
2690   Register VecReg = MI.getOperand(1).getReg();
2691   Register ValReg = MI.getOperand(2).getReg();
2692   Register IdxReg = MI.getOperand(3).getReg();
2693 
2694   LLT VecTy = MRI->getType(DstReg);
2695   LLT ValTy = MRI->getType(ValReg);
2696   unsigned VecSize = VecTy.getSizeInBits();
2697   unsigned ValSize = ValTy.getSizeInBits();
2698 
2699   const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
2700   const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
2701   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2702 
2703   assert(VecTy.getElementType() == ValTy);
2704 
2705   // The index must be scalar. If it wasn't, RegBankSelect should have moved
2706   // this into a waterfall loop.
2707   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2708     return false;
2709 
2710   const TargetRegisterClass *VecRC = TRI.getRegClassForTypeOnBank(VecTy, *VecRB,
2711                                                                   *MRI);
2712   const TargetRegisterClass *ValRC = TRI.getRegClassForTypeOnBank(ValTy, *ValRB,
2713                                                                   *MRI);
2714 
2715   if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
2716       !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
2717       !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
2718       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2719     return false;
2720 
2721   if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
2722     return false;
2723 
2724   unsigned SubReg;
2725   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg,
2726                                                      ValSize / 8);
2727 
2728   const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
2729                          STI.useVGPRIndexMode();
2730 
2731   MachineBasicBlock *BB = MI.getParent();
2732   const DebugLoc &DL = MI.getDebugLoc();
2733 
2734   if (!IndexMode) {
2735     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2736       .addReg(IdxReg);
2737 
2738     const MCInstrDesc &RegWriteOp = TII.getIndirectRegWriteMovRelPseudo(
2739         VecSize, ValSize, VecRB->getID() == AMDGPU::SGPRRegBankID);
2740     BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
2741         .addReg(VecReg)
2742         .addReg(ValReg)
2743         .addImm(SubReg);
2744     MI.eraseFromParent();
2745     return true;
2746   }
2747 
2748   const MCInstrDesc &GPRIDXDesc =
2749       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false);
2750   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2751       .addReg(VecReg)
2752       .addReg(ValReg)
2753       .addReg(IdxReg)
2754       .addImm(SubReg);
2755 
2756   MI.eraseFromParent();
2757   return true;
2758 }
2759 
2760 static bool isZeroOrUndef(int X) {
2761   return X == 0 || X == -1;
2762 }
2763 
2764 static bool isOneOrUndef(int X) {
2765   return X == 1 || X == -1;
2766 }
2767 
2768 static bool isZeroOrOneOrUndef(int X) {
2769   return X == 0 || X == 1 || X == -1;
2770 }
2771 
2772 // Normalize a VOP3P shuffle mask to refer to the low/high half of a single
2773 // 32-bit register.
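// For example, the mask <2, 3> reads only Src1 and is rewritten to <0, 1>,
// with Src1 returned as the register to read.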
2774 static Register normalizeVOP3PMask(int NewMask[2], Register Src0, Register Src1,
2775                                    ArrayRef<int> Mask) {
2776   NewMask[0] = Mask[0];
2777   NewMask[1] = Mask[1];
2778   if (isZeroOrOneOrUndef(Mask[0]) && isZeroOrOneOrUndef(Mask[1]))
2779     return Src0;
2780 
2781   assert(NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1);
2782   assert(NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1);
2783 
2784   // Shift the mask inputs down to 0/1.
2785   NewMask[0] = NewMask[0] == -1 ? -1 : NewMask[0] - 2;
2786   NewMask[1] = NewMask[1] == -1 ? -1 : NewMask[1] - 2;
2787   return Src1;
2788 }
2789 
2790 // This is only legal with VOP3P instructions as an aid to op_sel matching.
2791 bool AMDGPUInstructionSelector::selectG_SHUFFLE_VECTOR(
2792   MachineInstr &MI) const {
2793   Register DstReg = MI.getOperand(0).getReg();
2794   Register Src0Reg = MI.getOperand(1).getReg();
2795   Register Src1Reg = MI.getOperand(2).getReg();
2796   ArrayRef<int> ShufMask = MI.getOperand(3).getShuffleMask();
2797 
2798   const LLT V2S16 = LLT::vector(2, 16);
2799   if (MRI->getType(DstReg) != V2S16 || MRI->getType(Src0Reg) != V2S16)
2800     return false;
2801 
2802   if (!AMDGPU::isLegalVOP3PShuffleMask(ShufMask))
2803     return false;
2804 
2805   assert(ShufMask.size() == 2);
2806   assert(STI.hasSDWA() && "no target has VOP3P but not SDWA");
2807 
2808   MachineBasicBlock *MBB = MI.getParent();
2809   const DebugLoc &DL = MI.getDebugLoc();
2810 
2811   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2812   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
2813   const TargetRegisterClass &RC = IsVALU ?
2814     AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2815 
2816   // Handle the degenerate case which should have folded out.
2817   if (ShufMask[0] == -1 && ShufMask[1] == -1) {
2818     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::IMPLICIT_DEF), DstReg);
2819 
2820     MI.eraseFromParent();
2821     return RBI.constrainGenericRegister(DstReg, RC, *MRI);
2822   }
2823 
2824   // A legal VOP3P mask only reads one of the sources.
2825   int Mask[2];
2826   Register SrcVec = normalizeVOP3PMask(Mask, Src0Reg, Src1Reg, ShufMask);
2827 
2828   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI) ||
2829       !RBI.constrainGenericRegister(SrcVec, RC, *MRI))
2830     return false;
2831 
2832   // TODO: This also should have been folded out
2833   if (isZeroOrUndef(Mask[0]) && isOneOrUndef(Mask[1])) {
2834     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::COPY), DstReg)
2835       .addReg(SrcVec);
2836 
2837     MI.eraseFromParent();
2838     return true;
2839   }
2840 
2841   if (Mask[0] == 1 && Mask[1] == -1) {
2842     if (IsVALU) {
2843       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
2844         .addImm(16)
2845         .addReg(SrcVec);
2846     } else {
2847       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
2848         .addReg(SrcVec)
2849         .addImm(16);
2850     }
2851   } else if (Mask[0] == -1 && Mask[1] == 0) {
2852     if (IsVALU) {
2853       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), DstReg)
2854         .addImm(16)
2855         .addReg(SrcVec);
2856     } else {
2857       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHL_B32), DstReg)
2858         .addReg(SrcVec)
2859         .addImm(16);
2860     }
2861   } else if (Mask[0] == 0 && Mask[1] == 0) {
2862     if (IsVALU) {
2863       // Write low half of the register into the high half.
2864       MachineInstr *MovSDWA =
2865         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2866         .addImm(0)                             // $src0_modifiers
2867         .addReg(SrcVec)                        // $src0
2868         .addImm(0)                             // $clamp
2869         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
2870         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2871         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
2872         .addReg(SrcVec, RegState::Implicit);
      MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
    } else {
      BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
        .addReg(SrcVec)
        .addReg(SrcVec);
    }
  } else if (Mask[0] == 1 && Mask[1] == 1) {
    if (IsVALU) {
      // Write high half of the register into the low half.
      MachineInstr *MovSDWA =
        BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
        .addImm(0)                             // $src0_modifiers
        .addReg(SrcVec)                        // $src0
        .addImm(0)                             // $clamp
        .addImm(AMDGPU::SDWA::WORD_0)          // $dst_sel
        .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
        .addImm(AMDGPU::SDWA::WORD_1)          // $src0_sel
        .addReg(SrcVec, RegState::Implicit);
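      // As above, tie the implicit use of the preserved old value to the def.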
      MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
    } else {
      BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HH_B32_B16), DstReg)
        .addReg(SrcVec)
        .addReg(SrcVec);
    }
  } else if (Mask[0] == 1 && Mask[1] == 0) {
    if (IsVALU) {
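      // v_alignbit_b32 shifts the 64-bit concatenation (src0:src1) right and
      // keeps the low 32 bits; with both sources equal, a shift of 16 swaps
      // the two 16-bit halves.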
      BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_ALIGNBIT_B32_e64), DstReg)
        .addReg(SrcVec)
        .addReg(SrcVec)
        .addImm(16);
    } else {
      Register TmpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
      BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), TmpReg)
        .addReg(SrcVec)
        .addImm(16);
      BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
        .addReg(TmpReg)
        .addReg(SrcVec);
    }
  } else
    llvm_unreachable("all shuffle masks should be handled");

  MI.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectAMDGPU_BUFFER_ATOMIC_FADD(
  MachineInstr &MI) const {
  if (STI.hasGFX90AInsts())
    return selectImpl(MI, *CoverageInfo);

  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
    Function &F = MBB->getParent()->getFunction();
    DiagnosticInfoUnsupported
      NoFpRet(F, "return versions of fp atomics not supported",
              MI.getDebugLoc(), DS_Error);
    F.getContext().diagnose(NoFpRet);
    return false;
  }

  // FIXME: This is only needed because tablegen requires the number of dst
  // operands in the match and replace patterns to be the same. Otherwise the
  // patterns could be exported from the SDag path.
  MachineOperand &VDataIn = MI.getOperand(1);
  MachineOperand &VIndex = MI.getOperand(3);
  MachineOperand &VOffset = MI.getOperand(4);
  MachineOperand &SOffset = MI.getOperand(5);
  int16_t Offset = MI.getOperand(6).getImm();

  bool HasVOffset = !isOperandImmEqual(VOffset, 0, *MRI);
  bool HasVIndex = !isOperandImmEqual(VIndex, 0, *MRI);

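  // MUBUF addressing variants: OFFSET uses neither VGPR operand, OFFEN takes a
  // VGPR offset, IDXEN takes a VGPR index, and BOTHEN takes both.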
  unsigned Opcode;
  if (HasVOffset) {
    Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN
                       : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN;
  } else {
    Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN
                       : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET;
  }

  if (MRI->getType(VDataIn.getReg()).isVector()) {
    switch (Opcode) {
    case AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN:
      Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN;
      break;
    case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN:
      Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFEN;
      break;
    case AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN:
      Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_IDXEN;
      break;
    case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET:
      Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFSET;
      break;
    }
  }

  auto I = BuildMI(*MBB, MI, DL, TII.get(Opcode));
  I.add(VDataIn);

  if (Opcode == AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN ||
      Opcode == AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN) {
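    // BOTHEN expects the index and offset packed into a 64-bit VGPR pair:
    // vindex in sub0 and voffset in sub1.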
    Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class());
    BuildMI(*MBB, &*I, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
      .addReg(VIndex.getReg())
      .addImm(AMDGPU::sub0)
      .addReg(VOffset.getReg())
      .addImm(AMDGPU::sub1);

    I.addReg(IdxReg);
  } else if (HasVIndex) {
    I.add(VIndex);
  } else if (HasVOffset) {
    I.add(VOffset);
  }

  I.add(MI.getOperand(2)); // rsrc
  I.add(SOffset);
  I.addImm(Offset);
  I.addImm(MI.getOperand(7).getImm()); // cpol
  I.cloneMemRefs(MI);

  MI.eraseFromParent();

  return true;
}

bool AMDGPUInstructionSelector::selectGlobalAtomicFadd(
  MachineInstr &MI, MachineOperand &AddrOp, MachineOperand &DataOp) const {

  if (STI.hasGFX90AInsts()) {
    // gfx90a adds return versions of the global atomic fadd instructions so no
    // special handling is required.
    return selectImpl(MI, *CoverageInfo);
  }

  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
    Function &F = MBB->getParent()->getFunction();
    DiagnosticInfoUnsupported
      NoFpRet(F, "return versions of fp atomics not supported",
              MI.getDebugLoc(), DS_Error);
    F.getContext().diagnose(NoFpRet);
    return false;
  }

  // FIXME: This is only needed because tablegen requires the number of dst
  // operands in the match and replace patterns to be the same. Otherwise the
  // patterns could be exported from the SDag path.
  auto Addr = selectFlatOffsetImpl(AddrOp, SIInstrFlags::FlatGlobal);

  Register Data = DataOp.getReg();
  const unsigned Opc = MRI->getType(Data).isVector() ?
    AMDGPU::GLOBAL_ATOMIC_PK_ADD_F16 : AMDGPU::GLOBAL_ATOMIC_ADD_F32;
  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
    .addReg(Addr.first)
    .addReg(Data)
    .addImm(Addr.second)
    .addImm(0) // cpol
    .cloneMemRefs(MI);

  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const {
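  // The BVH intersect-ray pseudo carries the real opcode as an immediate in
  // operand 1; install it as the instruction's descriptor and drop the
  // operand.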
  MI.setDesc(TII.get(MI.getOperand(1).getImm()));
  MI.RemoveOperand(1);
  MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
  return true;
}

bool AMDGPUInstructionSelector::select(MachineInstr &I) {
  if (I.isPHI())
    return selectPHI(I);

  if (!I.isPreISelOpcode()) {
    if (I.isCopy())
      return selectCOPY(I);
    return true;
  }

  switch (I.getOpcode()) {
  case TargetOpcode::G_AND:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_XOR:
    if (selectImpl(I, *CoverageInfo))
      return true;
    return selectG_AND_OR_XOR(I);
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_SUB:
    if (selectImpl(I, *CoverageInfo))
      return true;
    return selectG_ADD_SUB(I);
  case TargetOpcode::G_UADDO:
  case TargetOpcode::G_USUBO:
  case TargetOpcode::G_UADDE:
  case TargetOpcode::G_USUBE:
    return selectG_UADDO_USUBO_UADDE_USUBE(I);
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_BITCAST:
  case TargetOpcode::G_PTRTOINT:
    return selectCOPY(I);
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
    return selectG_CONSTANT(I);
  case TargetOpcode::G_FNEG:
    if (selectImpl(I, *CoverageInfo))
      return true;
    return selectG_FNEG(I);
  case TargetOpcode::G_FABS:
    if (selectImpl(I, *CoverageInfo))
      return true;
    return selectG_FABS(I);
  case TargetOpcode::G_EXTRACT:
    return selectG_EXTRACT(I);
  case TargetOpcode::G_MERGE_VALUES:
  case TargetOpcode::G_BUILD_VECTOR:
  case TargetOpcode::G_CONCAT_VECTORS:
    return selectG_MERGE_VALUES(I);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectG_UNMERGE_VALUES(I);
  case TargetOpcode::G_BUILD_VECTOR_TRUNC:
    return selectG_BUILD_VECTOR_TRUNC(I);
  case TargetOpcode::G_PTR_ADD:
    return selectG_PTR_ADD(I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectG_IMPLICIT_DEF(I);
  case TargetOpcode::G_FREEZE:
    return selectCOPY(I);
  case TargetOpcode::G_INSERT:
    return selectG_INSERT(I);
  case TargetOpcode::G_INTRINSIC:
    return selectG_INTRINSIC(I);
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
  case TargetOpcode::G_ICMP:
    if (selectG_ICMP(I))
      return true;
    return selectImpl(I, *CoverageInfo);
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_ATOMIC_CMPXCHG:
  case TargetOpcode::G_ATOMICRMW_XCHG:
  case TargetOpcode::G_ATOMICRMW_ADD:
  case TargetOpcode::G_ATOMICRMW_SUB:
  case TargetOpcode::G_ATOMICRMW_AND:
  case TargetOpcode::G_ATOMICRMW_OR:
  case TargetOpcode::G_ATOMICRMW_XOR:
  case TargetOpcode::G_ATOMICRMW_MIN:
  case TargetOpcode::G_ATOMICRMW_MAX:
  case TargetOpcode::G_ATOMICRMW_UMIN:
  case TargetOpcode::G_ATOMICRMW_UMAX:
  case TargetOpcode::G_ATOMICRMW_FADD:
  case AMDGPU::G_AMDGPU_ATOMIC_INC:
  case AMDGPU::G_AMDGPU_ATOMIC_DEC:
  case AMDGPU::G_AMDGPU_ATOMIC_FMIN:
  case AMDGPU::G_AMDGPU_ATOMIC_FMAX:
    return selectG_LOAD_STORE_ATOMICRMW(I);
  case AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG:
    return selectG_AMDGPU_ATOMIC_CMPXCHG(I);
  case TargetOpcode::G_SELECT:
    return selectG_SELECT(I);
  case TargetOpcode::G_TRUNC:
    return selectG_TRUNC(I);
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_SEXT_INREG:
    if (selectImpl(I, *CoverageInfo))
      return true;
    return selectG_SZA_EXT(I);
  case TargetOpcode::G_BRCOND:
    return selectG_BRCOND(I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectG_GLOBAL_VALUE(I);
  case TargetOpcode::G_PTRMASK:
    return selectG_PTRMASK(I);
  case TargetOpcode::G_EXTRACT_VECTOR_ELT:
    return selectG_EXTRACT_VECTOR_ELT(I);
  case TargetOpcode::G_INSERT_VECTOR_ELT:
    return selectG_INSERT_VECTOR_ELT(I);
  case TargetOpcode::G_SHUFFLE_VECTOR:
    return selectG_SHUFFLE_VECTOR(I);
  case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
  case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE: {
    const AMDGPU::ImageDimIntrinsicInfo *Intr
      = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID());
    assert(Intr && "not an image intrinsic with image pseudo");
    return selectImageIntrinsic(I, Intr);
  }
  case AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY:
    return selectBVHIntrinsic(I);
  case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD:
    return selectAMDGPU_BUFFER_ATOMIC_FADD(I);
  default:
    return selectImpl(I, *CoverageInfo);
  }
  return false;
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}

std::pair<Register, unsigned>
AMDGPUInstructionSelector::selectVOP3ModsImpl(MachineOperand &Root,
                                              bool AllowAbs) const {
  Register Src = Root.getReg();
  Register OrigSrc = Src;
  unsigned Mods = 0;
  MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);

  if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
    Src = MI->getOperand(1).getReg();
    Mods |= SISrcMods::NEG;
    MI = getDefIgnoringCopies(Src, *MRI);
  }

  if (AllowAbs && MI && MI->getOpcode() == AMDGPU::G_FABS) {
    Src = MI->getOperand(1).getReg();
    Mods |= SISrcMods::ABS;
  }

  if (Mods != 0 &&
      RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) {
    MachineInstr *UseMI = Root.getParent();

    // If we looked through copies to find source modifiers on an SGPR operand,
    // we now have an SGPR register source. To avoid potentially violating the
    // constant bus restriction, we need to insert a copy to a VGPR.
    Register VGPRSrc = MRI->cloneVirtualRegister(OrigSrc);
    BuildMI(*UseMI->getParent(), UseMI, UseMI->getDebugLoc(),
            TII.get(AMDGPU::COPY), VGPRSrc)
      .addReg(Src);
    Src = VGPRSrc;
  }

  return std::make_pair(Src, Mods);
}

/// This will select either an SGPR or VGPR operand and will save us from
/// having to write an extra tablegen pattern.
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3ModsImpl(Root);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3BMods0(MachineOperand &Root) const {
  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3ModsImpl(Root);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3BMods(MachineOperand &Root) const {
  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
  Register Reg = Root.getReg();
  const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI);
  if (Def && (Def->getOpcode() == AMDGPU::G_FNEG ||
              Def->getOpcode() == AMDGPU::G_FABS))
    return {};
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
  }};
}

std::pair<Register, unsigned>
AMDGPUInstructionSelector::selectVOP3PModsImpl(
  Register Src, const MachineRegisterInfo &MRI) const {
  unsigned Mods = 0;
  MachineInstr *MI = MRI.getVRegDef(Src);

  if (MI && MI->getOpcode() == AMDGPU::G_FNEG &&
      // It's possible to see an f32 fneg here, but unlikely.
      // TODO: Treat f32 fneg as only high bit.
      MRI.getType(Src) == LLT::vector(2, 16)) {
    Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
    Src = MI->getOperand(1).getReg();
    MI = MRI.getVRegDef(Src);
  }

  // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.

  // Packed instructions do not have abs modifiers.
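  // OP_SEL_1 (op_sel_hi) is the neutral setting for a packed operand: the high
  // half of the operation reads the high half of the source.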
  Mods |= SISrcMods::OP_SEL_1;

  return std::make_pair(Src, Mods);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
  MachineRegisterInfo &MRI
    = Root.getParent()->getParent()->getParent()->getRegInfo();

  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
  if (!isKnownNeverNaN(Src, *MRI))
    return None;

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
  // FIXME: Handle op_sel
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);

  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];
  Optional<int64_t> EncodedImm =
      AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm, false);
  if (!EncodedImm)
    return None;

  unsigned PtrReg = GEPInfo.SgprParts[0];
  return {{
    [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
    [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);

  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];
  Register PtrReg = GEPInfo.SgprParts[0];
  Optional<int64_t> EncodedImm =
      AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
  if (!EncodedImm)
    return None;

  return {{
    [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
    [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
  MachineInstr *MI = Root.getParent();
  MachineBasicBlock *MBB = MI->getParent();

  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*MI, *MRI, AddrInfo);

  // FIXME: We should shrink the GEP if the offset is known to be <= 32 bits;
  // then we can select all ptr + 32-bit offsets, not just immediate offsets.
  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];
  // SGPR offset is unsigned.
  if (!GEPInfo.Imm || GEPInfo.Imm < 0 || !isUInt<32>(GEPInfo.Imm))
    return None;

  // If we make it this far we have a load with a 32-bit immediate offset.
  // It is OK to select this using an SGPR offset, because we have already
  // failed trying to select this load into one of the _IMM variants since
  // the _IMM Patterns are considered before the _SGPR patterns.
  Register PtrReg = GEPInfo.SgprParts[0];
  Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
          .addImm(GEPInfo.Imm);
  return {{
    [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
    [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
  }};
}

std::pair<Register, int>
AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root,
                                                uint64_t FlatVariant) const {
  MachineInstr *MI = Root.getParent();

  auto Default = std::make_pair(Root.getReg(), 0);

  if (!STI.hasFlatInstOffsets())
    return Default;

  Register PtrBase;
  int64_t ConstOffset;
  std::tie(PtrBase, ConstOffset) =
      getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
  if (ConstOffset == 0)
    return Default;

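  // Whether the folded offset is legal depends on the address space of the
  // access, which we take from the instruction's memory operand.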
  unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
  if (!TII.isLegalFLATOffset(ConstOffset, AddrSpace, FlatVariant))
    return Default;

  return std::make_pair(PtrBase, ConstOffset);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
  auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FLAT);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
    }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectGlobalOffset(MachineOperand &Root) const {
  auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatGlobal);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectScratchOffset(MachineOperand &Root) const {
  auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatScratch);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
    }};
}

/// Match a zero extend from a 32-bit value to 64 bits.
static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) {
  Register ZExtSrc;
  if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))
    return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register();

  // Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0)
  const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
  if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES)
    return Register();

  if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt()))
    return Def->getOperand(1).getReg();

  return Register();
}

// Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
  Register Addr = Root.getReg();
  Register PtrBase;
  int64_t ConstOffset;
  int64_t ImmOffset = 0;

  // Match the immediate offset first, which canonically is moved as low as
  // possible.
  std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);

  if (ConstOffset != 0) {
    if (TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS,
                              SIInstrFlags::FlatGlobal)) {
      Addr = PtrBase;
      ImmOffset = ConstOffset;
    } else if (ConstOffset > 0) {
      auto PtrBaseDef = getDefSrcRegIgnoringCopies(PtrBase, *MRI);
      if (!PtrBaseDef)
        return None;

      if (isSGPR(PtrBaseDef->Reg)) {
        // Offset is too large.
        //
        // saddr + large_offset -> saddr + (voffset = large_offset & ~MaxOffset)
        //                         + (large_offset & MaxOffset);
        int64_t SplitImmOffset, RemainderOffset;
        std::tie(SplitImmOffset, RemainderOffset) = TII.splitFlatOffset(
            ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, SIInstrFlags::FlatGlobal);

        if (isUInt<32>(RemainderOffset)) {
          MachineInstr *MI = Root.getParent();
          MachineBasicBlock *MBB = MI->getParent();
          Register HighBits
            = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);

          BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
                  HighBits)
            .addImm(RemainderOffset);

          return {{
            [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrBase); },  // saddr
            [=](MachineInstrBuilder &MIB) { MIB.addReg(HighBits); }, // voffset
            [=](MachineInstrBuilder &MIB) { MIB.addImm(SplitImmOffset); },
          }};
        }
      }
    }
  }

  auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
  if (!AddrDef)
    return None;

  // Match the variable offset.
  if (AddrDef->MI->getOpcode() != AMDGPU::G_PTR_ADD) {
    // FIXME: We should probably have folded COPY (G_IMPLICIT_DEF) earlier, and
    // drop this.
    if (AddrDef->MI->getOpcode() == AMDGPU::G_IMPLICIT_DEF ||
        AddrDef->MI->getOpcode() == AMDGPU::G_CONSTANT)
      return None;

    // It's cheaper to materialize a single 32-bit zero for vaddr than the two
    // moves required to copy a 64-bit SGPR to VGPR.
    const Register SAddr = AddrDef->Reg;
    if (!isSGPR(SAddr))
      return None;

    MachineInstr *MI = Root.getParent();
    MachineBasicBlock *MBB = MI->getParent();
    Register VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);

    BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
            VOffset)
      .addImm(0);

    return {{
        [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); },    // saddr
        [=](MachineInstrBuilder &MIB) { MIB.addReg(VOffset); },  // voffset
        [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
    }};
  }

  // Look through the SGPR->VGPR copy.
  Register SAddr =
    getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
  if (!SAddr || !isSGPR(SAddr))
    return None;

  Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();

  // It's possible voffset is an SGPR here, but the copy to VGPR will be
  // inserted later.
  Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset);
  if (!VOffset)
    return None;

  return {{[=](MachineInstrBuilder &MIB) { // saddr
             MIB.addReg(SAddr);
           },
           [=](MachineInstrBuilder &MIB) { // voffset
             MIB.addReg(VOffset);
           },
           [=](MachineInstrBuilder &MIB) { // offset
             MIB.addImm(ImmOffset);
           }}};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectScratchSAddr(MachineOperand &Root) const {
  Register Addr = Root.getReg();
  Register PtrBase;
  int64_t ConstOffset;
  int64_t ImmOffset = 0;

  // Match the immediate offset first, which canonically is moved as low as
  // possible.
  std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);

  if (ConstOffset != 0 &&
      TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS,
                            SIInstrFlags::FlatScratch)) {
    Addr = PtrBase;
    ImmOffset = ConstOffset;
  }

  auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
  if (!AddrDef)
    return None;

  if (AddrDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
    int FI = AddrDef->MI->getOperand(1).getIndex();
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
        [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); }  // offset
    }};
  }

  Register SAddr = AddrDef->Reg;

  if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
    Register LHS = AddrDef->MI->getOperand(1).getReg();
    Register RHS = AddrDef->MI->getOperand(2).getReg();
    auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
    auto RHSDef = getDefSrcRegIgnoringCopies(RHS, *MRI);

    if (LHSDef && RHSDef &&
        LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX &&
        isSGPR(RHSDef->Reg)) {
      int FI = LHSDef->MI->getOperand(1).getIndex();
      MachineInstr &I = *Root.getParent();
      MachineBasicBlock *BB = I.getParent();
      const DebugLoc &DL = I.getDebugLoc();
      SAddr = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);

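      // Fold (frame_index + sgpr) into one scalar add so the whole address
      // fits in the instruction's single saddr operand.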
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), SAddr)
        .addFrameIndex(FI)
        .addReg(RHSDef->Reg);
    }
  }

  if (!isSGPR(SAddr))
    return None;

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); },    // saddr
      [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
  }};
}

static bool isStackPtrRelative(const MachinePointerInfo &PtrInfo) {
  auto PSV = PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
  return PSV && PSV->isStack();
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
  MachineInstr *MI = Root.getParent();
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MBB->getParent();
  const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();

  int64_t Offset = 0;
  if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) &&
      Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) {
    Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);

    // TODO: Should this be inside the render function? The iterator seems to
    // move.
    BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
            HighBits)
      .addImm(Offset & ~4095);

    return {{[=](MachineInstrBuilder &MIB) { // rsrc
               MIB.addReg(Info->getScratchRSrcReg());
             },
             [=](MachineInstrBuilder &MIB) { // vaddr
               MIB.addReg(HighBits);
             },
             [=](MachineInstrBuilder &MIB) { // soffset
               // Use constant zero for soffset and rely on eliminateFrameIndex
               // to choose the appropriate frame register if need be.
               MIB.addImm(0);
             },
             [=](MachineInstrBuilder &MIB) { // offset
               MIB.addImm(Offset & 4095);
             }}};
  }

  assert(Offset == 0 || Offset == -1);

  // Try to fold a frame index directly into the MUBUF vaddr field, and any
  // offsets.
  Optional<int> FI;
  Register VAddr = Root.getReg();
  if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
    Register PtrBase;
    int64_t ConstOffset;
    std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(VAddr, *MRI);
    if (ConstOffset != 0) {
      if (SIInstrInfo::isLegalMUBUFImmOffset(ConstOffset) &&
          (!STI.privateMemoryResourceIsRangeChecked() ||
           KnownBits->signBitIsZero(PtrBase))) {
        const MachineInstr *PtrBaseDef = MRI->getVRegDef(PtrBase);
        if (PtrBaseDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
          FI = PtrBaseDef->getOperand(1).getIndex();
        else
          VAddr = PtrBase;
        Offset = ConstOffset;
      }
    } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
      FI = RootDef->getOperand(1).getIndex();
    }
  }

  return {{[=](MachineInstrBuilder &MIB) { // rsrc
             MIB.addReg(Info->getScratchRSrcReg());
           },
           [=](MachineInstrBuilder &MIB) { // vaddr
             if (FI.hasValue())
               MIB.addFrameIndex(FI.getValue());
             else
               MIB.addReg(VAddr);
           },
           [=](MachineInstrBuilder &MIB) { // soffset
             // Use constant zero for soffset and rely on eliminateFrameIndex
             // to choose the appropriate frame register if need be.
             MIB.addImm(0);
           },
           [=](MachineInstrBuilder &MIB) { // offset
             MIB.addImm(Offset);
           }}};
}

bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
                                                int64_t Offset) const {
  if (!isUInt<16>(Offset))
    return false;

  if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
    return true;

  // On Southern Islands, instructions with a negative base value and an offset
  // don't seem to work.
  return KnownBits->signBitIsZero(Base);
}

bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0,
                                                 int64_t Offset1,
                                                 unsigned Size) const {
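  // The two 8-bit offsets of ds_read2/ds_write2 are encoded in units of the
  // element size, so they must be aligned multiples of it.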
  if (Offset0 % Size != 0 || Offset1 % Size != 0)
    return false;
  if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
    return false;

  if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
    return true;

  // On Southern Islands, instructions with a negative base value and an offset
  // don't seem to work.
  return KnownBits->signBitIsZero(Base);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFScratchOffset(
    MachineOperand &Root) const {
  MachineInstr *MI = Root.getParent();
  MachineBasicBlock *MBB = MI->getParent();

  int64_t Offset = 0;
  if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
      !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
    return {};

  const MachineFunction *MF = MBB->getParent();
  const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
  const MachineMemOperand *MMO = *MI->memoperands_begin();
  const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();

  return {{
      [=](MachineInstrBuilder &MIB) { // rsrc
        MIB.addReg(Info->getScratchRSrcReg());
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (isStackPtrRelative(PtrInfo))
          MIB.addReg(Info->getStackPtrOffsetReg());
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
  }};
}

std::pair<Register, unsigned>
AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
  const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
  if (!RootDef)
    return std::make_pair(Root.getReg(), 0);

  int64_t ConstAddr = 0;

  Register PtrBase;
  int64_t Offset;
  std::tie(PtrBase, Offset) =
    getPtrBaseWithConstantOffset(Root.getReg(), *MRI);

  if (Offset) {
    if (isDSOffsetLegal(PtrBase, Offset)) {
      // (add n0, c0)
      return std::make_pair(PtrBase, Offset);
    }
  } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
    // TODO
  } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
    // TODO
  }

  return std::make_pair(Root.getReg(), 0);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
  Register Reg;
  unsigned Offset;
  std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
    }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
  return selectDSReadWrite2(Root, 4);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const {
  return selectDSReadWrite2(Root, 8);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root,
                                              unsigned Size) const {
  Register Reg;
  unsigned Offset;
  std::tie(Reg, Offset) = selectDSReadWrite2Impl(Root, Size);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); }
    }};
}

std::pair<Register, unsigned>
AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root,
                                                  unsigned Size) const {
  const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
  if (!RootDef)
    return std::make_pair(Root.getReg(), 0);

  int64_t ConstAddr = 0;

  Register PtrBase;
  int64_t Offset;
  std::tie(PtrBase, Offset) =
    getPtrBaseWithConstantOffset(Root.getReg(), *MRI);

  if (Offset) {
    int64_t OffsetValue0 = Offset;
    int64_t OffsetValue1 = Offset + Size;
    if (isDSOffset2Legal(PtrBase, OffsetValue0, OffsetValue1, Size)) {
      // (add n0, c0)
      return std::make_pair(PtrBase, OffsetValue0 / Size);
    }
  } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
    // TODO
  } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
    // TODO
  }

  return std::make_pair(Root.getReg(), 0);
}

/// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
/// the base value with the constant offset. There may be intervening copies
/// between \p Root and the identified constant. Returns \p Root, 0 if this does
/// not match the pattern.
std::pair<Register, int64_t>
AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
  Register Root, const MachineRegisterInfo &MRI) const {
  MachineInstr *RootI = getDefIgnoringCopies(Root, MRI);
  if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
    return {Root, 0};

  MachineOperand &RHS = RootI->getOperand(2);
  Optional<ValueAndVReg> MaybeOffset
    = getConstantVRegValWithLookThrough(RHS.getReg(), MRI, true);
  if (!MaybeOffset)
    return {Root, 0};
  return {RootI->getOperand(1).getReg(), MaybeOffset->Value.getSExtValue()};
}

static void addZeroImm(MachineInstrBuilder &MIB) {
  MIB.addImm(0);
}

/// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p
/// BasePtr is not valid, a null base pointer will be used.
static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                          uint32_t FormatLo, uint32_t FormatHi,
                          Register BasePtr) {
  Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);

  B.buildInstr(AMDGPU::S_MOV_B32)
    .addDef(RSrc2)
    .addImm(FormatLo);
  B.buildInstr(AMDGPU::S_MOV_B32)
    .addDef(RSrc3)
    .addImm(FormatHi);

  // Build the half of the subregister with the constants before building the
  // full 128-bit register. If we are building multiple resource descriptors,
  // this will allow CSEing of the 2-component register.
  B.buildInstr(AMDGPU::REG_SEQUENCE)
    .addDef(RSrcHi)
    .addReg(RSrc2)
    .addImm(AMDGPU::sub0)
    .addReg(RSrc3)
    .addImm(AMDGPU::sub1);

  Register RSrcLo = BasePtr;
  if (!BasePtr) {
    RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
    B.buildInstr(AMDGPU::S_MOV_B64)
      .addDef(RSrcLo)
      .addImm(0);
  }

  B.buildInstr(AMDGPU::REG_SEQUENCE)
    .addDef(RSrc)
    .addReg(RSrcLo)
    .addImm(AMDGPU::sub0_sub1)
    .addReg(RSrcHi)
    .addImm(AMDGPU::sub2_sub3);

  return RSrc;
}

static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                                const SIInstrInfo &TII, Register BasePtr) {
  uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();

  // FIXME: Why are half the "default" bits ignored based on the addressing
  // mode?
  return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
}

static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                               const SIInstrInfo &TII, Register BasePtr) {
  uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();

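  // FormatLo lands in dword 2 of the descriptor (num_records); using -1 here
  // makes the offset-mode buffer effectively unbounded, whereas the addr64
  // variant above leaves it 0.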
  // FIXME: Why are half the "default" bits ignored based on the addressing
  // mode?
  return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
}

AMDGPUInstructionSelector::MUBUFAddressData
AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
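  // Field naming is inherited from the SelectionDAG MUBUF matcher: N0 is the
  // full address; when N0 is itself a ptr_add, N2 and N3 are its operands.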
  MUBUFAddressData Data;
  Data.N0 = Src;

  Register PtrBase;
  int64_t Offset;

  std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
  if (isUInt<32>(Offset)) {
    Data.N0 = PtrBase;
    Data.Offset = Offset;
  }

  if (MachineInstr *InputAdd
      = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
    Data.N2 = InputAdd->getOperand(1).getReg();
    Data.N3 = InputAdd->getOperand(2).getReg();

    // FIXME: Need to fix extra SGPR->VGPR copies inserted
    // FIXME: We don't know that this was defined by operand 0
    //
    // TODO: Remove this when we have copy folding optimizations after
    // RegBankSelect.
    Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
    Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
  }

  return Data;
}

/// Return whether the addr64 mubuf mode should be used for the given address.
bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
  // (ptr_add N2, N3) -> addr64, or
  // (ptr_add (ptr_add N2, N3), C1) -> addr64
  if (Addr.N2)
    return true;

  const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
  return N0Bank->getID() == AMDGPU::VGPRRegBankID;
}

/// Split an immediate offset \p ImmOffset depending on whether it fits in the
/// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
/// component.
void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
  MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
  if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
    return;

  // Illegal offset, store it in soffset.
  SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  B.buildInstr(AMDGPU::S_MOV_B32)
    .addDef(SOffset)
    .addImm(ImmOffset);
  ImmOffset = 0;
}

bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
  MachineOperand &Root, Register &VAddr, Register &RSrcReg,
  Register &SOffset, int64_t &Offset) const {
  // FIXME: Predicates should stop this from reaching here.
  // addr64 bit was removed for volcanic islands.
  if (!STI.hasAddr64() || STI.useFlatForGlobal())
    return false;

  MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
  if (!shouldUseAddr64(AddrData))
    return false;

  Register N0 = AddrData.N0;
  Register N2 = AddrData.N2;
  Register N3 = AddrData.N3;
  Offset = AddrData.Offset;

  // Base pointer for the SRD.
  Register SRDPtr;

  if (N2) {
    if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
      assert(N3);
      if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
        // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
        // addr64, and construct the default resource from a 0 address.
        VAddr = N0;
      } else {
        SRDPtr = N3;
        VAddr = N2;
      }
    } else {
      // N2 is not divergent.
      SRDPtr = N2;
      VAddr = N3;
    }
  } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
    // Use the default null pointer in the resource.
    VAddr = N0;
  } else {
    // N0 -> offset, or
    // (N0 + C1) -> offset
    SRDPtr = N0;
  }

  MachineIRBuilder B(*Root.getParent());
  RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
  splitIllegalMUBUFOffset(B, SOffset, Offset);
  return true;
}

bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
  MachineOperand &Root, Register &RSrcReg, Register &SOffset,
  int64_t &Offset) const {

  // FIXME: Pattern should not reach here.
  if (STI.useFlatForGlobal())
    return false;

  MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
  if (shouldUseAddr64(AddrData))
    return false;

  // N0 -> offset, or
  // (N0 + C1) -> offset
  Register SRDPtr = AddrData.N0;
  Offset = AddrData.Offset;

  // TODO: Look through extensions for 32-bit soffset.
  MachineIRBuilder B(*Root.getParent());

  RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
  splitIllegalMUBUFOffset(B, SOffset, Offset);
  return true;
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
  Register VAddr;
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
    return {};

  // FIXME: Use defaulted operands for trailing 0s and remove from the complex
  // pattern.
  return {{
      [=](MachineInstrBuilder &MIB) {  // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // vaddr
        MIB.addReg(VAddr);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { // offset
        MIB.addImm(Offset);
      },
      addZeroImm, //  cpol
      addZeroImm, //  tfe
      addZeroImm  //  swz
    }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
    return {};

  return {{
      [=](MachineInstrBuilder &MIB) {  // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
      addZeroImm, //  cpol
      addZeroImm, //  tfe
      addZeroImm, //  swz
    }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const {
  Register VAddr;
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
    return {};

  // FIXME: Use defaulted operands for trailing 0s and remove from the complex
  // pattern.
  return {{
      [=](MachineInstrBuilder &MIB) {  // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // vaddr
        MIB.addReg(VAddr);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { // offset
        MIB.addImm(Offset);
      },
      [=](MachineInstrBuilder &MIB) {
        MIB.addImm(AMDGPU::CPol::GLC); // cpol
      }
    }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
    return {};

  return {{
      [=](MachineInstrBuilder &MIB) {  // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
      [=](MachineInstrBuilder &MIB) { MIB.addImm(AMDGPU::CPol::GLC); } // cpol
    }};
}

/// Get an immediate that must be 32-bits, and treated as zero extended.
static Optional<uint64_t> getConstantZext32Val(Register Reg,
                                               const MachineRegisterInfo &MRI) {
  // getConstantVRegVal sexts any values, so see if that matters.
  Optional<int64_t> OffsetVal = getConstantVRegSExtVal(Reg, MRI);
  if (!OffsetVal || !isInt<32>(*OffsetVal))
    return None;
  return Lo_32(*OffsetVal);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
  Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
  if (!OffsetVal)
    return {};

  Optional<int64_t> EncodedImm =
      AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
  if (!EncodedImm)
    return {};

  return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
  assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);

  Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
  if (!OffsetVal)
    return {};

  Optional<int64_t> EncodedImm
    = AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
  if (!EncodedImm)
    return {};

  return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } }};
}

void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
}

void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
}

void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(OpIdx == -1);

  const MachineOperand &Op = MI.getOperand(1);
  if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
    MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
  else {
    assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
    MIB.addImm(Op.getCImm()->getSExtValue());
  }
}

void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
}

/// This only really exists to satisfy DAG type checking machinery, so is a
/// no-op here.
void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  MIB.addImm(MI.getOperand(OpIdx).getImm());
}

void AMDGPUInstructionSelector::renderExtractCPol(MachineInstrBuilder &MIB,
                                                  const MachineInstr &MI,
                                                  int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
  MIB.addImm(MI.getOperand(OpIdx).getImm() & AMDGPU::CPol::ALL);
}

void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
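  // The swizzle flag lives in bit 3 of the combined cache-policy immediate,
  // separate from the CPol bits extracted by renderExtractCPol above.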
  MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1);
}

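/// Force the GLC bit on in the rendered cache policy; atomic instructions
/// that return the old value are expected to execute with GLC set.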
void AMDGPUInstructionSelector::renderSetGLC(MachineInstrBuilder &MIB,
                                             const MachineInstr &MI,
                                             int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
  MIB.addImm(MI.getOperand(OpIdx).getImm() | AMDGPU::CPol::GLC);
}

void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  MIB.addFrameIndex(MI.getOperand(1).getIndex());
}

bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
  return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
}

bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
  return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
}

bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
  return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
}

bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
  return TII.isInlineConstant(Imm);
}