1 //===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the InstructionSelector class for
10 /// AMDGPU.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13 
14 #include "AMDGPUInstructionSelector.h"
15 #include "AMDGPU.h"
16 #include "AMDGPUGlobalISelUtils.h"
17 #include "AMDGPUInstrInfo.h"
18 #include "AMDGPURegisterBankInfo.h"
19 #include "AMDGPUTargetMachine.h"
20 #include "SIMachineFunctionInfo.h"
21 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
22 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
23 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
24 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
25 #include "llvm/IR/DiagnosticInfo.h"
26 
27 #define DEBUG_TYPE "amdgpu-isel"
28 
29 using namespace llvm;
30 using namespace MIPatternMatch;
31 
32 static cl::opt<bool> AllowRiskySelect(
33   "amdgpu-global-isel-risky-select",
34   cl::desc("Allow GlobalISel to select cases that are likely to not work yet"),
35   cl::init(false),
36   cl::ReallyHidden);
37 
38 #define GET_GLOBALISEL_IMPL
39 #define AMDGPUSubtarget GCNSubtarget
40 #include "AMDGPUGenGlobalISel.inc"
41 #undef GET_GLOBALISEL_IMPL
42 #undef AMDGPUSubtarget
43 
44 AMDGPUInstructionSelector::AMDGPUInstructionSelector(
45     const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
46     const AMDGPUTargetMachine &TM)
47     : InstructionSelector(), TII(*STI.getInstrInfo()),
48       TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
49       STI(STI),
50       EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
51 #define GET_GLOBALISEL_PREDICATES_INIT
52 #include "AMDGPUGenGlobalISel.inc"
53 #undef GET_GLOBALISEL_PREDICATES_INIT
54 #define GET_GLOBALISEL_TEMPORARIES_INIT
55 #include "AMDGPUGenGlobalISel.inc"
56 #undef GET_GLOBALISEL_TEMPORARIES_INIT
57 {
58 }
59 
60 const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
61 
62 void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
63                                         CodeGenCoverage &CoverageInfo,
64                                         ProfileSummaryInfo *PSI,
65                                         BlockFrequencyInfo *BFI) {
66   MRI = &MF.getRegInfo();
67   Subtarget = &MF.getSubtarget<GCNSubtarget>();
68   InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
69 }
70 
71 bool AMDGPUInstructionSelector::isVCC(Register Reg,
72                                       const MachineRegisterInfo &MRI) const {
73   // The verifier is oblivious to s1 being a valid value for wavesize registers.
74   if (Reg.isPhysical())
75     return false;
76 
77   auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
78   const TargetRegisterClass *RC =
79       RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
80   if (RC) {
81     const LLT Ty = MRI.getType(Reg);
82     return RC->hasSuperClassEq(TRI.getBoolRC()) &&
83            Ty.isValid() && Ty.getSizeInBits() == 1;
84   }
85 
86   const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
87   return RB->getID() == AMDGPU::VCCRegBankID;
88 }
89 
90 bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
91                                                         unsigned NewOpc) const {
92   MI.setDesc(TII.get(NewOpc));
93   MI.RemoveOperand(1); // Remove intrinsic ID.
94   MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
95 
96   MachineOperand &Dst = MI.getOperand(0);
97   MachineOperand &Src = MI.getOperand(1);
98 
99   // TODO: This should be legalized to s32 if needed
100   if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
101     return false;
102 
103   const TargetRegisterClass *DstRC
104     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
105   const TargetRegisterClass *SrcRC
106     = TRI.getConstrainedRegClassForOperand(Src, *MRI);
107   if (!DstRC || DstRC != SrcRC)
108     return false;
109 
110   return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
111          RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
112 }
113 
114 bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
115   const DebugLoc &DL = I.getDebugLoc();
116   MachineBasicBlock *BB = I.getParent();
117   I.setDesc(TII.get(TargetOpcode::COPY));
118 
119   const MachineOperand &Src = I.getOperand(1);
120   MachineOperand &Dst = I.getOperand(0);
121   Register DstReg = Dst.getReg();
122   Register SrcReg = Src.getReg();
123 
124   if (isVCC(DstReg, *MRI)) {
125     if (SrcReg == AMDGPU::SCC) {
126       const TargetRegisterClass *RC
127         = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
128       if (!RC)
129         return true;
130       return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
131     }
132 
133     if (!isVCC(SrcReg, *MRI)) {
134       // TODO: Should probably leave the copy and let copyPhysReg expand it.
135       if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
136         return false;
137 
138       const TargetRegisterClass *SrcRC
139         = TRI.getConstrainedRegClassForOperand(Src, *MRI);
140 
141       Optional<ValueAndVReg> ConstVal =
142           getConstantVRegValWithLookThrough(SrcReg, *MRI, true, true);
143       if (ConstVal) {
144         unsigned MovOpc =
145             STI.isWave64() ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
146         BuildMI(*BB, &I, DL, TII.get(MovOpc), DstReg)
147             .addImm(ConstVal->Value.getBoolValue() ? -1 : 0);
148       } else {
149         Register MaskedReg = MRI->createVirtualRegister(SrcRC);
150 
151         // We can't trust the high bits at this point, so clear them.
152 
153         // TODO: Skip masking high bits if def is known boolean.
154 
155         unsigned AndOpc =
156             TRI.isSGPRClass(SrcRC) ? AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
157         BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
158             .addImm(1)
159             .addReg(SrcReg);
160         BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
161             .addImm(0)
162             .addReg(MaskedReg);
163       }
164 
165       if (!MRI->getRegClassOrNull(SrcReg))
166         MRI->setRegClass(SrcReg, SrcRC);
167       I.eraseFromParent();
168       return true;
169     }
170 
171     const TargetRegisterClass *RC =
172       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
173     if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
174       return false;
175 
176     return true;
177   }
178 
179   for (const MachineOperand &MO : I.operands()) {
180     if (MO.getReg().isPhysical())
181       continue;
182 
183     const TargetRegisterClass *RC =
184             TRI.getConstrainedRegClassForOperand(MO, *MRI);
185     if (!RC)
186       continue;
187     RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
188   }
189   return true;
190 }
191 
192 bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
193   const Register DefReg = I.getOperand(0).getReg();
194   const LLT DefTy = MRI->getType(DefReg);
195   if (DefTy == LLT::scalar(1)) {
196     if (!AllowRiskySelect) {
197       LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n");
198       return false;
199     }
200 
201     LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n");
202   }
203 
204   // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)
205 
206   const RegClassOrRegBank &RegClassOrBank =
207     MRI->getRegClassOrRegBank(DefReg);
208 
209   const TargetRegisterClass *DefRC
210     = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
211   if (!DefRC) {
212     if (!DefTy.isValid()) {
213       LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
214       return false;
215     }
216 
217     const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
218     DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB, *MRI);
219     if (!DefRC) {
220       LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
221       return false;
222     }
223   }
224 
225   // TODO: Verify that all registers have the same bank
226   I.setDesc(TII.get(TargetOpcode::PHI));
227   return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
228 }
229 
230 MachineOperand
231 AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
232                                            const TargetRegisterClass &SubRC,
233                                            unsigned SubIdx) const {
234 
235   MachineInstr *MI = MO.getParent();
236   MachineBasicBlock *BB = MO.getParent()->getParent();
237   Register DstReg = MRI->createVirtualRegister(&SubRC);
238 
239   if (MO.isReg()) {
240     unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
241     Register Reg = MO.getReg();
242     BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
243             .addReg(Reg, 0, ComposedSubIdx);
244 
245     return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
246                                      MO.isKill(), MO.isDead(), MO.isUndef(),
247                                      MO.isEarlyClobber(), 0, MO.isDebug(),
248                                      MO.isInternalRead());
249   }
250 
251   assert(MO.isImm());
252 
253   APInt Imm(64, MO.getImm());
254 
255   switch (SubIdx) {
256   default:
257     llvm_unreachable("do not know to split immediate with this sub index.");
258   case AMDGPU::sub0:
259     return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
260   case AMDGPU::sub1:
261     return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
262   }
263 }
264 
265 static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
266   switch (Opc) {
267   case AMDGPU::G_AND:
268     return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
269   case AMDGPU::G_OR:
270     return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
271   case AMDGPU::G_XOR:
272     return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
273   default:
274     llvm_unreachable("not a bit op");
275   }
276 }
277 
278 bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
279   Register DstReg = I.getOperand(0).getReg();
280   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
281 
282   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
283   if (DstRB->getID() != AMDGPU::SGPRRegBankID &&
284       DstRB->getID() != AMDGPU::VCCRegBankID)
285     return false;
286 
287   bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID &&
288                             STI.isWave64());
289   I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64)));
290 
291   // Dead implicit-def of scc
292   I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
293                                          true, // isImp
294                                          false, // isKill
295                                          true)); // isDead
296   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
297 }
298 
299 bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
300   MachineBasicBlock *BB = I.getParent();
301   MachineFunction *MF = BB->getParent();
302   Register DstReg = I.getOperand(0).getReg();
303   const DebugLoc &DL = I.getDebugLoc();
304   LLT Ty = MRI->getType(DstReg);
305   if (Ty.isVector())
306     return false;
307 
308   unsigned Size = Ty.getSizeInBits();
309   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
310   const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
311   const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;
312 
313   if (Size == 32) {
314     if (IsSALU) {
315       const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
316       MachineInstr *Add =
317         BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
318         .add(I.getOperand(1))
319         .add(I.getOperand(2));
320       I.eraseFromParent();
321       return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
322     }
323 
324     if (STI.hasAddNoCarry()) {
325       const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
326       I.setDesc(TII.get(Opc));
327       I.addOperand(*MF, MachineOperand::CreateImm(0));
328       I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
329       return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
330     }
331 
332     const unsigned Opc = Sub ? AMDGPU::V_SUB_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e64;
333 
334     Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
335     MachineInstr *Add
336       = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
337       .addDef(UnusedCarry, RegState::Dead)
338       .add(I.getOperand(1))
339       .add(I.getOperand(2))
340       .addImm(0);
341     I.eraseFromParent();
342     return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
343   }
344 
345   assert(!Sub && "illegal sub should not reach here");
346 
347   const TargetRegisterClass &RC
348     = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
349   const TargetRegisterClass &HalfRC
350     = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;
351 
352   MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
353   MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
354   MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
355   MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));
356 
357   Register DstLo = MRI->createVirtualRegister(&HalfRC);
358   Register DstHi = MRI->createVirtualRegister(&HalfRC);
359 
360   if (IsSALU) {
361     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
362       .add(Lo1)
363       .add(Lo2);
364     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
365       .add(Hi1)
366       .add(Hi2);
367   } else {
368     const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
369     Register CarryReg = MRI->createVirtualRegister(CarryRC);
370     BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo)
371       .addDef(CarryReg)
372       .add(Lo1)
373       .add(Lo2)
374       .addImm(0);
375     MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
376       .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
377       .add(Hi1)
378       .add(Hi2)
379       .addReg(CarryReg, RegState::Kill)
380       .addImm(0);
381 
382     if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
383       return false;
384   }
385 
386   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
387     .addReg(DstLo)
388     .addImm(AMDGPU::sub0)
389     .addReg(DstHi)
390     .addImm(AMDGPU::sub1);
391 
392 
393   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
394     return false;
395 
396   I.eraseFromParent();
397   return true;
398 }
399 
400 bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
401   MachineInstr &I) const {
402   MachineBasicBlock *BB = I.getParent();
403   MachineFunction *MF = BB->getParent();
404   const DebugLoc &DL = I.getDebugLoc();
405   Register Dst0Reg = I.getOperand(0).getReg();
406   Register Dst1Reg = I.getOperand(1).getReg();
407   const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
408                      I.getOpcode() == AMDGPU::G_UADDE;
409   const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
410                           I.getOpcode() == AMDGPU::G_USUBE;
411 
412   if (isVCC(Dst1Reg, *MRI)) {
413     unsigned NoCarryOpc =
414         IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
415     unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
416     I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
417     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
418     I.addOperand(*MF, MachineOperand::CreateImm(0));
419     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
420   }
421 
422   Register Src0Reg = I.getOperand(2).getReg();
423   Register Src1Reg = I.getOperand(3).getReg();
424 
425   if (HasCarryIn) {
426     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
427       .addReg(I.getOperand(4).getReg());
428   }
429 
430   unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
431   unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
432 
433   BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
434     .add(I.getOperand(2))
435     .add(I.getOperand(3));
436   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
437     .addReg(AMDGPU::SCC);
438 
439   if (!MRI->getRegClassOrNull(Dst1Reg))
440     MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);
441 
442   if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
443       !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
444       !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
445     return false;
446 
447   if (HasCarryIn &&
448       !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
449                                     AMDGPU::SReg_32RegClass, *MRI))
450     return false;
451 
452   I.eraseFromParent();
453   return true;
454 }
455 
456 // TODO: We should probably legalize these to only using 32-bit results.
457 bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
458   MachineBasicBlock *BB = I.getParent();
459   Register DstReg = I.getOperand(0).getReg();
460   Register SrcReg = I.getOperand(1).getReg();
461   LLT DstTy = MRI->getType(DstReg);
462   LLT SrcTy = MRI->getType(SrcReg);
463   const unsigned SrcSize = SrcTy.getSizeInBits();
464   unsigned DstSize = DstTy.getSizeInBits();
465 
466   // TODO: Should handle any multiple of 32 offset.
467   unsigned Offset = I.getOperand(2).getImm();
468   if (Offset % 32 != 0 || DstSize > 128)
469     return false;
470 
471   // 16-bit operations really use 32-bit registers.
472   // FIXME: Probably should not allow 16-bit G_EXTRACT results.
473   if (DstSize == 16)
474     DstSize = 32;
475 
476   const TargetRegisterClass *DstRC =
477     TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
478   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
479     return false;
480 
481   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
482   const TargetRegisterClass *SrcRC =
483     TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
484   if (!SrcRC)
485     return false;
486   unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
487                                                          DstSize / 32);
488   SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
489   if (!SrcRC)
490     return false;
491 
492   SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
493                                     *SrcRC, I.getOperand(1));
494   const DebugLoc &DL = I.getDebugLoc();
495   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
496     .addReg(SrcReg, 0, SubReg);
497 
498   I.eraseFromParent();
499   return true;
500 }
501 
502 bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
503   MachineBasicBlock *BB = MI.getParent();
504   Register DstReg = MI.getOperand(0).getReg();
505   LLT DstTy = MRI->getType(DstReg);
506   LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());
507 
508   const unsigned SrcSize = SrcTy.getSizeInBits();
509   if (SrcSize < 32)
510     return selectImpl(MI, *CoverageInfo);
511 
512   const DebugLoc &DL = MI.getDebugLoc();
513   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
514   const unsigned DstSize = DstTy.getSizeInBits();
515   const TargetRegisterClass *DstRC =
516     TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
517   if (!DstRC)
518     return false;
519 
520   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
521   MachineInstrBuilder MIB =
522     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
523   for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
524     MachineOperand &Src = MI.getOperand(I + 1);
525     MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
526     MIB.addImm(SubRegs[I]);
527 
528     const TargetRegisterClass *SrcRC
529       = TRI.getConstrainedRegClassForOperand(Src, *MRI);
530     if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
531       return false;
532   }
533 
534   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
535     return false;
536 
537   MI.eraseFromParent();
538   return true;
539 }
540 
541 bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
542   MachineBasicBlock *BB = MI.getParent();
543   const int NumDst = MI.getNumOperands() - 1;
544 
545   MachineOperand &Src = MI.getOperand(NumDst);
546 
547   Register SrcReg = Src.getReg();
548   Register DstReg0 = MI.getOperand(0).getReg();
549   LLT DstTy = MRI->getType(DstReg0);
550   LLT SrcTy = MRI->getType(SrcReg);
551 
552   const unsigned DstSize = DstTy.getSizeInBits();
553   const unsigned SrcSize = SrcTy.getSizeInBits();
554   const DebugLoc &DL = MI.getDebugLoc();
555   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
556 
557   const TargetRegisterClass *SrcRC =
558     TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
559   if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
560     return false;
561 
562   // Note we could have mixed SGPR and VGPR destination banks for an SGPR
563   // source, and this relies on the fact that the same subregister indices are
564   // used for both.
565   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
566   for (int I = 0, E = NumDst; I != E; ++I) {
567     MachineOperand &Dst = MI.getOperand(I);
568     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
569       .addReg(SrcReg, 0, SubRegs[I]);
570 
571     // Make sure the subregister index is valid for the source register.
572     SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]);
573     if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
574       return false;
575 
576     const TargetRegisterClass *DstRC =
577       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
578     if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
579       return false;
580   }
581 
582   MI.eraseFromParent();
583   return true;
584 }
585 
586 bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC(
587   MachineInstr &MI) const {
588   if (selectImpl(MI, *CoverageInfo))
589     return true;
590 
591   const LLT S32 = LLT::scalar(32);
592   const LLT V2S16 = LLT::vector(2, 16);
593 
594   Register Dst = MI.getOperand(0).getReg();
595   if (MRI->getType(Dst) != V2S16)
596     return false;
597 
598   const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
599   if (DstBank->getID() != AMDGPU::SGPRRegBankID)
600     return false;
601 
602   Register Src0 = MI.getOperand(1).getReg();
603   Register Src1 = MI.getOperand(2).getReg();
604   if (MRI->getType(Src0) != S32)
605     return false;
606 
607   const DebugLoc &DL = MI.getDebugLoc();
608   MachineBasicBlock *BB = MI.getParent();
609 
610   auto ConstSrc1 =
611       getConstantVRegValWithLookThrough(Src1, *MRI, true, true, true);
612   if (ConstSrc1) {
613     auto ConstSrc0 =
614         getConstantVRegValWithLookThrough(Src0, *MRI, true, true, true);
615     if (ConstSrc0) {
616       const int64_t K0 = ConstSrc0->Value.getSExtValue();
617       const int64_t K1 = ConstSrc1->Value.getSExtValue();
618       uint32_t Lo16 = static_cast<uint32_t>(K0) & 0xffff;
619       uint32_t Hi16 = static_cast<uint32_t>(K1) & 0xffff;
620 
621       BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst)
622         .addImm(Lo16 | (Hi16 << 16));
623       MI.eraseFromParent();
624       return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI);
625     }
626   }
627 
628   // TODO: This should probably be a combine somewhere
629   // (build_vector_trunc $src0, undef -> copy $src0
630   MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
631   if (Src1Def && Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
632     MI.setDesc(TII.get(AMDGPU::COPY));
633     MI.RemoveOperand(2);
634     return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI) &&
635            RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI);
636   }
637 
638   Register ShiftSrc0;
639   Register ShiftSrc1;
640 
641   // With multiple uses of the shift, this will duplicate the shift and
642   // increase register pressure.
643   //
644   // (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16)
645   //  => (S_PACK_HH_B32_B16 $src0, $src1)
646   // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16))
647   //  => (S_PACK_LH_B32_B16 $src0, $src1)
648   // (build_vector_trunc $src0, $src1)
649   //  => (S_PACK_LL_B32_B16 $src0, $src1)
650 
651   bool Shift0 = mi_match(
652       Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_SpecificICst(16))));
653 
654   bool Shift1 = mi_match(
655       Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_SpecificICst(16))));
656 
657   unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
658   if (Shift0 && Shift1) {
659     Opc = AMDGPU::S_PACK_HH_B32_B16;
660     MI.getOperand(1).setReg(ShiftSrc0);
661     MI.getOperand(2).setReg(ShiftSrc1);
662   } else if (Shift1) {
663     Opc = AMDGPU::S_PACK_LH_B32_B16;
664     MI.getOperand(2).setReg(ShiftSrc1);
665   } else if (Shift0 && ConstSrc1 && ConstSrc1->Value == 0) {
666     // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16
667     auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)
668       .addReg(ShiftSrc0)
669       .addImm(16);
670 
671     MI.eraseFromParent();
672     return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
673   }
674 
675   MI.setDesc(TII.get(Opc));
676   return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
677 }
678 
679 bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
680   return selectG_ADD_SUB(I);
681 }
682 
683 bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
684   const MachineOperand &MO = I.getOperand(0);
685 
686   // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
687   // regbank check here is to know why getConstrainedRegClassForOperand failed.
688   const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
689   if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
690       (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
691     I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
692     return true;
693   }
694 
695   return false;
696 }
697 
698 bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
699   MachineBasicBlock *BB = I.getParent();
700 
701   Register DstReg = I.getOperand(0).getReg();
702   Register Src0Reg = I.getOperand(1).getReg();
703   Register Src1Reg = I.getOperand(2).getReg();
704   LLT Src1Ty = MRI->getType(Src1Reg);
705 
706   unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
707   unsigned InsSize = Src1Ty.getSizeInBits();
708 
709   int64_t Offset = I.getOperand(3).getImm();
710 
711   // FIXME: These cases should have been illegal and unnecessary to check here.
712   if (Offset % 32 != 0 || InsSize % 32 != 0)
713     return false;
714 
715   // Currently not handled by getSubRegFromChannel.
716   if (InsSize > 128)
717     return false;
718 
719   unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
720   if (SubReg == AMDGPU::NoSubRegister)
721     return false;
722 
723   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
724   const TargetRegisterClass *DstRC =
725     TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
726   if (!DstRC)
727     return false;
728 
729   const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
730   const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
731   const TargetRegisterClass *Src0RC =
732     TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank, *MRI);
733   const TargetRegisterClass *Src1RC =
734     TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank, *MRI);
735 
736   // Deal with weird cases where the class only partially supports the subreg
737   // index.
738   Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
739   if (!Src0RC || !Src1RC)
740     return false;
741 
742   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
743       !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
744       !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
745     return false;
746 
747   const DebugLoc &DL = I.getDebugLoc();
748   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
749     .addReg(Src0Reg)
750     .addReg(Src1Reg)
751     .addImm(SubReg);
752 
753   I.eraseFromParent();
754   return true;
755 }
756 
757 bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
758   if (STI.getLDSBankCount() != 16)
759     return selectImpl(MI, *CoverageInfo);
760 
761   Register Dst = MI.getOperand(0).getReg();
762   Register Src0 = MI.getOperand(2).getReg();
763   Register M0Val = MI.getOperand(6).getReg();
764   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
765       !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
766       !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
767     return false;
768 
769   // This requires 2 instructions. It is possible to write a pattern to support
770   // this, but the generated isel emitter doesn't correctly deal with multiple
771   // output instructions using the same physical register input. The copy to m0
772   // is incorrectly placed before the second instruction.
773   //
774   // TODO: Match source modifiers.
775 
776   Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
777   const DebugLoc &DL = MI.getDebugLoc();
778   MachineBasicBlock *MBB = MI.getParent();
779 
780   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
781     .addReg(M0Val);
782   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
783     .addImm(2)
784     .addImm(MI.getOperand(4).getImm())  // $attr
785     .addImm(MI.getOperand(3).getImm()); // $attrchan
786 
787   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
788     .addImm(0)                          // $src0_modifiers
789     .addReg(Src0)                       // $src0
790     .addImm(MI.getOperand(4).getImm())  // $attr
791     .addImm(MI.getOperand(3).getImm())  // $attrchan
792     .addImm(0)                          // $src2_modifiers
793     .addReg(InterpMov)                  // $src2 - 2 f16 values selected by high
794     .addImm(MI.getOperand(5).getImm())  // $high
795     .addImm(0)                          // $clamp
796     .addImm(0);                         // $omod
797 
798   MI.eraseFromParent();
799   return true;
800 }
801 
802 // Writelane is special in that it can use SGPR and M0 (which would normally
803 // count as using the constant bus twice - but in this case it is allowed since
804 // the lane selector doesn't count as a use of the constant bus). However, it is
805 // still required to abide by the 1 SGPR rule. Fix this up if we might have
806 // multiple SGPRs.
807 bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const {
808   // With a constant bus limit of at least 2, there's no issue.
809   if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1)
810     return selectImpl(MI, *CoverageInfo);
811 
812   MachineBasicBlock *MBB = MI.getParent();
813   const DebugLoc &DL = MI.getDebugLoc();
814   Register VDst = MI.getOperand(0).getReg();
815   Register Val = MI.getOperand(2).getReg();
816   Register LaneSelect = MI.getOperand(3).getReg();
817   Register VDstIn = MI.getOperand(4).getReg();
818 
819   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst);
820 
821   Optional<ValueAndVReg> ConstSelect =
822     getConstantVRegValWithLookThrough(LaneSelect, *MRI, true, true);
823   if (ConstSelect) {
824     // The selector has to be an inline immediate, so we can use whatever for
825     // the other operands.
826     MIB.addReg(Val);
827     MIB.addImm(ConstSelect->Value.getSExtValue() &
828                maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
829   } else {
830     Optional<ValueAndVReg> ConstVal =
831       getConstantVRegValWithLookThrough(Val, *MRI, true, true);
832 
833     // If the value written is an inline immediate, we can get away without a
834     // copy to m0.
835     if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value.getSExtValue(),
836                                                  STI.hasInv2PiInlineImm())) {
837       MIB.addImm(ConstVal->Value.getSExtValue());
838       MIB.addReg(LaneSelect);
839     } else {
840       MIB.addReg(Val);
841 
842       // If the lane selector was originally in a VGPR and copied with
843       // readfirstlane, there's a hazard to read the same SGPR from the
844       // VALU. Constrain to a different SGPR to help avoid needing a nop later.
845       RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI);
846 
847       BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
848         .addReg(LaneSelect);
849       MIB.addReg(AMDGPU::M0);
850     }
851   }
852 
853   MIB.addReg(VDstIn);
854 
855   MI.eraseFromParent();
856   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
857 }
858 
859 // We need to handle this here because tablegen doesn't support matching
860 // instructions with multiple outputs.
861 bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
862   Register Dst0 = MI.getOperand(0).getReg();
863   Register Dst1 = MI.getOperand(1).getReg();
864 
865   LLT Ty = MRI->getType(Dst0);
866   unsigned Opc;
867   if (Ty == LLT::scalar(32))
868     Opc = AMDGPU::V_DIV_SCALE_F32_e64;
869   else if (Ty == LLT::scalar(64))
870     Opc = AMDGPU::V_DIV_SCALE_F64_e64;
871   else
872     return false;
873 
874   // TODO: Match source modifiers.
875 
876   const DebugLoc &DL = MI.getDebugLoc();
877   MachineBasicBlock *MBB = MI.getParent();
878 
879   Register Numer = MI.getOperand(3).getReg();
880   Register Denom = MI.getOperand(4).getReg();
881   unsigned ChooseDenom = MI.getOperand(5).getImm();
882 
883   Register Src0 = ChooseDenom != 0 ? Numer : Denom;
884 
885   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0)
886     .addDef(Dst1)
887     .addImm(0)     // $src0_modifiers
888     .addUse(Src0)  // $src0
889     .addImm(0)     // $src1_modifiers
890     .addUse(Denom) // $src1
891     .addImm(0)     // $src2_modifiers
892     .addUse(Numer) // $src2
893     .addImm(0)     // $clamp
894     .addImm(0);    // $omod
895 
896   MI.eraseFromParent();
897   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
898 }
899 
900 bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
901   unsigned IntrinsicID = I.getIntrinsicID();
902   switch (IntrinsicID) {
903   case Intrinsic::amdgcn_if_break: {
904     MachineBasicBlock *BB = I.getParent();
905 
906     // FIXME: Manually selecting to avoid dealiing with the SReg_1 trick
907     // SelectionDAG uses for wave32 vs wave64.
908     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
909       .add(I.getOperand(0))
910       .add(I.getOperand(2))
911       .add(I.getOperand(3));
912 
913     Register DstReg = I.getOperand(0).getReg();
914     Register Src0Reg = I.getOperand(2).getReg();
915     Register Src1Reg = I.getOperand(3).getReg();
916 
917     I.eraseFromParent();
918 
919     for (Register Reg : { DstReg, Src0Reg, Src1Reg })
920       MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
921 
922     return true;
923   }
924   case Intrinsic::amdgcn_interp_p1_f16:
925     return selectInterpP1F16(I);
926   case Intrinsic::amdgcn_wqm:
927     return constrainCopyLikeIntrin(I, AMDGPU::WQM);
928   case Intrinsic::amdgcn_softwqm:
929     return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
930   case Intrinsic::amdgcn_wwm:
931     return constrainCopyLikeIntrin(I, AMDGPU::WWM);
932   case Intrinsic::amdgcn_writelane:
933     return selectWritelane(I);
934   case Intrinsic::amdgcn_div_scale:
935     return selectDivScale(I);
936   case Intrinsic::amdgcn_icmp:
937     return selectIntrinsicIcmp(I);
938   case Intrinsic::amdgcn_ballot:
939     return selectBallot(I);
940   case Intrinsic::amdgcn_reloc_constant:
941     return selectRelocConstant(I);
942   case Intrinsic::amdgcn_groupstaticsize:
943     return selectGroupStaticSize(I);
944   case Intrinsic::returnaddress:
945     return selectReturnAddress(I);
946   default:
947     return selectImpl(I, *CoverageInfo);
948   }
949 }
950 
951 static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
952   if (Size != 32 && Size != 64)
953     return -1;
954   switch (P) {
955   default:
956     llvm_unreachable("Unknown condition code!");
957   case CmpInst::ICMP_NE:
958     return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
959   case CmpInst::ICMP_EQ:
960     return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
961   case CmpInst::ICMP_SGT:
962     return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
963   case CmpInst::ICMP_SGE:
964     return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
965   case CmpInst::ICMP_SLT:
966     return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
967   case CmpInst::ICMP_SLE:
968     return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
969   case CmpInst::ICMP_UGT:
970     return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
971   case CmpInst::ICMP_UGE:
972     return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
973   case CmpInst::ICMP_ULT:
974     return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
975   case CmpInst::ICMP_ULE:
976     return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
977   }
978 }
979 
980 int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
981                                               unsigned Size) const {
982   if (Size == 64) {
983     if (!STI.hasScalarCompareEq64())
984       return -1;
985 
986     switch (P) {
987     case CmpInst::ICMP_NE:
988       return AMDGPU::S_CMP_LG_U64;
989     case CmpInst::ICMP_EQ:
990       return AMDGPU::S_CMP_EQ_U64;
991     default:
992       return -1;
993     }
994   }
995 
996   if (Size != 32)
997     return -1;
998 
999   switch (P) {
1000   case CmpInst::ICMP_NE:
1001     return AMDGPU::S_CMP_LG_U32;
1002   case CmpInst::ICMP_EQ:
1003     return AMDGPU::S_CMP_EQ_U32;
1004   case CmpInst::ICMP_SGT:
1005     return AMDGPU::S_CMP_GT_I32;
1006   case CmpInst::ICMP_SGE:
1007     return AMDGPU::S_CMP_GE_I32;
1008   case CmpInst::ICMP_SLT:
1009     return AMDGPU::S_CMP_LT_I32;
1010   case CmpInst::ICMP_SLE:
1011     return AMDGPU::S_CMP_LE_I32;
1012   case CmpInst::ICMP_UGT:
1013     return AMDGPU::S_CMP_GT_U32;
1014   case CmpInst::ICMP_UGE:
1015     return AMDGPU::S_CMP_GE_U32;
1016   case CmpInst::ICMP_ULT:
1017     return AMDGPU::S_CMP_LT_U32;
1018   case CmpInst::ICMP_ULE:
1019     return AMDGPU::S_CMP_LE_U32;
1020   default:
1021     llvm_unreachable("Unknown condition code!");
1022   }
1023 }
1024 
1025 bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
1026   MachineBasicBlock *BB = I.getParent();
1027   const DebugLoc &DL = I.getDebugLoc();
1028 
1029   Register SrcReg = I.getOperand(2).getReg();
1030   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1031 
1032   auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
1033 
1034   Register CCReg = I.getOperand(0).getReg();
1035   if (!isVCC(CCReg, *MRI)) {
1036     int Opcode = getS_CMPOpcode(Pred, Size);
1037     if (Opcode == -1)
1038       return false;
1039     MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
1040             .add(I.getOperand(2))
1041             .add(I.getOperand(3));
1042     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
1043       .addReg(AMDGPU::SCC);
1044     bool Ret =
1045         constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
1046         RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
1047     I.eraseFromParent();
1048     return Ret;
1049   }
1050 
1051   int Opcode = getV_CMPOpcode(Pred, Size);
1052   if (Opcode == -1)
1053     return false;
1054 
1055   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
1056             I.getOperand(0).getReg())
1057             .add(I.getOperand(2))
1058             .add(I.getOperand(3));
1059   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
1060                                *TRI.getBoolRC(), *MRI);
1061   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1062   I.eraseFromParent();
1063   return Ret;
1064 }
1065 
1066 bool AMDGPUInstructionSelector::selectIntrinsicIcmp(MachineInstr &I) const {
1067   Register Dst = I.getOperand(0).getReg();
1068   if (isVCC(Dst, *MRI))
1069     return false;
1070 
1071   if (MRI->getType(Dst).getSizeInBits() != STI.getWavefrontSize())
1072     return false;
1073 
1074   MachineBasicBlock *BB = I.getParent();
1075   const DebugLoc &DL = I.getDebugLoc();
1076   Register SrcReg = I.getOperand(2).getReg();
1077   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1078   auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm());
1079 
1080   int Opcode = getV_CMPOpcode(Pred, Size);
1081   if (Opcode == -1)
1082     return false;
1083 
1084   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst)
1085                            .add(I.getOperand(2))
1086                            .add(I.getOperand(3));
1087   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), *TRI.getBoolRC(),
1088                                *MRI);
1089   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1090   I.eraseFromParent();
1091   return Ret;
1092 }
1093 
1094 bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
1095   MachineBasicBlock *BB = I.getParent();
1096   const DebugLoc &DL = I.getDebugLoc();
1097   Register DstReg = I.getOperand(0).getReg();
1098   const unsigned Size = MRI->getType(DstReg).getSizeInBits();
1099   const bool Is64 = Size == 64;
1100 
1101   if (Size != STI.getWavefrontSize())
1102     return false;
1103 
1104   Optional<ValueAndVReg> Arg =
1105       getConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI, true);
1106 
1107   if (Arg.hasValue()) {
1108     const int64_t Value = Arg.getValue().Value.getSExtValue();
1109     if (Value == 0) {
1110       unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
1111       BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
1112     } else if (Value == -1) { // all ones
1113       Register SrcReg = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
1114       BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1115     } else
1116       return false;
1117   } else {
1118     Register SrcReg = I.getOperand(2).getReg();
1119     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1120   }
1121 
1122   I.eraseFromParent();
1123   return true;
1124 }
1125 
1126 bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const {
1127   Register DstReg = I.getOperand(0).getReg();
1128   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
1129   const TargetRegisterClass *DstRC =
1130     TRI.getRegClassForSizeOnBank(32, *DstBank, *MRI);
1131   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
1132     return false;
1133 
1134   const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID;
1135 
1136   Module *M = MF->getFunction().getParent();
1137   const MDNode *Metadata = I.getOperand(2).getMetadata();
1138   auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString();
1139   auto RelocSymbol = cast<GlobalVariable>(
1140     M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext())));
1141 
1142   MachineBasicBlock *BB = I.getParent();
1143   BuildMI(*BB, &I, I.getDebugLoc(),
1144           TII.get(IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg)
1145     .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO);
1146 
1147   I.eraseFromParent();
1148   return true;
1149 }
1150 
1151 bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const {
1152   Triple::OSType OS = MF->getTarget().getTargetTriple().getOS();
1153 
1154   Register DstReg = I.getOperand(0).getReg();
1155   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1156   unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ?
1157     AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1158 
1159   MachineBasicBlock *MBB = I.getParent();
1160   const DebugLoc &DL = I.getDebugLoc();
1161 
1162   auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg);
1163 
1164   if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) {
1165     const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1166     MIB.addImm(MFI->getLDSSize());
1167   } else {
1168     Module *M = MF->getFunction().getParent();
1169     const GlobalValue *GV
1170       = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize);
1171     MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO);
1172   }
1173 
1174   I.eraseFromParent();
1175   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1176 }
1177 
1178 bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const {
1179   MachineBasicBlock *MBB = I.getParent();
1180   MachineFunction &MF = *MBB->getParent();
1181   const DebugLoc &DL = I.getDebugLoc();
1182 
1183   MachineOperand &Dst = I.getOperand(0);
1184   Register DstReg = Dst.getReg();
1185   unsigned Depth = I.getOperand(2).getImm();
1186 
1187   const TargetRegisterClass *RC
1188     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
1189   if (!RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) ||
1190       !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
1191     return false;
1192 
1193   // Check for kernel and shader functions
1194   if (Depth != 0 ||
1195       MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
1196     BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
1197       .addImm(0);
1198     I.eraseFromParent();
1199     return true;
1200   }
1201 
1202   MachineFrameInfo &MFI = MF.getFrameInfo();
1203   // There is a call to @llvm.returnaddress in this function
1204   MFI.setReturnAddressIsTaken(true);
1205 
1206   // Get the return address reg and mark it as an implicit live-in
1207   Register ReturnAddrReg = TRI.getReturnAddressReg(MF);
1208   Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg,
1209                                              AMDGPU::SReg_64RegClass);
1210   BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
1211     .addReg(LiveIn);
1212   I.eraseFromParent();
1213   return true;
1214 }
1215 
1216 bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
1217   // FIXME: Manually selecting to avoid dealiing with the SReg_1 trick
1218   // SelectionDAG uses for wave32 vs wave64.
1219   MachineBasicBlock *BB = MI.getParent();
1220   BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
1221       .add(MI.getOperand(1));
1222 
1223   Register Reg = MI.getOperand(1).getReg();
1224   MI.eraseFromParent();
1225 
1226   if (!MRI->getRegClassOrNull(Reg))
1227     MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
1228   return true;
1229 }
1230 
1231 bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
1232   MachineInstr &MI, Intrinsic::ID IntrID) const {
1233   MachineBasicBlock *MBB = MI.getParent();
1234   MachineFunction *MF = MBB->getParent();
1235   const DebugLoc &DL = MI.getDebugLoc();
1236 
1237   unsigned IndexOperand = MI.getOperand(7).getImm();
1238   bool WaveRelease = MI.getOperand(8).getImm() != 0;
1239   bool WaveDone = MI.getOperand(9).getImm() != 0;
1240 
1241   if (WaveDone && !WaveRelease)
1242     report_fatal_error("ds_ordered_count: wave_done requires wave_release");
1243 
1244   unsigned OrderedCountIndex = IndexOperand & 0x3f;
1245   IndexOperand &= ~0x3f;
1246   unsigned CountDw = 0;
1247 
1248   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
1249     CountDw = (IndexOperand >> 24) & 0xf;
1250     IndexOperand &= ~(0xf << 24);
1251 
1252     if (CountDw < 1 || CountDw > 4) {
1253       report_fatal_error(
1254         "ds_ordered_count: dword count must be between 1 and 4");
1255     }
1256   }
1257 
1258   if (IndexOperand)
1259     report_fatal_error("ds_ordered_count: bad index operand");
1260 
1261   unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
1262   unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF);
1263 
1264   unsigned Offset0 = OrderedCountIndex << 2;
1265   unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
1266                      (Instruction << 4);
1267 
1268   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
1269     Offset1 |= (CountDw - 1) << 6;
1270 
1271   unsigned Offset = Offset0 | (Offset1 << 8);
1272 
1273   Register M0Val = MI.getOperand(2).getReg();
1274   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1275     .addReg(M0Val);
1276 
1277   Register DstReg = MI.getOperand(0).getReg();
1278   Register ValReg = MI.getOperand(3).getReg();
1279   MachineInstrBuilder DS =
1280     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
1281       .addReg(ValReg)
1282       .addImm(Offset)
1283       .cloneMemRefs(MI);
1284 
1285   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
1286     return false;
1287 
1288   bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
1289   MI.eraseFromParent();
1290   return Ret;
1291 }
1292 
1293 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
1294   switch (IntrID) {
1295   case Intrinsic::amdgcn_ds_gws_init:
1296     return AMDGPU::DS_GWS_INIT;
1297   case Intrinsic::amdgcn_ds_gws_barrier:
1298     return AMDGPU::DS_GWS_BARRIER;
1299   case Intrinsic::amdgcn_ds_gws_sema_v:
1300     return AMDGPU::DS_GWS_SEMA_V;
1301   case Intrinsic::amdgcn_ds_gws_sema_br:
1302     return AMDGPU::DS_GWS_SEMA_BR;
1303   case Intrinsic::amdgcn_ds_gws_sema_p:
1304     return AMDGPU::DS_GWS_SEMA_P;
1305   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1306     return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
1307   default:
1308     llvm_unreachable("not a gws intrinsic");
1309   }
1310 }
1311 
1312 bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
1313                                                      Intrinsic::ID IID) const {
1314   if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
1315       !STI.hasGWSSemaReleaseAll())
1316     return false;
1317 
1318   // intrinsic ID, vsrc, offset
1319   const bool HasVSrc = MI.getNumOperands() == 3;
1320   assert(HasVSrc || MI.getNumOperands() == 2);
1321 
1322   Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
1323   const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
1324   if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
1325     return false;
1326 
1327   MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1328   assert(OffsetDef);
1329 
1330   unsigned ImmOffset;
1331 
1332   MachineBasicBlock *MBB = MI.getParent();
1333   const DebugLoc &DL = MI.getDebugLoc();
1334 
1335   MachineInstr *Readfirstlane = nullptr;
1336 
1337   // If we legalized the VGPR input, strip out the readfirstlane to analyze the
1338   // incoming offset, in case there's an add of a constant. We'll have to put it
1339   // back later.
1340   if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
1341     Readfirstlane = OffsetDef;
1342     BaseOffset = OffsetDef->getOperand(1).getReg();
1343     OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1344   }
1345 
1346   if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
1347     // If we have a constant offset, try to use the 0 in m0 as the base.
1348     // TODO: Look into changing the default m0 initialization value. If the
1349     // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
1350     // the immediate offset.
1351 
1352     ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
1353     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1354       .addImm(0);
1355   } else {
1356     std::tie(BaseOffset, ImmOffset) =
1357         AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset);
1358 
1359     if (Readfirstlane) {
1360       // We have the constant offset now, so put the readfirstlane back on the
1361       // variable component.
1362       if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
1363         return false;
1364 
1365       Readfirstlane->getOperand(1).setReg(BaseOffset);
1366       BaseOffset = Readfirstlane->getOperand(0).getReg();
1367     } else {
1368       if (!RBI.constrainGenericRegister(BaseOffset,
1369                                         AMDGPU::SReg_32RegClass, *MRI))
1370         return false;
1371     }
1372 
1373     Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1374     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
1375       .addReg(BaseOffset)
1376       .addImm(16);
1377 
1378     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1379       .addReg(M0Base);
1380   }
1381 
1382   // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
1383   // offset field) % 64. Some versions of the programming guide omit the m0
1384   // part, or claim it's from offset 0.
1385   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));
1386 
1387   if (HasVSrc) {
1388     Register VSrc = MI.getOperand(1).getReg();
1389     MIB.addReg(VSrc);
1390     if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
1391       return false;
1392   }
1393 
1394   MIB.addImm(ImmOffset)
1395      .cloneMemRefs(MI);
1396 
1397   MI.eraseFromParent();
1398   return true;
1399 }
1400 
1401 bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
1402                                                       bool IsAppend) const {
1403   Register PtrBase = MI.getOperand(2).getReg();
1404   LLT PtrTy = MRI->getType(PtrBase);
1405   bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
1406 
1407   unsigned Offset;
1408   std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));
1409 
1410   // TODO: Should this try to look through readfirstlane like GWS?
1411   if (!isDSOffsetLegal(PtrBase, Offset)) {
1412     PtrBase = MI.getOperand(2).getReg();
1413     Offset = 0;
1414   }
1415 
1416   MachineBasicBlock *MBB = MI.getParent();
1417   const DebugLoc &DL = MI.getDebugLoc();
1418   const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
1419 
1420   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1421     .addReg(PtrBase);
1422   if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI))
1423     return false;
1424 
1425   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
1426     .addImm(Offset)
1427     .addImm(IsGDS ? -1 : 0)
1428     .cloneMemRefs(MI);
1429   MI.eraseFromParent();
1430   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1431 }
1432 
1433 bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const {
1434   if (TM.getOptLevel() > CodeGenOpt::None) {
1435     unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second;
1436     if (WGSize <= STI.getWavefrontSize()) {
1437       MachineBasicBlock *MBB = MI.getParent();
1438       const DebugLoc &DL = MI.getDebugLoc();
1439       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER));
1440       MI.eraseFromParent();
1441       return true;
1442     }
1443   }
1444   return selectImpl(MI, *CoverageInfo);
1445 }
1446 
1447 static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
1448                          bool &IsTexFail) {
1449   if (TexFailCtrl)
1450     IsTexFail = true;
1451 
1452   TFE = (TexFailCtrl & 0x1) ? 1 : 0;
1453   TexFailCtrl &= ~(uint64_t)0x1;
1454   LWE = (TexFailCtrl & 0x2) ? 1 : 0;
1455   TexFailCtrl &= ~(uint64_t)0x2;
1456 
1457   return TexFailCtrl == 0;
1458 }
1459 
1460 static bool parseCachePolicy(uint64_t Value,
1461                              bool *GLC, bool *SLC, bool *DLC, bool *SCC) {
1462   if (GLC) {
1463     *GLC = (Value & 0x1) ? 1 : 0;
1464     Value &= ~(uint64_t)0x1;
1465   }
1466   if (SLC) {
1467     *SLC = (Value & 0x2) ? 1 : 0;
1468     Value &= ~(uint64_t)0x2;
1469   }
1470   if (DLC) {
1471     *DLC = (Value & 0x4) ? 1 : 0;
1472     Value &= ~(uint64_t)0x4;
1473   }
1474   if (SCC) {
1475     *SCC = (Value & 0x10) ? 1 : 0;
1476     Value &= ~(uint64_t)0x10;
1477   }
1478 
1479   return Value == 0;
1480 }
1481 
1482 bool AMDGPUInstructionSelector::selectImageIntrinsic(
1483   MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
1484   MachineBasicBlock *MBB = MI.getParent();
1485   const DebugLoc &DL = MI.getDebugLoc();
1486 
1487   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
1488     AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
1489 
1490   const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
1491   const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
1492       AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
1493   const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo =
1494       AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode);
1495   unsigned IntrOpcode = Intr->BaseOpcode;
1496   const bool IsGFX10Plus = AMDGPU::isGFX10Plus(STI);
1497 
1498   const unsigned ArgOffset = MI.getNumExplicitDefs() + 1;
1499 
1500   Register VDataIn, VDataOut;
1501   LLT VDataTy;
1502   int NumVDataDwords = -1;
1503   bool IsD16 = false;
1504 
1505   bool Unorm;
1506   if (!BaseOpcode->Sampler)
1507     Unorm = true;
1508   else
1509     Unorm = MI.getOperand(ArgOffset + Intr->UnormIndex).getImm() != 0;
1510 
1511   bool TFE;
1512   bool LWE;
1513   bool IsTexFail = false;
1514   if (!parseTexFail(MI.getOperand(ArgOffset + Intr->TexFailCtrlIndex).getImm(),
1515                     TFE, LWE, IsTexFail))
1516     return false;
1517 
1518   const int Flags = MI.getOperand(ArgOffset + Intr->NumArgs).getImm();
1519   const bool IsA16 = (Flags & 1) != 0;
1520   const bool IsG16 = (Flags & 2) != 0;
1521 
1522   // A16 implies 16 bit gradients
1523   if (IsA16 && !IsG16)
1524     return false;
1525 
1526   unsigned DMask = 0;
1527   unsigned DMaskLanes = 0;
1528 
1529   if (BaseOpcode->Atomic) {
1530     VDataOut = MI.getOperand(0).getReg();
1531     VDataIn = MI.getOperand(2).getReg();
1532     LLT Ty = MRI->getType(VDataIn);
1533 
1534     // Be careful to allow atomic swap on 16-bit element vectors.
1535     const bool Is64Bit = BaseOpcode->AtomicX2 ?
1536       Ty.getSizeInBits() == 128 :
1537       Ty.getSizeInBits() == 64;
1538 
1539     if (BaseOpcode->AtomicX2) {
1540       assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister);
1541 
1542       DMask = Is64Bit ? 0xf : 0x3;
1543       NumVDataDwords = Is64Bit ? 4 : 2;
1544     } else {
1545       DMask = Is64Bit ? 0x3 : 0x1;
1546       NumVDataDwords = Is64Bit ? 2 : 1;
1547     }
1548   } else {
1549     DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm();
1550     DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
1551 
1552     // One memoperand is mandatory, except for getresinfo.
1553     // FIXME: Check this in verifier.
1554     if (!MI.memoperands_empty()) {
1555       const MachineMemOperand *MMO = *MI.memoperands_begin();
1556 
1557       // Infer d16 from the memory size, as the register type will be mangled by
1558       // unpacked subtargets, or by TFE.
1559       IsD16 = ((8 * MMO->getSize()) / DMaskLanes) < 32;
1560     }
1561 
1562     if (BaseOpcode->Store) {
1563       VDataIn = MI.getOperand(1).getReg();
1564       VDataTy = MRI->getType(VDataIn);
1565       NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
1566     } else {
1567       VDataOut = MI.getOperand(0).getReg();
1568       VDataTy = MRI->getType(VDataOut);
1569       NumVDataDwords = DMaskLanes;
1570 
1571       if (IsD16 && !STI.hasUnpackedD16VMem())
1572         NumVDataDwords = (DMaskLanes + 1) / 2;
1573     }
1574   }
1575 
1576   // Optimize _L to _LZ when _L is zero
1577   if (LZMappingInfo) {
1578     // The legalizer replaced the register with an immediate 0 if we need to
1579     // change the opcode.
1580     const MachineOperand &Lod = MI.getOperand(ArgOffset + Intr->LodIndex);
1581     if (Lod.isImm()) {
1582       assert(Lod.getImm() == 0);
1583       IntrOpcode = LZMappingInfo->LZ;  // set new opcode to _lz variant of _l
1584     }
1585   }
1586 
1587   // Optimize _mip away, when 'lod' is zero
1588   if (MIPMappingInfo) {
1589     const MachineOperand &Lod = MI.getOperand(ArgOffset + Intr->MipIndex);
1590     if (Lod.isImm()) {
1591       assert(Lod.getImm() == 0);
1592       IntrOpcode = MIPMappingInfo->NONMIP;  // set new opcode to variant without _mip
1593     }
1594   }
1595 
1596   // Set G16 opcode
1597   if (IsG16 && !IsA16) {
1598     const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
1599         AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode);
1600     assert(G16MappingInfo);
1601     IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16
1602   }
1603 
1604   // TODO: Check this in verifier.
1605   assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");
1606 
1607   bool GLC = false;
1608   bool SLC = false;
1609   bool DLC = false;
1610   bool SCC = false;
1611   if (BaseOpcode->Atomic) {
1612     GLC = true; // TODO no-return optimization
1613     if (!parseCachePolicy(
1614             MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm(), nullptr,
1615             &SLC, IsGFX10Plus ? &DLC : nullptr, &SCC))
1616       return false;
1617   } else {
1618     if (!parseCachePolicy(
1619             MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm(), &GLC,
1620             &SLC, IsGFX10Plus ? &DLC : nullptr, &SCC))
1621       return false;
1622   }
1623 
1624   int NumVAddrRegs = 0;
1625   int NumVAddrDwords = 0;
1626   for (unsigned I = Intr->VAddrStart; I < Intr->VAddrEnd; I++) {
1627     // Skip the $noregs and 0s inserted during legalization.
1628     MachineOperand &AddrOp = MI.getOperand(ArgOffset + I);
1629     if (!AddrOp.isReg())
1630       continue; // XXX - Break?
1631 
1632     Register Addr = AddrOp.getReg();
1633     if (!Addr)
1634       break;
1635 
1636     ++NumVAddrRegs;
1637     NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32;
1638   }
1639 
1640   // The legalizer preprocessed the intrinsic arguments. If we aren't using
1641   // NSA, these should have beeen packed into a single value in the first
1642   // address register
1643   const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs;
1644   if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) {
1645     LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n");
1646     return false;
1647   }
1648 
1649   if (IsTexFail)
1650     ++NumVDataDwords;
1651 
1652   int Opcode = -1;
1653   if (IsGFX10Plus) {
1654     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1655                                    UseNSA ? AMDGPU::MIMGEncGfx10NSA
1656                                           : AMDGPU::MIMGEncGfx10Default,
1657                                    NumVDataDwords, NumVAddrDwords);
1658   } else {
1659     if (STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
1660       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
1661                                      NumVDataDwords, NumVAddrDwords);
1662     if (Opcode == -1)
1663       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
1664                                      NumVDataDwords, NumVAddrDwords);
1665   }
1666   assert(Opcode != -1);
1667 
1668   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode))
1669     .cloneMemRefs(MI);
1670 
1671   if (VDataOut) {
1672     if (BaseOpcode->AtomicX2) {
1673       const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64;
1674 
1675       Register TmpReg = MRI->createVirtualRegister(
1676         Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
1677       unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
1678 
1679       MIB.addDef(TmpReg);
1680       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut)
1681         .addReg(TmpReg, RegState::Kill, SubReg);
1682 
1683     } else {
1684       MIB.addDef(VDataOut); // vdata output
1685     }
1686   }
1687 
1688   if (VDataIn)
1689     MIB.addReg(VDataIn); // vdata input
1690 
1691   for (int I = 0; I != NumVAddrRegs; ++I) {
1692     MachineOperand &SrcOp = MI.getOperand(ArgOffset + Intr->VAddrStart + I);
1693     if (SrcOp.isReg()) {
1694       assert(SrcOp.getReg() != 0);
1695       MIB.addReg(SrcOp.getReg());
1696     }
1697   }
1698 
1699   MIB.addReg(MI.getOperand(ArgOffset + Intr->RsrcIndex).getReg());
1700   if (BaseOpcode->Sampler)
1701     MIB.addReg(MI.getOperand(ArgOffset + Intr->SampIndex).getReg());
1702 
1703   MIB.addImm(DMask); // dmask
1704 
1705   if (IsGFX10Plus)
1706     MIB.addImm(DimInfo->Encoding);
1707   MIB.addImm(Unorm);
1708   if (IsGFX10Plus)
1709     MIB.addImm(DLC);
1710   else
1711     MIB.addImm(SCC);
1712 
1713   MIB.addImm(GLC);
1714   MIB.addImm(SLC);
1715   MIB.addImm(IsA16 &&  // a16 or r128
1716              STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0);
1717   if (IsGFX10Plus)
1718     MIB.addImm(IsA16 ? -1 : 0);
1719 
1720   MIB.addImm(TFE); // tfe
1721   MIB.addImm(LWE); // lwe
1722   if (!IsGFX10Plus)
1723     MIB.addImm(DimInfo->DA ? -1 : 0);
1724   if (BaseOpcode->HasD16)
1725     MIB.addImm(IsD16 ? -1 : 0);
1726 
1727   MI.eraseFromParent();
1728   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1729 }
1730 
1731 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
1732     MachineInstr &I) const {
1733   unsigned IntrinsicID = I.getIntrinsicID();
1734   switch (IntrinsicID) {
1735   case Intrinsic::amdgcn_end_cf:
1736     return selectEndCfIntrinsic(I);
1737   case Intrinsic::amdgcn_ds_ordered_add:
1738   case Intrinsic::amdgcn_ds_ordered_swap:
1739     return selectDSOrderedIntrinsic(I, IntrinsicID);
1740   case Intrinsic::amdgcn_ds_gws_init:
1741   case Intrinsic::amdgcn_ds_gws_barrier:
1742   case Intrinsic::amdgcn_ds_gws_sema_v:
1743   case Intrinsic::amdgcn_ds_gws_sema_br:
1744   case Intrinsic::amdgcn_ds_gws_sema_p:
1745   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1746     return selectDSGWSIntrinsic(I, IntrinsicID);
1747   case Intrinsic::amdgcn_ds_append:
1748     return selectDSAppendConsume(I, true);
1749   case Intrinsic::amdgcn_ds_consume:
1750     return selectDSAppendConsume(I, false);
1751   case Intrinsic::amdgcn_s_barrier:
1752     return selectSBarrier(I);
1753   case Intrinsic::amdgcn_global_atomic_fadd:
1754     return selectGlobalAtomicFaddIntrinsic(I);
1755   default: {
1756     return selectImpl(I, *CoverageInfo);
1757   }
1758   }
1759 }
1760 
1761 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
1762   if (selectImpl(I, *CoverageInfo))
1763     return true;
1764 
1765   MachineBasicBlock *BB = I.getParent();
1766   const DebugLoc &DL = I.getDebugLoc();
1767 
1768   Register DstReg = I.getOperand(0).getReg();
1769   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
1770   assert(Size <= 32 || Size == 64);
1771   const MachineOperand &CCOp = I.getOperand(1);
1772   Register CCReg = CCOp.getReg();
1773   if (!isVCC(CCReg, *MRI)) {
1774     unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
1775                                          AMDGPU::S_CSELECT_B32;
1776     MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
1777             .addReg(CCReg);
1778 
1779     // The generic constrainSelectedInstRegOperands doesn't work for the scc register
1780     // bank, because it does not cover the register class that we used to represent
1781     // for it.  So we need to manually set the register class here.
1782     if (!MRI->getRegClassOrNull(CCReg))
1783         MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
1784     MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
1785             .add(I.getOperand(2))
1786             .add(I.getOperand(3));
1787 
1788     bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI) |
1789                constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
1790     I.eraseFromParent();
1791     return Ret;
1792   }
1793 
1794   // Wide VGPR select should have been split in RegBankSelect.
1795   if (Size > 32)
1796     return false;
1797 
1798   MachineInstr *Select =
1799       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1800               .addImm(0)
1801               .add(I.getOperand(3))
1802               .addImm(0)
1803               .add(I.getOperand(2))
1804               .add(I.getOperand(1));
1805 
1806   bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1807   I.eraseFromParent();
1808   return Ret;
1809 }
1810 
1811 static int sizeToSubRegIndex(unsigned Size) {
1812   switch (Size) {
1813   case 32:
1814     return AMDGPU::sub0;
1815   case 64:
1816     return AMDGPU::sub0_sub1;
1817   case 96:
1818     return AMDGPU::sub0_sub1_sub2;
1819   case 128:
1820     return AMDGPU::sub0_sub1_sub2_sub3;
1821   case 256:
1822     return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
1823   default:
1824     if (Size < 32)
1825       return AMDGPU::sub0;
1826     if (Size > 256)
1827       return -1;
1828     return sizeToSubRegIndex(PowerOf2Ceil(Size));
1829   }
1830 }
1831 
1832 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
1833   Register DstReg = I.getOperand(0).getReg();
1834   Register SrcReg = I.getOperand(1).getReg();
1835   const LLT DstTy = MRI->getType(DstReg);
1836   const LLT SrcTy = MRI->getType(SrcReg);
1837   const LLT S1 = LLT::scalar(1);
1838 
1839   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1840   const RegisterBank *DstRB;
1841   if (DstTy == S1) {
1842     // This is a special case. We don't treat s1 for legalization artifacts as
1843     // vcc booleans.
1844     DstRB = SrcRB;
1845   } else {
1846     DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1847     if (SrcRB != DstRB)
1848       return false;
1849   }
1850 
1851   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
1852 
1853   unsigned DstSize = DstTy.getSizeInBits();
1854   unsigned SrcSize = SrcTy.getSizeInBits();
1855 
1856   const TargetRegisterClass *SrcRC
1857     = TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB, *MRI);
1858   const TargetRegisterClass *DstRC
1859     = TRI.getRegClassForSizeOnBank(DstSize, *DstRB, *MRI);
1860   if (!SrcRC || !DstRC)
1861     return false;
1862 
1863   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1864       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1865     LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
1866     return false;
1867   }
1868 
1869   if (DstTy == LLT::vector(2, 16) && SrcTy == LLT::vector(2, 32)) {
1870     MachineBasicBlock *MBB = I.getParent();
1871     const DebugLoc &DL = I.getDebugLoc();
1872 
1873     Register LoReg = MRI->createVirtualRegister(DstRC);
1874     Register HiReg = MRI->createVirtualRegister(DstRC);
1875     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg)
1876       .addReg(SrcReg, 0, AMDGPU::sub0);
1877     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg)
1878       .addReg(SrcReg, 0, AMDGPU::sub1);
1879 
1880     if (IsVALU && STI.hasSDWA()) {
1881       // Write the low 16-bits of the high element into the high 16-bits of the
1882       // low element.
1883       MachineInstr *MovSDWA =
1884         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
1885         .addImm(0)                             // $src0_modifiers
1886         .addReg(HiReg)                         // $src0
1887         .addImm(0)                             // $clamp
1888         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
1889         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
1890         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
1891         .addReg(LoReg, RegState::Implicit);
1892       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
1893     } else {
1894       Register TmpReg0 = MRI->createVirtualRegister(DstRC);
1895       Register TmpReg1 = MRI->createVirtualRegister(DstRC);
1896       Register ImmReg = MRI->createVirtualRegister(DstRC);
1897       if (IsVALU) {
1898         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0)
1899           .addImm(16)
1900           .addReg(HiReg);
1901       } else {
1902         BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0)
1903           .addReg(HiReg)
1904           .addImm(16);
1905       }
1906 
1907       unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1908       unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
1909       unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32;
1910 
1911       BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg)
1912         .addImm(0xffff);
1913       BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1)
1914         .addReg(LoReg)
1915         .addReg(ImmReg);
1916       BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg)
1917         .addReg(TmpReg0)
1918         .addReg(TmpReg1);
1919     }
1920 
1921     I.eraseFromParent();
1922     return true;
1923   }
1924 
1925   if (!DstTy.isScalar())
1926     return false;
1927 
1928   if (SrcSize > 32) {
1929     int SubRegIdx = sizeToSubRegIndex(DstSize);
1930     if (SubRegIdx == -1)
1931       return false;
1932 
1933     // Deal with weird cases where the class only partially supports the subreg
1934     // index.
1935     const TargetRegisterClass *SrcWithSubRC
1936       = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
1937     if (!SrcWithSubRC)
1938       return false;
1939 
1940     if (SrcWithSubRC != SrcRC) {
1941       if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI))
1942         return false;
1943     }
1944 
1945     I.getOperand(1).setSubReg(SubRegIdx);
1946   }
1947 
1948   I.setDesc(TII.get(TargetOpcode::COPY));
1949   return true;
1950 }
1951 
1952 /// \returns true if a bitmask for \p Size bits will be an inline immediate.
1953 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
1954   Mask = maskTrailingOnes<unsigned>(Size);
1955   int SignedMask = static_cast<int>(Mask);
1956   return SignedMask >= -16 && SignedMask <= 64;
1957 }
1958 
1959 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
1960 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
1961   Register Reg, const MachineRegisterInfo &MRI,
1962   const TargetRegisterInfo &TRI) const {
1963   const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
1964   if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
1965     return RB;
1966 
1967   // Ignore the type, since we don't use vcc in artifacts.
1968   if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
1969     return &RBI.getRegBankFromRegClass(*RC, LLT());
1970   return nullptr;
1971 }
1972 
1973 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
1974   bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG;
1975   bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg;
1976   const DebugLoc &DL = I.getDebugLoc();
1977   MachineBasicBlock &MBB = *I.getParent();
1978   const Register DstReg = I.getOperand(0).getReg();
1979   const Register SrcReg = I.getOperand(1).getReg();
1980 
1981   const LLT DstTy = MRI->getType(DstReg);
1982   const LLT SrcTy = MRI->getType(SrcReg);
1983   const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ?
1984     I.getOperand(2).getImm() : SrcTy.getSizeInBits();
1985   const unsigned DstSize = DstTy.getSizeInBits();
1986   if (!DstTy.isScalar())
1987     return false;
1988 
1989   // Artifact casts should never use vcc.
1990   const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
1991 
1992   // FIXME: This should probably be illegal and split earlier.
1993   if (I.getOpcode() == AMDGPU::G_ANYEXT) {
1994     if (DstSize <= 32)
1995       return selectCOPY(I);
1996 
1997     const TargetRegisterClass *SrcRC =
1998         TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank, *MRI);
1999     const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
2000     const TargetRegisterClass *DstRC =
2001         TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
2002 
2003     Register UndefReg = MRI->createVirtualRegister(SrcRC);
2004     BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2005     BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2006       .addReg(SrcReg)
2007       .addImm(AMDGPU::sub0)
2008       .addReg(UndefReg)
2009       .addImm(AMDGPU::sub1);
2010     I.eraseFromParent();
2011 
2012     return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) &&
2013            RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI);
2014   }
2015 
2016   if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
2017     // 64-bit should have been split up in RegBankSelect
2018 
2019     // Try to use an and with a mask if it will save code size.
2020     unsigned Mask;
2021     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2022       MachineInstr *ExtI =
2023       BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
2024         .addImm(Mask)
2025         .addReg(SrcReg);
2026       I.eraseFromParent();
2027       return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2028     }
2029 
2030     const unsigned BFE = Signed ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
2031     MachineInstr *ExtI =
2032       BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
2033       .addReg(SrcReg)
2034       .addImm(0) // Offset
2035       .addImm(SrcSize); // Width
2036     I.eraseFromParent();
2037     return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2038   }
2039 
2040   if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
2041     const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ?
2042       AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass;
2043     if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI))
2044       return false;
2045 
2046     if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
2047       const unsigned SextOpc = SrcSize == 8 ?
2048         AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
2049       BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
2050         .addReg(SrcReg);
2051       I.eraseFromParent();
2052       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2053     }
2054 
2055     const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
2056     const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
2057 
2058     // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16]= width.
2059     if (DstSize > 32 && (SrcSize <= 32 || InReg)) {
2060       // We need a 64-bit register source, but the high bits don't matter.
2061       Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
2062       Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2063       unsigned SubReg = InReg ? AMDGPU::sub0 : 0;
2064 
2065       BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2066       BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
2067         .addReg(SrcReg, 0, SubReg)
2068         .addImm(AMDGPU::sub0)
2069         .addReg(UndefReg)
2070         .addImm(AMDGPU::sub1);
2071 
2072       BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
2073         .addReg(ExtReg)
2074         .addImm(SrcSize << 16);
2075 
2076       I.eraseFromParent();
2077       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
2078     }
2079 
2080     unsigned Mask;
2081     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2082       BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
2083         .addReg(SrcReg)
2084         .addImm(Mask);
2085     } else {
2086       BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
2087         .addReg(SrcReg)
2088         .addImm(SrcSize << 16);
2089     }
2090 
2091     I.eraseFromParent();
2092     return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2093   }
2094 
2095   return false;
2096 }
2097 
2098 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
2099   MachineBasicBlock *BB = I.getParent();
2100   MachineOperand &ImmOp = I.getOperand(1);
2101   Register DstReg = I.getOperand(0).getReg();
2102   unsigned Size = MRI->getType(DstReg).getSizeInBits();
2103 
2104   // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
2105   if (ImmOp.isFPImm()) {
2106     const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
2107     ImmOp.ChangeToImmediate(Imm.getZExtValue());
2108   } else if (ImmOp.isCImm()) {
2109     ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue());
2110   } else {
2111     llvm_unreachable("Not supported by g_constants");
2112   }
2113 
2114   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2115   const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID;
2116 
2117   unsigned Opcode;
2118   if (DstRB->getID() == AMDGPU::VCCRegBankID) {
2119     Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
2120   } else {
2121     Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
2122 
2123     // We should never produce s1 values on banks other than VCC. If the user of
2124     // this already constrained the register, we may incorrectly think it's VCC
2125     // if it wasn't originally.
2126     if (Size == 1)
2127       return false;
2128   }
2129 
2130   if (Size != 64) {
2131     I.setDesc(TII.get(Opcode));
2132     I.addImplicitDefUseOperands(*MF);
2133     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2134   }
2135 
2136   const DebugLoc &DL = I.getDebugLoc();
2137 
2138   APInt Imm(Size, I.getOperand(1).getImm());
2139 
2140   MachineInstr *ResInst;
2141   if (IsSgpr && TII.isInlineConstant(Imm)) {
2142     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
2143       .addImm(I.getOperand(1).getImm());
2144   } else {
2145     const TargetRegisterClass *RC = IsSgpr ?
2146       &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
2147     Register LoReg = MRI->createVirtualRegister(RC);
2148     Register HiReg = MRI->createVirtualRegister(RC);
2149 
2150     BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
2151       .addImm(Imm.trunc(32).getZExtValue());
2152 
2153     BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
2154       .addImm(Imm.ashr(32).getZExtValue());
2155 
2156     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2157       .addReg(LoReg)
2158       .addImm(AMDGPU::sub0)
2159       .addReg(HiReg)
2160       .addImm(AMDGPU::sub1);
2161   }
2162 
2163   // We can't call constrainSelectedInstRegOperands here, because it doesn't
2164   // work for target independent opcodes
2165   I.eraseFromParent();
2166   const TargetRegisterClass *DstRC =
2167     TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
2168   if (!DstRC)
2169     return true;
2170   return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
2171 }
2172 
2173 bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
2174   // Only manually handle the f64 SGPR case.
2175   //
2176   // FIXME: This is a workaround for 2.5 different tablegen problems. Because
2177   // the bit ops theoretically have a second result due to the implicit def of
2178   // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing
2179   // that is easy by disabling the check. The result works, but uses a
2180   // nonsensical sreg32orlds_and_sreg_1 regclass.
2181   //
2182   // The DAG emitter is more problematic, and incorrectly adds both S_XOR_B32 to
2183   // the variadic REG_SEQUENCE operands.
2184 
2185   Register Dst = MI.getOperand(0).getReg();
2186   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2187   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2188       MRI->getType(Dst) != LLT::scalar(64))
2189     return false;
2190 
2191   Register Src = MI.getOperand(1).getReg();
2192   MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI);
2193   if (Fabs)
2194     Src = Fabs->getOperand(1).getReg();
2195 
2196   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2197       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2198     return false;
2199 
2200   MachineBasicBlock *BB = MI.getParent();
2201   const DebugLoc &DL = MI.getDebugLoc();
2202   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2203   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2204   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2205   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2206 
2207   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2208     .addReg(Src, 0, AMDGPU::sub0);
2209   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2210     .addReg(Src, 0, AMDGPU::sub1);
2211   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2212     .addImm(0x80000000);
2213 
2214   // Set or toggle sign bit.
2215   unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
2216   BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
2217     .addReg(HiReg)
2218     .addReg(ConstReg);
2219   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2220     .addReg(LoReg)
2221     .addImm(AMDGPU::sub0)
2222     .addReg(OpReg)
2223     .addImm(AMDGPU::sub1);
2224   MI.eraseFromParent();
2225   return true;
2226 }
2227 
2228 // FIXME: This is a workaround for the same tablegen problems as G_FNEG
2229 bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {
2230   Register Dst = MI.getOperand(0).getReg();
2231   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2232   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2233       MRI->getType(Dst) != LLT::scalar(64))
2234     return false;
2235 
2236   Register Src = MI.getOperand(1).getReg();
2237   MachineBasicBlock *BB = MI.getParent();
2238   const DebugLoc &DL = MI.getDebugLoc();
2239   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2240   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2241   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2242   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2243 
2244   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2245       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2246     return false;
2247 
2248   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2249     .addReg(Src, 0, AMDGPU::sub0);
2250   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2251     .addReg(Src, 0, AMDGPU::sub1);
2252   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2253     .addImm(0x7fffffff);
2254 
2255   // Clear sign bit.
2256   // TODO: Should this used S_BITSET0_*?
2257   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg)
2258     .addReg(HiReg)
2259     .addReg(ConstReg);
2260   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2261     .addReg(LoReg)
2262     .addImm(AMDGPU::sub0)
2263     .addReg(OpReg)
2264     .addImm(AMDGPU::sub1);
2265 
2266   MI.eraseFromParent();
2267   return true;
2268 }
2269 
2270 static bool isConstant(const MachineInstr &MI) {
2271   return MI.getOpcode() == TargetOpcode::G_CONSTANT;
2272 }
2273 
2274 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
2275     const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
2276 
2277   const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
2278 
2279   assert(PtrMI);
2280 
2281   if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
2282     return;
2283 
2284   GEPInfo GEPInfo(*PtrMI);
2285 
2286   for (unsigned i = 1; i != 3; ++i) {
2287     const MachineOperand &GEPOp = PtrMI->getOperand(i);
2288     const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
2289     assert(OpDef);
2290     if (i == 2 && isConstant(*OpDef)) {
2291       // TODO: Could handle constant base + variable offset, but a combine
2292       // probably should have commuted it.
2293       assert(GEPInfo.Imm == 0);
2294       GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
2295       continue;
2296     }
2297     const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
2298     if (OpBank->getID() == AMDGPU::SGPRRegBankID)
2299       GEPInfo.SgprParts.push_back(GEPOp.getReg());
2300     else
2301       GEPInfo.VgprParts.push_back(GEPOp.getReg());
2302   }
2303 
2304   AddrInfo.push_back(GEPInfo);
2305   getAddrModeInfo(*PtrMI, MRI, AddrInfo);
2306 }
2307 
2308 bool AMDGPUInstructionSelector::isSGPR(Register Reg) const {
2309   return RBI.getRegBank(Reg, *MRI, TRI)->getID() == AMDGPU::SGPRRegBankID;
2310 }
2311 
2312 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
2313   if (!MI.hasOneMemOperand())
2314     return false;
2315 
2316   const MachineMemOperand *MMO = *MI.memoperands_begin();
2317   const Value *Ptr = MMO->getValue();
2318 
2319   // UndefValue means this is a load of a kernel input.  These are uniform.
2320   // Sometimes LDS instructions have constant pointers.
2321   // If Ptr is null, then that means this mem operand contains a
2322   // PseudoSourceValue like GOT.
2323   if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
2324       isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
2325     return true;
2326 
2327   if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
2328     return true;
2329 
2330   const Instruction *I = dyn_cast<Instruction>(Ptr);
2331   return I && I->getMetadata("amdgpu.uniform");
2332 }
2333 
2334 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
2335   for (const GEPInfo &GEPInfo : AddrInfo) {
2336     if (!GEPInfo.VgprParts.empty())
2337       return true;
2338   }
2339   return false;
2340 }
2341 
2342 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
2343   const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2344   unsigned AS = PtrTy.getAddressSpace();
2345   if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
2346       STI.ldsRequiresM0Init()) {
2347     MachineBasicBlock *BB = I.getParent();
2348 
2349     // If DS instructions require M0 initializtion, insert it before selecting.
2350     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2351       .addImm(-1);
2352   }
2353 }
2354 
2355 bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW(
2356   MachineInstr &I) const {
2357   initM0(I);
2358   return selectImpl(I, *CoverageInfo);
2359 }
2360 
2361 // TODO: No rtn optimization.
2362 bool AMDGPUInstructionSelector::selectG_AMDGPU_ATOMIC_CMPXCHG(
2363   MachineInstr &MI) const {
2364   Register PtrReg = MI.getOperand(1).getReg();
2365   const LLT PtrTy = MRI->getType(PtrReg);
2366   if (PtrTy.getAddressSpace() == AMDGPUAS::FLAT_ADDRESS ||
2367       STI.useFlatForGlobal())
2368     return selectImpl(MI, *CoverageInfo);
2369 
2370   Register DstReg = MI.getOperand(0).getReg();
2371   const LLT Ty = MRI->getType(DstReg);
2372   const bool Is64 = Ty.getSizeInBits() == 64;
2373   const unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
2374   Register TmpReg = MRI->createVirtualRegister(
2375     Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
2376 
2377   const DebugLoc &DL = MI.getDebugLoc();
2378   MachineBasicBlock *BB = MI.getParent();
2379 
2380   Register VAddr, RSrcReg, SOffset;
2381   int64_t Offset = 0;
2382 
2383   unsigned Opcode;
2384   if (selectMUBUFOffsetImpl(MI.getOperand(1), RSrcReg, SOffset, Offset)) {
2385     Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_OFFSET_RTN :
2386                              AMDGPU::BUFFER_ATOMIC_CMPSWAP_OFFSET_RTN;
2387   } else if (selectMUBUFAddr64Impl(MI.getOperand(1), VAddr,
2388                                    RSrcReg, SOffset, Offset)) {
2389     Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_ADDR64_RTN :
2390                     AMDGPU::BUFFER_ATOMIC_CMPSWAP_ADDR64_RTN;
2391   } else
2392     return selectImpl(MI, *CoverageInfo);
2393 
2394   auto MIB = BuildMI(*BB, &MI, DL, TII.get(Opcode), TmpReg)
2395     .addReg(MI.getOperand(2).getReg());
2396 
2397   if (VAddr)
2398     MIB.addReg(VAddr);
2399 
2400   MIB.addReg(RSrcReg);
2401   if (SOffset)
2402     MIB.addReg(SOffset);
2403   else
2404     MIB.addImm(0);
2405 
2406   MIB.addImm(Offset);
2407   MIB.addImm(1); // glc
2408   MIB.addImm(0); // slc
2409   MIB.cloneMemRefs(MI);
2410 
2411   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), DstReg)
2412     .addReg(TmpReg, RegState::Kill, SubReg);
2413 
2414   MI.eraseFromParent();
2415 
2416   MRI->setRegClass(
2417     DstReg, Is64 ? &AMDGPU::VReg_64RegClass : &AMDGPU::VGPR_32RegClass);
2418   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2419 }
2420 
2421 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
2422   MachineBasicBlock *BB = I.getParent();
2423   MachineOperand &CondOp = I.getOperand(0);
2424   Register CondReg = CondOp.getReg();
2425   const DebugLoc &DL = I.getDebugLoc();
2426 
2427   unsigned BrOpcode;
2428   Register CondPhysReg;
2429   const TargetRegisterClass *ConstrainRC;
2430 
2431   // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
2432   // whether the branch is uniform when selecting the instruction. In
2433   // GlobalISel, we should push that decision into RegBankSelect. Assume for now
2434   // RegBankSelect knows what it's doing if the branch condition is scc, even
2435   // though it currently does not.
2436   if (!isVCC(CondReg, *MRI)) {
2437     if (MRI->getType(CondReg) != LLT::scalar(32))
2438       return false;
2439 
2440     CondPhysReg = AMDGPU::SCC;
2441     BrOpcode = AMDGPU::S_CBRANCH_SCC1;
2442     ConstrainRC = &AMDGPU::SReg_32RegClass;
2443   } else {
2444     // FIXME: Do we have to insert an and with exec here, like in SelectionDAG?
2445     // We sort of know that a VCC producer based on the register bank, that ands
2446     // inactive lanes with 0. What if there was a logical operation with vcc
2447     // producers in different blocks/with different exec masks?
2448     // FIXME: Should scc->vcc copies and with exec?
2449     CondPhysReg = TRI.getVCC();
2450     BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
2451     ConstrainRC = TRI.getBoolRC();
2452   }
2453 
2454   if (!MRI->getRegClassOrNull(CondReg))
2455     MRI->setRegClass(CondReg, ConstrainRC);
2456 
2457   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
2458     .addReg(CondReg);
2459   BuildMI(*BB, &I, DL, TII.get(BrOpcode))
2460     .addMBB(I.getOperand(1).getMBB());
2461 
2462   I.eraseFromParent();
2463   return true;
2464 }
2465 
2466 bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE(
2467   MachineInstr &I) const {
2468   Register DstReg = I.getOperand(0).getReg();
2469   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2470   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2471   I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
2472   if (IsVGPR)
2473     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
2474 
2475   return RBI.constrainGenericRegister(
2476     DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
2477 }
2478 
2479 bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
2480   Register DstReg = I.getOperand(0).getReg();
2481   Register SrcReg = I.getOperand(1).getReg();
2482   Register MaskReg = I.getOperand(2).getReg();
2483   LLT Ty = MRI->getType(DstReg);
2484   LLT MaskTy = MRI->getType(MaskReg);
2485 
2486   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2487   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2488   const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI);
2489   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2490   if (DstRB != SrcRB) // Should only happen for hand written MIR.
2491     return false;
2492 
2493   unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
2494   const TargetRegisterClass &RegRC
2495     = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2496 
2497   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB,
2498                                                                   *MRI);
2499   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB,
2500                                                                   *MRI);
2501   const TargetRegisterClass *MaskRC =
2502       TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB, *MRI);
2503 
2504   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2505       !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2506       !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI))
2507     return false;
2508 
2509   MachineBasicBlock *BB = I.getParent();
2510   const DebugLoc &DL = I.getDebugLoc();
2511   if (Ty.getSizeInBits() == 32) {
2512     assert(MaskTy.getSizeInBits() == 32 &&
2513            "ptrmask should have been narrowed during legalize");
2514 
2515     BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
2516       .addReg(SrcReg)
2517       .addReg(MaskReg);
2518     I.eraseFromParent();
2519     return true;
2520   }
2521 
2522   Register HiReg = MRI->createVirtualRegister(&RegRC);
2523   Register LoReg = MRI->createVirtualRegister(&RegRC);
2524 
2525   // Extract the subregisters from the source pointer.
2526   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
2527     .addReg(SrcReg, 0, AMDGPU::sub0);
2528   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
2529     .addReg(SrcReg, 0, AMDGPU::sub1);
2530 
2531   Register MaskedLo, MaskedHi;
2532 
2533   // Try to avoid emitting a bit operation when we only need to touch half of
2534   // the 64-bit pointer.
2535   APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zextOrSelf(64);
2536 
2537   const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
2538   const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
2539   if ((MaskOnes & MaskLo32) == MaskLo32) {
2540     // If all the bits in the low half are 1, we only need a copy for it.
2541     MaskedLo = LoReg;
2542   } else {
2543     // Extract the mask subregister and apply the and.
2544     Register MaskLo = MRI->createVirtualRegister(&RegRC);
2545     MaskedLo = MRI->createVirtualRegister(&RegRC);
2546 
2547     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo)
2548       .addReg(MaskReg, 0, AMDGPU::sub0);
2549     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo)
2550       .addReg(LoReg)
2551       .addReg(MaskLo);
2552   }
2553 
2554   if ((MaskOnes & MaskHi32) == MaskHi32) {
2555     // If all the bits in the high half are 1, we only need a copy for it.
2556     MaskedHi = HiReg;
2557   } else {
2558     Register MaskHi = MRI->createVirtualRegister(&RegRC);
2559     MaskedHi = MRI->createVirtualRegister(&RegRC);
2560 
2561     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi)
2562       .addReg(MaskReg, 0, AMDGPU::sub1);
2563     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi)
2564       .addReg(HiReg)
2565       .addReg(MaskHi);
2566   }
2567 
2568   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2569     .addReg(MaskedLo)
2570     .addImm(AMDGPU::sub0)
2571     .addReg(MaskedHi)
2572     .addImm(AMDGPU::sub1);
2573   I.eraseFromParent();
2574   return true;
2575 }
2576 
2577 /// Return the register to use for the index value, and the subregister to use
2578 /// for the indirectly accessed register.
2579 static std::pair<Register, unsigned>
2580 computeIndirectRegIndex(MachineRegisterInfo &MRI,
2581                         const SIRegisterInfo &TRI,
2582                         const TargetRegisterClass *SuperRC,
2583                         Register IdxReg,
2584                         unsigned EltSize) {
2585   Register IdxBaseReg;
2586   int Offset;
2587 
2588   std::tie(IdxBaseReg, Offset) = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg);
2589   if (IdxBaseReg == AMDGPU::NoRegister) {
2590     // This will happen if the index is a known constant. This should ordinarily
2591     // be legalized out, but handle it as a register just in case.
2592     assert(Offset == 0);
2593     IdxBaseReg = IdxReg;
2594   }
2595 
2596   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);
2597 
2598   // Skip out of bounds offsets, or else we would end up using an undefined
2599   // register.
2600   if (static_cast<unsigned>(Offset) >= SubRegs.size())
2601     return std::make_pair(IdxReg, SubRegs[0]);
2602   return std::make_pair(IdxBaseReg, SubRegs[Offset]);
2603 }
2604 
2605 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
2606   MachineInstr &MI) const {
2607   Register DstReg = MI.getOperand(0).getReg();
2608   Register SrcReg = MI.getOperand(1).getReg();
2609   Register IdxReg = MI.getOperand(2).getReg();
2610 
2611   LLT DstTy = MRI->getType(DstReg);
2612   LLT SrcTy = MRI->getType(SrcReg);
2613 
2614   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2615   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2616   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2617 
2618   // The index must be scalar. If it wasn't RegBankSelect should have moved this
2619   // into a waterfall loop.
2620   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2621     return false;
2622 
2623   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB,
2624                                                                   *MRI);
2625   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(DstTy, *DstRB,
2626                                                                   *MRI);
2627   if (!SrcRC || !DstRC)
2628     return false;
2629   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2630       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2631       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2632     return false;
2633 
2634   MachineBasicBlock *BB = MI.getParent();
2635   const DebugLoc &DL = MI.getDebugLoc();
2636   const bool Is64 = DstTy.getSizeInBits() == 64;
2637 
2638   unsigned SubReg;
2639   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg,
2640                                                      DstTy.getSizeInBits() / 8);
2641 
2642   if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
2643     if (DstTy.getSizeInBits() != 32 && !Is64)
2644       return false;
2645 
2646     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2647       .addReg(IdxReg);
2648 
2649     unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
2650     BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
2651       .addReg(SrcReg, 0, SubReg)
2652       .addReg(SrcReg, RegState::Implicit);
2653     MI.eraseFromParent();
2654     return true;
2655   }
2656 
2657   if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
2658     return false;
2659 
2660   if (!STI.useVGPRIndexMode()) {
2661     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2662       .addReg(IdxReg);
2663     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
2664       .addReg(SrcReg, 0, SubReg)
2665       .addReg(SrcReg, RegState::Implicit);
2666     MI.eraseFromParent();
2667     return true;
2668   }
2669 
2670   const MCInstrDesc &GPRIDXDesc =
2671       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*SrcRC), true);
2672   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2673       .addReg(SrcReg)
2674       .addReg(IdxReg)
2675       .addImm(SubReg);
2676 
2677   MI.eraseFromParent();
2678   return true;
2679 }
2680 
2681 // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
2682 bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
2683   MachineInstr &MI) const {
2684   Register DstReg = MI.getOperand(0).getReg();
2685   Register VecReg = MI.getOperand(1).getReg();
2686   Register ValReg = MI.getOperand(2).getReg();
2687   Register IdxReg = MI.getOperand(3).getReg();
2688 
2689   LLT VecTy = MRI->getType(DstReg);
2690   LLT ValTy = MRI->getType(ValReg);
2691   unsigned VecSize = VecTy.getSizeInBits();
2692   unsigned ValSize = ValTy.getSizeInBits();
2693 
2694   const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
2695   const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
2696   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2697 
2698   assert(VecTy.getElementType() == ValTy);
2699 
2700   // The index must be scalar. If it wasn't RegBankSelect should have moved this
2701   // into a waterfall loop.
2702   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2703     return false;
2704 
2705   const TargetRegisterClass *VecRC = TRI.getRegClassForTypeOnBank(VecTy, *VecRB,
2706                                                                   *MRI);
2707   const TargetRegisterClass *ValRC = TRI.getRegClassForTypeOnBank(ValTy, *ValRB,
2708                                                                   *MRI);
2709 
2710   if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
2711       !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
2712       !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
2713       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2714     return false;
2715 
2716   if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
2717     return false;
2718 
2719   unsigned SubReg;
2720   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg,
2721                                                      ValSize / 8);
2722 
2723   const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
2724                          STI.useVGPRIndexMode();
2725 
2726   MachineBasicBlock *BB = MI.getParent();
2727   const DebugLoc &DL = MI.getDebugLoc();
2728 
2729   if (!IndexMode) {
2730     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2731       .addReg(IdxReg);
2732 
2733     const MCInstrDesc &RegWriteOp = TII.getIndirectRegWriteMovRelPseudo(
2734         VecSize, ValSize, VecRB->getID() == AMDGPU::SGPRRegBankID);
2735     BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
2736         .addReg(VecReg)
2737         .addReg(ValReg)
2738         .addImm(SubReg);
2739     MI.eraseFromParent();
2740     return true;
2741   }
2742 
2743   const MCInstrDesc &GPRIDXDesc =
2744       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false);
2745   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2746       .addReg(VecReg)
2747       .addReg(ValReg)
2748       .addReg(IdxReg)
2749       .addImm(SubReg);
2750 
2751   MI.eraseFromParent();
2752   return true;
2753 }
2754 
2755 static bool isZeroOrUndef(int X) {
2756   return X == 0 || X == -1;
2757 }
2758 
2759 static bool isOneOrUndef(int X) {
2760   return X == 1 || X == -1;
2761 }
2762 
2763 static bool isZeroOrOneOrUndef(int X) {
2764   return X == 0 || X == 1 || X == -1;
2765 }
2766 
2767 // Normalize a VOP3P shuffle mask to refer to the low/high half of a single
2768 // 32-bit register.
2769 static Register normalizeVOP3PMask(int NewMask[2], Register Src0, Register Src1,
2770                                    ArrayRef<int> Mask) {
2771   NewMask[0] = Mask[0];
2772   NewMask[1] = Mask[1];
2773   if (isZeroOrOneOrUndef(Mask[0]) && isZeroOrOneOrUndef(Mask[1]))
2774     return Src0;
2775 
2776   assert(NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1);
2777   assert(NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1);
2778 
2779   // Shift the mask inputs to be 0/1;
2780   NewMask[0] = NewMask[0] == -1 ? -1 : NewMask[0] - 2;
2781   NewMask[1] = NewMask[1] == -1 ? -1 : NewMask[1] - 2;
2782   return Src1;
2783 }
2784 
2785 // This is only legal with VOP3P instructions as an aid to op_sel matching.
2786 bool AMDGPUInstructionSelector::selectG_SHUFFLE_VECTOR(
2787   MachineInstr &MI) const {
2788   Register DstReg = MI.getOperand(0).getReg();
2789   Register Src0Reg = MI.getOperand(1).getReg();
2790   Register Src1Reg = MI.getOperand(2).getReg();
2791   ArrayRef<int> ShufMask = MI.getOperand(3).getShuffleMask();
2792 
2793   const LLT V2S16 = LLT::vector(2, 16);
2794   if (MRI->getType(DstReg) != V2S16 || MRI->getType(Src0Reg) != V2S16)
2795     return false;
2796 
2797   if (!AMDGPU::isLegalVOP3PShuffleMask(ShufMask))
2798     return false;
2799 
2800   assert(ShufMask.size() == 2);
2801   assert(STI.hasSDWA() && "no target has VOP3P but not SDWA");
2802 
2803   MachineBasicBlock *MBB = MI.getParent();
2804   const DebugLoc &DL = MI.getDebugLoc();
2805 
2806   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2807   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
2808   const TargetRegisterClass &RC = IsVALU ?
2809     AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2810 
2811   // Handle the degenerate case which should have folded out.
2812   if (ShufMask[0] == -1 && ShufMask[1] == -1) {
2813     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::IMPLICIT_DEF), DstReg);
2814 
2815     MI.eraseFromParent();
2816     return RBI.constrainGenericRegister(DstReg, RC, *MRI);
2817   }
2818 
2819   // A legal VOP3P mask only reads one of the sources.
2820   int Mask[2];
2821   Register SrcVec = normalizeVOP3PMask(Mask, Src0Reg, Src1Reg, ShufMask);
2822 
2823   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI) ||
2824       !RBI.constrainGenericRegister(SrcVec, RC, *MRI))
2825     return false;
2826 
2827   // TODO: This also should have been folded out
2828   if (isZeroOrUndef(Mask[0]) && isOneOrUndef(Mask[1])) {
2829     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::COPY), DstReg)
2830       .addReg(SrcVec);
2831 
2832     MI.eraseFromParent();
2833     return true;
2834   }
2835 
2836   if (Mask[0] == 1 && Mask[1] == -1) {
2837     if (IsVALU) {
2838       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
2839         .addImm(16)
2840         .addReg(SrcVec);
2841     } else {
2842       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
2843         .addReg(SrcVec)
2844         .addImm(16);
2845     }
2846   } else if (Mask[0] == -1 && Mask[1] == 0) {
2847     if (IsVALU) {
2848       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), DstReg)
2849         .addImm(16)
2850         .addReg(SrcVec);
2851     } else {
2852       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHL_B32), DstReg)
2853         .addReg(SrcVec)
2854         .addImm(16);
2855     }
2856   } else if (Mask[0] == 0 && Mask[1] == 0) {
2857     if (IsVALU) {
2858       // Write low half of the register into the high half.
2859       MachineInstr *MovSDWA =
2860         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2861         .addImm(0)                             // $src0_modifiers
2862         .addReg(SrcVec)                        // $src0
2863         .addImm(0)                             // $clamp
2864         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
2865         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2866         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
2867         .addReg(SrcVec, RegState::Implicit);
2868       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2869     } else {
2870       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2871         .addReg(SrcVec)
2872         .addReg(SrcVec);
2873     }
2874   } else if (Mask[0] == 1 && Mask[1] == 1) {
2875     if (IsVALU) {
2876       // Write high half of the register into the low half.
2877       MachineInstr *MovSDWA =
2878         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2879         .addImm(0)                             // $src0_modifiers
2880         .addReg(SrcVec)                        // $src0
2881         .addImm(0)                             // $clamp
2882         .addImm(AMDGPU::SDWA::WORD_0)          // $dst_sel
2883         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2884         .addImm(AMDGPU::SDWA::WORD_1)          // $src0_sel
2885         .addReg(SrcVec, RegState::Implicit);
2886       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2887     } else {
2888       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HH_B32_B16), DstReg)
2889         .addReg(SrcVec)
2890         .addReg(SrcVec);
2891     }
2892   } else if (Mask[0] == 1 && Mask[1] == 0) {
2893     if (IsVALU) {
2894       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_ALIGNBIT_B32_e64), DstReg)
2895         .addReg(SrcVec)
2896         .addReg(SrcVec)
2897         .addImm(16);
2898     } else {
2899       Register TmpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2900       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), TmpReg)
2901         .addReg(SrcVec)
2902         .addImm(16);
2903       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2904         .addReg(TmpReg)
2905         .addReg(SrcVec);
2906     }
2907   } else
2908     llvm_unreachable("all shuffle masks should be handled");
2909 
2910   MI.eraseFromParent();
2911   return true;
2912 }
2913 
2914 bool AMDGPUInstructionSelector::selectAMDGPU_BUFFER_ATOMIC_FADD(
2915   MachineInstr &MI) const {
2916   if (STI.hasGFX90AInsts())
2917     return selectImpl(MI, *CoverageInfo);
2918 
2919   MachineBasicBlock *MBB = MI.getParent();
2920   const DebugLoc &DL = MI.getDebugLoc();
2921 
2922   if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
2923     Function &F = MBB->getParent()->getFunction();
2924     DiagnosticInfoUnsupported
2925       NoFpRet(F, "return versions of fp atomics not supported",
2926               MI.getDebugLoc(), DS_Error);
2927     F.getContext().diagnose(NoFpRet);
2928     return false;
2929   }
2930 
2931   // FIXME: This is only needed because tablegen requires number of dst operands
2932   // in match and replace pattern to be the same. Otherwise patterns can be
2933   // exported from SDag path.
2934   MachineOperand &VDataIn = MI.getOperand(1);
2935   MachineOperand &VIndex = MI.getOperand(3);
2936   MachineOperand &VOffset = MI.getOperand(4);
2937   MachineOperand &SOffset = MI.getOperand(5);
2938   int16_t Offset = MI.getOperand(6).getImm();
2939 
2940   bool HasVOffset = !isOperandImmEqual(VOffset, 0, *MRI);
2941   bool HasVIndex = !isOperandImmEqual(VIndex, 0, *MRI);
2942 
2943   unsigned Opcode;
2944   if (HasVOffset) {
2945     Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN
2946                        : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN;
2947   } else {
2948     Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN
2949                        : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET;
2950   }
2951 
2952   if (MRI->getType(VDataIn.getReg()).isVector()) {
2953     switch (Opcode) {
2954     case AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN:
2955       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN;
2956       break;
2957     case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN:
2958       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFEN;
2959       break;
2960     case AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN:
2961       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_IDXEN;
2962       break;
2963     case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET:
2964       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFSET;
2965       break;
2966     }
2967   }
2968 
2969   auto I = BuildMI(*MBB, MI, DL, TII.get(Opcode));
2970   I.add(VDataIn);
2971 
2972   if (Opcode == AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN ||
2973       Opcode == AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN) {
2974     Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class());
2975     BuildMI(*MBB, &*I, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
2976       .addReg(VIndex.getReg())
2977       .addImm(AMDGPU::sub0)
2978       .addReg(VOffset.getReg())
2979       .addImm(AMDGPU::sub1);
2980 
2981     I.addReg(IdxReg);
2982   } else if (HasVIndex) {
2983     I.add(VIndex);
2984   } else if (HasVOffset) {
2985     I.add(VOffset);
2986   }
2987 
2988   I.add(MI.getOperand(2)); // rsrc
2989   I.add(SOffset);
2990   I.addImm(Offset);
2991   renderExtractSLC(I, MI, 7);
2992   I.cloneMemRefs(MI);
2993 
2994   MI.eraseFromParent();
2995 
2996   return true;
2997 }
2998 
2999 bool AMDGPUInstructionSelector::selectGlobalAtomicFaddIntrinsic(
3000   MachineInstr &MI) const{
3001 
3002   if (STI.hasGFX90AInsts())
3003     return selectImpl(MI, *CoverageInfo);
3004 
3005   MachineBasicBlock *MBB = MI.getParent();
3006   const DebugLoc &DL = MI.getDebugLoc();
3007 
3008   if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
3009     Function &F = MBB->getParent()->getFunction();
3010     DiagnosticInfoUnsupported
3011       NoFpRet(F, "return versions of fp atomics not supported",
3012               MI.getDebugLoc(), DS_Error);
3013     F.getContext().diagnose(NoFpRet);
3014     return false;
3015   }
3016 
3017   // FIXME: This is only needed because tablegen requires number of dst operands
3018   // in match and replace pattern to be the same. Otherwise patterns can be
3019   // exported from SDag path.
3020   auto Addr = selectFlatOffsetImpl<true>(MI.getOperand(2));
3021 
3022   Register Data = MI.getOperand(3).getReg();
3023   const unsigned Opc = MRI->getType(Data).isVector() ?
3024     AMDGPU::GLOBAL_ATOMIC_PK_ADD_F16 : AMDGPU::GLOBAL_ATOMIC_ADD_F32;
3025   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
3026     .addReg(Addr.first)
3027     .addReg(Data)
3028     .addImm(Addr.second)
3029     .addImm(0) // SLC
3030     .addImm(0) // SSCB
3031     .cloneMemRefs(MI);
3032 
3033   MI.eraseFromParent();
3034   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3035 }
3036 
3037 bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const{
3038   MI.setDesc(TII.get(MI.getOperand(1).getImm()));
3039   MI.RemoveOperand(1);
3040   MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3041   return true;
3042 }
3043 
3044 bool AMDGPUInstructionSelector::select(MachineInstr &I) {
3045   if (I.isPHI())
3046     return selectPHI(I);
3047 
3048   if (!I.isPreISelOpcode()) {
3049     if (I.isCopy())
3050       return selectCOPY(I);
3051     return true;
3052   }
3053 
3054   switch (I.getOpcode()) {
3055   case TargetOpcode::G_AND:
3056   case TargetOpcode::G_OR:
3057   case TargetOpcode::G_XOR:
3058     if (selectImpl(I, *CoverageInfo))
3059       return true;
3060     return selectG_AND_OR_XOR(I);
3061   case TargetOpcode::G_ADD:
3062   case TargetOpcode::G_SUB:
3063     if (selectImpl(I, *CoverageInfo))
3064       return true;
3065     return selectG_ADD_SUB(I);
3066   case TargetOpcode::G_UADDO:
3067   case TargetOpcode::G_USUBO:
3068   case TargetOpcode::G_UADDE:
3069   case TargetOpcode::G_USUBE:
3070     return selectG_UADDO_USUBO_UADDE_USUBE(I);
3071   case TargetOpcode::G_INTTOPTR:
3072   case TargetOpcode::G_BITCAST:
3073   case TargetOpcode::G_PTRTOINT:
3074     return selectCOPY(I);
3075   case TargetOpcode::G_CONSTANT:
3076   case TargetOpcode::G_FCONSTANT:
3077     return selectG_CONSTANT(I);
3078   case TargetOpcode::G_FNEG:
3079     if (selectImpl(I, *CoverageInfo))
3080       return true;
3081     return selectG_FNEG(I);
3082   case TargetOpcode::G_FABS:
3083     if (selectImpl(I, *CoverageInfo))
3084       return true;
3085     return selectG_FABS(I);
3086   case TargetOpcode::G_EXTRACT:
3087     return selectG_EXTRACT(I);
3088   case TargetOpcode::G_MERGE_VALUES:
3089   case TargetOpcode::G_BUILD_VECTOR:
3090   case TargetOpcode::G_CONCAT_VECTORS:
3091     return selectG_MERGE_VALUES(I);
3092   case TargetOpcode::G_UNMERGE_VALUES:
3093     return selectG_UNMERGE_VALUES(I);
3094   case TargetOpcode::G_BUILD_VECTOR_TRUNC:
3095     return selectG_BUILD_VECTOR_TRUNC(I);
3096   case TargetOpcode::G_PTR_ADD:
3097     return selectG_PTR_ADD(I);
3098   case TargetOpcode::G_IMPLICIT_DEF:
3099     return selectG_IMPLICIT_DEF(I);
3100   case TargetOpcode::G_FREEZE:
3101     return selectCOPY(I);
3102   case TargetOpcode::G_INSERT:
3103     return selectG_INSERT(I);
3104   case TargetOpcode::G_INTRINSIC:
3105     return selectG_INTRINSIC(I);
3106   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
3107     return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
3108   case TargetOpcode::G_ICMP:
3109     if (selectG_ICMP(I))
3110       return true;
3111     return selectImpl(I, *CoverageInfo);
3112   case TargetOpcode::G_LOAD:
3113   case TargetOpcode::G_STORE:
3114   case TargetOpcode::G_ATOMIC_CMPXCHG:
3115   case TargetOpcode::G_ATOMICRMW_XCHG:
3116   case TargetOpcode::G_ATOMICRMW_ADD:
3117   case TargetOpcode::G_ATOMICRMW_SUB:
3118   case TargetOpcode::G_ATOMICRMW_AND:
3119   case TargetOpcode::G_ATOMICRMW_OR:
3120   case TargetOpcode::G_ATOMICRMW_XOR:
3121   case TargetOpcode::G_ATOMICRMW_MIN:
3122   case TargetOpcode::G_ATOMICRMW_MAX:
3123   case TargetOpcode::G_ATOMICRMW_UMIN:
3124   case TargetOpcode::G_ATOMICRMW_UMAX:
3125   case TargetOpcode::G_ATOMICRMW_FADD:
3126   case AMDGPU::G_AMDGPU_ATOMIC_INC:
3127   case AMDGPU::G_AMDGPU_ATOMIC_DEC:
3128   case AMDGPU::G_AMDGPU_ATOMIC_FMIN:
3129   case AMDGPU::G_AMDGPU_ATOMIC_FMAX:
3130     return selectG_LOAD_STORE_ATOMICRMW(I);
3131   case AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG:
3132     return selectG_AMDGPU_ATOMIC_CMPXCHG(I);
3133   case TargetOpcode::G_SELECT:
3134     return selectG_SELECT(I);
3135   case TargetOpcode::G_TRUNC:
3136     return selectG_TRUNC(I);
3137   case TargetOpcode::G_SEXT:
3138   case TargetOpcode::G_ZEXT:
3139   case TargetOpcode::G_ANYEXT:
3140   case TargetOpcode::G_SEXT_INREG:
3141     if (selectImpl(I, *CoverageInfo))
3142       return true;
3143     return selectG_SZA_EXT(I);
3144   case TargetOpcode::G_BRCOND:
3145     return selectG_BRCOND(I);
3146   case TargetOpcode::G_GLOBAL_VALUE:
3147     return selectG_GLOBAL_VALUE(I);
3148   case TargetOpcode::G_PTRMASK:
3149     return selectG_PTRMASK(I);
3150   case TargetOpcode::G_EXTRACT_VECTOR_ELT:
3151     return selectG_EXTRACT_VECTOR_ELT(I);
3152   case TargetOpcode::G_INSERT_VECTOR_ELT:
3153     return selectG_INSERT_VECTOR_ELT(I);
3154   case TargetOpcode::G_SHUFFLE_VECTOR:
3155     return selectG_SHUFFLE_VECTOR(I);
3156   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
3157   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE: {
3158     const AMDGPU::ImageDimIntrinsicInfo *Intr
3159       = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID());
3160     assert(Intr && "not an image intrinsic with image pseudo");
3161     return selectImageIntrinsic(I, Intr);
3162   }
3163   case AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY:
3164     return selectBVHIntrinsic(I);
3165   case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD:
3166     return selectAMDGPU_BUFFER_ATOMIC_FADD(I);
3167   default:
3168     return selectImpl(I, *CoverageInfo);
3169   }
3170   return false;
3171 }
3172 
3173 InstructionSelector::ComplexRendererFns
3174 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
3175   return {{
3176       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3177   }};
3178 
3179 }
3180 
3181 std::pair<Register, unsigned>
3182 AMDGPUInstructionSelector::selectVOP3ModsImpl(MachineOperand &Root,
3183                                               bool AllowAbs) const {
3184   Register Src = Root.getReg();
3185   Register OrigSrc = Src;
3186   unsigned Mods = 0;
3187   MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
3188 
3189   if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
3190     Src = MI->getOperand(1).getReg();
3191     Mods |= SISrcMods::NEG;
3192     MI = getDefIgnoringCopies(Src, *MRI);
3193   }
3194 
3195   if (AllowAbs && MI && MI->getOpcode() == AMDGPU::G_FABS) {
3196     Src = MI->getOperand(1).getReg();
3197     Mods |= SISrcMods::ABS;
3198   }
3199 
3200   if (Mods != 0 &&
3201       RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) {
3202     MachineInstr *UseMI = Root.getParent();
3203 
3204     // If we looked through copies to find source modifiers on an SGPR operand,
3205     // we now have an SGPR register source. To avoid potentially violating the
3206     // constant bus restriction, we need to insert a copy to a VGPR.
3207     Register VGPRSrc = MRI->cloneVirtualRegister(OrigSrc);
3208     BuildMI(*UseMI->getParent(), UseMI, UseMI->getDebugLoc(),
3209             TII.get(AMDGPU::COPY), VGPRSrc)
3210       .addReg(Src);
3211     Src = VGPRSrc;
3212   }
3213 
3214   return std::make_pair(Src, Mods);
3215 }
3216 
3217 ///
3218 /// This will select either an SGPR or VGPR operand and will save us from
3219 /// having to write an extra tablegen pattern.
3220 InstructionSelector::ComplexRendererFns
3221 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
3222   return {{
3223       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3224   }};
3225 }
3226 
3227 InstructionSelector::ComplexRendererFns
3228 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
3229   Register Src;
3230   unsigned Mods;
3231   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3232 
3233   return {{
3234       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3235       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3236       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3237       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3238   }};
3239 }
3240 
3241 InstructionSelector::ComplexRendererFns
3242 AMDGPUInstructionSelector::selectVOP3BMods0(MachineOperand &Root) const {
3243   Register Src;
3244   unsigned Mods;
3245   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3246 
3247   return {{
3248       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3249       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3250       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3251       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3252   }};
3253 }
3254 
3255 InstructionSelector::ComplexRendererFns
3256 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
3257   return {{
3258       [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
3259       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3260       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
3261   }};
3262 }
3263 
3264 InstructionSelector::ComplexRendererFns
3265 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
3266   Register Src;
3267   unsigned Mods;
3268   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3269 
3270   return {{
3271       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3272       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3273   }};
3274 }
3275 
3276 InstructionSelector::ComplexRendererFns
3277 AMDGPUInstructionSelector::selectVOP3BMods(MachineOperand &Root) const {
3278   Register Src;
3279   unsigned Mods;
3280   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3281 
3282   return {{
3283       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3284       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3285   }};
3286 }
3287 
3288 InstructionSelector::ComplexRendererFns
3289 AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
3290   Register Reg = Root.getReg();
3291   const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI);
3292   if (Def && (Def->getOpcode() == AMDGPU::G_FNEG ||
3293               Def->getOpcode() == AMDGPU::G_FABS))
3294     return {};
3295   return {{
3296       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3297   }};
3298 }
3299 
3300 std::pair<Register, unsigned>
3301 AMDGPUInstructionSelector::selectVOP3PModsImpl(
3302   Register Src, const MachineRegisterInfo &MRI) const {
3303   unsigned Mods = 0;
3304   MachineInstr *MI = MRI.getVRegDef(Src);
3305 
3306   if (MI && MI->getOpcode() == AMDGPU::G_FNEG &&
3307       // It's possible to see an f32 fneg here, but unlikely.
3308       // TODO: Treat f32 fneg as only high bit.
3309       MRI.getType(Src) == LLT::vector(2, 16)) {
3310     Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
3311     Src = MI->getOperand(1).getReg();
3312     MI = MRI.getVRegDef(Src);
3313   }
3314 
3315   // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.
3316 
3317   // Packed instructions do not have abs modifiers.
3318   Mods |= SISrcMods::OP_SEL_1;
3319 
3320   return std::make_pair(Src, Mods);
3321 }
3322 
3323 InstructionSelector::ComplexRendererFns
3324 AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
3325   MachineRegisterInfo &MRI
3326     = Root.getParent()->getParent()->getParent()->getRegInfo();
3327 
3328   Register Src;
3329   unsigned Mods;
3330   std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);
3331 
3332   return {{
3333       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3334       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3335   }};
3336 }
3337 
3338 InstructionSelector::ComplexRendererFns
3339 AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
3340   Register Src;
3341   unsigned Mods;
3342   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3343   if (!isKnownNeverNaN(Src, *MRI))
3344     return None;
3345 
3346   return {{
3347       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3348       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3349   }};
3350 }
3351 
3352 InstructionSelector::ComplexRendererFns
3353 AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
3354   // FIXME: Handle op_sel
3355   return {{
3356       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
3357       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
3358   }};
3359 }
3360 
3361 InstructionSelector::ComplexRendererFns
3362 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
3363   SmallVector<GEPInfo, 4> AddrInfo;
3364   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3365 
3366   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3367     return None;
3368 
3369   const GEPInfo &GEPInfo = AddrInfo[0];
3370   Optional<int64_t> EncodedImm =
3371       AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm, false);
3372   if (!EncodedImm)
3373     return None;
3374 
3375   unsigned PtrReg = GEPInfo.SgprParts[0];
3376   return {{
3377     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3378     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3379   }};
3380 }
3381 
3382 InstructionSelector::ComplexRendererFns
3383 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
3384   SmallVector<GEPInfo, 4> AddrInfo;
3385   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3386 
3387   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3388     return None;
3389 
3390   const GEPInfo &GEPInfo = AddrInfo[0];
3391   Register PtrReg = GEPInfo.SgprParts[0];
3392   Optional<int64_t> EncodedImm =
3393       AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
3394   if (!EncodedImm)
3395     return None;
3396 
3397   return {{
3398     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3399     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3400   }};
3401 }
3402 
3403 InstructionSelector::ComplexRendererFns
3404 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
3405   MachineInstr *MI = Root.getParent();
3406   MachineBasicBlock *MBB = MI->getParent();
3407 
3408   SmallVector<GEPInfo, 4> AddrInfo;
3409   getAddrModeInfo(*MI, *MRI, AddrInfo);
3410 
3411   // FIXME: We should shrink the GEP if the offset is known to be <= 32-bits,
3412   // then we can select all ptr + 32-bit offsets not just immediate offsets.
3413   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3414     return None;
3415 
3416   const GEPInfo &GEPInfo = AddrInfo[0];
3417   // SGPR offset is unsigned.
3418   if (!GEPInfo.Imm || GEPInfo.Imm < 0 || !isUInt<32>(GEPInfo.Imm))
3419     return None;
3420 
3421   // If we make it this far we have a load with an 32-bit immediate offset.
3422   // It is OK to select this using a sgpr offset, because we have already
3423   // failed trying to select this load into one of the _IMM variants since
3424   // the _IMM Patterns are considered before the _SGPR patterns.
3425   Register PtrReg = GEPInfo.SgprParts[0];
3426   Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3427   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
3428           .addImm(GEPInfo.Imm);
3429   return {{
3430     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3431     [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
3432   }};
3433 }
3434 
3435 template <bool Signed>
3436 std::pair<Register, int>
3437 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root) const {
3438   MachineInstr *MI = Root.getParent();
3439 
3440   auto Default = std::make_pair(Root.getReg(), 0);
3441 
3442   if (!STI.hasFlatInstOffsets())
3443     return Default;
3444 
3445   Register PtrBase;
3446   int64_t ConstOffset;
3447   std::tie(PtrBase, ConstOffset) =
3448       getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3449   if (ConstOffset == 0)
3450     return Default;
3451 
3452   unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
3453   if (!TII.isLegalFLATOffset(ConstOffset, AddrSpace, Signed))
3454     return Default;
3455 
3456   return std::make_pair(PtrBase, ConstOffset);
3457 }
3458 
3459 InstructionSelector::ComplexRendererFns
3460 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
3461   auto PtrWithOffset = selectFlatOffsetImpl<false>(Root);
3462 
3463   return {{
3464       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3465       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3466     }};
3467 }
3468 
3469 InstructionSelector::ComplexRendererFns
3470 AMDGPUInstructionSelector::selectFlatOffsetSigned(MachineOperand &Root) const {
3471   auto PtrWithOffset = selectFlatOffsetImpl<true>(Root);
3472 
3473   return {{
3474       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3475       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3476     }};
3477 }
3478 
3479 /// Match a zero extend from a 32-bit value to 64-bits.
3480 static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) {
3481   Register ZExtSrc;
3482   if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))
3483     return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register();
3484 
3485   // Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0)
3486   const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
3487   if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES)
3488     return false;
3489 
3490   if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt())) {
3491     return Def->getOperand(1).getReg();
3492   }
3493 
3494   return Register();
3495 }
3496 
3497 // Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
3498 InstructionSelector::ComplexRendererFns
3499 AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
3500   Register Addr = Root.getReg();
3501   Register PtrBase;
3502   int64_t ConstOffset;
3503   int64_t ImmOffset = 0;
3504 
3505   // Match the immediate offset first, which canonically is moved as low as
3506   // possible.
3507   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3508 
3509   if (ConstOffset != 0) {
3510     if (TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, true)) {
3511       Addr = PtrBase;
3512       ImmOffset = ConstOffset;
3513     } else if (ConstOffset > 0) {
3514       auto PtrBaseDef = getDefSrcRegIgnoringCopies(PtrBase, *MRI);
3515       if (!PtrBaseDef)
3516         return None;
3517 
3518       if (isSGPR(PtrBaseDef->Reg)) {
3519         // Offset is too large.
3520         //
3521         // saddr + large_offset -> saddr + (voffset = large_offset & ~MaxOffset)
3522         //                         + (large_offset & MaxOffset);
3523         int64_t SplitImmOffset, RemainderOffset;
3524         std::tie(SplitImmOffset, RemainderOffset)
3525           = TII.splitFlatOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, true);
3526 
3527         if (isUInt<32>(RemainderOffset)) {
3528           MachineInstr *MI = Root.getParent();
3529           MachineBasicBlock *MBB = MI->getParent();
3530           Register HighBits
3531             = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3532 
3533           BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3534                   HighBits)
3535             .addImm(RemainderOffset);
3536 
3537           return {{
3538             [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrBase); },  // saddr
3539             [=](MachineInstrBuilder &MIB) { MIB.addReg(HighBits); }, // voffset
3540             [=](MachineInstrBuilder &MIB) { MIB.addImm(SplitImmOffset); },
3541           }};
3542         }
3543       }
3544     }
3545   }
3546 
3547   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3548   if (!AddrDef)
3549     return None;
3550 
3551   // Match the variable offset.
3552   if (AddrDef->MI->getOpcode() != AMDGPU::G_PTR_ADD) {
3553     // FIXME: We should probably have folded COPY (G_IMPLICIT_DEF) earlier, and
3554     // drop this.
3555     if (AddrDef->MI->getOpcode() == AMDGPU::G_IMPLICIT_DEF ||
3556         AddrDef->MI->getOpcode() == AMDGPU::G_CONSTANT)
3557       return None;
3558 
3559     // It's cheaper to materialize a single 32-bit zero for vaddr than the two
3560     // moves required to copy a 64-bit SGPR to VGPR.
3561     const Register SAddr = AddrDef->Reg;
3562     if (!isSGPR(SAddr))
3563       return None;
3564 
3565     MachineInstr *MI = Root.getParent();
3566     MachineBasicBlock *MBB = MI->getParent();
3567     Register VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3568 
3569     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3570             VOffset)
3571       .addImm(0);
3572 
3573     return {{
3574         [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); },    // saddr
3575         [=](MachineInstrBuilder &MIB) { MIB.addReg(VOffset); },  // voffset
3576         [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
3577     }};
3578   }
3579 
3580   // Look through the SGPR->VGPR copy.
3581   Register SAddr =
3582     getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
3583   if (!SAddr || !isSGPR(SAddr))
3584     return None;
3585 
3586   Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
3587 
3588   // It's possible voffset is an SGPR here, but the copy to VGPR will be
3589   // inserted later.
3590   Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset);
3591   if (!VOffset)
3592     return None;
3593 
3594   return {{[=](MachineInstrBuilder &MIB) { // saddr
3595              MIB.addReg(SAddr);
3596            },
3597            [=](MachineInstrBuilder &MIB) { // voffset
3598              MIB.addReg(VOffset);
3599            },
3600            [=](MachineInstrBuilder &MIB) { // offset
3601              MIB.addImm(ImmOffset);
3602            }}};
3603 }
3604 
3605 InstructionSelector::ComplexRendererFns
3606 AMDGPUInstructionSelector::selectScratchSAddr(MachineOperand &Root) const {
3607   Register Addr = Root.getReg();
3608   Register PtrBase;
3609   int64_t ConstOffset;
3610   int64_t ImmOffset = 0;
3611 
3612   // Match the immediate offset first, which canonically is moved as low as
3613   // possible.
3614   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3615 
3616   if (ConstOffset != 0 &&
3617       TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS, true)) {
3618     Addr = PtrBase;
3619     ImmOffset = ConstOffset;
3620   }
3621 
3622   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3623   if (!AddrDef)
3624     return None;
3625 
3626   if (AddrDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
3627     int FI = AddrDef->MI->getOperand(1).getIndex();
3628     return {{
3629         [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
3630         [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
3631     }};
3632   }
3633 
3634   Register SAddr = AddrDef->Reg;
3635 
3636   if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3637     Register LHS = AddrDef->MI->getOperand(1).getReg();
3638     Register RHS = AddrDef->MI->getOperand(2).getReg();
3639     auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
3640     auto RHSDef = getDefSrcRegIgnoringCopies(RHS, *MRI);
3641 
3642     if (LHSDef && RHSDef &&
3643         LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX &&
3644         isSGPR(RHSDef->Reg)) {
3645       int FI = LHSDef->MI->getOperand(1).getIndex();
3646       MachineInstr &I = *Root.getParent();
3647       MachineBasicBlock *BB = I.getParent();
3648       const DebugLoc &DL = I.getDebugLoc();
3649       SAddr = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3650 
3651       BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), SAddr)
3652         .addFrameIndex(FI)
3653         .addReg(RHSDef->Reg);
3654     }
3655   }
3656 
3657   if (!isSGPR(SAddr))
3658     return None;
3659 
3660   return {{
3661       [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); }, // saddr
3662       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
3663   }};
3664 }
3665 
3666 static bool isStackPtrRelative(const MachinePointerInfo &PtrInfo) {
3667   auto PSV = PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
3668   return PSV && PSV->isStack();
3669 }
3670 
3671 InstructionSelector::ComplexRendererFns
3672 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
3673   MachineInstr *MI = Root.getParent();
3674   MachineBasicBlock *MBB = MI->getParent();
3675   MachineFunction *MF = MBB->getParent();
3676   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3677 
3678   int64_t Offset = 0;
3679   if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) &&
3680       Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) {
3681     Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3682 
3683     // TODO: Should this be inside the render function? The iterator seems to
3684     // move.
3685     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3686             HighBits)
3687       .addImm(Offset & ~4095);
3688 
3689     return {{[=](MachineInstrBuilder &MIB) { // rsrc
3690                MIB.addReg(Info->getScratchRSrcReg());
3691              },
3692              [=](MachineInstrBuilder &MIB) { // vaddr
3693                MIB.addReg(HighBits);
3694              },
3695              [=](MachineInstrBuilder &MIB) { // soffset
3696                // Use constant zero for soffset and rely on eliminateFrameIndex
3697                // to choose the appropriate frame register if need be.
3698                MIB.addImm(0);
3699              },
3700              [=](MachineInstrBuilder &MIB) { // offset
3701                MIB.addImm(Offset & 4095);
3702              }}};
3703   }
3704 
3705   assert(Offset == 0 || Offset == -1);
3706 
3707   // Try to fold a frame index directly into the MUBUF vaddr field, and any
3708   // offsets.
3709   Optional<int> FI;
3710   Register VAddr = Root.getReg();
3711   if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
3712     if (isBaseWithConstantOffset(Root, *MRI)) {
3713       const MachineOperand &LHS = RootDef->getOperand(1);
3714       const MachineOperand &RHS = RootDef->getOperand(2);
3715       const MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
3716       const MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());
3717       if (LHSDef && RHSDef) {
3718         int64_t PossibleOffset =
3719             RHSDef->getOperand(1).getCImm()->getSExtValue();
3720         if (SIInstrInfo::isLegalMUBUFImmOffset(PossibleOffset) &&
3721             (!STI.privateMemoryResourceIsRangeChecked() ||
3722              KnownBits->signBitIsZero(LHS.getReg()))) {
3723           if (LHSDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
3724             FI = LHSDef->getOperand(1).getIndex();
3725           else
3726             VAddr = LHS.getReg();
3727           Offset = PossibleOffset;
3728         }
3729       }
3730     } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
3731       FI = RootDef->getOperand(1).getIndex();
3732     }
3733   }
3734 
3735   return {{[=](MachineInstrBuilder &MIB) { // rsrc
3736              MIB.addReg(Info->getScratchRSrcReg());
3737            },
3738            [=](MachineInstrBuilder &MIB) { // vaddr
3739              if (FI.hasValue())
3740                MIB.addFrameIndex(FI.getValue());
3741              else
3742                MIB.addReg(VAddr);
3743            },
3744            [=](MachineInstrBuilder &MIB) { // soffset
3745              // Use constant zero for soffset and rely on eliminateFrameIndex
3746              // to choose the appropriate frame register if need be.
3747              MIB.addImm(0);
3748            },
3749            [=](MachineInstrBuilder &MIB) { // offset
3750              MIB.addImm(Offset);
3751            }}};
3752 }
3753 
3754 bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
3755                                                 int64_t Offset) const {
3756   if (!isUInt<16>(Offset))
3757     return false;
3758 
3759   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
3760     return true;
3761 
3762   // On Southern Islands instruction with a negative base value and an offset
3763   // don't seem to work.
3764   return KnownBits->signBitIsZero(Base);
3765 }
3766 
3767 bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0,
3768                                                  int64_t Offset1,
3769                                                  unsigned Size) const {
3770   if (Offset0 % Size != 0 || Offset1 % Size != 0)
3771     return false;
3772   if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
3773     return false;
3774 
3775   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
3776     return true;
3777 
3778   // On Southern Islands instruction with a negative base value and an offset
3779   // don't seem to work.
3780   return KnownBits->signBitIsZero(Base);
3781 }
3782 
3783 InstructionSelector::ComplexRendererFns
3784 AMDGPUInstructionSelector::selectMUBUFScratchOffset(
3785     MachineOperand &Root) const {
3786   MachineInstr *MI = Root.getParent();
3787   MachineBasicBlock *MBB = MI->getParent();
3788 
3789   int64_t Offset = 0;
3790   if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
3791       !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
3792     return {};
3793 
3794   const MachineFunction *MF = MBB->getParent();
3795   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3796   const MachineMemOperand *MMO = *MI->memoperands_begin();
3797   const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
3798 
3799   return {{
3800       [=](MachineInstrBuilder &MIB) { // rsrc
3801         MIB.addReg(Info->getScratchRSrcReg());
3802       },
3803       [=](MachineInstrBuilder &MIB) { // soffset
3804         if (isStackPtrRelative(PtrInfo))
3805           MIB.addReg(Info->getStackPtrOffsetReg());
3806         else
3807           MIB.addImm(0);
3808       },
3809       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
3810   }};
3811 }
3812 
3813 std::pair<Register, unsigned>
3814 AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
3815   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
3816   if (!RootDef)
3817     return std::make_pair(Root.getReg(), 0);
3818 
3819   int64_t ConstAddr = 0;
3820 
3821   Register PtrBase;
3822   int64_t Offset;
3823   std::tie(PtrBase, Offset) =
3824     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3825 
3826   if (Offset) {
3827     if (isDSOffsetLegal(PtrBase, Offset)) {
3828       // (add n0, c0)
3829       return std::make_pair(PtrBase, Offset);
3830     }
3831   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
3832     // TODO
3833 
3834 
3835   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
3836     // TODO
3837 
3838   }
3839 
3840   return std::make_pair(Root.getReg(), 0);
3841 }
3842 
3843 InstructionSelector::ComplexRendererFns
3844 AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
3845   Register Reg;
3846   unsigned Offset;
3847   std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
3848   return {{
3849       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3850       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
3851     }};
3852 }
3853 
3854 InstructionSelector::ComplexRendererFns
3855 AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
3856   return selectDSReadWrite2(Root, 4);
3857 }
3858 
3859 InstructionSelector::ComplexRendererFns
3860 AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const {
3861   return selectDSReadWrite2(Root, 8);
3862 }
3863 
3864 InstructionSelector::ComplexRendererFns
3865 AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root,
3866                                               unsigned Size) const {
3867   Register Reg;
3868   unsigned Offset;
3869   std::tie(Reg, Offset) = selectDSReadWrite2Impl(Root, Size);
3870   return {{
3871       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3872       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
3873       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); }
3874     }};
3875 }
3876 
3877 std::pair<Register, unsigned>
3878 AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root,
3879                                                   unsigned Size) const {
3880   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
3881   if (!RootDef)
3882     return std::make_pair(Root.getReg(), 0);
3883 
3884   int64_t ConstAddr = 0;
3885 
3886   Register PtrBase;
3887   int64_t Offset;
3888   std::tie(PtrBase, Offset) =
3889     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3890 
3891   if (Offset) {
3892     int64_t OffsetValue0 = Offset;
3893     int64_t OffsetValue1 = Offset + Size;
3894     if (isDSOffset2Legal(PtrBase, OffsetValue0, OffsetValue1, Size)) {
3895       // (add n0, c0)
3896       return std::make_pair(PtrBase, OffsetValue0 / Size);
3897     }
3898   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
3899     // TODO
3900 
3901   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
3902     // TODO
3903 
3904   }
3905 
3906   return std::make_pair(Root.getReg(), 0);
3907 }
3908 
3909 /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
3910 /// the base value with the constant offset. There may be intervening copies
3911 /// between \p Root and the identified constant. Returns \p Root, 0 if this does
3912 /// not match the pattern.
3913 std::pair<Register, int64_t>
3914 AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
3915   Register Root, const MachineRegisterInfo &MRI) const {
3916   MachineInstr *RootI = getDefIgnoringCopies(Root, MRI);
3917   if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
3918     return {Root, 0};
3919 
3920   MachineOperand &RHS = RootI->getOperand(2);
3921   Optional<ValueAndVReg> MaybeOffset
3922     = getConstantVRegValWithLookThrough(RHS.getReg(), MRI, true);
3923   if (!MaybeOffset)
3924     return {Root, 0};
3925   return {RootI->getOperand(1).getReg(), MaybeOffset->Value.getSExtValue()};
3926 }
3927 
3928 static void addZeroImm(MachineInstrBuilder &MIB) {
3929   MIB.addImm(0);
3930 }
3931 
3932 /// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p
3933 /// BasePtr is not valid, a null base pointer will be used.
3934 static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
3935                           uint32_t FormatLo, uint32_t FormatHi,
3936                           Register BasePtr) {
3937   Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
3938   Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
3939   Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
3940   Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
3941 
3942   B.buildInstr(AMDGPU::S_MOV_B32)
3943     .addDef(RSrc2)
3944     .addImm(FormatLo);
3945   B.buildInstr(AMDGPU::S_MOV_B32)
3946     .addDef(RSrc3)
3947     .addImm(FormatHi);
3948 
3949   // Build the half of the subregister with the constants before building the
3950   // full 128-bit register. If we are building multiple resource descriptors,
3951   // this will allow CSEing of the 2-component register.
3952   B.buildInstr(AMDGPU::REG_SEQUENCE)
3953     .addDef(RSrcHi)
3954     .addReg(RSrc2)
3955     .addImm(AMDGPU::sub0)
3956     .addReg(RSrc3)
3957     .addImm(AMDGPU::sub1);
3958 
3959   Register RSrcLo = BasePtr;
3960   if (!BasePtr) {
3961     RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
3962     B.buildInstr(AMDGPU::S_MOV_B64)
3963       .addDef(RSrcLo)
3964       .addImm(0);
3965   }
3966 
3967   B.buildInstr(AMDGPU::REG_SEQUENCE)
3968     .addDef(RSrc)
3969     .addReg(RSrcLo)
3970     .addImm(AMDGPU::sub0_sub1)
3971     .addReg(RSrcHi)
3972     .addImm(AMDGPU::sub2_sub3);
3973 
3974   return RSrc;
3975 }
3976 
3977 static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
3978                                 const SIInstrInfo &TII, Register BasePtr) {
3979   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
3980 
3981   // FIXME: Why are half the "default" bits ignored based on the addressing
3982   // mode?
3983   return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
3984 }
3985 
3986 static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
3987                                const SIInstrInfo &TII, Register BasePtr) {
3988   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
3989 
3990   // FIXME: Why are half the "default" bits ignored based on the addressing
3991   // mode?
3992   return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
3993 }
3994 
3995 AMDGPUInstructionSelector::MUBUFAddressData
3996 AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
3997   MUBUFAddressData Data;
3998   Data.N0 = Src;
3999 
4000   Register PtrBase;
4001   int64_t Offset;
4002 
4003   std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
4004   if (isUInt<32>(Offset)) {
4005     Data.N0 = PtrBase;
4006     Data.Offset = Offset;
4007   }
4008 
4009   if (MachineInstr *InputAdd
4010       = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
4011     Data.N2 = InputAdd->getOperand(1).getReg();
4012     Data.N3 = InputAdd->getOperand(2).getReg();
4013 
4014     // FIXME: Need to fix extra SGPR->VGPRcopies inserted
4015     // FIXME: Don't know this was defined by operand 0
4016     //
4017     // TODO: Remove this when we have copy folding optimizations after
4018     // RegBankSelect.
4019     Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
4020     Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
4021   }
4022 
4023   return Data;
4024 }
4025 
4026 /// Return if the addr64 mubuf mode should be used for the given address.
4027 bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
4028   // (ptr_add N2, N3) -> addr64, or
4029   // (ptr_add (ptr_add N2, N3), C1) -> addr64
4030   if (Addr.N2)
4031     return true;
4032 
4033   const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
4034   return N0Bank->getID() == AMDGPU::VGPRRegBankID;
4035 }
4036 
4037 /// Split an immediate offset \p ImmOffset depending on whether it fits in the
4038 /// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
4039 /// component.
4040 void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
4041   MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
4042   if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
4043     return;
4044 
4045   // Illegal offset, store it in soffset.
4046   SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
4047   B.buildInstr(AMDGPU::S_MOV_B32)
4048     .addDef(SOffset)
4049     .addImm(ImmOffset);
4050   ImmOffset = 0;
4051 }
4052 
4053 bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
4054   MachineOperand &Root, Register &VAddr, Register &RSrcReg,
4055   Register &SOffset, int64_t &Offset) const {
4056   // FIXME: Predicates should stop this from reaching here.
4057   // addr64 bit was removed for volcanic islands.
4058   if (!STI.hasAddr64() || STI.useFlatForGlobal())
4059     return false;
4060 
4061   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4062   if (!shouldUseAddr64(AddrData))
4063     return false;
4064 
4065   Register N0 = AddrData.N0;
4066   Register N2 = AddrData.N2;
4067   Register N3 = AddrData.N3;
4068   Offset = AddrData.Offset;
4069 
4070   // Base pointer for the SRD.
4071   Register SRDPtr;
4072 
4073   if (N2) {
4074     if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4075       assert(N3);
4076       if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4077         // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
4078         // addr64, and construct the default resource from a 0 address.
4079         VAddr = N0;
4080       } else {
4081         SRDPtr = N3;
4082         VAddr = N2;
4083       }
4084     } else {
4085       // N2 is not divergent.
4086       SRDPtr = N2;
4087       VAddr = N3;
4088     }
4089   } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4090     // Use the default null pointer in the resource
4091     VAddr = N0;
4092   } else {
4093     // N0 -> offset, or
4094     // (N0 + C1) -> offset
4095     SRDPtr = N0;
4096   }
4097 
4098   MachineIRBuilder B(*Root.getParent());
4099   RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
4100   splitIllegalMUBUFOffset(B, SOffset, Offset);
4101   return true;
4102 }
4103 
4104 bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
4105   MachineOperand &Root, Register &RSrcReg, Register &SOffset,
4106   int64_t &Offset) const {
4107 
4108   // FIXME: Pattern should not reach here.
4109   if (STI.useFlatForGlobal())
4110     return false;
4111 
4112   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4113   if (shouldUseAddr64(AddrData))
4114     return false;
4115 
4116   // N0 -> offset, or
4117   // (N0 + C1) -> offset
4118   Register SRDPtr = AddrData.N0;
4119   Offset = AddrData.Offset;
4120 
4121   // TODO: Look through extensions for 32-bit soffset.
4122   MachineIRBuilder B(*Root.getParent());
4123 
4124   RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
4125   splitIllegalMUBUFOffset(B, SOffset, Offset);
4126   return true;
4127 }
4128 
4129 InstructionSelector::ComplexRendererFns
4130 AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
4131   Register VAddr;
4132   Register RSrcReg;
4133   Register SOffset;
4134   int64_t Offset = 0;
4135 
4136   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
4137     return {};
4138 
4139   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
4140   // pattern.
4141   return {{
4142       [=](MachineInstrBuilder &MIB) {  // rsrc
4143         MIB.addReg(RSrcReg);
4144       },
4145       [=](MachineInstrBuilder &MIB) { // vaddr
4146         MIB.addReg(VAddr);
4147       },
4148       [=](MachineInstrBuilder &MIB) { // soffset
4149         if (SOffset)
4150           MIB.addReg(SOffset);
4151         else
4152           MIB.addImm(0);
4153       },
4154       [=](MachineInstrBuilder &MIB) { // offset
4155         MIB.addImm(Offset);
4156       },
4157       addZeroImm, //  glc
4158       addZeroImm, //  slc
4159       addZeroImm, //  tfe
4160       addZeroImm, //  dlc
4161       addZeroImm, //  swz
4162       addZeroImm  //  scc
4163     }};
4164 }
4165 
4166 InstructionSelector::ComplexRendererFns
4167 AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
4168   Register RSrcReg;
4169   Register SOffset;
4170   int64_t Offset = 0;
4171 
4172   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4173     return {};
4174 
4175   return {{
4176       [=](MachineInstrBuilder &MIB) {  // rsrc
4177         MIB.addReg(RSrcReg);
4178       },
4179       [=](MachineInstrBuilder &MIB) { // soffset
4180         if (SOffset)
4181           MIB.addReg(SOffset);
4182         else
4183           MIB.addImm(0);
4184       },
4185       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4186       addZeroImm, //  glc
4187       addZeroImm, //  slc
4188       addZeroImm, //  tfe
4189       addZeroImm, //  dlc
4190       addZeroImm, //  swz
4191       addZeroImm  //  scc
4192     }};
4193 }
4194 
4195 InstructionSelector::ComplexRendererFns
4196 AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const {
4197   Register VAddr;
4198   Register RSrcReg;
4199   Register SOffset;
4200   int64_t Offset = 0;
4201 
4202   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
4203     return {};
4204 
4205   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
4206   // pattern.
4207   return {{
4208       [=](MachineInstrBuilder &MIB) {  // rsrc
4209         MIB.addReg(RSrcReg);
4210       },
4211       [=](MachineInstrBuilder &MIB) { // vaddr
4212         MIB.addReg(VAddr);
4213       },
4214       [=](MachineInstrBuilder &MIB) { // soffset
4215         if (SOffset)
4216           MIB.addReg(SOffset);
4217         else
4218           MIB.addImm(0);
4219       },
4220       [=](MachineInstrBuilder &MIB) { // offset
4221         MIB.addImm(Offset);
4222       },
4223       addZeroImm //  slc
4224     }};
4225 }
4226 
4227 InstructionSelector::ComplexRendererFns
4228 AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
4229   Register RSrcReg;
4230   Register SOffset;
4231   int64_t Offset = 0;
4232 
4233   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4234     return {};
4235 
4236   return {{
4237       [=](MachineInstrBuilder &MIB) {  // rsrc
4238         MIB.addReg(RSrcReg);
4239       },
4240       [=](MachineInstrBuilder &MIB) { // soffset
4241         if (SOffset)
4242           MIB.addReg(SOffset);
4243         else
4244           MIB.addImm(0);
4245       },
4246       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4247       addZeroImm //  slc
4248     }};
4249 }
4250 
4251 /// Get an immediate that must be 32-bits, and treated as zero extended.
4252 static Optional<uint64_t> getConstantZext32Val(Register Reg,
4253                                                const MachineRegisterInfo &MRI) {
4254   // getConstantVRegVal sexts any values, so see if that matters.
4255   Optional<int64_t> OffsetVal = getConstantVRegSExtVal(Reg, MRI);
4256   if (!OffsetVal || !isInt<32>(*OffsetVal))
4257     return None;
4258   return Lo_32(*OffsetVal);
4259 }
4260 
4261 InstructionSelector::ComplexRendererFns
4262 AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
4263   Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
4264   if (!OffsetVal)
4265     return {};
4266 
4267   Optional<int64_t> EncodedImm =
4268       AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
4269   if (!EncodedImm)
4270     return {};
4271 
4272   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
4273 }
4274 
4275 InstructionSelector::ComplexRendererFns
4276 AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
4277   assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
4278 
4279   Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
4280   if (!OffsetVal)
4281     return {};
4282 
4283   Optional<int64_t> EncodedImm
4284     = AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
4285   if (!EncodedImm)
4286     return {};
4287 
4288   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
4289 }
4290 
4291 void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
4292                                                  const MachineInstr &MI,
4293                                                  int OpIdx) const {
4294   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4295          "Expected G_CONSTANT");
4296   MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
4297 }
4298 
4299 void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
4300                                                 const MachineInstr &MI,
4301                                                 int OpIdx) const {
4302   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4303          "Expected G_CONSTANT");
4304   MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
4305 }
4306 
4307 void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
4308                                                  const MachineInstr &MI,
4309                                                  int OpIdx) const {
4310   assert(OpIdx == -1);
4311 
4312   const MachineOperand &Op = MI.getOperand(1);
4313   if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
4314     MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
4315   else {
4316     assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
4317     MIB.addImm(Op.getCImm()->getSExtValue());
4318   }
4319 }
4320 
4321 void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
4322                                                 const MachineInstr &MI,
4323                                                 int OpIdx) const {
4324   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4325          "Expected G_CONSTANT");
4326   MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
4327 }
4328 
4329 /// This only really exists to satisfy DAG type checking machinery, so is a
4330 /// no-op here.
4331 void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
4332                                                 const MachineInstr &MI,
4333                                                 int OpIdx) const {
4334   MIB.addImm(MI.getOperand(OpIdx).getImm());
4335 }
4336 
4337 void AMDGPUInstructionSelector::renderExtractGLC(MachineInstrBuilder &MIB,
4338                                                  const MachineInstr &MI,
4339                                                  int OpIdx) const {
4340   assert(OpIdx >= 0 && "expected to match an immediate operand");
4341   MIB.addImm(MI.getOperand(OpIdx).getImm() & 1);
4342 }
4343 
4344 void AMDGPUInstructionSelector::renderExtractSLC(MachineInstrBuilder &MIB,
4345                                                  const MachineInstr &MI,
4346                                                  int OpIdx) const {
4347   assert(OpIdx >= 0 && "expected to match an immediate operand");
4348   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 1) & 1);
4349 }
4350 
4351 void AMDGPUInstructionSelector::renderExtractDLC(MachineInstrBuilder &MIB,
4352                                                  const MachineInstr &MI,
4353                                                  int OpIdx) const {
4354   assert(OpIdx >= 0 && "expected to match an immediate operand");
4355   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 2) & 1);
4356 }
4357 
4358 void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
4359                                                  const MachineInstr &MI,
4360                                                  int OpIdx) const {
4361   assert(OpIdx >= 0 && "expected to match an immediate operand");
4362   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1);
4363 }
4364 
4365 void AMDGPUInstructionSelector::renderExtractSCCB(MachineInstrBuilder &MIB,
4366                                                   const MachineInstr &MI,
4367                                                   int OpIdx) const {
4368   assert(OpIdx >= 0 && "expected to match an immediate operand");
4369   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 4) & 1);
4370 }
4371 
4372 void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
4373                                                  const MachineInstr &MI,
4374                                                  int OpIdx) const {
4375   MIB.addFrameIndex((MI.getOperand(1).getIndex()));
4376 }
4377 
4378 bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
4379   return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
4380 }
4381 
4382 bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
4383   return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
4384 }
4385 
4386 bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
4387   return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
4388 }
4389 
4390 bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
4391   return TII.isInlineConstant(Imm);
4392 }
4393