1 //===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the InstructionSelector class for
10 /// AMDGPU.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13 
14 #include "AMDGPUInstructionSelector.h"
15 #include "AMDGPU.h"
16 #include "AMDGPUGlobalISelUtils.h"
17 #include "AMDGPUInstrInfo.h"
18 #include "AMDGPURegisterBankInfo.h"
19 #include "AMDGPUTargetMachine.h"
20 #include "SIMachineFunctionInfo.h"
21 #include "Utils/AMDGPUBaseInfo.h"
22 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
23 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
24 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
25 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
26 #include "llvm/CodeGen/MachineFrameInfo.h"
27 #include "llvm/IR/DiagnosticInfo.h"
28 #include "llvm/IR/IntrinsicsAMDGPU.h"
29 
30 #define DEBUG_TYPE "amdgpu-isel"
31 
32 using namespace llvm;
33 using namespace MIPatternMatch;
34 
35 static cl::opt<bool> AllowRiskySelect(
36   "amdgpu-global-isel-risky-select",
37   cl::desc("Allow GlobalISel to select cases that are likely to not work yet"),
38   cl::init(false),
39   cl::ReallyHidden);
40 
41 #define GET_GLOBALISEL_IMPL
42 #define AMDGPUSubtarget GCNSubtarget
43 #include "AMDGPUGenGlobalISel.inc"
44 #undef GET_GLOBALISEL_IMPL
45 #undef AMDGPUSubtarget
46 
47 AMDGPUInstructionSelector::AMDGPUInstructionSelector(
48     const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
49     const AMDGPUTargetMachine &TM)
50     : TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
51       STI(STI),
52       EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
53 #define GET_GLOBALISEL_PREDICATES_INIT
54 #include "AMDGPUGenGlobalISel.inc"
55 #undef GET_GLOBALISEL_PREDICATES_INIT
56 #define GET_GLOBALISEL_TEMPORARIES_INIT
57 #include "AMDGPUGenGlobalISel.inc"
58 #undef GET_GLOBALISEL_TEMPORARIES_INIT
59 {
60 }
61 
62 const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
63 
64 void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
65                                         CodeGenCoverage &CoverageInfo,
66                                         ProfileSummaryInfo *PSI,
67                                         BlockFrequencyInfo *BFI) {
68   MRI = &MF.getRegInfo();
69   Subtarget = &MF.getSubtarget<GCNSubtarget>();
70   InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
71 }
72 
73 bool AMDGPUInstructionSelector::isVCC(Register Reg,
74                                       const MachineRegisterInfo &MRI) const {
75   // The verifier is oblivious to s1 being a valid value for wavesize registers.
76   if (Reg.isPhysical())
77     return false;
78 
79   auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
80   const TargetRegisterClass *RC =
81       RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
82   if (RC) {
83     const LLT Ty = MRI.getType(Reg);
84     return RC->hasSuperClassEq(TRI.getBoolRC()) &&
85            Ty.isValid() && Ty.getSizeInBits() == 1;
86   }
87 
88   const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
89   return RB->getID() == AMDGPU::VCCRegBankID;
90 }
91 
92 bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
93                                                         unsigned NewOpc) const {
94   MI.setDesc(TII.get(NewOpc));
95   MI.removeOperand(1); // Remove intrinsic ID.
96   MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
97 
98   MachineOperand &Dst = MI.getOperand(0);
99   MachineOperand &Src = MI.getOperand(1);
100 
101   // TODO: This should be legalized to s32 if needed
102   if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
103     return false;
104 
105   const TargetRegisterClass *DstRC
106     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
107   const TargetRegisterClass *SrcRC
108     = TRI.getConstrainedRegClassForOperand(Src, *MRI);
109   if (!DstRC || DstRC != SrcRC)
110     return false;
111 
112   return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
113          RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
114 }
115 
116 bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
117   const DebugLoc &DL = I.getDebugLoc();
118   MachineBasicBlock *BB = I.getParent();
119   I.setDesc(TII.get(TargetOpcode::COPY));
120 
121   const MachineOperand &Src = I.getOperand(1);
122   MachineOperand &Dst = I.getOperand(0);
123   Register DstReg = Dst.getReg();
124   Register SrcReg = Src.getReg();
125 
126   if (isVCC(DstReg, *MRI)) {
127     if (SrcReg == AMDGPU::SCC) {
128       const TargetRegisterClass *RC
129         = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
130       if (!RC)
131         return true;
132       return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
133     }
134 
135     if (!isVCC(SrcReg, *MRI)) {
136       // TODO: Should probably leave the copy and let copyPhysReg expand it.
137       if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
138         return false;
139 
140       const TargetRegisterClass *SrcRC
141         = TRI.getConstrainedRegClassForOperand(Src, *MRI);
142 
143       Optional<ValueAndVReg> ConstVal =
144           getIConstantVRegValWithLookThrough(SrcReg, *MRI, true);
145       if (ConstVal) {
146         unsigned MovOpc =
147             STI.isWave64() ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
148         BuildMI(*BB, &I, DL, TII.get(MovOpc), DstReg)
149             .addImm(ConstVal->Value.getBoolValue() ? -1 : 0);
150       } else {
151         Register MaskedReg = MRI->createVirtualRegister(SrcRC);
152 
153         // We can't trust the high bits at this point, so clear them.
154 
155         // TODO: Skip masking high bits if def is known boolean.
156 
157         unsigned AndOpc =
158             TRI.isSGPRClass(SrcRC) ? AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
159         BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
160             .addImm(1)
161             .addReg(SrcReg);
162         BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
163             .addImm(0)
164             .addReg(MaskedReg);
165       }
166 
167       if (!MRI->getRegClassOrNull(SrcReg))
168         MRI->setRegClass(SrcReg, SrcRC);
169       I.eraseFromParent();
170       return true;
171     }
172 
173     const TargetRegisterClass *RC =
174       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
175     if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
176       return false;
177 
178     return true;
179   }
180 
181   for (const MachineOperand &MO : I.operands()) {
182     if (MO.getReg().isPhysical())
183       continue;
184 
185     const TargetRegisterClass *RC =
186             TRI.getConstrainedRegClassForOperand(MO, *MRI);
187     if (!RC)
188       continue;
189     RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
190   }
191   return true;
192 }
193 
194 bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
195   const Register DefReg = I.getOperand(0).getReg();
196   const LLT DefTy = MRI->getType(DefReg);
197   if (DefTy == LLT::scalar(1)) {
198     if (!AllowRiskySelect) {
199       LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n");
200       return false;
201     }
202 
203     LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n");
204   }
205 
206   // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)
207 
208   const RegClassOrRegBank &RegClassOrBank =
209     MRI->getRegClassOrRegBank(DefReg);
210 
211   const TargetRegisterClass *DefRC
212     = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
213   if (!DefRC) {
214     if (!DefTy.isValid()) {
215       LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
216       return false;
217     }
218 
219     const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
220     DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB, *MRI);
221     if (!DefRC) {
222       LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
223       return false;
224     }
225   }
226 
227   // TODO: Verify that all registers have the same bank
228   I.setDesc(TII.get(TargetOpcode::PHI));
229   return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
230 }
231 
232 MachineOperand
233 AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
234                                            const TargetRegisterClass &SubRC,
235                                            unsigned SubIdx) const {
236 
237   MachineInstr *MI = MO.getParent();
238   MachineBasicBlock *BB = MO.getParent()->getParent();
239   Register DstReg = MRI->createVirtualRegister(&SubRC);
240 
241   if (MO.isReg()) {
242     unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
243     Register Reg = MO.getReg();
244     BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
245             .addReg(Reg, 0, ComposedSubIdx);
246 
247     return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
248                                      MO.isKill(), MO.isDead(), MO.isUndef(),
249                                      MO.isEarlyClobber(), 0, MO.isDebug(),
250                                      MO.isInternalRead());
251   }
252 
253   assert(MO.isImm());
254 
255   APInt Imm(64, MO.getImm());
256 
257   switch (SubIdx) {
258   default:
259     llvm_unreachable("do not know to split immediate with this sub index.");
260   case AMDGPU::sub0:
261     return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
262   case AMDGPU::sub1:
263     return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
264   }
265 }
266 
267 static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
268   switch (Opc) {
269   case AMDGPU::G_AND:
270     return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
271   case AMDGPU::G_OR:
272     return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
273   case AMDGPU::G_XOR:
274     return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
275   default:
276     llvm_unreachable("not a bit op");
277   }
278 }
279 
280 bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
281   Register DstReg = I.getOperand(0).getReg();
282   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
283 
284   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
285   if (DstRB->getID() != AMDGPU::SGPRRegBankID &&
286       DstRB->getID() != AMDGPU::VCCRegBankID)
287     return false;
288 
289   bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID &&
290                             STI.isWave64());
291   I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64)));
292 
293   // Dead implicit-def of scc
294   I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
295                                          true, // isImp
296                                          false, // isKill
297                                          true)); // isDead
298   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
299 }
300 
301 bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
302   MachineBasicBlock *BB = I.getParent();
303   MachineFunction *MF = BB->getParent();
304   Register DstReg = I.getOperand(0).getReg();
305   const DebugLoc &DL = I.getDebugLoc();
306   LLT Ty = MRI->getType(DstReg);
307   if (Ty.isVector())
308     return false;
309 
310   unsigned Size = Ty.getSizeInBits();
311   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
312   const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
313   const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;
314 
315   if (Size == 32) {
316     if (IsSALU) {
317       const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
318       MachineInstr *Add =
319         BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
320         .add(I.getOperand(1))
321         .add(I.getOperand(2));
322       I.eraseFromParent();
323       return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
324     }
325 
326     if (STI.hasAddNoCarry()) {
327       const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
328       I.setDesc(TII.get(Opc));
329       I.addOperand(*MF, MachineOperand::CreateImm(0));
330       I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
331       return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
332     }
333 
334     const unsigned Opc = Sub ? AMDGPU::V_SUB_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e64;
335 
336     Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
337     MachineInstr *Add
338       = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
339       .addDef(UnusedCarry, RegState::Dead)
340       .add(I.getOperand(1))
341       .add(I.getOperand(2))
342       .addImm(0);
343     I.eraseFromParent();
344     return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
345   }
346 
347   assert(!Sub && "illegal sub should not reach here");
348 
349   const TargetRegisterClass &RC
350     = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
351   const TargetRegisterClass &HalfRC
352     = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;
353 
354   MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
355   MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
356   MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
357   MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));
358 
359   Register DstLo = MRI->createVirtualRegister(&HalfRC);
360   Register DstHi = MRI->createVirtualRegister(&HalfRC);
361 
362   if (IsSALU) {
363     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
364       .add(Lo1)
365       .add(Lo2);
366     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
367       .add(Hi1)
368       .add(Hi2);
369   } else {
370     const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
371     Register CarryReg = MRI->createVirtualRegister(CarryRC);
372     BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo)
373       .addDef(CarryReg)
374       .add(Lo1)
375       .add(Lo2)
376       .addImm(0);
377     MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
378       .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
379       .add(Hi1)
380       .add(Hi2)
381       .addReg(CarryReg, RegState::Kill)
382       .addImm(0);
383 
384     if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
385       return false;
386   }
387 
388   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
389     .addReg(DstLo)
390     .addImm(AMDGPU::sub0)
391     .addReg(DstHi)
392     .addImm(AMDGPU::sub1);
393 
394 
395   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
396     return false;
397 
398   I.eraseFromParent();
399   return true;
400 }
401 
402 bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
403   MachineInstr &I) const {
404   MachineBasicBlock *BB = I.getParent();
405   MachineFunction *MF = BB->getParent();
406   const DebugLoc &DL = I.getDebugLoc();
407   Register Dst0Reg = I.getOperand(0).getReg();
408   Register Dst1Reg = I.getOperand(1).getReg();
409   const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
410                      I.getOpcode() == AMDGPU::G_UADDE;
411   const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
412                           I.getOpcode() == AMDGPU::G_USUBE;
413 
414   if (isVCC(Dst1Reg, *MRI)) {
415     unsigned NoCarryOpc =
416         IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
417     unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
418     I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
419     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
420     I.addOperand(*MF, MachineOperand::CreateImm(0));
421     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
422   }
423 
424   Register Src0Reg = I.getOperand(2).getReg();
425   Register Src1Reg = I.getOperand(3).getReg();
426 
427   if (HasCarryIn) {
428     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
429       .addReg(I.getOperand(4).getReg());
430   }
431 
432   unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
433   unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
434 
435   BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
436     .add(I.getOperand(2))
437     .add(I.getOperand(3));
438   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
439     .addReg(AMDGPU::SCC);
440 
441   if (!MRI->getRegClassOrNull(Dst1Reg))
442     MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);
443 
444   if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
445       !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
446       !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
447     return false;
448 
449   if (HasCarryIn &&
450       !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
451                                     AMDGPU::SReg_32RegClass, *MRI))
452     return false;
453 
454   I.eraseFromParent();
455   return true;
456 }
457 
458 // TODO: We should probably legalize these to only using 32-bit results.
459 bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
460   MachineBasicBlock *BB = I.getParent();
461   Register DstReg = I.getOperand(0).getReg();
462   Register SrcReg = I.getOperand(1).getReg();
463   LLT DstTy = MRI->getType(DstReg);
464   LLT SrcTy = MRI->getType(SrcReg);
465   const unsigned SrcSize = SrcTy.getSizeInBits();
466   unsigned DstSize = DstTy.getSizeInBits();
467 
468   // TODO: Should handle any multiple of 32 offset.
469   unsigned Offset = I.getOperand(2).getImm();
470   if (Offset % 32 != 0 || DstSize > 128)
471     return false;
472 
473   // 16-bit operations really use 32-bit registers.
474   // FIXME: Probably should not allow 16-bit G_EXTRACT results.
475   if (DstSize == 16)
476     DstSize = 32;
477 
478   const TargetRegisterClass *DstRC =
479     TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
480   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
481     return false;
482 
483   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
484   const TargetRegisterClass *SrcRC =
485     TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
486   if (!SrcRC)
487     return false;
488   unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
489                                                          DstSize / 32);
490   SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
491   if (!SrcRC)
492     return false;
493 
494   SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
495                                     *SrcRC, I.getOperand(1));
496   const DebugLoc &DL = I.getDebugLoc();
497   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
498     .addReg(SrcReg, 0, SubReg);
499 
500   I.eraseFromParent();
501   return true;
502 }
503 
504 bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
505   MachineBasicBlock *BB = MI.getParent();
506   Register DstReg = MI.getOperand(0).getReg();
507   LLT DstTy = MRI->getType(DstReg);
508   LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());
509 
510   const unsigned SrcSize = SrcTy.getSizeInBits();
511   if (SrcSize < 32)
512     return selectImpl(MI, *CoverageInfo);
513 
514   const DebugLoc &DL = MI.getDebugLoc();
515   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
516   const unsigned DstSize = DstTy.getSizeInBits();
517   const TargetRegisterClass *DstRC =
518     TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
519   if (!DstRC)
520     return false;
521 
522   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
523   MachineInstrBuilder MIB =
524     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
525   for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
526     MachineOperand &Src = MI.getOperand(I + 1);
527     MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
528     MIB.addImm(SubRegs[I]);
529 
530     const TargetRegisterClass *SrcRC
531       = TRI.getConstrainedRegClassForOperand(Src, *MRI);
532     if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
533       return false;
534   }
535 
536   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
537     return false;
538 
539   MI.eraseFromParent();
540   return true;
541 }
542 
543 bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
544   MachineBasicBlock *BB = MI.getParent();
545   const int NumDst = MI.getNumOperands() - 1;
546 
547   MachineOperand &Src = MI.getOperand(NumDst);
548 
549   Register SrcReg = Src.getReg();
550   Register DstReg0 = MI.getOperand(0).getReg();
551   LLT DstTy = MRI->getType(DstReg0);
552   LLT SrcTy = MRI->getType(SrcReg);
553 
554   const unsigned DstSize = DstTy.getSizeInBits();
555   const unsigned SrcSize = SrcTy.getSizeInBits();
556   const DebugLoc &DL = MI.getDebugLoc();
557   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
558 
559   const TargetRegisterClass *SrcRC =
560     TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
561   if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
562     return false;
563 
564   // Note we could have mixed SGPR and VGPR destination banks for an SGPR
565   // source, and this relies on the fact that the same subregister indices are
566   // used for both.
567   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
568   for (int I = 0, E = NumDst; I != E; ++I) {
569     MachineOperand &Dst = MI.getOperand(I);
570     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
571       .addReg(SrcReg, 0, SubRegs[I]);
572 
573     // Make sure the subregister index is valid for the source register.
574     SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]);
575     if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
576       return false;
577 
578     const TargetRegisterClass *DstRC =
579       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
580     if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
581       return false;
582   }
583 
584   MI.eraseFromParent();
585   return true;
586 }
587 
588 bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC(
589   MachineInstr &MI) const {
590   if (selectImpl(MI, *CoverageInfo))
591     return true;
592 
593   const LLT S32 = LLT::scalar(32);
594   const LLT V2S16 = LLT::fixed_vector(2, 16);
595 
596   Register Dst = MI.getOperand(0).getReg();
597   if (MRI->getType(Dst) != V2S16)
598     return false;
599 
600   const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
601   if (DstBank->getID() != AMDGPU::SGPRRegBankID)
602     return false;
603 
604   Register Src0 = MI.getOperand(1).getReg();
605   Register Src1 = MI.getOperand(2).getReg();
606   if (MRI->getType(Src0) != S32)
607     return false;
608 
609   const DebugLoc &DL = MI.getDebugLoc();
610   MachineBasicBlock *BB = MI.getParent();
611 
612   auto ConstSrc1 = getAnyConstantVRegValWithLookThrough(Src1, *MRI, true, true);
613   if (ConstSrc1) {
614     auto ConstSrc0 =
615         getAnyConstantVRegValWithLookThrough(Src0, *MRI, true, true);
616     if (ConstSrc0) {
617       const int64_t K0 = ConstSrc0->Value.getSExtValue();
618       const int64_t K1 = ConstSrc1->Value.getSExtValue();
619       uint32_t Lo16 = static_cast<uint32_t>(K0) & 0xffff;
620       uint32_t Hi16 = static_cast<uint32_t>(K1) & 0xffff;
621 
622       BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst)
623         .addImm(Lo16 | (Hi16 << 16));
624       MI.eraseFromParent();
625       return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI);
626     }
627   }
628 
629   // TODO: This should probably be a combine somewhere
630   // (build_vector_trunc $src0, undef -> copy $src0
631   MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
632   if (Src1Def && Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
633     MI.setDesc(TII.get(AMDGPU::COPY));
634     MI.removeOperand(2);
635     return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI) &&
636            RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI);
637   }
638 
639   Register ShiftSrc0;
640   Register ShiftSrc1;
641 
642   // With multiple uses of the shift, this will duplicate the shift and
643   // increase register pressure.
644   //
645   // (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16)
646   //  => (S_PACK_HH_B32_B16 $src0, $src1)
647   // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16))
648   //  => (S_PACK_LH_B32_B16 $src0, $src1)
649   // (build_vector_trunc $src0, $src1)
650   //  => (S_PACK_LL_B32_B16 $src0, $src1)
651 
652   bool Shift0 = mi_match(
653       Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_SpecificICst(16))));
654 
655   bool Shift1 = mi_match(
656       Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_SpecificICst(16))));
657 
658   unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
659   if (Shift0 && Shift1) {
660     Opc = AMDGPU::S_PACK_HH_B32_B16;
661     MI.getOperand(1).setReg(ShiftSrc0);
662     MI.getOperand(2).setReg(ShiftSrc1);
663   } else if (Shift1) {
664     Opc = AMDGPU::S_PACK_LH_B32_B16;
665     MI.getOperand(2).setReg(ShiftSrc1);
666   } else if (Shift0 && ConstSrc1 && ConstSrc1->Value == 0) {
667     // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16
668     auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)
669       .addReg(ShiftSrc0)
670       .addImm(16);
671 
672     MI.eraseFromParent();
673     return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
674   }
675 
676   MI.setDesc(TII.get(Opc));
677   return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
678 }
679 
680 bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
681   return selectG_ADD_SUB(I);
682 }
683 
684 bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
685   const MachineOperand &MO = I.getOperand(0);
686 
687   // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
688   // regbank check here is to know why getConstrainedRegClassForOperand failed.
689   const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
690   if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
691       (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
692     I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
693     return true;
694   }
695 
696   return false;
697 }
698 
699 bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
700   MachineBasicBlock *BB = I.getParent();
701 
702   Register DstReg = I.getOperand(0).getReg();
703   Register Src0Reg = I.getOperand(1).getReg();
704   Register Src1Reg = I.getOperand(2).getReg();
705   LLT Src1Ty = MRI->getType(Src1Reg);
706 
707   unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
708   unsigned InsSize = Src1Ty.getSizeInBits();
709 
710   int64_t Offset = I.getOperand(3).getImm();
711 
712   // FIXME: These cases should have been illegal and unnecessary to check here.
713   if (Offset % 32 != 0 || InsSize % 32 != 0)
714     return false;
715 
716   // Currently not handled by getSubRegFromChannel.
717   if (InsSize > 128)
718     return false;
719 
720   unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
721   if (SubReg == AMDGPU::NoSubRegister)
722     return false;
723 
724   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
725   const TargetRegisterClass *DstRC =
726     TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
727   if (!DstRC)
728     return false;
729 
730   const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
731   const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
732   const TargetRegisterClass *Src0RC =
733     TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank, *MRI);
734   const TargetRegisterClass *Src1RC =
735     TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank, *MRI);
736 
737   // Deal with weird cases where the class only partially supports the subreg
738   // index.
739   Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
740   if (!Src0RC || !Src1RC)
741     return false;
742 
743   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
744       !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
745       !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
746     return false;
747 
748   const DebugLoc &DL = I.getDebugLoc();
749   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
750     .addReg(Src0Reg)
751     .addReg(Src1Reg)
752     .addImm(SubReg);
753 
754   I.eraseFromParent();
755   return true;
756 }
757 
758 bool AMDGPUInstructionSelector::selectG_SBFX_UBFX(MachineInstr &MI) const {
759   Register DstReg = MI.getOperand(0).getReg();
760   Register SrcReg = MI.getOperand(1).getReg();
761   Register OffsetReg = MI.getOperand(2).getReg();
762   Register WidthReg = MI.getOperand(3).getReg();
763 
764   assert(RBI.getRegBank(DstReg, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID &&
765          "scalar BFX instructions are expanded in regbankselect");
766   assert(MRI->getType(MI.getOperand(0).getReg()).getSizeInBits() == 32 &&
767          "64-bit vector BFX instructions are expanded in regbankselect");
768 
769   const DebugLoc &DL = MI.getDebugLoc();
770   MachineBasicBlock *MBB = MI.getParent();
771 
772   bool IsSigned = MI.getOpcode() == TargetOpcode::G_SBFX;
773   unsigned Opc = IsSigned ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
774   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), DstReg)
775                  .addReg(SrcReg)
776                  .addReg(OffsetReg)
777                  .addReg(WidthReg);
778   MI.eraseFromParent();
779   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
780 }
781 
782 bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
783   if (STI.getLDSBankCount() != 16)
784     return selectImpl(MI, *CoverageInfo);
785 
786   Register Dst = MI.getOperand(0).getReg();
787   Register Src0 = MI.getOperand(2).getReg();
788   Register M0Val = MI.getOperand(6).getReg();
789   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
790       !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
791       !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
792     return false;
793 
794   // This requires 2 instructions. It is possible to write a pattern to support
795   // this, but the generated isel emitter doesn't correctly deal with multiple
796   // output instructions using the same physical register input. The copy to m0
797   // is incorrectly placed before the second instruction.
798   //
799   // TODO: Match source modifiers.
800 
801   Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
802   const DebugLoc &DL = MI.getDebugLoc();
803   MachineBasicBlock *MBB = MI.getParent();
804 
805   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
806     .addReg(M0Val);
807   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
808     .addImm(2)
809     .addImm(MI.getOperand(4).getImm())  // $attr
810     .addImm(MI.getOperand(3).getImm()); // $attrchan
811 
812   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
813     .addImm(0)                          // $src0_modifiers
814     .addReg(Src0)                       // $src0
815     .addImm(MI.getOperand(4).getImm())  // $attr
816     .addImm(MI.getOperand(3).getImm())  // $attrchan
817     .addImm(0)                          // $src2_modifiers
818     .addReg(InterpMov)                  // $src2 - 2 f16 values selected by high
819     .addImm(MI.getOperand(5).getImm())  // $high
820     .addImm(0)                          // $clamp
821     .addImm(0);                         // $omod
822 
823   MI.eraseFromParent();
824   return true;
825 }
826 
827 // Writelane is special in that it can use SGPR and M0 (which would normally
828 // count as using the constant bus twice - but in this case it is allowed since
829 // the lane selector doesn't count as a use of the constant bus). However, it is
830 // still required to abide by the 1 SGPR rule. Fix this up if we might have
831 // multiple SGPRs.
832 bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const {
833   // With a constant bus limit of at least 2, there's no issue.
834   if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1)
835     return selectImpl(MI, *CoverageInfo);
836 
837   MachineBasicBlock *MBB = MI.getParent();
838   const DebugLoc &DL = MI.getDebugLoc();
839   Register VDst = MI.getOperand(0).getReg();
840   Register Val = MI.getOperand(2).getReg();
841   Register LaneSelect = MI.getOperand(3).getReg();
842   Register VDstIn = MI.getOperand(4).getReg();
843 
844   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst);
845 
846   Optional<ValueAndVReg> ConstSelect =
847       getIConstantVRegValWithLookThrough(LaneSelect, *MRI);
848   if (ConstSelect) {
849     // The selector has to be an inline immediate, so we can use whatever for
850     // the other operands.
851     MIB.addReg(Val);
852     MIB.addImm(ConstSelect->Value.getSExtValue() &
853                maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
854   } else {
855     Optional<ValueAndVReg> ConstVal =
856         getIConstantVRegValWithLookThrough(Val, *MRI);
857 
858     // If the value written is an inline immediate, we can get away without a
859     // copy to m0.
860     if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value.getSExtValue(),
861                                                  STI.hasInv2PiInlineImm())) {
862       MIB.addImm(ConstVal->Value.getSExtValue());
863       MIB.addReg(LaneSelect);
864     } else {
865       MIB.addReg(Val);
866 
867       // If the lane selector was originally in a VGPR and copied with
868       // readfirstlane, there's a hazard to read the same SGPR from the
869       // VALU. Constrain to a different SGPR to help avoid needing a nop later.
870       RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI);
871 
872       BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
873         .addReg(LaneSelect);
874       MIB.addReg(AMDGPU::M0);
875     }
876   }
877 
878   MIB.addReg(VDstIn);
879 
880   MI.eraseFromParent();
881   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
882 }
883 
884 // We need to handle this here because tablegen doesn't support matching
885 // instructions with multiple outputs.
886 bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
887   Register Dst0 = MI.getOperand(0).getReg();
888   Register Dst1 = MI.getOperand(1).getReg();
889 
890   LLT Ty = MRI->getType(Dst0);
891   unsigned Opc;
892   if (Ty == LLT::scalar(32))
893     Opc = AMDGPU::V_DIV_SCALE_F32_e64;
894   else if (Ty == LLT::scalar(64))
895     Opc = AMDGPU::V_DIV_SCALE_F64_e64;
896   else
897     return false;
898 
899   // TODO: Match source modifiers.
900 
901   const DebugLoc &DL = MI.getDebugLoc();
902   MachineBasicBlock *MBB = MI.getParent();
903 
904   Register Numer = MI.getOperand(3).getReg();
905   Register Denom = MI.getOperand(4).getReg();
906   unsigned ChooseDenom = MI.getOperand(5).getImm();
907 
908   Register Src0 = ChooseDenom != 0 ? Numer : Denom;
909 
910   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0)
911     .addDef(Dst1)
912     .addImm(0)     // $src0_modifiers
913     .addUse(Src0)  // $src0
914     .addImm(0)     // $src1_modifiers
915     .addUse(Denom) // $src1
916     .addImm(0)     // $src2_modifiers
917     .addUse(Numer) // $src2
918     .addImm(0)     // $clamp
919     .addImm(0);    // $omod
920 
921   MI.eraseFromParent();
922   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
923 }
924 
925 bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
926   unsigned IntrinsicID = I.getIntrinsicID();
927   switch (IntrinsicID) {
928   case Intrinsic::amdgcn_if_break: {
929     MachineBasicBlock *BB = I.getParent();
930 
931     // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
932     // SelectionDAG uses for wave32 vs wave64.
933     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
934       .add(I.getOperand(0))
935       .add(I.getOperand(2))
936       .add(I.getOperand(3));
937 
938     Register DstReg = I.getOperand(0).getReg();
939     Register Src0Reg = I.getOperand(2).getReg();
940     Register Src1Reg = I.getOperand(3).getReg();
941 
942     I.eraseFromParent();
943 
944     for (Register Reg : { DstReg, Src0Reg, Src1Reg })
945       MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
946 
947     return true;
948   }
949   case Intrinsic::amdgcn_interp_p1_f16:
950     return selectInterpP1F16(I);
951   case Intrinsic::amdgcn_wqm:
952     return constrainCopyLikeIntrin(I, AMDGPU::WQM);
953   case Intrinsic::amdgcn_softwqm:
954     return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
955   case Intrinsic::amdgcn_strict_wwm:
956   case Intrinsic::amdgcn_wwm:
957     return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WWM);
958   case Intrinsic::amdgcn_strict_wqm:
959     return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WQM);
960   case Intrinsic::amdgcn_writelane:
961     return selectWritelane(I);
962   case Intrinsic::amdgcn_div_scale:
963     return selectDivScale(I);
964   case Intrinsic::amdgcn_icmp:
965     return selectIntrinsicIcmp(I);
966   case Intrinsic::amdgcn_ballot:
967     return selectBallot(I);
968   case Intrinsic::amdgcn_reloc_constant:
969     return selectRelocConstant(I);
970   case Intrinsic::amdgcn_groupstaticsize:
971     return selectGroupStaticSize(I);
972   case Intrinsic::returnaddress:
973     return selectReturnAddress(I);
974   default:
975     return selectImpl(I, *CoverageInfo);
976   }
977 }
978 
979 static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
980   if (Size != 32 && Size != 64)
981     return -1;
982   switch (P) {
983   default:
984     llvm_unreachable("Unknown condition code!");
985   case CmpInst::ICMP_NE:
986     return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
987   case CmpInst::ICMP_EQ:
988     return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
989   case CmpInst::ICMP_SGT:
990     return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
991   case CmpInst::ICMP_SGE:
992     return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
993   case CmpInst::ICMP_SLT:
994     return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
995   case CmpInst::ICMP_SLE:
996     return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
997   case CmpInst::ICMP_UGT:
998     return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
999   case CmpInst::ICMP_UGE:
1000     return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
1001   case CmpInst::ICMP_ULT:
1002     return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
1003   case CmpInst::ICMP_ULE:
1004     return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
1005   }
1006 }
1007 
1008 int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
1009                                               unsigned Size) const {
1010   if (Size == 64) {
1011     if (!STI.hasScalarCompareEq64())
1012       return -1;
1013 
1014     switch (P) {
1015     case CmpInst::ICMP_NE:
1016       return AMDGPU::S_CMP_LG_U64;
1017     case CmpInst::ICMP_EQ:
1018       return AMDGPU::S_CMP_EQ_U64;
1019     default:
1020       return -1;
1021     }
1022   }
1023 
1024   if (Size != 32)
1025     return -1;
1026 
1027   switch (P) {
1028   case CmpInst::ICMP_NE:
1029     return AMDGPU::S_CMP_LG_U32;
1030   case CmpInst::ICMP_EQ:
1031     return AMDGPU::S_CMP_EQ_U32;
1032   case CmpInst::ICMP_SGT:
1033     return AMDGPU::S_CMP_GT_I32;
1034   case CmpInst::ICMP_SGE:
1035     return AMDGPU::S_CMP_GE_I32;
1036   case CmpInst::ICMP_SLT:
1037     return AMDGPU::S_CMP_LT_I32;
1038   case CmpInst::ICMP_SLE:
1039     return AMDGPU::S_CMP_LE_I32;
1040   case CmpInst::ICMP_UGT:
1041     return AMDGPU::S_CMP_GT_U32;
1042   case CmpInst::ICMP_UGE:
1043     return AMDGPU::S_CMP_GE_U32;
1044   case CmpInst::ICMP_ULT:
1045     return AMDGPU::S_CMP_LT_U32;
1046   case CmpInst::ICMP_ULE:
1047     return AMDGPU::S_CMP_LE_U32;
1048   default:
1049     llvm_unreachable("Unknown condition code!");
1050   }
1051 }
1052 
1053 bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
1054   MachineBasicBlock *BB = I.getParent();
1055   const DebugLoc &DL = I.getDebugLoc();
1056 
1057   Register SrcReg = I.getOperand(2).getReg();
1058   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1059 
1060   auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
1061 
1062   Register CCReg = I.getOperand(0).getReg();
1063   if (!isVCC(CCReg, *MRI)) {
1064     int Opcode = getS_CMPOpcode(Pred, Size);
1065     if (Opcode == -1)
1066       return false;
1067     MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
1068             .add(I.getOperand(2))
1069             .add(I.getOperand(3));
1070     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
1071       .addReg(AMDGPU::SCC);
1072     bool Ret =
1073         constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
1074         RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
1075     I.eraseFromParent();
1076     return Ret;
1077   }
1078 
1079   int Opcode = getV_CMPOpcode(Pred, Size);
1080   if (Opcode == -1)
1081     return false;
1082 
1083   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
1084             I.getOperand(0).getReg())
1085             .add(I.getOperand(2))
1086             .add(I.getOperand(3));
1087   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
1088                                *TRI.getBoolRC(), *MRI);
1089   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1090   I.eraseFromParent();
1091   return Ret;
1092 }
1093 
1094 bool AMDGPUInstructionSelector::selectIntrinsicIcmp(MachineInstr &I) const {
1095   Register Dst = I.getOperand(0).getReg();
1096   if (isVCC(Dst, *MRI))
1097     return false;
1098 
1099   if (MRI->getType(Dst).getSizeInBits() != STI.getWavefrontSize())
1100     return false;
1101 
1102   MachineBasicBlock *BB = I.getParent();
1103   const DebugLoc &DL = I.getDebugLoc();
1104   Register SrcReg = I.getOperand(2).getReg();
1105   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1106 
1107   auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm());
1108   if (!ICmpInst::isIntPredicate(static_cast<ICmpInst::Predicate>(Pred))) {
1109     MachineInstr *ICmp =
1110         BuildMI(*BB, &I, DL, TII.get(AMDGPU::IMPLICIT_DEF), Dst);
1111 
1112     if (!RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
1113                                       *TRI.getBoolRC(), *MRI))
1114       return false;
1115     I.eraseFromParent();
1116     return true;
1117   }
1118 
1119   int Opcode = getV_CMPOpcode(Pred, Size);
1120   if (Opcode == -1)
1121     return false;
1122 
1123   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst)
1124                            .add(I.getOperand(2))
1125                            .add(I.getOperand(3));
1126   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), *TRI.getBoolRC(),
1127                                *MRI);
1128   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1129   I.eraseFromParent();
1130   return Ret;
1131 }
1132 
1133 bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
1134   MachineBasicBlock *BB = I.getParent();
1135   const DebugLoc &DL = I.getDebugLoc();
1136   Register DstReg = I.getOperand(0).getReg();
1137   const unsigned Size = MRI->getType(DstReg).getSizeInBits();
1138   const bool Is64 = Size == 64;
1139 
1140   if (Size != STI.getWavefrontSize())
1141     return false;
1142 
1143   Optional<ValueAndVReg> Arg =
1144       getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI);
1145 
1146   if (Arg.hasValue()) {
1147     const int64_t Value = Arg.getValue().Value.getSExtValue();
1148     if (Value == 0) {
1149       unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
1150       BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
1151     } else if (Value == -1) { // all ones
1152       Register SrcReg = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
1153       BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1154     } else
1155       return false;
1156   } else {
1157     Register SrcReg = I.getOperand(2).getReg();
1158     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1159   }
1160 
1161   I.eraseFromParent();
1162   return true;
1163 }
1164 
1165 bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const {
1166   Register DstReg = I.getOperand(0).getReg();
1167   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
1168   const TargetRegisterClass *DstRC =
1169     TRI.getRegClassForSizeOnBank(32, *DstBank, *MRI);
1170   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
1171     return false;
1172 
1173   const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID;
1174 
1175   Module *M = MF->getFunction().getParent();
1176   const MDNode *Metadata = I.getOperand(2).getMetadata();
1177   auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString();
1178   auto RelocSymbol = cast<GlobalVariable>(
1179     M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext())));
1180 
1181   MachineBasicBlock *BB = I.getParent();
1182   BuildMI(*BB, &I, I.getDebugLoc(),
1183           TII.get(IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg)
1184     .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO);
1185 
1186   I.eraseFromParent();
1187   return true;
1188 }
1189 
1190 bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const {
1191   Triple::OSType OS = MF->getTarget().getTargetTriple().getOS();
1192 
1193   Register DstReg = I.getOperand(0).getReg();
1194   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1195   unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ?
1196     AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1197 
1198   MachineBasicBlock *MBB = I.getParent();
1199   const DebugLoc &DL = I.getDebugLoc();
1200 
1201   auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg);
1202 
1203   if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) {
1204     const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1205     MIB.addImm(MFI->getLDSSize());
1206   } else {
1207     Module *M = MF->getFunction().getParent();
1208     const GlobalValue *GV
1209       = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize);
1210     MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO);
1211   }
1212 
1213   I.eraseFromParent();
1214   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1215 }
1216 
1217 bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const {
1218   MachineBasicBlock *MBB = I.getParent();
1219   MachineFunction &MF = *MBB->getParent();
1220   const DebugLoc &DL = I.getDebugLoc();
1221 
1222   MachineOperand &Dst = I.getOperand(0);
1223   Register DstReg = Dst.getReg();
1224   unsigned Depth = I.getOperand(2).getImm();
1225 
1226   const TargetRegisterClass *RC
1227     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
1228   if (!RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) ||
1229       !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
1230     return false;
1231 
1232   // Check for kernel and shader functions
1233   if (Depth != 0 ||
1234       MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
1235     BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
1236       .addImm(0);
1237     I.eraseFromParent();
1238     return true;
1239   }
1240 
1241   MachineFrameInfo &MFI = MF.getFrameInfo();
1242   // There is a call to @llvm.returnaddress in this function
1243   MFI.setReturnAddressIsTaken(true);
1244 
1245   // Get the return address reg and mark it as an implicit live-in
1246   Register ReturnAddrReg = TRI.getReturnAddressReg(MF);
1247   Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg,
1248                                              AMDGPU::SReg_64RegClass, DL);
1249   BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
1250     .addReg(LiveIn);
1251   I.eraseFromParent();
1252   return true;
1253 }
1254 
1255 bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
1256   // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
1257   // SelectionDAG uses for wave32 vs wave64.
1258   MachineBasicBlock *BB = MI.getParent();
1259   BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
1260       .add(MI.getOperand(1));
1261 
1262   Register Reg = MI.getOperand(1).getReg();
1263   MI.eraseFromParent();
1264 
1265   if (!MRI->getRegClassOrNull(Reg))
1266     MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
1267   return true;
1268 }
1269 
1270 bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
1271   MachineInstr &MI, Intrinsic::ID IntrID) const {
1272   MachineBasicBlock *MBB = MI.getParent();
1273   MachineFunction *MF = MBB->getParent();
1274   const DebugLoc &DL = MI.getDebugLoc();
1275 
1276   unsigned IndexOperand = MI.getOperand(7).getImm();
1277   bool WaveRelease = MI.getOperand(8).getImm() != 0;
1278   bool WaveDone = MI.getOperand(9).getImm() != 0;
1279 
1280   if (WaveDone && !WaveRelease)
1281     report_fatal_error("ds_ordered_count: wave_done requires wave_release");
1282 
1283   unsigned OrderedCountIndex = IndexOperand & 0x3f;
1284   IndexOperand &= ~0x3f;
1285   unsigned CountDw = 0;
1286 
1287   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
1288     CountDw = (IndexOperand >> 24) & 0xf;
1289     IndexOperand &= ~(0xf << 24);
1290 
1291     if (CountDw < 1 || CountDw > 4) {
1292       report_fatal_error(
1293         "ds_ordered_count: dword count must be between 1 and 4");
1294     }
1295   }
1296 
1297   if (IndexOperand)
1298     report_fatal_error("ds_ordered_count: bad index operand");
1299 
1300   unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
1301   unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF);
1302 
1303   unsigned Offset0 = OrderedCountIndex << 2;
1304   unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
1305                      (Instruction << 4);
1306 
1307   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
1308     Offset1 |= (CountDw - 1) << 6;
1309 
1310   unsigned Offset = Offset0 | (Offset1 << 8);
1311 
1312   Register M0Val = MI.getOperand(2).getReg();
1313   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1314     .addReg(M0Val);
1315 
1316   Register DstReg = MI.getOperand(0).getReg();
1317   Register ValReg = MI.getOperand(3).getReg();
1318   MachineInstrBuilder DS =
1319     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
1320       .addReg(ValReg)
1321       .addImm(Offset)
1322       .cloneMemRefs(MI);
1323 
1324   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
1325     return false;
1326 
1327   bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
1328   MI.eraseFromParent();
1329   return Ret;
1330 }
1331 
1332 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
1333   switch (IntrID) {
1334   case Intrinsic::amdgcn_ds_gws_init:
1335     return AMDGPU::DS_GWS_INIT;
1336   case Intrinsic::amdgcn_ds_gws_barrier:
1337     return AMDGPU::DS_GWS_BARRIER;
1338   case Intrinsic::amdgcn_ds_gws_sema_v:
1339     return AMDGPU::DS_GWS_SEMA_V;
1340   case Intrinsic::amdgcn_ds_gws_sema_br:
1341     return AMDGPU::DS_GWS_SEMA_BR;
1342   case Intrinsic::amdgcn_ds_gws_sema_p:
1343     return AMDGPU::DS_GWS_SEMA_P;
1344   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1345     return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
1346   default:
1347     llvm_unreachable("not a gws intrinsic");
1348   }
1349 }
1350 
1351 bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
1352                                                      Intrinsic::ID IID) const {
1353   if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
1354       !STI.hasGWSSemaReleaseAll())
1355     return false;
1356 
1357   // intrinsic ID, vsrc, offset
1358   const bool HasVSrc = MI.getNumOperands() == 3;
1359   assert(HasVSrc || MI.getNumOperands() == 2);
1360 
1361   Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
1362   const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
1363   if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
1364     return false;
1365 
1366   MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1367   assert(OffsetDef);
1368 
1369   unsigned ImmOffset;
1370 
1371   MachineBasicBlock *MBB = MI.getParent();
1372   const DebugLoc &DL = MI.getDebugLoc();
1373 
1374   MachineInstr *Readfirstlane = nullptr;
1375 
1376   // If we legalized the VGPR input, strip out the readfirstlane to analyze the
1377   // incoming offset, in case there's an add of a constant. We'll have to put it
1378   // back later.
1379   if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
1380     Readfirstlane = OffsetDef;
1381     BaseOffset = OffsetDef->getOperand(1).getReg();
1382     OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1383   }
1384 
1385   if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
1386     // If we have a constant offset, try to use the 0 in m0 as the base.
1387     // TODO: Look into changing the default m0 initialization value. If the
1388     // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
1389     // the immediate offset.
1390 
1391     ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
1392     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1393       .addImm(0);
1394   } else {
1395     std::tie(BaseOffset, ImmOffset) =
1396         AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset);
1397 
1398     if (Readfirstlane) {
1399       // We have the constant offset now, so put the readfirstlane back on the
1400       // variable component.
1401       if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
1402         return false;
1403 
1404       Readfirstlane->getOperand(1).setReg(BaseOffset);
1405       BaseOffset = Readfirstlane->getOperand(0).getReg();
1406     } else {
1407       if (!RBI.constrainGenericRegister(BaseOffset,
1408                                         AMDGPU::SReg_32RegClass, *MRI))
1409         return false;
1410     }
1411 
1412     Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1413     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
1414       .addReg(BaseOffset)
1415       .addImm(16);
1416 
1417     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1418       .addReg(M0Base);
1419   }
1420 
1421   // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
1422   // offset field) % 64. Some versions of the programming guide omit the m0
1423   // part, or claim it's from offset 0.
1424   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));
1425 
1426   if (HasVSrc) {
1427     Register VSrc = MI.getOperand(1).getReg();
1428 
1429     if (STI.needsAlignedVGPRs()) {
1430       // Add implicit aligned super-reg to force alignment on the data operand.
1431       Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1432       BuildMI(*MBB, &*MIB, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
1433       Register NewVR =
1434           MRI->createVirtualRegister(&AMDGPU::VReg_64_Align2RegClass);
1435       BuildMI(*MBB, &*MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), NewVR)
1436           .addReg(VSrc, 0, MI.getOperand(1).getSubReg())
1437           .addImm(AMDGPU::sub0)
1438           .addReg(Undef)
1439           .addImm(AMDGPU::sub1);
1440       MIB.addReg(NewVR, 0, AMDGPU::sub0);
1441       MIB.addReg(NewVR, RegState::Implicit);
1442     } else {
1443       MIB.addReg(VSrc);
1444     }
1445 
1446     if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
1447       return false;
1448   }
1449 
1450   MIB.addImm(ImmOffset)
1451      .cloneMemRefs(MI);
1452 
1453   MI.eraseFromParent();
1454   return true;
1455 }
1456 
1457 bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
1458                                                       bool IsAppend) const {
1459   Register PtrBase = MI.getOperand(2).getReg();
1460   LLT PtrTy = MRI->getType(PtrBase);
1461   bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
1462 
1463   unsigned Offset;
1464   std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));
1465 
1466   // TODO: Should this try to look through readfirstlane like GWS?
1467   if (!isDSOffsetLegal(PtrBase, Offset)) {
1468     PtrBase = MI.getOperand(2).getReg();
1469     Offset = 0;
1470   }
1471 
1472   MachineBasicBlock *MBB = MI.getParent();
1473   const DebugLoc &DL = MI.getDebugLoc();
1474   const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
1475 
1476   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1477     .addReg(PtrBase);
1478   if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI))
1479     return false;
1480 
1481   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
1482     .addImm(Offset)
1483     .addImm(IsGDS ? -1 : 0)
1484     .cloneMemRefs(MI);
1485   MI.eraseFromParent();
1486   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1487 }
1488 
1489 bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const {
1490   if (TM.getOptLevel() > CodeGenOpt::None) {
1491     unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second;
1492     if (WGSize <= STI.getWavefrontSize()) {
1493       MachineBasicBlock *MBB = MI.getParent();
1494       const DebugLoc &DL = MI.getDebugLoc();
1495       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER));
1496       MI.eraseFromParent();
1497       return true;
1498     }
1499   }
1500   return selectImpl(MI, *CoverageInfo);
1501 }
1502 
1503 static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
1504                          bool &IsTexFail) {
1505   if (TexFailCtrl)
1506     IsTexFail = true;
1507 
1508   TFE = (TexFailCtrl & 0x1) ? true : false;
1509   TexFailCtrl &= ~(uint64_t)0x1;
1510   LWE = (TexFailCtrl & 0x2) ? true : false;
1511   TexFailCtrl &= ~(uint64_t)0x2;
1512 
1513   return TexFailCtrl == 0;
1514 }
1515 
1516 bool AMDGPUInstructionSelector::selectImageIntrinsic(
1517   MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
1518   MachineBasicBlock *MBB = MI.getParent();
1519   const DebugLoc &DL = MI.getDebugLoc();
1520 
1521   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
1522     AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
1523 
1524   const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
1525   unsigned IntrOpcode = Intr->BaseOpcode;
1526   const bool IsGFX10Plus = AMDGPU::isGFX10Plus(STI);
1527 
1528   const unsigned ArgOffset = MI.getNumExplicitDefs() + 1;
1529 
1530   Register VDataIn, VDataOut;
1531   LLT VDataTy;
1532   int NumVDataDwords = -1;
1533   bool IsD16 = MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16 ||
1534                MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16;
1535 
1536   bool Unorm;
1537   if (!BaseOpcode->Sampler)
1538     Unorm = true;
1539   else
1540     Unorm = MI.getOperand(ArgOffset + Intr->UnormIndex).getImm() != 0;
1541 
1542   bool TFE;
1543   bool LWE;
1544   bool IsTexFail = false;
1545   if (!parseTexFail(MI.getOperand(ArgOffset + Intr->TexFailCtrlIndex).getImm(),
1546                     TFE, LWE, IsTexFail))
1547     return false;
1548 
1549   const int Flags = MI.getOperand(ArgOffset + Intr->NumArgs).getImm();
1550   const bool IsA16 = (Flags & 1) != 0;
1551   const bool IsG16 = (Flags & 2) != 0;
1552 
1553   // A16 implies 16 bit gradients if subtarget doesn't support G16
1554   if (IsA16 && !STI.hasG16() && !IsG16)
1555     return false;
1556 
1557   unsigned DMask = 0;
1558   unsigned DMaskLanes = 0;
1559 
1560   if (BaseOpcode->Atomic) {
1561     VDataOut = MI.getOperand(0).getReg();
1562     VDataIn = MI.getOperand(2).getReg();
1563     LLT Ty = MRI->getType(VDataIn);
1564 
1565     // Be careful to allow atomic swap on 16-bit element vectors.
1566     const bool Is64Bit = BaseOpcode->AtomicX2 ?
1567       Ty.getSizeInBits() == 128 :
1568       Ty.getSizeInBits() == 64;
1569 
1570     if (BaseOpcode->AtomicX2) {
1571       assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister);
1572 
1573       DMask = Is64Bit ? 0xf : 0x3;
1574       NumVDataDwords = Is64Bit ? 4 : 2;
1575     } else {
1576       DMask = Is64Bit ? 0x3 : 0x1;
1577       NumVDataDwords = Is64Bit ? 2 : 1;
1578     }
1579   } else {
1580     DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm();
1581     DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
1582 
1583     if (BaseOpcode->Store) {
1584       VDataIn = MI.getOperand(1).getReg();
1585       VDataTy = MRI->getType(VDataIn);
1586       NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
1587     } else {
1588       VDataOut = MI.getOperand(0).getReg();
1589       VDataTy = MRI->getType(VDataOut);
1590       NumVDataDwords = DMaskLanes;
1591 
1592       if (IsD16 && !STI.hasUnpackedD16VMem())
1593         NumVDataDwords = (DMaskLanes + 1) / 2;
1594     }
1595   }
1596 
1597   // Set G16 opcode
1598   if (IsG16 && !IsA16) {
1599     const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
1600         AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode);
1601     assert(G16MappingInfo);
1602     IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16
1603   }
1604 
1605   // TODO: Check this in verifier.
1606   assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");
1607 
1608   unsigned CPol = MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm();
1609   if (BaseOpcode->Atomic)
1610     CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization
1611   if (CPol & ~AMDGPU::CPol::ALL)
1612     return false;
1613 
1614   int NumVAddrRegs = 0;
1615   int NumVAddrDwords = 0;
1616   for (unsigned I = Intr->VAddrStart; I < Intr->VAddrEnd; I++) {
1617     // Skip the $noregs and 0s inserted during legalization.
1618     MachineOperand &AddrOp = MI.getOperand(ArgOffset + I);
1619     if (!AddrOp.isReg())
1620       continue; // XXX - Break?
1621 
1622     Register Addr = AddrOp.getReg();
1623     if (!Addr)
1624       break;
1625 
1626     ++NumVAddrRegs;
1627     NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32;
1628   }
1629 
1630   // The legalizer preprocessed the intrinsic arguments. If we aren't using
1631   // NSA, these should have been packed into a single value in the first
1632   // address register
1633   const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs;
1634   if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) {
1635     LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n");
1636     return false;
1637   }
1638 
1639   if (IsTexFail)
1640     ++NumVDataDwords;
1641 
1642   int Opcode = -1;
1643   if (IsGFX10Plus) {
1644     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1645                                    UseNSA ? AMDGPU::MIMGEncGfx10NSA
1646                                           : AMDGPU::MIMGEncGfx10Default,
1647                                    NumVDataDwords, NumVAddrDwords);
1648   } else {
1649     if (STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
1650       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
1651                                      NumVDataDwords, NumVAddrDwords);
1652     if (Opcode == -1)
1653       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
1654                                      NumVDataDwords, NumVAddrDwords);
1655   }
1656   assert(Opcode != -1);
1657 
1658   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode))
1659     .cloneMemRefs(MI);
1660 
1661   if (VDataOut) {
1662     if (BaseOpcode->AtomicX2) {
1663       const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64;
1664 
1665       Register TmpReg = MRI->createVirtualRegister(
1666         Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
1667       unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
1668 
1669       MIB.addDef(TmpReg);
1670       if (!MRI->use_empty(VDataOut)) {
1671         BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut)
1672             .addReg(TmpReg, RegState::Kill, SubReg);
1673       }
1674 
1675     } else {
1676       MIB.addDef(VDataOut); // vdata output
1677     }
1678   }
1679 
1680   if (VDataIn)
1681     MIB.addReg(VDataIn); // vdata input
1682 
1683   for (int I = 0; I != NumVAddrRegs; ++I) {
1684     MachineOperand &SrcOp = MI.getOperand(ArgOffset + Intr->VAddrStart + I);
1685     if (SrcOp.isReg()) {
1686       assert(SrcOp.getReg() != 0);
1687       MIB.addReg(SrcOp.getReg());
1688     }
1689   }
1690 
1691   MIB.addReg(MI.getOperand(ArgOffset + Intr->RsrcIndex).getReg());
1692   if (BaseOpcode->Sampler)
1693     MIB.addReg(MI.getOperand(ArgOffset + Intr->SampIndex).getReg());
1694 
1695   MIB.addImm(DMask); // dmask
1696 
1697   if (IsGFX10Plus)
1698     MIB.addImm(DimInfo->Encoding);
1699   MIB.addImm(Unorm);
1700 
1701   MIB.addImm(CPol);
1702   MIB.addImm(IsA16 &&  // a16 or r128
1703              STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0);
1704   if (IsGFX10Plus)
1705     MIB.addImm(IsA16 ? -1 : 0);
1706 
1707   MIB.addImm(TFE); // tfe
1708   MIB.addImm(LWE); // lwe
1709   if (!IsGFX10Plus)
1710     MIB.addImm(DimInfo->DA ? -1 : 0);
1711   if (BaseOpcode->HasD16)
1712     MIB.addImm(IsD16 ? -1 : 0);
1713 
1714   if (IsTexFail) {
1715     // An image load instruction with TFE/LWE only conditionally writes to its
1716     // result registers. Initialize them to zero so that we always get well
1717     // defined result values.
1718     assert(VDataOut && !VDataIn);
1719     Register Tied = MRI->cloneVirtualRegister(VDataOut);
1720     Register Zero = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1721     BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::V_MOV_B32_e32), Zero)
1722       .addImm(0);
1723     auto Parts = TRI.getRegSplitParts(MRI->getRegClass(Tied), 4);
1724     if (STI.usePRTStrictNull()) {
1725       // With enable-prt-strict-null enabled, initialize all result registers to
1726       // zero.
1727       auto RegSeq =
1728           BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1729       for (auto Sub : Parts)
1730         RegSeq.addReg(Zero).addImm(Sub);
1731     } else {
1732       // With enable-prt-strict-null disabled, only initialize the extra TFE/LWE
1733       // result register.
1734       Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1735       BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
1736       auto RegSeq =
1737           BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1738       for (auto Sub : Parts.drop_back(1))
1739         RegSeq.addReg(Undef).addImm(Sub);
1740       RegSeq.addReg(Zero).addImm(Parts.back());
1741     }
1742     MIB.addReg(Tied, RegState::Implicit);
1743     MIB->tieOperands(0, MIB->getNumOperands() - 1);
1744   }
1745 
1746   MI.eraseFromParent();
1747   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1748 }
1749 
1750 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
1751     MachineInstr &I) const {
1752   unsigned IntrinsicID = I.getIntrinsicID();
1753   switch (IntrinsicID) {
1754   case Intrinsic::amdgcn_end_cf:
1755     return selectEndCfIntrinsic(I);
1756   case Intrinsic::amdgcn_ds_ordered_add:
1757   case Intrinsic::amdgcn_ds_ordered_swap:
1758     return selectDSOrderedIntrinsic(I, IntrinsicID);
1759   case Intrinsic::amdgcn_ds_gws_init:
1760   case Intrinsic::amdgcn_ds_gws_barrier:
1761   case Intrinsic::amdgcn_ds_gws_sema_v:
1762   case Intrinsic::amdgcn_ds_gws_sema_br:
1763   case Intrinsic::amdgcn_ds_gws_sema_p:
1764   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1765     return selectDSGWSIntrinsic(I, IntrinsicID);
1766   case Intrinsic::amdgcn_ds_append:
1767     return selectDSAppendConsume(I, true);
1768   case Intrinsic::amdgcn_ds_consume:
1769     return selectDSAppendConsume(I, false);
1770   case Intrinsic::amdgcn_s_barrier:
1771     return selectSBarrier(I);
1772   case Intrinsic::amdgcn_global_atomic_fadd:
1773     return selectGlobalAtomicFadd(I, I.getOperand(2), I.getOperand(3));
1774   default: {
1775     return selectImpl(I, *CoverageInfo);
1776   }
1777   }
1778 }
1779 
1780 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
1781   if (selectImpl(I, *CoverageInfo))
1782     return true;
1783 
1784   MachineBasicBlock *BB = I.getParent();
1785   const DebugLoc &DL = I.getDebugLoc();
1786 
1787   Register DstReg = I.getOperand(0).getReg();
1788   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
1789   assert(Size <= 32 || Size == 64);
1790   const MachineOperand &CCOp = I.getOperand(1);
1791   Register CCReg = CCOp.getReg();
1792   if (!isVCC(CCReg, *MRI)) {
1793     unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
1794                                          AMDGPU::S_CSELECT_B32;
1795     MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
1796             .addReg(CCReg);
1797 
1798     // The generic constrainSelectedInstRegOperands doesn't work for the scc register
1799     // bank, because it does not cover the register class that we used to represent
1800     // for it.  So we need to manually set the register class here.
1801     if (!MRI->getRegClassOrNull(CCReg))
1802         MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
1803     MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
1804             .add(I.getOperand(2))
1805             .add(I.getOperand(3));
1806 
1807     bool Ret = false;
1808     Ret |= constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1809     Ret |= constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
1810     I.eraseFromParent();
1811     return Ret;
1812   }
1813 
1814   // Wide VGPR select should have been split in RegBankSelect.
1815   if (Size > 32)
1816     return false;
1817 
1818   MachineInstr *Select =
1819       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1820               .addImm(0)
1821               .add(I.getOperand(3))
1822               .addImm(0)
1823               .add(I.getOperand(2))
1824               .add(I.getOperand(1));
1825 
1826   bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1827   I.eraseFromParent();
1828   return Ret;
1829 }
1830 
1831 static int sizeToSubRegIndex(unsigned Size) {
1832   switch (Size) {
1833   case 32:
1834     return AMDGPU::sub0;
1835   case 64:
1836     return AMDGPU::sub0_sub1;
1837   case 96:
1838     return AMDGPU::sub0_sub1_sub2;
1839   case 128:
1840     return AMDGPU::sub0_sub1_sub2_sub3;
1841   case 256:
1842     return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
1843   default:
1844     if (Size < 32)
1845       return AMDGPU::sub0;
1846     if (Size > 256)
1847       return -1;
1848     return sizeToSubRegIndex(PowerOf2Ceil(Size));
1849   }
1850 }
1851 
1852 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
1853   Register DstReg = I.getOperand(0).getReg();
1854   Register SrcReg = I.getOperand(1).getReg();
1855   const LLT DstTy = MRI->getType(DstReg);
1856   const LLT SrcTy = MRI->getType(SrcReg);
1857   const LLT S1 = LLT::scalar(1);
1858 
1859   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1860   const RegisterBank *DstRB;
1861   if (DstTy == S1) {
1862     // This is a special case. We don't treat s1 for legalization artifacts as
1863     // vcc booleans.
1864     DstRB = SrcRB;
1865   } else {
1866     DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1867     if (SrcRB != DstRB)
1868       return false;
1869   }
1870 
1871   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
1872 
1873   unsigned DstSize = DstTy.getSizeInBits();
1874   unsigned SrcSize = SrcTy.getSizeInBits();
1875 
1876   const TargetRegisterClass *SrcRC
1877     = TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB, *MRI);
1878   const TargetRegisterClass *DstRC
1879     = TRI.getRegClassForSizeOnBank(DstSize, *DstRB, *MRI);
1880   if (!SrcRC || !DstRC)
1881     return false;
1882 
1883   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1884       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1885     LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
1886     return false;
1887   }
1888 
1889   if (DstTy == LLT::fixed_vector(2, 16) && SrcTy == LLT::fixed_vector(2, 32)) {
1890     MachineBasicBlock *MBB = I.getParent();
1891     const DebugLoc &DL = I.getDebugLoc();
1892 
1893     Register LoReg = MRI->createVirtualRegister(DstRC);
1894     Register HiReg = MRI->createVirtualRegister(DstRC);
1895     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg)
1896       .addReg(SrcReg, 0, AMDGPU::sub0);
1897     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg)
1898       .addReg(SrcReg, 0, AMDGPU::sub1);
1899 
1900     if (IsVALU && STI.hasSDWA()) {
1901       // Write the low 16-bits of the high element into the high 16-bits of the
1902       // low element.
1903       MachineInstr *MovSDWA =
1904         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
1905         .addImm(0)                             // $src0_modifiers
1906         .addReg(HiReg)                         // $src0
1907         .addImm(0)                             // $clamp
1908         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
1909         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
1910         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
1911         .addReg(LoReg, RegState::Implicit);
1912       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
1913     } else {
1914       Register TmpReg0 = MRI->createVirtualRegister(DstRC);
1915       Register TmpReg1 = MRI->createVirtualRegister(DstRC);
1916       Register ImmReg = MRI->createVirtualRegister(DstRC);
1917       if (IsVALU) {
1918         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0)
1919           .addImm(16)
1920           .addReg(HiReg);
1921       } else {
1922         BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0)
1923           .addReg(HiReg)
1924           .addImm(16);
1925       }
1926 
1927       unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1928       unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
1929       unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32;
1930 
1931       BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg)
1932         .addImm(0xffff);
1933       BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1)
1934         .addReg(LoReg)
1935         .addReg(ImmReg);
1936       BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg)
1937         .addReg(TmpReg0)
1938         .addReg(TmpReg1);
1939     }
1940 
1941     I.eraseFromParent();
1942     return true;
1943   }
1944 
1945   if (!DstTy.isScalar())
1946     return false;
1947 
1948   if (SrcSize > 32) {
1949     int SubRegIdx = sizeToSubRegIndex(DstSize);
1950     if (SubRegIdx == -1)
1951       return false;
1952 
1953     // Deal with weird cases where the class only partially supports the subreg
1954     // index.
1955     const TargetRegisterClass *SrcWithSubRC
1956       = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
1957     if (!SrcWithSubRC)
1958       return false;
1959 
1960     if (SrcWithSubRC != SrcRC) {
1961       if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI))
1962         return false;
1963     }
1964 
1965     I.getOperand(1).setSubReg(SubRegIdx);
1966   }
1967 
1968   I.setDesc(TII.get(TargetOpcode::COPY));
1969   return true;
1970 }
1971 
1972 /// \returns true if a bitmask for \p Size bits will be an inline immediate.
1973 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
1974   Mask = maskTrailingOnes<unsigned>(Size);
1975   int SignedMask = static_cast<int>(Mask);
1976   return SignedMask >= -16 && SignedMask <= 64;
1977 }
1978 
1979 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
1980 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
1981   Register Reg, const MachineRegisterInfo &MRI,
1982   const TargetRegisterInfo &TRI) const {
1983   const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
1984   if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
1985     return RB;
1986 
1987   // Ignore the type, since we don't use vcc in artifacts.
1988   if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
1989     return &RBI.getRegBankFromRegClass(*RC, LLT());
1990   return nullptr;
1991 }
1992 
1993 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
1994   bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG;
1995   bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg;
1996   const DebugLoc &DL = I.getDebugLoc();
1997   MachineBasicBlock &MBB = *I.getParent();
1998   const Register DstReg = I.getOperand(0).getReg();
1999   const Register SrcReg = I.getOperand(1).getReg();
2000 
2001   const LLT DstTy = MRI->getType(DstReg);
2002   const LLT SrcTy = MRI->getType(SrcReg);
2003   const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ?
2004     I.getOperand(2).getImm() : SrcTy.getSizeInBits();
2005   const unsigned DstSize = DstTy.getSizeInBits();
2006   if (!DstTy.isScalar())
2007     return false;
2008 
2009   // Artifact casts should never use vcc.
2010   const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
2011 
2012   // FIXME: This should probably be illegal and split earlier.
2013   if (I.getOpcode() == AMDGPU::G_ANYEXT) {
2014     if (DstSize <= 32)
2015       return selectCOPY(I);
2016 
2017     const TargetRegisterClass *SrcRC =
2018         TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank, *MRI);
2019     const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
2020     const TargetRegisterClass *DstRC =
2021         TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
2022 
2023     Register UndefReg = MRI->createVirtualRegister(SrcRC);
2024     BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2025     BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2026       .addReg(SrcReg)
2027       .addImm(AMDGPU::sub0)
2028       .addReg(UndefReg)
2029       .addImm(AMDGPU::sub1);
2030     I.eraseFromParent();
2031 
2032     return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) &&
2033            RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI);
2034   }
2035 
2036   if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
2037     // 64-bit should have been split up in RegBankSelect
2038 
2039     // Try to use an and with a mask if it will save code size.
2040     unsigned Mask;
2041     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2042       MachineInstr *ExtI =
2043       BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
2044         .addImm(Mask)
2045         .addReg(SrcReg);
2046       I.eraseFromParent();
2047       return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2048     }
2049 
2050     const unsigned BFE = Signed ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
2051     MachineInstr *ExtI =
2052       BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
2053       .addReg(SrcReg)
2054       .addImm(0) // Offset
2055       .addImm(SrcSize); // Width
2056     I.eraseFromParent();
2057     return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2058   }
2059 
2060   if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
2061     const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ?
2062       AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass;
2063     if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI))
2064       return false;
2065 
2066     if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
2067       const unsigned SextOpc = SrcSize == 8 ?
2068         AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
2069       BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
2070         .addReg(SrcReg);
2071       I.eraseFromParent();
2072       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2073     }
2074 
2075     const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
2076     const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
2077 
2078     // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16]= width.
2079     if (DstSize > 32 && (SrcSize <= 32 || InReg)) {
2080       // We need a 64-bit register source, but the high bits don't matter.
2081       Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
2082       Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2083       unsigned SubReg = InReg ? AMDGPU::sub0 : 0;
2084 
2085       BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2086       BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
2087         .addReg(SrcReg, 0, SubReg)
2088         .addImm(AMDGPU::sub0)
2089         .addReg(UndefReg)
2090         .addImm(AMDGPU::sub1);
2091 
2092       BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
2093         .addReg(ExtReg)
2094         .addImm(SrcSize << 16);
2095 
2096       I.eraseFromParent();
2097       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
2098     }
2099 
2100     unsigned Mask;
2101     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2102       BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
2103         .addReg(SrcReg)
2104         .addImm(Mask);
2105     } else {
2106       BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
2107         .addReg(SrcReg)
2108         .addImm(SrcSize << 16);
2109     }
2110 
2111     I.eraseFromParent();
2112     return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2113   }
2114 
2115   return false;
2116 }
2117 
2118 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
2119   MachineBasicBlock *BB = I.getParent();
2120   MachineOperand &ImmOp = I.getOperand(1);
2121   Register DstReg = I.getOperand(0).getReg();
2122   unsigned Size = MRI->getType(DstReg).getSizeInBits();
2123 
2124   // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
2125   if (ImmOp.isFPImm()) {
2126     const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
2127     ImmOp.ChangeToImmediate(Imm.getZExtValue());
2128   } else if (ImmOp.isCImm()) {
2129     ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue());
2130   } else {
2131     llvm_unreachable("Not supported by g_constants");
2132   }
2133 
2134   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2135   const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID;
2136 
2137   unsigned Opcode;
2138   if (DstRB->getID() == AMDGPU::VCCRegBankID) {
2139     Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
2140   } else {
2141     Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
2142 
2143     // We should never produce s1 values on banks other than VCC. If the user of
2144     // this already constrained the register, we may incorrectly think it's VCC
2145     // if it wasn't originally.
2146     if (Size == 1)
2147       return false;
2148   }
2149 
2150   if (Size != 64) {
2151     I.setDesc(TII.get(Opcode));
2152     I.addImplicitDefUseOperands(*MF);
2153     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2154   }
2155 
2156   const DebugLoc &DL = I.getDebugLoc();
2157 
2158   APInt Imm(Size, I.getOperand(1).getImm());
2159 
2160   MachineInstr *ResInst;
2161   if (IsSgpr && TII.isInlineConstant(Imm)) {
2162     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
2163       .addImm(I.getOperand(1).getImm());
2164   } else {
2165     const TargetRegisterClass *RC = IsSgpr ?
2166       &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
2167     Register LoReg = MRI->createVirtualRegister(RC);
2168     Register HiReg = MRI->createVirtualRegister(RC);
2169 
2170     BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
2171       .addImm(Imm.trunc(32).getZExtValue());
2172 
2173     BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
2174       .addImm(Imm.ashr(32).getZExtValue());
2175 
2176     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2177       .addReg(LoReg)
2178       .addImm(AMDGPU::sub0)
2179       .addReg(HiReg)
2180       .addImm(AMDGPU::sub1);
2181   }
2182 
2183   // We can't call constrainSelectedInstRegOperands here, because it doesn't
2184   // work for target independent opcodes
2185   I.eraseFromParent();
2186   const TargetRegisterClass *DstRC =
2187     TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
2188   if (!DstRC)
2189     return true;
2190   return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
2191 }
2192 
2193 bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
2194   // Only manually handle the f64 SGPR case.
2195   //
2196   // FIXME: This is a workaround for 2.5 different tablegen problems. Because
2197   // the bit ops theoretically have a second result due to the implicit def of
2198   // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing
2199   // that is easy by disabling the check. The result works, but uses a
2200   // nonsensical sreg32orlds_and_sreg_1 regclass.
2201   //
2202   // The DAG emitter is more problematic, and incorrectly adds both S_XOR_B32 to
2203   // the variadic REG_SEQUENCE operands.
2204 
2205   Register Dst = MI.getOperand(0).getReg();
2206   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2207   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2208       MRI->getType(Dst) != LLT::scalar(64))
2209     return false;
2210 
2211   Register Src = MI.getOperand(1).getReg();
2212   MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI);
2213   if (Fabs)
2214     Src = Fabs->getOperand(1).getReg();
2215 
2216   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2217       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2218     return false;
2219 
2220   MachineBasicBlock *BB = MI.getParent();
2221   const DebugLoc &DL = MI.getDebugLoc();
2222   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2223   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2224   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2225   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2226 
2227   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2228     .addReg(Src, 0, AMDGPU::sub0);
2229   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2230     .addReg(Src, 0, AMDGPU::sub1);
2231   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2232     .addImm(0x80000000);
2233 
2234   // Set or toggle sign bit.
2235   unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
2236   BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
2237     .addReg(HiReg)
2238     .addReg(ConstReg);
2239   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2240     .addReg(LoReg)
2241     .addImm(AMDGPU::sub0)
2242     .addReg(OpReg)
2243     .addImm(AMDGPU::sub1);
2244   MI.eraseFromParent();
2245   return true;
2246 }
2247 
2248 // FIXME: This is a workaround for the same tablegen problems as G_FNEG
2249 bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {
2250   Register Dst = MI.getOperand(0).getReg();
2251   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2252   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2253       MRI->getType(Dst) != LLT::scalar(64))
2254     return false;
2255 
2256   Register Src = MI.getOperand(1).getReg();
2257   MachineBasicBlock *BB = MI.getParent();
2258   const DebugLoc &DL = MI.getDebugLoc();
2259   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2260   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2261   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2262   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2263 
2264   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2265       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2266     return false;
2267 
2268   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2269     .addReg(Src, 0, AMDGPU::sub0);
2270   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2271     .addReg(Src, 0, AMDGPU::sub1);
2272   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2273     .addImm(0x7fffffff);
2274 
2275   // Clear sign bit.
2276   // TODO: Should this used S_BITSET0_*?
2277   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg)
2278     .addReg(HiReg)
2279     .addReg(ConstReg);
2280   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2281     .addReg(LoReg)
2282     .addImm(AMDGPU::sub0)
2283     .addReg(OpReg)
2284     .addImm(AMDGPU::sub1);
2285 
2286   MI.eraseFromParent();
2287   return true;
2288 }
2289 
2290 static bool isConstant(const MachineInstr &MI) {
2291   return MI.getOpcode() == TargetOpcode::G_CONSTANT;
2292 }
2293 
2294 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
2295     const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
2296 
2297   const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
2298 
2299   assert(PtrMI);
2300 
2301   if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
2302     return;
2303 
2304   GEPInfo GEPInfo(*PtrMI);
2305 
2306   for (unsigned i = 1; i != 3; ++i) {
2307     const MachineOperand &GEPOp = PtrMI->getOperand(i);
2308     const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
2309     assert(OpDef);
2310     if (i == 2 && isConstant(*OpDef)) {
2311       // TODO: Could handle constant base + variable offset, but a combine
2312       // probably should have commuted it.
2313       assert(GEPInfo.Imm == 0);
2314       GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
2315       continue;
2316     }
2317     const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
2318     if (OpBank->getID() == AMDGPU::SGPRRegBankID)
2319       GEPInfo.SgprParts.push_back(GEPOp.getReg());
2320     else
2321       GEPInfo.VgprParts.push_back(GEPOp.getReg());
2322   }
2323 
2324   AddrInfo.push_back(GEPInfo);
2325   getAddrModeInfo(*PtrMI, MRI, AddrInfo);
2326 }
2327 
2328 bool AMDGPUInstructionSelector::isSGPR(Register Reg) const {
2329   return RBI.getRegBank(Reg, *MRI, TRI)->getID() == AMDGPU::SGPRRegBankID;
2330 }
2331 
2332 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
2333   if (!MI.hasOneMemOperand())
2334     return false;
2335 
2336   const MachineMemOperand *MMO = *MI.memoperands_begin();
2337   const Value *Ptr = MMO->getValue();
2338 
2339   // UndefValue means this is a load of a kernel input.  These are uniform.
2340   // Sometimes LDS instructions have constant pointers.
2341   // If Ptr is null, then that means this mem operand contains a
2342   // PseudoSourceValue like GOT.
2343   if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
2344       isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
2345     return true;
2346 
2347   if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
2348     return true;
2349 
2350   const Instruction *I = dyn_cast<Instruction>(Ptr);
2351   return I && I->getMetadata("amdgpu.uniform");
2352 }
2353 
2354 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
2355   for (const GEPInfo &GEPInfo : AddrInfo) {
2356     if (!GEPInfo.VgprParts.empty())
2357       return true;
2358   }
2359   return false;
2360 }
2361 
2362 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
2363   const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2364   unsigned AS = PtrTy.getAddressSpace();
2365   if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
2366       STI.ldsRequiresM0Init()) {
2367     MachineBasicBlock *BB = I.getParent();
2368 
2369     // If DS instructions require M0 initialization, insert it before selecting.
2370     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2371       .addImm(-1);
2372   }
2373 }
2374 
2375 bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW(
2376   MachineInstr &I) const {
2377   if (I.getOpcode() == TargetOpcode::G_ATOMICRMW_FADD) {
2378     const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2379     unsigned AS = PtrTy.getAddressSpace();
2380     if (AS == AMDGPUAS::GLOBAL_ADDRESS)
2381       return selectGlobalAtomicFadd(I, I.getOperand(1), I.getOperand(2));
2382   }
2383 
2384   initM0(I);
2385   return selectImpl(I, *CoverageInfo);
2386 }
2387 
2388 // TODO: No rtn optimization.
2389 bool AMDGPUInstructionSelector::selectG_AMDGPU_ATOMIC_CMPXCHG(
2390   MachineInstr &MI) const {
2391   Register PtrReg = MI.getOperand(1).getReg();
2392   const LLT PtrTy = MRI->getType(PtrReg);
2393   if (PtrTy.getAddressSpace() == AMDGPUAS::FLAT_ADDRESS ||
2394       STI.useFlatForGlobal())
2395     return selectImpl(MI, *CoverageInfo);
2396 
2397   Register DstReg = MI.getOperand(0).getReg();
2398   const LLT Ty = MRI->getType(DstReg);
2399   const bool Is64 = Ty.getSizeInBits() == 64;
2400   const unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
2401   Register TmpReg = MRI->createVirtualRegister(
2402     Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
2403 
2404   const DebugLoc &DL = MI.getDebugLoc();
2405   MachineBasicBlock *BB = MI.getParent();
2406 
2407   Register VAddr, RSrcReg, SOffset;
2408   int64_t Offset = 0;
2409 
2410   unsigned Opcode;
2411   if (selectMUBUFOffsetImpl(MI.getOperand(1), RSrcReg, SOffset, Offset)) {
2412     Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_OFFSET_RTN :
2413                              AMDGPU::BUFFER_ATOMIC_CMPSWAP_OFFSET_RTN;
2414   } else if (selectMUBUFAddr64Impl(MI.getOperand(1), VAddr,
2415                                    RSrcReg, SOffset, Offset)) {
2416     Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_ADDR64_RTN :
2417                     AMDGPU::BUFFER_ATOMIC_CMPSWAP_ADDR64_RTN;
2418   } else
2419     return selectImpl(MI, *CoverageInfo);
2420 
2421   auto MIB = BuildMI(*BB, &MI, DL, TII.get(Opcode), TmpReg)
2422     .addReg(MI.getOperand(2).getReg());
2423 
2424   if (VAddr)
2425     MIB.addReg(VAddr);
2426 
2427   MIB.addReg(RSrcReg);
2428   if (SOffset)
2429     MIB.addReg(SOffset);
2430   else
2431     MIB.addImm(0);
2432 
2433   MIB.addImm(Offset);
2434   MIB.addImm(AMDGPU::CPol::GLC);
2435   MIB.cloneMemRefs(MI);
2436 
2437   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), DstReg)
2438     .addReg(TmpReg, RegState::Kill, SubReg);
2439 
2440   MI.eraseFromParent();
2441 
2442   MRI->setRegClass(
2443     DstReg, Is64 ? &AMDGPU::VReg_64RegClass : &AMDGPU::VGPR_32RegClass);
2444   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2445 }
2446 
2447 static bool isVCmpResult(Register Reg, MachineRegisterInfo &MRI) {
2448   if (Reg.isPhysical())
2449     return false;
2450 
2451   MachineInstr &MI = *MRI.getUniqueVRegDef(Reg);
2452   const unsigned Opcode = MI.getOpcode();
2453 
2454   if (Opcode == AMDGPU::COPY)
2455     return isVCmpResult(MI.getOperand(1).getReg(), MRI);
2456 
2457   if (Opcode == AMDGPU::G_AND || Opcode == AMDGPU::G_OR ||
2458       Opcode == AMDGPU::G_XOR)
2459     return isVCmpResult(MI.getOperand(1).getReg(), MRI) &&
2460            isVCmpResult(MI.getOperand(2).getReg(), MRI);
2461 
2462   if (Opcode == TargetOpcode::G_INTRINSIC)
2463     return MI.getIntrinsicID() == Intrinsic::amdgcn_class;
2464 
2465   return Opcode == AMDGPU::G_ICMP || Opcode == AMDGPU::G_FCMP;
2466 }
2467 
2468 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
2469   MachineBasicBlock *BB = I.getParent();
2470   MachineOperand &CondOp = I.getOperand(0);
2471   Register CondReg = CondOp.getReg();
2472   const DebugLoc &DL = I.getDebugLoc();
2473 
2474   unsigned BrOpcode;
2475   Register CondPhysReg;
2476   const TargetRegisterClass *ConstrainRC;
2477 
2478   // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
2479   // whether the branch is uniform when selecting the instruction. In
2480   // GlobalISel, we should push that decision into RegBankSelect. Assume for now
2481   // RegBankSelect knows what it's doing if the branch condition is scc, even
2482   // though it currently does not.
2483   if (!isVCC(CondReg, *MRI)) {
2484     if (MRI->getType(CondReg) != LLT::scalar(32))
2485       return false;
2486 
2487     CondPhysReg = AMDGPU::SCC;
2488     BrOpcode = AMDGPU::S_CBRANCH_SCC1;
2489     ConstrainRC = &AMDGPU::SReg_32RegClass;
2490   } else {
2491     // FIXME: Should scc->vcc copies and with exec?
2492 
2493     // Unless the value of CondReg is a result of a V_CMP* instruction then we
2494     // need to insert an and with exec.
2495     if (!isVCmpResult(CondReg, *MRI)) {
2496       const bool Is64 = STI.isWave64();
2497       const unsigned Opcode = Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
2498       const Register Exec = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
2499 
2500       Register TmpReg = MRI->createVirtualRegister(TRI.getBoolRC());
2501       BuildMI(*BB, &I, DL, TII.get(Opcode), TmpReg)
2502           .addReg(CondReg)
2503           .addReg(Exec);
2504       CondReg = TmpReg;
2505     }
2506 
2507     CondPhysReg = TRI.getVCC();
2508     BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
2509     ConstrainRC = TRI.getBoolRC();
2510   }
2511 
2512   if (!MRI->getRegClassOrNull(CondReg))
2513     MRI->setRegClass(CondReg, ConstrainRC);
2514 
2515   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
2516     .addReg(CondReg);
2517   BuildMI(*BB, &I, DL, TII.get(BrOpcode))
2518     .addMBB(I.getOperand(1).getMBB());
2519 
2520   I.eraseFromParent();
2521   return true;
2522 }
2523 
2524 bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE(
2525   MachineInstr &I) const {
2526   Register DstReg = I.getOperand(0).getReg();
2527   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2528   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2529   I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
2530   if (IsVGPR)
2531     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
2532 
2533   return RBI.constrainGenericRegister(
2534     DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
2535 }
2536 
2537 bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
2538   Register DstReg = I.getOperand(0).getReg();
2539   Register SrcReg = I.getOperand(1).getReg();
2540   Register MaskReg = I.getOperand(2).getReg();
2541   LLT Ty = MRI->getType(DstReg);
2542   LLT MaskTy = MRI->getType(MaskReg);
2543   MachineBasicBlock *BB = I.getParent();
2544   const DebugLoc &DL = I.getDebugLoc();
2545 
2546   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2547   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2548   const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI);
2549   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2550   if (DstRB != SrcRB) // Should only happen for hand written MIR.
2551     return false;
2552 
2553   // Try to avoid emitting a bit operation when we only need to touch half of
2554   // the 64-bit pointer.
2555   APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zextOrSelf(64);
2556   const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
2557   const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
2558 
2559   const bool CanCopyLow32 = (MaskOnes & MaskLo32) == MaskLo32;
2560   const bool CanCopyHi32 = (MaskOnes & MaskHi32) == MaskHi32;
2561 
2562   if (!IsVGPR && Ty.getSizeInBits() == 64 &&
2563       !CanCopyLow32 && !CanCopyHi32) {
2564     auto MIB = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_AND_B64), DstReg)
2565       .addReg(SrcReg)
2566       .addReg(MaskReg);
2567     I.eraseFromParent();
2568     return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2569   }
2570 
2571   unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
2572   const TargetRegisterClass &RegRC
2573     = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2574 
2575   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB,
2576                                                                   *MRI);
2577   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB,
2578                                                                   *MRI);
2579   const TargetRegisterClass *MaskRC =
2580       TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB, *MRI);
2581 
2582   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2583       !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2584       !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI))
2585     return false;
2586 
2587   if (Ty.getSizeInBits() == 32) {
2588     assert(MaskTy.getSizeInBits() == 32 &&
2589            "ptrmask should have been narrowed during legalize");
2590 
2591     BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
2592       .addReg(SrcReg)
2593       .addReg(MaskReg);
2594     I.eraseFromParent();
2595     return true;
2596   }
2597 
2598   Register HiReg = MRI->createVirtualRegister(&RegRC);
2599   Register LoReg = MRI->createVirtualRegister(&RegRC);
2600 
2601   // Extract the subregisters from the source pointer.
2602   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
2603     .addReg(SrcReg, 0, AMDGPU::sub0);
2604   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
2605     .addReg(SrcReg, 0, AMDGPU::sub1);
2606 
2607   Register MaskedLo, MaskedHi;
2608 
2609   if (CanCopyLow32) {
2610     // If all the bits in the low half are 1, we only need a copy for it.
2611     MaskedLo = LoReg;
2612   } else {
2613     // Extract the mask subregister and apply the and.
2614     Register MaskLo = MRI->createVirtualRegister(&RegRC);
2615     MaskedLo = MRI->createVirtualRegister(&RegRC);
2616 
2617     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo)
2618       .addReg(MaskReg, 0, AMDGPU::sub0);
2619     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo)
2620       .addReg(LoReg)
2621       .addReg(MaskLo);
2622   }
2623 
2624   if (CanCopyHi32) {
2625     // If all the bits in the high half are 1, we only need a copy for it.
2626     MaskedHi = HiReg;
2627   } else {
2628     Register MaskHi = MRI->createVirtualRegister(&RegRC);
2629     MaskedHi = MRI->createVirtualRegister(&RegRC);
2630 
2631     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi)
2632       .addReg(MaskReg, 0, AMDGPU::sub1);
2633     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi)
2634       .addReg(HiReg)
2635       .addReg(MaskHi);
2636   }
2637 
2638   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2639     .addReg(MaskedLo)
2640     .addImm(AMDGPU::sub0)
2641     .addReg(MaskedHi)
2642     .addImm(AMDGPU::sub1);
2643   I.eraseFromParent();
2644   return true;
2645 }
2646 
2647 /// Return the register to use for the index value, and the subregister to use
2648 /// for the indirectly accessed register.
2649 static std::pair<Register, unsigned>
2650 computeIndirectRegIndex(MachineRegisterInfo &MRI,
2651                         const SIRegisterInfo &TRI,
2652                         const TargetRegisterClass *SuperRC,
2653                         Register IdxReg,
2654                         unsigned EltSize) {
2655   Register IdxBaseReg;
2656   int Offset;
2657 
2658   std::tie(IdxBaseReg, Offset) = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg);
2659   if (IdxBaseReg == AMDGPU::NoRegister) {
2660     // This will happen if the index is a known constant. This should ordinarily
2661     // be legalized out, but handle it as a register just in case.
2662     assert(Offset == 0);
2663     IdxBaseReg = IdxReg;
2664   }
2665 
2666   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);
2667 
2668   // Skip out of bounds offsets, or else we would end up using an undefined
2669   // register.
2670   if (static_cast<unsigned>(Offset) >= SubRegs.size())
2671     return std::make_pair(IdxReg, SubRegs[0]);
2672   return std::make_pair(IdxBaseReg, SubRegs[Offset]);
2673 }
2674 
2675 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
2676   MachineInstr &MI) const {
2677   Register DstReg = MI.getOperand(0).getReg();
2678   Register SrcReg = MI.getOperand(1).getReg();
2679   Register IdxReg = MI.getOperand(2).getReg();
2680 
2681   LLT DstTy = MRI->getType(DstReg);
2682   LLT SrcTy = MRI->getType(SrcReg);
2683 
2684   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2685   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2686   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2687 
2688   // The index must be scalar. If it wasn't RegBankSelect should have moved this
2689   // into a waterfall loop.
2690   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2691     return false;
2692 
2693   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB,
2694                                                                   *MRI);
2695   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(DstTy, *DstRB,
2696                                                                   *MRI);
2697   if (!SrcRC || !DstRC)
2698     return false;
2699   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2700       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2701       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2702     return false;
2703 
2704   MachineBasicBlock *BB = MI.getParent();
2705   const DebugLoc &DL = MI.getDebugLoc();
2706   const bool Is64 = DstTy.getSizeInBits() == 64;
2707 
2708   unsigned SubReg;
2709   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg,
2710                                                      DstTy.getSizeInBits() / 8);
2711 
2712   if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
2713     if (DstTy.getSizeInBits() != 32 && !Is64)
2714       return false;
2715 
2716     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2717       .addReg(IdxReg);
2718 
2719     unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
2720     BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
2721       .addReg(SrcReg, 0, SubReg)
2722       .addReg(SrcReg, RegState::Implicit);
2723     MI.eraseFromParent();
2724     return true;
2725   }
2726 
2727   if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
2728     return false;
2729 
2730   if (!STI.useVGPRIndexMode()) {
2731     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2732       .addReg(IdxReg);
2733     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
2734       .addReg(SrcReg, 0, SubReg)
2735       .addReg(SrcReg, RegState::Implicit);
2736     MI.eraseFromParent();
2737     return true;
2738   }
2739 
2740   const MCInstrDesc &GPRIDXDesc =
2741       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*SrcRC), true);
2742   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2743       .addReg(SrcReg)
2744       .addReg(IdxReg)
2745       .addImm(SubReg);
2746 
2747   MI.eraseFromParent();
2748   return true;
2749 }
2750 
2751 // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
2752 bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
2753   MachineInstr &MI) const {
2754   Register DstReg = MI.getOperand(0).getReg();
2755   Register VecReg = MI.getOperand(1).getReg();
2756   Register ValReg = MI.getOperand(2).getReg();
2757   Register IdxReg = MI.getOperand(3).getReg();
2758 
2759   LLT VecTy = MRI->getType(DstReg);
2760   LLT ValTy = MRI->getType(ValReg);
2761   unsigned VecSize = VecTy.getSizeInBits();
2762   unsigned ValSize = ValTy.getSizeInBits();
2763 
2764   const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
2765   const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
2766   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2767 
2768   assert(VecTy.getElementType() == ValTy);
2769 
2770   // The index must be scalar. If it wasn't RegBankSelect should have moved this
2771   // into a waterfall loop.
2772   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2773     return false;
2774 
2775   const TargetRegisterClass *VecRC = TRI.getRegClassForTypeOnBank(VecTy, *VecRB,
2776                                                                   *MRI);
2777   const TargetRegisterClass *ValRC = TRI.getRegClassForTypeOnBank(ValTy, *ValRB,
2778                                                                   *MRI);
2779 
2780   if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
2781       !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
2782       !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
2783       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2784     return false;
2785 
2786   if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
2787     return false;
2788 
2789   unsigned SubReg;
2790   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg,
2791                                                      ValSize / 8);
2792 
2793   const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
2794                          STI.useVGPRIndexMode();
2795 
2796   MachineBasicBlock *BB = MI.getParent();
2797   const DebugLoc &DL = MI.getDebugLoc();
2798 
2799   if (!IndexMode) {
2800     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2801       .addReg(IdxReg);
2802 
2803     const MCInstrDesc &RegWriteOp = TII.getIndirectRegWriteMovRelPseudo(
2804         VecSize, ValSize, VecRB->getID() == AMDGPU::SGPRRegBankID);
2805     BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
2806         .addReg(VecReg)
2807         .addReg(ValReg)
2808         .addImm(SubReg);
2809     MI.eraseFromParent();
2810     return true;
2811   }
2812 
2813   const MCInstrDesc &GPRIDXDesc =
2814       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false);
2815   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2816       .addReg(VecReg)
2817       .addReg(ValReg)
2818       .addReg(IdxReg)
2819       .addImm(SubReg);
2820 
2821   MI.eraseFromParent();
2822   return true;
2823 }
2824 
2825 static bool isZeroOrUndef(int X) {
2826   return X == 0 || X == -1;
2827 }
2828 
2829 static bool isOneOrUndef(int X) {
2830   return X == 1 || X == -1;
2831 }
2832 
2833 static bool isZeroOrOneOrUndef(int X) {
2834   return X == 0 || X == 1 || X == -1;
2835 }
2836 
2837 // Normalize a VOP3P shuffle mask to refer to the low/high half of a single
2838 // 32-bit register.
2839 static Register normalizeVOP3PMask(int NewMask[2], Register Src0, Register Src1,
2840                                    ArrayRef<int> Mask) {
2841   NewMask[0] = Mask[0];
2842   NewMask[1] = Mask[1];
2843   if (isZeroOrOneOrUndef(Mask[0]) && isZeroOrOneOrUndef(Mask[1]))
2844     return Src0;
2845 
2846   assert(NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1);
2847   assert(NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1);
2848 
2849   // Shift the mask inputs to be 0/1;
2850   NewMask[0] = NewMask[0] == -1 ? -1 : NewMask[0] - 2;
2851   NewMask[1] = NewMask[1] == -1 ? -1 : NewMask[1] - 2;
2852   return Src1;
2853 }
2854 
2855 // This is only legal with VOP3P instructions as an aid to op_sel matching.
2856 bool AMDGPUInstructionSelector::selectG_SHUFFLE_VECTOR(
2857   MachineInstr &MI) const {
2858   Register DstReg = MI.getOperand(0).getReg();
2859   Register Src0Reg = MI.getOperand(1).getReg();
2860   Register Src1Reg = MI.getOperand(2).getReg();
2861   ArrayRef<int> ShufMask = MI.getOperand(3).getShuffleMask();
2862 
2863   const LLT V2S16 = LLT::fixed_vector(2, 16);
2864   if (MRI->getType(DstReg) != V2S16 || MRI->getType(Src0Reg) != V2S16)
2865     return false;
2866 
2867   if (!AMDGPU::isLegalVOP3PShuffleMask(ShufMask))
2868     return false;
2869 
2870   assert(ShufMask.size() == 2);
2871   assert(STI.hasSDWA() && "no target has VOP3P but not SDWA");
2872 
2873   MachineBasicBlock *MBB = MI.getParent();
2874   const DebugLoc &DL = MI.getDebugLoc();
2875 
2876   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2877   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
2878   const TargetRegisterClass &RC = IsVALU ?
2879     AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2880 
2881   // Handle the degenerate case which should have folded out.
2882   if (ShufMask[0] == -1 && ShufMask[1] == -1) {
2883     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::IMPLICIT_DEF), DstReg);
2884 
2885     MI.eraseFromParent();
2886     return RBI.constrainGenericRegister(DstReg, RC, *MRI);
2887   }
2888 
2889   // A legal VOP3P mask only reads one of the sources.
2890   int Mask[2];
2891   Register SrcVec = normalizeVOP3PMask(Mask, Src0Reg, Src1Reg, ShufMask);
2892 
2893   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI) ||
2894       !RBI.constrainGenericRegister(SrcVec, RC, *MRI))
2895     return false;
2896 
2897   // TODO: This also should have been folded out
2898   if (isZeroOrUndef(Mask[0]) && isOneOrUndef(Mask[1])) {
2899     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::COPY), DstReg)
2900       .addReg(SrcVec);
2901 
2902     MI.eraseFromParent();
2903     return true;
2904   }
2905 
2906   if (Mask[0] == 1 && Mask[1] == -1) {
2907     if (IsVALU) {
2908       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
2909         .addImm(16)
2910         .addReg(SrcVec);
2911     } else {
2912       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
2913         .addReg(SrcVec)
2914         .addImm(16);
2915     }
2916   } else if (Mask[0] == -1 && Mask[1] == 0) {
2917     if (IsVALU) {
2918       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), DstReg)
2919         .addImm(16)
2920         .addReg(SrcVec);
2921     } else {
2922       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHL_B32), DstReg)
2923         .addReg(SrcVec)
2924         .addImm(16);
2925     }
2926   } else if (Mask[0] == 0 && Mask[1] == 0) {
2927     if (IsVALU) {
2928       // Write low half of the register into the high half.
2929       MachineInstr *MovSDWA =
2930         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2931         .addImm(0)                             // $src0_modifiers
2932         .addReg(SrcVec)                        // $src0
2933         .addImm(0)                             // $clamp
2934         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
2935         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2936         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
2937         .addReg(SrcVec, RegState::Implicit);
2938       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2939     } else {
2940       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2941         .addReg(SrcVec)
2942         .addReg(SrcVec);
2943     }
2944   } else if (Mask[0] == 1 && Mask[1] == 1) {
2945     if (IsVALU) {
2946       // Write high half of the register into the low half.
2947       MachineInstr *MovSDWA =
2948         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2949         .addImm(0)                             // $src0_modifiers
2950         .addReg(SrcVec)                        // $src0
2951         .addImm(0)                             // $clamp
2952         .addImm(AMDGPU::SDWA::WORD_0)          // $dst_sel
2953         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2954         .addImm(AMDGPU::SDWA::WORD_1)          // $src0_sel
2955         .addReg(SrcVec, RegState::Implicit);
2956       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2957     } else {
2958       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HH_B32_B16), DstReg)
2959         .addReg(SrcVec)
2960         .addReg(SrcVec);
2961     }
2962   } else if (Mask[0] == 1 && Mask[1] == 0) {
2963     if (IsVALU) {
2964       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_ALIGNBIT_B32_e64), DstReg)
2965         .addReg(SrcVec)
2966         .addReg(SrcVec)
2967         .addImm(16);
2968     } else {
2969       Register TmpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2970       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), TmpReg)
2971         .addReg(SrcVec)
2972         .addImm(16);
2973       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2974         .addReg(TmpReg)
2975         .addReg(SrcVec);
2976     }
2977   } else
2978     llvm_unreachable("all shuffle masks should be handled");
2979 
2980   MI.eraseFromParent();
2981   return true;
2982 }
2983 
2984 bool AMDGPUInstructionSelector::selectAMDGPU_BUFFER_ATOMIC_FADD(
2985   MachineInstr &MI) const {
2986   if (STI.hasGFX90AInsts())
2987     return selectImpl(MI, *CoverageInfo);
2988 
2989   MachineBasicBlock *MBB = MI.getParent();
2990   const DebugLoc &DL = MI.getDebugLoc();
2991 
2992   if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
2993     Function &F = MBB->getParent()->getFunction();
2994     DiagnosticInfoUnsupported
2995       NoFpRet(F, "return versions of fp atomics not supported",
2996               MI.getDebugLoc(), DS_Error);
2997     F.getContext().diagnose(NoFpRet);
2998     return false;
2999   }
3000 
3001   // FIXME: This is only needed because tablegen requires number of dst operands
3002   // in match and replace pattern to be the same. Otherwise patterns can be
3003   // exported from SDag path.
3004   MachineOperand &VDataIn = MI.getOperand(1);
3005   MachineOperand &VIndex = MI.getOperand(3);
3006   MachineOperand &VOffset = MI.getOperand(4);
3007   MachineOperand &SOffset = MI.getOperand(5);
3008   int16_t Offset = MI.getOperand(6).getImm();
3009 
3010   bool HasVOffset = !isOperandImmEqual(VOffset, 0, *MRI);
3011   bool HasVIndex = !isOperandImmEqual(VIndex, 0, *MRI);
3012 
3013   unsigned Opcode;
3014   if (HasVOffset) {
3015     Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN
3016                        : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN;
3017   } else {
3018     Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN
3019                        : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET;
3020   }
3021 
3022   if (MRI->getType(VDataIn.getReg()).isVector()) {
3023     switch (Opcode) {
3024     case AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN:
3025       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN;
3026       break;
3027     case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN:
3028       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFEN;
3029       break;
3030     case AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN:
3031       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_IDXEN;
3032       break;
3033     case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET:
3034       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFSET;
3035       break;
3036     }
3037   }
3038 
3039   auto I = BuildMI(*MBB, MI, DL, TII.get(Opcode));
3040   I.add(VDataIn);
3041 
3042   if (Opcode == AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN ||
3043       Opcode == AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN) {
3044     Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class());
3045     BuildMI(*MBB, &*I, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
3046       .addReg(VIndex.getReg())
3047       .addImm(AMDGPU::sub0)
3048       .addReg(VOffset.getReg())
3049       .addImm(AMDGPU::sub1);
3050 
3051     I.addReg(IdxReg);
3052   } else if (HasVIndex) {
3053     I.add(VIndex);
3054   } else if (HasVOffset) {
3055     I.add(VOffset);
3056   }
3057 
3058   I.add(MI.getOperand(2)); // rsrc
3059   I.add(SOffset);
3060   I.addImm(Offset);
3061   I.addImm(MI.getOperand(7).getImm()); // cpol
3062   I.cloneMemRefs(MI);
3063 
3064   MI.eraseFromParent();
3065 
3066   return true;
3067 }
3068 
3069 bool AMDGPUInstructionSelector::selectGlobalAtomicFadd(
3070   MachineInstr &MI, MachineOperand &AddrOp, MachineOperand &DataOp) const {
3071 
3072   if (STI.hasGFX90AInsts()) {
3073     // gfx90a adds return versions of the global atomic fadd instructions so no
3074     // special handling is required.
3075     return selectImpl(MI, *CoverageInfo);
3076   }
3077 
3078   MachineBasicBlock *MBB = MI.getParent();
3079   const DebugLoc &DL = MI.getDebugLoc();
3080 
3081   if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
3082     Function &F = MBB->getParent()->getFunction();
3083     DiagnosticInfoUnsupported
3084       NoFpRet(F, "return versions of fp atomics not supported",
3085               MI.getDebugLoc(), DS_Error);
3086     F.getContext().diagnose(NoFpRet);
3087     return false;
3088   }
3089 
3090   // FIXME: This is only needed because tablegen requires number of dst operands
3091   // in match and replace pattern to be the same. Otherwise patterns can be
3092   // exported from SDag path.
3093   auto Addr = selectFlatOffsetImpl(AddrOp, SIInstrFlags::FlatGlobal);
3094 
3095   Register Data = DataOp.getReg();
3096   const unsigned Opc = MRI->getType(Data).isVector() ?
3097     AMDGPU::GLOBAL_ATOMIC_PK_ADD_F16 : AMDGPU::GLOBAL_ATOMIC_ADD_F32;
3098   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
3099     .addReg(Addr.first)
3100     .addReg(Data)
3101     .addImm(Addr.second)
3102     .addImm(0) // cpol
3103     .cloneMemRefs(MI);
3104 
3105   MI.eraseFromParent();
3106   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3107 }
3108 
3109 bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const{
3110   MI.setDesc(TII.get(MI.getOperand(1).getImm()));
3111   MI.removeOperand(1);
3112   MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3113   return true;
3114 }
3115 
3116 bool AMDGPUInstructionSelector::selectWaveAddress(MachineInstr &MI) const {
3117   Register DstReg = MI.getOperand(0).getReg();
3118   Register SrcReg = MI.getOperand(1).getReg();
3119   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
3120   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
3121   MachineBasicBlock *MBB = MI.getParent();
3122   const DebugLoc &DL = MI.getDebugLoc();
3123 
3124   if (IsVALU) {
3125     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
3126       .addImm(Subtarget->getWavefrontSizeLog2())
3127       .addReg(SrcReg);
3128   } else {
3129     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
3130       .addReg(SrcReg)
3131       .addImm(Subtarget->getWavefrontSizeLog2());
3132   }
3133 
3134   const TargetRegisterClass &RC =
3135       IsVALU ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
3136   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
3137     return false;
3138 
3139   MI.eraseFromParent();
3140   return true;
3141 }
3142 
3143 bool AMDGPUInstructionSelector::select(MachineInstr &I) {
3144   if (I.isPHI())
3145     return selectPHI(I);
3146 
3147   if (!I.isPreISelOpcode()) {
3148     if (I.isCopy())
3149       return selectCOPY(I);
3150     return true;
3151   }
3152 
3153   switch (I.getOpcode()) {
3154   case TargetOpcode::G_AND:
3155   case TargetOpcode::G_OR:
3156   case TargetOpcode::G_XOR:
3157     if (selectImpl(I, *CoverageInfo))
3158       return true;
3159     return selectG_AND_OR_XOR(I);
3160   case TargetOpcode::G_ADD:
3161   case TargetOpcode::G_SUB:
3162     if (selectImpl(I, *CoverageInfo))
3163       return true;
3164     return selectG_ADD_SUB(I);
3165   case TargetOpcode::G_UADDO:
3166   case TargetOpcode::G_USUBO:
3167   case TargetOpcode::G_UADDE:
3168   case TargetOpcode::G_USUBE:
3169     return selectG_UADDO_USUBO_UADDE_USUBE(I);
3170   case TargetOpcode::G_INTTOPTR:
3171   case TargetOpcode::G_BITCAST:
3172   case TargetOpcode::G_PTRTOINT:
3173     return selectCOPY(I);
3174   case TargetOpcode::G_CONSTANT:
3175   case TargetOpcode::G_FCONSTANT:
3176     return selectG_CONSTANT(I);
3177   case TargetOpcode::G_FNEG:
3178     if (selectImpl(I, *CoverageInfo))
3179       return true;
3180     return selectG_FNEG(I);
3181   case TargetOpcode::G_FABS:
3182     if (selectImpl(I, *CoverageInfo))
3183       return true;
3184     return selectG_FABS(I);
3185   case TargetOpcode::G_EXTRACT:
3186     return selectG_EXTRACT(I);
3187   case TargetOpcode::G_MERGE_VALUES:
3188   case TargetOpcode::G_BUILD_VECTOR:
3189   case TargetOpcode::G_CONCAT_VECTORS:
3190     return selectG_MERGE_VALUES(I);
3191   case TargetOpcode::G_UNMERGE_VALUES:
3192     return selectG_UNMERGE_VALUES(I);
3193   case TargetOpcode::G_BUILD_VECTOR_TRUNC:
3194     return selectG_BUILD_VECTOR_TRUNC(I);
3195   case TargetOpcode::G_PTR_ADD:
3196     return selectG_PTR_ADD(I);
3197   case TargetOpcode::G_IMPLICIT_DEF:
3198     return selectG_IMPLICIT_DEF(I);
3199   case TargetOpcode::G_FREEZE:
3200     return selectCOPY(I);
3201   case TargetOpcode::G_INSERT:
3202     return selectG_INSERT(I);
3203   case TargetOpcode::G_INTRINSIC:
3204     return selectG_INTRINSIC(I);
3205   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
3206     return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
3207   case TargetOpcode::G_ICMP:
3208     if (selectG_ICMP(I))
3209       return true;
3210     return selectImpl(I, *CoverageInfo);
3211   case TargetOpcode::G_LOAD:
3212   case TargetOpcode::G_STORE:
3213   case TargetOpcode::G_ATOMIC_CMPXCHG:
3214   case TargetOpcode::G_ATOMICRMW_XCHG:
3215   case TargetOpcode::G_ATOMICRMW_ADD:
3216   case TargetOpcode::G_ATOMICRMW_SUB:
3217   case TargetOpcode::G_ATOMICRMW_AND:
3218   case TargetOpcode::G_ATOMICRMW_OR:
3219   case TargetOpcode::G_ATOMICRMW_XOR:
3220   case TargetOpcode::G_ATOMICRMW_MIN:
3221   case TargetOpcode::G_ATOMICRMW_MAX:
3222   case TargetOpcode::G_ATOMICRMW_UMIN:
3223   case TargetOpcode::G_ATOMICRMW_UMAX:
3224   case TargetOpcode::G_ATOMICRMW_FADD:
3225   case AMDGPU::G_AMDGPU_ATOMIC_INC:
3226   case AMDGPU::G_AMDGPU_ATOMIC_DEC:
3227   case AMDGPU::G_AMDGPU_ATOMIC_FMIN:
3228   case AMDGPU::G_AMDGPU_ATOMIC_FMAX:
3229     return selectG_LOAD_STORE_ATOMICRMW(I);
3230   case AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG:
3231     return selectG_AMDGPU_ATOMIC_CMPXCHG(I);
3232   case TargetOpcode::G_SELECT:
3233     return selectG_SELECT(I);
3234   case TargetOpcode::G_TRUNC:
3235     return selectG_TRUNC(I);
3236   case TargetOpcode::G_SEXT:
3237   case TargetOpcode::G_ZEXT:
3238   case TargetOpcode::G_ANYEXT:
3239   case TargetOpcode::G_SEXT_INREG:
3240     if (selectImpl(I, *CoverageInfo))
3241       return true;
3242     return selectG_SZA_EXT(I);
3243   case TargetOpcode::G_BRCOND:
3244     return selectG_BRCOND(I);
3245   case TargetOpcode::G_GLOBAL_VALUE:
3246     return selectG_GLOBAL_VALUE(I);
3247   case TargetOpcode::G_PTRMASK:
3248     return selectG_PTRMASK(I);
3249   case TargetOpcode::G_EXTRACT_VECTOR_ELT:
3250     return selectG_EXTRACT_VECTOR_ELT(I);
3251   case TargetOpcode::G_INSERT_VECTOR_ELT:
3252     return selectG_INSERT_VECTOR_ELT(I);
3253   case TargetOpcode::G_SHUFFLE_VECTOR:
3254     return selectG_SHUFFLE_VECTOR(I);
3255   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
3256   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16:
3257   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE:
3258   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: {
3259     const AMDGPU::ImageDimIntrinsicInfo *Intr
3260       = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID());
3261     assert(Intr && "not an image intrinsic with image pseudo");
3262     return selectImageIntrinsic(I, Intr);
3263   }
3264   case AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY:
3265     return selectBVHIntrinsic(I);
3266   case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD:
3267     return selectAMDGPU_BUFFER_ATOMIC_FADD(I);
3268   case AMDGPU::G_SBFX:
3269   case AMDGPU::G_UBFX:
3270     return selectG_SBFX_UBFX(I);
3271   case AMDGPU::G_SI_CALL:
3272     I.setDesc(TII.get(AMDGPU::SI_CALL));
3273     return true;
3274   case AMDGPU::G_AMDGPU_WAVE_ADDRESS:
3275     return selectWaveAddress(I);
3276   default:
3277     return selectImpl(I, *CoverageInfo);
3278   }
3279   return false;
3280 }
3281 
3282 InstructionSelector::ComplexRendererFns
3283 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
3284   return {{
3285       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3286   }};
3287 
3288 }
3289 
3290 std::pair<Register, unsigned>
3291 AMDGPUInstructionSelector::selectVOP3ModsImpl(MachineOperand &Root,
3292                                               bool AllowAbs) const {
3293   Register Src = Root.getReg();
3294   Register OrigSrc = Src;
3295   unsigned Mods = 0;
3296   MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
3297 
3298   if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
3299     Src = MI->getOperand(1).getReg();
3300     Mods |= SISrcMods::NEG;
3301     MI = getDefIgnoringCopies(Src, *MRI);
3302   }
3303 
3304   if (AllowAbs && MI && MI->getOpcode() == AMDGPU::G_FABS) {
3305     Src = MI->getOperand(1).getReg();
3306     Mods |= SISrcMods::ABS;
3307   }
3308 
3309   if (Mods != 0 &&
3310       RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) {
3311     MachineInstr *UseMI = Root.getParent();
3312 
3313     // If we looked through copies to find source modifiers on an SGPR operand,
3314     // we now have an SGPR register source. To avoid potentially violating the
3315     // constant bus restriction, we need to insert a copy to a VGPR.
3316     Register VGPRSrc = MRI->cloneVirtualRegister(OrigSrc);
3317     BuildMI(*UseMI->getParent(), UseMI, UseMI->getDebugLoc(),
3318             TII.get(AMDGPU::COPY), VGPRSrc)
3319       .addReg(Src);
3320     Src = VGPRSrc;
3321   }
3322 
3323   return std::make_pair(Src, Mods);
3324 }
3325 
3326 ///
3327 /// This will select either an SGPR or VGPR operand and will save us from
3328 /// having to write an extra tablegen pattern.
3329 InstructionSelector::ComplexRendererFns
3330 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
3331   return {{
3332       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3333   }};
3334 }
3335 
3336 InstructionSelector::ComplexRendererFns
3337 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
3338   Register Src;
3339   unsigned Mods;
3340   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3341 
3342   return {{
3343       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3344       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3345       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3346       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3347   }};
3348 }
3349 
3350 InstructionSelector::ComplexRendererFns
3351 AMDGPUInstructionSelector::selectVOP3BMods0(MachineOperand &Root) const {
3352   Register Src;
3353   unsigned Mods;
3354   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3355 
3356   return {{
3357       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3358       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3359       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3360       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3361   }};
3362 }
3363 
3364 InstructionSelector::ComplexRendererFns
3365 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
3366   return {{
3367       [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
3368       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3369       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
3370   }};
3371 }
3372 
3373 InstructionSelector::ComplexRendererFns
3374 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
3375   Register Src;
3376   unsigned Mods;
3377   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3378 
3379   return {{
3380       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3381       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3382   }};
3383 }
3384 
3385 InstructionSelector::ComplexRendererFns
3386 AMDGPUInstructionSelector::selectVOP3BMods(MachineOperand &Root) const {
3387   Register Src;
3388   unsigned Mods;
3389   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3390 
3391   return {{
3392       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3393       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3394   }};
3395 }
3396 
3397 InstructionSelector::ComplexRendererFns
3398 AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
3399   Register Reg = Root.getReg();
3400   const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI);
3401   if (Def && (Def->getOpcode() == AMDGPU::G_FNEG ||
3402               Def->getOpcode() == AMDGPU::G_FABS))
3403     return {};
3404   return {{
3405       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3406   }};
3407 }
3408 
3409 std::pair<Register, unsigned>
3410 AMDGPUInstructionSelector::selectVOP3PModsImpl(
3411   Register Src, const MachineRegisterInfo &MRI, bool IsDOT) const {
3412   unsigned Mods = 0;
3413   MachineInstr *MI = MRI.getVRegDef(Src);
3414 
3415   if (MI && MI->getOpcode() == AMDGPU::G_FNEG &&
3416       // It's possible to see an f32 fneg here, but unlikely.
3417       // TODO: Treat f32 fneg as only high bit.
3418       MRI.getType(Src) == LLT::fixed_vector(2, 16)) {
3419     Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
3420     Src = MI->getOperand(1).getReg();
3421     MI = MRI.getVRegDef(Src);
3422   }
3423 
3424   // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.
3425   (void)IsDOT; // DOTs do not use OPSEL on gfx940+, check ST.hasDOTOpSelHazard()
3426 
3427   // Packed instructions do not have abs modifiers.
3428   Mods |= SISrcMods::OP_SEL_1;
3429 
3430   return std::make_pair(Src, Mods);
3431 }
3432 
3433 InstructionSelector::ComplexRendererFns
3434 AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
3435   MachineRegisterInfo &MRI
3436     = Root.getParent()->getParent()->getParent()->getRegInfo();
3437 
3438   Register Src;
3439   unsigned Mods;
3440   std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);
3441 
3442   return {{
3443       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3444       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3445   }};
3446 }
3447 
3448 InstructionSelector::ComplexRendererFns
3449 AMDGPUInstructionSelector::selectVOP3PModsDOT(MachineOperand &Root) const {
3450   MachineRegisterInfo &MRI
3451     = Root.getParent()->getParent()->getParent()->getRegInfo();
3452 
3453   Register Src;
3454   unsigned Mods;
3455   std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI, true);
3456 
3457   return {{
3458       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3459       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3460   }};
3461 }
3462 
3463 InstructionSelector::ComplexRendererFns
3464 AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
3465   Register Src;
3466   unsigned Mods;
3467   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3468   if (!isKnownNeverNaN(Src, *MRI))
3469     return None;
3470 
3471   return {{
3472       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3473       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3474   }};
3475 }
3476 
3477 InstructionSelector::ComplexRendererFns
3478 AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
3479   // FIXME: Handle op_sel
3480   return {{
3481       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
3482       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
3483   }};
3484 }
3485 
3486 InstructionSelector::ComplexRendererFns
3487 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
3488   SmallVector<GEPInfo, 4> AddrInfo;
3489   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3490 
3491   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3492     return None;
3493 
3494   const GEPInfo &GEPInfo = AddrInfo[0];
3495   Optional<int64_t> EncodedImm =
3496       AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm, false);
3497   if (!EncodedImm)
3498     return None;
3499 
3500   unsigned PtrReg = GEPInfo.SgprParts[0];
3501   return {{
3502     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3503     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3504   }};
3505 }
3506 
3507 InstructionSelector::ComplexRendererFns
3508 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
3509   SmallVector<GEPInfo, 4> AddrInfo;
3510   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3511 
3512   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3513     return None;
3514 
3515   const GEPInfo &GEPInfo = AddrInfo[0];
3516   Register PtrReg = GEPInfo.SgprParts[0];
3517   Optional<int64_t> EncodedImm =
3518       AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
3519   if (!EncodedImm)
3520     return None;
3521 
3522   return {{
3523     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3524     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3525   }};
3526 }
3527 
3528 InstructionSelector::ComplexRendererFns
3529 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
3530   MachineInstr *MI = Root.getParent();
3531   MachineBasicBlock *MBB = MI->getParent();
3532 
3533   SmallVector<GEPInfo, 4> AddrInfo;
3534   getAddrModeInfo(*MI, *MRI, AddrInfo);
3535 
3536   // FIXME: We should shrink the GEP if the offset is known to be <= 32-bits,
3537   // then we can select all ptr + 32-bit offsets not just immediate offsets.
3538   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3539     return None;
3540 
3541   const GEPInfo &GEPInfo = AddrInfo[0];
3542   // SGPR offset is unsigned.
3543   if (!GEPInfo.Imm || GEPInfo.Imm < 0 || !isUInt<32>(GEPInfo.Imm))
3544     return None;
3545 
3546   // If we make it this far we have a load with an 32-bit immediate offset.
3547   // It is OK to select this using a sgpr offset, because we have already
3548   // failed trying to select this load into one of the _IMM variants since
3549   // the _IMM Patterns are considered before the _SGPR patterns.
3550   Register PtrReg = GEPInfo.SgprParts[0];
3551   Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3552   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
3553           .addImm(GEPInfo.Imm);
3554   return {{
3555     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3556     [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
3557   }};
3558 }
3559 
3560 std::pair<Register, int>
3561 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root,
3562                                                 uint64_t FlatVariant) const {
3563   MachineInstr *MI = Root.getParent();
3564 
3565   auto Default = std::make_pair(Root.getReg(), 0);
3566 
3567   if (!STI.hasFlatInstOffsets())
3568     return Default;
3569 
3570   Register PtrBase;
3571   int64_t ConstOffset;
3572   std::tie(PtrBase, ConstOffset) =
3573       getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3574   if (ConstOffset == 0)
3575     return Default;
3576 
3577   unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
3578   if (!TII.isLegalFLATOffset(ConstOffset, AddrSpace, FlatVariant))
3579     return Default;
3580 
3581   return std::make_pair(PtrBase, ConstOffset);
3582 }
3583 
3584 InstructionSelector::ComplexRendererFns
3585 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
3586   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FLAT);
3587 
3588   return {{
3589       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3590       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3591     }};
3592 }
3593 
3594 InstructionSelector::ComplexRendererFns
3595 AMDGPUInstructionSelector::selectGlobalOffset(MachineOperand &Root) const {
3596   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatGlobal);
3597 
3598   return {{
3599       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3600       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3601   }};
3602 }
3603 
3604 InstructionSelector::ComplexRendererFns
3605 AMDGPUInstructionSelector::selectScratchOffset(MachineOperand &Root) const {
3606   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatScratch);
3607 
3608   return {{
3609       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3610       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3611     }};
3612 }
3613 
3614 /// Match a zero extend from a 32-bit value to 64-bits.
3615 static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) {
3616   Register ZExtSrc;
3617   if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))
3618     return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register();
3619 
3620   // Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0)
3621   const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
3622   if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES)
3623     return false;
3624 
3625   if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt())) {
3626     return Def->getOperand(1).getReg();
3627   }
3628 
3629   return Register();
3630 }
3631 
3632 // Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
3633 InstructionSelector::ComplexRendererFns
3634 AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
3635   Register Addr = Root.getReg();
3636   Register PtrBase;
3637   int64_t ConstOffset;
3638   int64_t ImmOffset = 0;
3639 
3640   // Match the immediate offset first, which canonically is moved as low as
3641   // possible.
3642   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3643 
3644   if (ConstOffset != 0) {
3645     if (TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS,
3646                               SIInstrFlags::FlatGlobal)) {
3647       Addr = PtrBase;
3648       ImmOffset = ConstOffset;
3649     } else {
3650       auto PtrBaseDef = getDefSrcRegIgnoringCopies(PtrBase, *MRI);
3651       if (!PtrBaseDef)
3652         return None;
3653 
3654       if (isSGPR(PtrBaseDef->Reg)) {
3655         if (ConstOffset > 0) {
3656           // Offset is too large.
3657           //
3658           // saddr + large_offset -> saddr +
3659           //                         (voffset = large_offset & ~MaxOffset) +
3660           //                         (large_offset & MaxOffset);
3661           int64_t SplitImmOffset, RemainderOffset;
3662           std::tie(SplitImmOffset, RemainderOffset) = TII.splitFlatOffset(
3663               ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, SIInstrFlags::FlatGlobal);
3664 
3665           if (isUInt<32>(RemainderOffset)) {
3666             MachineInstr *MI = Root.getParent();
3667             MachineBasicBlock *MBB = MI->getParent();
3668             Register HighBits =
3669                 MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3670 
3671             BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3672                     HighBits)
3673                 .addImm(RemainderOffset);
3674 
3675             return {{
3676                 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrBase); }, // saddr
3677                 [=](MachineInstrBuilder &MIB) {
3678                   MIB.addReg(HighBits);
3679                 }, // voffset
3680                 [=](MachineInstrBuilder &MIB) { MIB.addImm(SplitImmOffset); },
3681             }};
3682           }
3683         }
3684 
3685         // We are adding a 64 bit SGPR and a constant. If constant bus limit
3686         // is 1 we would need to perform 1 or 2 extra moves for each half of
3687         // the constant and it is better to do a scalar add and then issue a
3688         // single VALU instruction to materialize zero. Otherwise it is less
3689         // instructions to perform VALU adds with immediates or inline literals.
3690         unsigned NumLiterals =
3691             !TII.isInlineConstant(APInt(32, ConstOffset & 0xffffffff)) +
3692             !TII.isInlineConstant(APInt(32, ConstOffset >> 32));
3693         if (STI.getConstantBusLimit(AMDGPU::V_ADD_U32_e64) > NumLiterals)
3694           return None;
3695       }
3696     }
3697   }
3698 
3699   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3700   if (!AddrDef)
3701     return None;
3702 
3703   // Match the variable offset.
3704   if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3705     // Look through the SGPR->VGPR copy.
3706     Register SAddr =
3707         getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
3708 
3709     if (SAddr && isSGPR(SAddr)) {
3710       Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
3711 
3712       // It's possible voffset is an SGPR here, but the copy to VGPR will be
3713       // inserted later.
3714       if (Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) {
3715         return {{[=](MachineInstrBuilder &MIB) { // saddr
3716                    MIB.addReg(SAddr);
3717                  },
3718                  [=](MachineInstrBuilder &MIB) { // voffset
3719                    MIB.addReg(VOffset);
3720                  },
3721                  [=](MachineInstrBuilder &MIB) { // offset
3722                    MIB.addImm(ImmOffset);
3723                  }}};
3724       }
3725     }
3726   }
3727 
3728   // FIXME: We should probably have folded COPY (G_IMPLICIT_DEF) earlier, and
3729   // drop this.
3730   if (AddrDef->MI->getOpcode() == AMDGPU::G_IMPLICIT_DEF ||
3731       AddrDef->MI->getOpcode() == AMDGPU::G_CONSTANT || !isSGPR(AddrDef->Reg))
3732     return None;
3733 
3734   // It's cheaper to materialize a single 32-bit zero for vaddr than the two
3735   // moves required to copy a 64-bit SGPR to VGPR.
3736   MachineInstr *MI = Root.getParent();
3737   MachineBasicBlock *MBB = MI->getParent();
3738   Register VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3739 
3740   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32), VOffset)
3741       .addImm(0);
3742 
3743   return {{
3744       [=](MachineInstrBuilder &MIB) { MIB.addReg(AddrDef->Reg); }, // saddr
3745       [=](MachineInstrBuilder &MIB) { MIB.addReg(VOffset); },      // voffset
3746       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); }     // offset
3747   }};
3748 }
3749 
3750 InstructionSelector::ComplexRendererFns
3751 AMDGPUInstructionSelector::selectScratchSAddr(MachineOperand &Root) const {
3752   Register Addr = Root.getReg();
3753   Register PtrBase;
3754   int64_t ConstOffset;
3755   int64_t ImmOffset = 0;
3756 
3757   // Match the immediate offset first, which canonically is moved as low as
3758   // possible.
3759   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3760 
3761   if (ConstOffset != 0 &&
3762       TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS,
3763                             SIInstrFlags::FlatScratch)) {
3764     Addr = PtrBase;
3765     ImmOffset = ConstOffset;
3766   }
3767 
3768   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3769   if (!AddrDef)
3770     return None;
3771 
3772   if (AddrDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
3773     int FI = AddrDef->MI->getOperand(1).getIndex();
3774     return {{
3775         [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
3776         [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
3777     }};
3778   }
3779 
3780   Register SAddr = AddrDef->Reg;
3781 
3782   if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3783     Register LHS = AddrDef->MI->getOperand(1).getReg();
3784     Register RHS = AddrDef->MI->getOperand(2).getReg();
3785     auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
3786     auto RHSDef = getDefSrcRegIgnoringCopies(RHS, *MRI);
3787 
3788     if (LHSDef && RHSDef &&
3789         LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX &&
3790         isSGPR(RHSDef->Reg)) {
3791       int FI = LHSDef->MI->getOperand(1).getIndex();
3792       MachineInstr &I = *Root.getParent();
3793       MachineBasicBlock *BB = I.getParent();
3794       const DebugLoc &DL = I.getDebugLoc();
3795       SAddr = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3796 
3797       BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_I32), SAddr)
3798           .addFrameIndex(FI)
3799           .addReg(RHSDef->Reg);
3800     }
3801   }
3802 
3803   if (!isSGPR(SAddr))
3804     return None;
3805 
3806   return {{
3807       [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); }, // saddr
3808       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
3809   }};
3810 }
3811 
3812 InstructionSelector::ComplexRendererFns
3813 AMDGPUInstructionSelector::selectScratchSVAddr(MachineOperand &Root) const {
3814   Register Addr = Root.getReg();
3815   Register PtrBase;
3816   int64_t ConstOffset;
3817   int64_t ImmOffset = 0;
3818 
3819   // Match the immediate offset first, which canonically is moved as low as
3820   // possible.
3821   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3822 
3823   if (ConstOffset != 0 &&
3824       TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS, true)) {
3825     Addr = PtrBase;
3826     ImmOffset = ConstOffset;
3827   }
3828 
3829   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3830   if (!AddrDef)
3831     return None;
3832 
3833   if (AddrDef->MI->getOpcode() != AMDGPU::G_PTR_ADD)
3834     return None;
3835 
3836   Register RHS = AddrDef->MI->getOperand(2).getReg();
3837   if (RBI.getRegBank(RHS, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID)
3838     return None;
3839 
3840   Register LHS = AddrDef->MI->getOperand(1).getReg();
3841   auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
3842 
3843   if (LHSDef && LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
3844     int FI = LHSDef->MI->getOperand(1).getIndex();
3845     return {{
3846         [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr
3847         [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
3848         [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
3849     }};
3850   }
3851 
3852   if (!isSGPR(LHS))
3853     return None;
3854 
3855   return {{
3856       [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr
3857       [=](MachineInstrBuilder &MIB) { MIB.addReg(LHS); }, // saddr
3858       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
3859   }};
3860 }
3861 
3862 InstructionSelector::ComplexRendererFns
3863 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
3864   MachineInstr *MI = Root.getParent();
3865   MachineBasicBlock *MBB = MI->getParent();
3866   MachineFunction *MF = MBB->getParent();
3867   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3868 
3869   int64_t Offset = 0;
3870   if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) &&
3871       Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) {
3872     Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3873 
3874     // TODO: Should this be inside the render function? The iterator seems to
3875     // move.
3876     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3877             HighBits)
3878       .addImm(Offset & ~4095);
3879 
3880     return {{[=](MachineInstrBuilder &MIB) { // rsrc
3881                MIB.addReg(Info->getScratchRSrcReg());
3882              },
3883              [=](MachineInstrBuilder &MIB) { // vaddr
3884                MIB.addReg(HighBits);
3885              },
3886              [=](MachineInstrBuilder &MIB) { // soffset
3887                // Use constant zero for soffset and rely on eliminateFrameIndex
3888                // to choose the appropriate frame register if need be.
3889                MIB.addImm(0);
3890              },
3891              [=](MachineInstrBuilder &MIB) { // offset
3892                MIB.addImm(Offset & 4095);
3893              }}};
3894   }
3895 
3896   assert(Offset == 0 || Offset == -1);
3897 
3898   // Try to fold a frame index directly into the MUBUF vaddr field, and any
3899   // offsets.
3900   Optional<int> FI;
3901   Register VAddr = Root.getReg();
3902   if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
3903     Register PtrBase;
3904     int64_t ConstOffset;
3905     std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(VAddr, *MRI);
3906     if (ConstOffset != 0) {
3907       if (SIInstrInfo::isLegalMUBUFImmOffset(ConstOffset) &&
3908           (!STI.privateMemoryResourceIsRangeChecked() ||
3909            KnownBits->signBitIsZero(PtrBase))) {
3910         const MachineInstr *PtrBaseDef = MRI->getVRegDef(PtrBase);
3911         if (PtrBaseDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
3912           FI = PtrBaseDef->getOperand(1).getIndex();
3913         else
3914           VAddr = PtrBase;
3915         Offset = ConstOffset;
3916       }
3917     } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
3918       FI = RootDef->getOperand(1).getIndex();
3919     }
3920   }
3921 
3922   return {{[=](MachineInstrBuilder &MIB) { // rsrc
3923              MIB.addReg(Info->getScratchRSrcReg());
3924            },
3925            [=](MachineInstrBuilder &MIB) { // vaddr
3926              if (FI.hasValue())
3927                MIB.addFrameIndex(FI.getValue());
3928              else
3929                MIB.addReg(VAddr);
3930            },
3931            [=](MachineInstrBuilder &MIB) { // soffset
3932              // Use constant zero for soffset and rely on eliminateFrameIndex
3933              // to choose the appropriate frame register if need be.
3934              MIB.addImm(0);
3935            },
3936            [=](MachineInstrBuilder &MIB) { // offset
3937              MIB.addImm(Offset);
3938            }}};
3939 }
3940 
3941 bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
3942                                                 int64_t Offset) const {
3943   if (!isUInt<16>(Offset))
3944     return false;
3945 
3946   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
3947     return true;
3948 
3949   // On Southern Islands instruction with a negative base value and an offset
3950   // don't seem to work.
3951   return KnownBits->signBitIsZero(Base);
3952 }
3953 
3954 bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0,
3955                                                  int64_t Offset1,
3956                                                  unsigned Size) const {
3957   if (Offset0 % Size != 0 || Offset1 % Size != 0)
3958     return false;
3959   if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
3960     return false;
3961 
3962   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
3963     return true;
3964 
3965   // On Southern Islands instruction with a negative base value and an offset
3966   // don't seem to work.
3967   return KnownBits->signBitIsZero(Base);
3968 }
3969 
3970 bool AMDGPUInstructionSelector::isUnneededShiftMask(const MachineInstr &MI,
3971                                                     unsigned ShAmtBits) const {
3972   assert(MI.getOpcode() == TargetOpcode::G_AND);
3973 
3974   Optional<APInt> RHS = getIConstantVRegVal(MI.getOperand(2).getReg(), *MRI);
3975   if (!RHS)
3976     return false;
3977 
3978   if (RHS->countTrailingOnes() >= ShAmtBits)
3979     return true;
3980 
3981   const APInt &LHSKnownZeros =
3982       KnownBits->getKnownZeroes(MI.getOperand(1).getReg());
3983   return (LHSKnownZeros | *RHS).countTrailingOnes() >= ShAmtBits;
3984 }
3985 
3986 // Return the wave level SGPR base address if this is a wave address.
3987 static Register getWaveAddress(const MachineInstr *Def) {
3988   return Def->getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS
3989              ? Def->getOperand(1).getReg()
3990              : Register();
3991 }
3992 
3993 InstructionSelector::ComplexRendererFns
3994 AMDGPUInstructionSelector::selectMUBUFScratchOffset(
3995     MachineOperand &Root) const {
3996   Register Reg = Root.getReg();
3997   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3998 
3999   const MachineInstr *Def = MRI->getVRegDef(Reg);
4000   if (Register WaveBase = getWaveAddress(Def)) {
4001     return {{
4002         [=](MachineInstrBuilder &MIB) { // rsrc
4003           MIB.addReg(Info->getScratchRSrcReg());
4004         },
4005         [=](MachineInstrBuilder &MIB) { // soffset
4006           MIB.addReg(WaveBase);
4007         },
4008         [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // offset
4009     }};
4010   }
4011 
4012   int64_t Offset = 0;
4013 
4014   // FIXME: Copy check is a hack
4015   Register BasePtr;
4016   if (mi_match(Reg, *MRI, m_GPtrAdd(m_Reg(BasePtr), m_Copy(m_ICst(Offset))))) {
4017     if (!SIInstrInfo::isLegalMUBUFImmOffset(Offset))
4018       return {};
4019     const MachineInstr *BasePtrDef = MRI->getVRegDef(BasePtr);
4020     Register WaveBase = getWaveAddress(BasePtrDef);
4021     if (!WaveBase)
4022       return {};
4023 
4024     return {{
4025         [=](MachineInstrBuilder &MIB) { // rsrc
4026           MIB.addReg(Info->getScratchRSrcReg());
4027         },
4028         [=](MachineInstrBuilder &MIB) { // soffset
4029           MIB.addReg(WaveBase);
4030         },
4031         [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
4032     }};
4033   }
4034 
4035   if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
4036       !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
4037     return {};
4038 
4039   return {{
4040       [=](MachineInstrBuilder &MIB) { // rsrc
4041         MIB.addReg(Info->getScratchRSrcReg());
4042       },
4043       [=](MachineInstrBuilder &MIB) { // soffset
4044         MIB.addImm(0);
4045       },
4046       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
4047   }};
4048 }
4049 
4050 std::pair<Register, unsigned>
4051 AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
4052   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
4053   if (!RootDef)
4054     return std::make_pair(Root.getReg(), 0);
4055 
4056   int64_t ConstAddr = 0;
4057 
4058   Register PtrBase;
4059   int64_t Offset;
4060   std::tie(PtrBase, Offset) =
4061     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
4062 
4063   if (Offset) {
4064     if (isDSOffsetLegal(PtrBase, Offset)) {
4065       // (add n0, c0)
4066       return std::make_pair(PtrBase, Offset);
4067     }
4068   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
4069     // TODO
4070 
4071 
4072   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
4073     // TODO
4074 
4075   }
4076 
4077   return std::make_pair(Root.getReg(), 0);
4078 }
4079 
4080 InstructionSelector::ComplexRendererFns
4081 AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
4082   Register Reg;
4083   unsigned Offset;
4084   std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
4085   return {{
4086       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
4087       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
4088     }};
4089 }
4090 
4091 InstructionSelector::ComplexRendererFns
4092 AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
4093   return selectDSReadWrite2(Root, 4);
4094 }
4095 
4096 InstructionSelector::ComplexRendererFns
4097 AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const {
4098   return selectDSReadWrite2(Root, 8);
4099 }
4100 
4101 InstructionSelector::ComplexRendererFns
4102 AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root,
4103                                               unsigned Size) const {
4104   Register Reg;
4105   unsigned Offset;
4106   std::tie(Reg, Offset) = selectDSReadWrite2Impl(Root, Size);
4107   return {{
4108       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
4109       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
4110       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); }
4111     }};
4112 }
4113 
4114 std::pair<Register, unsigned>
4115 AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root,
4116                                                   unsigned Size) const {
4117   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
4118   if (!RootDef)
4119     return std::make_pair(Root.getReg(), 0);
4120 
4121   int64_t ConstAddr = 0;
4122 
4123   Register PtrBase;
4124   int64_t Offset;
4125   std::tie(PtrBase, Offset) =
4126     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
4127 
4128   if (Offset) {
4129     int64_t OffsetValue0 = Offset;
4130     int64_t OffsetValue1 = Offset + Size;
4131     if (isDSOffset2Legal(PtrBase, OffsetValue0, OffsetValue1, Size)) {
4132       // (add n0, c0)
4133       return std::make_pair(PtrBase, OffsetValue0 / Size);
4134     }
4135   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
4136     // TODO
4137 
4138   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
4139     // TODO
4140 
4141   }
4142 
4143   return std::make_pair(Root.getReg(), 0);
4144 }
4145 
4146 /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
4147 /// the base value with the constant offset. There may be intervening copies
4148 /// between \p Root and the identified constant. Returns \p Root, 0 if this does
4149 /// not match the pattern.
4150 std::pair<Register, int64_t>
4151 AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
4152   Register Root, const MachineRegisterInfo &MRI) const {
4153   MachineInstr *RootI = getDefIgnoringCopies(Root, MRI);
4154   if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
4155     return {Root, 0};
4156 
4157   MachineOperand &RHS = RootI->getOperand(2);
4158   Optional<ValueAndVReg> MaybeOffset =
4159       getIConstantVRegValWithLookThrough(RHS.getReg(), MRI);
4160   if (!MaybeOffset)
4161     return {Root, 0};
4162   return {RootI->getOperand(1).getReg(), MaybeOffset->Value.getSExtValue()};
4163 }
4164 
4165 static void addZeroImm(MachineInstrBuilder &MIB) {
4166   MIB.addImm(0);
4167 }
4168 
4169 /// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p
4170 /// BasePtr is not valid, a null base pointer will be used.
4171 static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4172                           uint32_t FormatLo, uint32_t FormatHi,
4173                           Register BasePtr) {
4174   Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4175   Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4176   Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4177   Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
4178 
4179   B.buildInstr(AMDGPU::S_MOV_B32)
4180     .addDef(RSrc2)
4181     .addImm(FormatLo);
4182   B.buildInstr(AMDGPU::S_MOV_B32)
4183     .addDef(RSrc3)
4184     .addImm(FormatHi);
4185 
4186   // Build the half of the subregister with the constants before building the
4187   // full 128-bit register. If we are building multiple resource descriptors,
4188   // this will allow CSEing of the 2-component register.
4189   B.buildInstr(AMDGPU::REG_SEQUENCE)
4190     .addDef(RSrcHi)
4191     .addReg(RSrc2)
4192     .addImm(AMDGPU::sub0)
4193     .addReg(RSrc3)
4194     .addImm(AMDGPU::sub1);
4195 
4196   Register RSrcLo = BasePtr;
4197   if (!BasePtr) {
4198     RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4199     B.buildInstr(AMDGPU::S_MOV_B64)
4200       .addDef(RSrcLo)
4201       .addImm(0);
4202   }
4203 
4204   B.buildInstr(AMDGPU::REG_SEQUENCE)
4205     .addDef(RSrc)
4206     .addReg(RSrcLo)
4207     .addImm(AMDGPU::sub0_sub1)
4208     .addReg(RSrcHi)
4209     .addImm(AMDGPU::sub2_sub3);
4210 
4211   return RSrc;
4212 }
4213 
4214 static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4215                                 const SIInstrInfo &TII, Register BasePtr) {
4216   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
4217 
4218   // FIXME: Why are half the "default" bits ignored based on the addressing
4219   // mode?
4220   return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
4221 }
4222 
4223 static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4224                                const SIInstrInfo &TII, Register BasePtr) {
4225   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
4226 
4227   // FIXME: Why are half the "default" bits ignored based on the addressing
4228   // mode?
4229   return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
4230 }
4231 
4232 AMDGPUInstructionSelector::MUBUFAddressData
4233 AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
4234   MUBUFAddressData Data;
4235   Data.N0 = Src;
4236 
4237   Register PtrBase;
4238   int64_t Offset;
4239 
4240   std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
4241   if (isUInt<32>(Offset)) {
4242     Data.N0 = PtrBase;
4243     Data.Offset = Offset;
4244   }
4245 
4246   if (MachineInstr *InputAdd
4247       = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
4248     Data.N2 = InputAdd->getOperand(1).getReg();
4249     Data.N3 = InputAdd->getOperand(2).getReg();
4250 
4251     // FIXME: Need to fix extra SGPR->VGPRcopies inserted
4252     // FIXME: Don't know this was defined by operand 0
4253     //
4254     // TODO: Remove this when we have copy folding optimizations after
4255     // RegBankSelect.
4256     Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
4257     Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
4258   }
4259 
4260   return Data;
4261 }
4262 
4263 /// Return if the addr64 mubuf mode should be used for the given address.
4264 bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
4265   // (ptr_add N2, N3) -> addr64, or
4266   // (ptr_add (ptr_add N2, N3), C1) -> addr64
4267   if (Addr.N2)
4268     return true;
4269 
4270   const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
4271   return N0Bank->getID() == AMDGPU::VGPRRegBankID;
4272 }
4273 
4274 /// Split an immediate offset \p ImmOffset depending on whether it fits in the
4275 /// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
4276 /// component.
4277 void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
4278   MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
4279   if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
4280     return;
4281 
4282   // Illegal offset, store it in soffset.
4283   SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
4284   B.buildInstr(AMDGPU::S_MOV_B32)
4285     .addDef(SOffset)
4286     .addImm(ImmOffset);
4287   ImmOffset = 0;
4288 }
4289 
4290 bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
4291   MachineOperand &Root, Register &VAddr, Register &RSrcReg,
4292   Register &SOffset, int64_t &Offset) const {
4293   // FIXME: Predicates should stop this from reaching here.
4294   // addr64 bit was removed for volcanic islands.
4295   if (!STI.hasAddr64() || STI.useFlatForGlobal())
4296     return false;
4297 
4298   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4299   if (!shouldUseAddr64(AddrData))
4300     return false;
4301 
4302   Register N0 = AddrData.N0;
4303   Register N2 = AddrData.N2;
4304   Register N3 = AddrData.N3;
4305   Offset = AddrData.Offset;
4306 
4307   // Base pointer for the SRD.
4308   Register SRDPtr;
4309 
4310   if (N2) {
4311     if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4312       assert(N3);
4313       if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4314         // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
4315         // addr64, and construct the default resource from a 0 address.
4316         VAddr = N0;
4317       } else {
4318         SRDPtr = N3;
4319         VAddr = N2;
4320       }
4321     } else {
4322       // N2 is not divergent.
4323       SRDPtr = N2;
4324       VAddr = N3;
4325     }
4326   } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4327     // Use the default null pointer in the resource
4328     VAddr = N0;
4329   } else {
4330     // N0 -> offset, or
4331     // (N0 + C1) -> offset
4332     SRDPtr = N0;
4333   }
4334 
4335   MachineIRBuilder B(*Root.getParent());
4336   RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
4337   splitIllegalMUBUFOffset(B, SOffset, Offset);
4338   return true;
4339 }
4340 
4341 bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
4342   MachineOperand &Root, Register &RSrcReg, Register &SOffset,
4343   int64_t &Offset) const {
4344 
4345   // FIXME: Pattern should not reach here.
4346   if (STI.useFlatForGlobal())
4347     return false;
4348 
4349   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4350   if (shouldUseAddr64(AddrData))
4351     return false;
4352 
4353   // N0 -> offset, or
4354   // (N0 + C1) -> offset
4355   Register SRDPtr = AddrData.N0;
4356   Offset = AddrData.Offset;
4357 
4358   // TODO: Look through extensions for 32-bit soffset.
4359   MachineIRBuilder B(*Root.getParent());
4360 
4361   RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
4362   splitIllegalMUBUFOffset(B, SOffset, Offset);
4363   return true;
4364 }
4365 
4366 InstructionSelector::ComplexRendererFns
4367 AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
4368   Register VAddr;
4369   Register RSrcReg;
4370   Register SOffset;
4371   int64_t Offset = 0;
4372 
4373   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
4374     return {};
4375 
4376   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
4377   // pattern.
4378   return {{
4379       [=](MachineInstrBuilder &MIB) {  // rsrc
4380         MIB.addReg(RSrcReg);
4381       },
4382       [=](MachineInstrBuilder &MIB) { // vaddr
4383         MIB.addReg(VAddr);
4384       },
4385       [=](MachineInstrBuilder &MIB) { // soffset
4386         if (SOffset)
4387           MIB.addReg(SOffset);
4388         else
4389           MIB.addImm(0);
4390       },
4391       [=](MachineInstrBuilder &MIB) { // offset
4392         MIB.addImm(Offset);
4393       },
4394       addZeroImm, //  cpol
4395       addZeroImm, //  tfe
4396       addZeroImm  //  swz
4397     }};
4398 }
4399 
4400 InstructionSelector::ComplexRendererFns
4401 AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
4402   Register RSrcReg;
4403   Register SOffset;
4404   int64_t Offset = 0;
4405 
4406   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4407     return {};
4408 
4409   return {{
4410       [=](MachineInstrBuilder &MIB) {  // rsrc
4411         MIB.addReg(RSrcReg);
4412       },
4413       [=](MachineInstrBuilder &MIB) { // soffset
4414         if (SOffset)
4415           MIB.addReg(SOffset);
4416         else
4417           MIB.addImm(0);
4418       },
4419       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4420       addZeroImm, //  cpol
4421       addZeroImm, //  tfe
4422       addZeroImm, //  swz
4423     }};
4424 }
4425 
4426 InstructionSelector::ComplexRendererFns
4427 AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const {
4428   Register VAddr;
4429   Register RSrcReg;
4430   Register SOffset;
4431   int64_t Offset = 0;
4432 
4433   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
4434     return {};
4435 
4436   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
4437   // pattern.
4438   return {{
4439       [=](MachineInstrBuilder &MIB) {  // rsrc
4440         MIB.addReg(RSrcReg);
4441       },
4442       [=](MachineInstrBuilder &MIB) { // vaddr
4443         MIB.addReg(VAddr);
4444       },
4445       [=](MachineInstrBuilder &MIB) { // soffset
4446         if (SOffset)
4447           MIB.addReg(SOffset);
4448         else
4449           MIB.addImm(0);
4450       },
4451       [=](MachineInstrBuilder &MIB) { // offset
4452         MIB.addImm(Offset);
4453       },
4454       [=](MachineInstrBuilder &MIB) {
4455         MIB.addImm(AMDGPU::CPol::GLC); // cpol
4456       }
4457     }};
4458 }
4459 
4460 InstructionSelector::ComplexRendererFns
4461 AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
4462   Register RSrcReg;
4463   Register SOffset;
4464   int64_t Offset = 0;
4465 
4466   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4467     return {};
4468 
4469   return {{
4470       [=](MachineInstrBuilder &MIB) {  // rsrc
4471         MIB.addReg(RSrcReg);
4472       },
4473       [=](MachineInstrBuilder &MIB) { // soffset
4474         if (SOffset)
4475           MIB.addReg(SOffset);
4476         else
4477           MIB.addImm(0);
4478       },
4479       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4480       [=](MachineInstrBuilder &MIB) { MIB.addImm(AMDGPU::CPol::GLC); } // cpol
4481     }};
4482 }
4483 
4484 /// Get an immediate that must be 32-bits, and treated as zero extended.
4485 static Optional<uint64_t> getConstantZext32Val(Register Reg,
4486                                                const MachineRegisterInfo &MRI) {
4487   // getIConstantVRegVal sexts any values, so see if that matters.
4488   Optional<int64_t> OffsetVal = getIConstantVRegSExtVal(Reg, MRI);
4489   if (!OffsetVal || !isInt<32>(*OffsetVal))
4490     return None;
4491   return Lo_32(*OffsetVal);
4492 }
4493 
4494 InstructionSelector::ComplexRendererFns
4495 AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
4496   Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
4497   if (!OffsetVal)
4498     return {};
4499 
4500   Optional<int64_t> EncodedImm =
4501       AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
4502   if (!EncodedImm)
4503     return {};
4504 
4505   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
4506 }
4507 
4508 InstructionSelector::ComplexRendererFns
4509 AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
4510   assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
4511 
4512   Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
4513   if (!OffsetVal)
4514     return {};
4515 
4516   Optional<int64_t> EncodedImm
4517     = AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
4518   if (!EncodedImm)
4519     return {};
4520 
4521   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
4522 }
4523 
4524 void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
4525                                                  const MachineInstr &MI,
4526                                                  int OpIdx) const {
4527   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4528          "Expected G_CONSTANT");
4529   MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
4530 }
4531 
4532 void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
4533                                                 const MachineInstr &MI,
4534                                                 int OpIdx) const {
4535   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4536          "Expected G_CONSTANT");
4537   MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
4538 }
4539 
4540 void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
4541                                                  const MachineInstr &MI,
4542                                                  int OpIdx) const {
4543   assert(OpIdx == -1);
4544 
4545   const MachineOperand &Op = MI.getOperand(1);
4546   if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
4547     MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
4548   else {
4549     assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
4550     MIB.addImm(Op.getCImm()->getSExtValue());
4551   }
4552 }
4553 
4554 void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
4555                                                 const MachineInstr &MI,
4556                                                 int OpIdx) const {
4557   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4558          "Expected G_CONSTANT");
4559   MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
4560 }
4561 
4562 /// This only really exists to satisfy DAG type checking machinery, so is a
4563 /// no-op here.
4564 void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
4565                                                 const MachineInstr &MI,
4566                                                 int OpIdx) const {
4567   MIB.addImm(MI.getOperand(OpIdx).getImm());
4568 }
4569 
4570 void AMDGPUInstructionSelector::renderExtractCPol(MachineInstrBuilder &MIB,
4571                                                   const MachineInstr &MI,
4572                                                   int OpIdx) const {
4573   assert(OpIdx >= 0 && "expected to match an immediate operand");
4574   MIB.addImm(MI.getOperand(OpIdx).getImm() & AMDGPU::CPol::ALL);
4575 }
4576 
4577 void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
4578                                                  const MachineInstr &MI,
4579                                                  int OpIdx) const {
4580   assert(OpIdx >= 0 && "expected to match an immediate operand");
4581   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1);
4582 }
4583 
4584 void AMDGPUInstructionSelector::renderSetGLC(MachineInstrBuilder &MIB,
4585                                              const MachineInstr &MI,
4586                                              int OpIdx) const {
4587   assert(OpIdx >= 0 && "expected to match an immediate operand");
4588   MIB.addImm(MI.getOperand(OpIdx).getImm() | AMDGPU::CPol::GLC);
4589 }
4590 
4591 void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
4592                                                  const MachineInstr &MI,
4593                                                  int OpIdx) const {
4594   MIB.addFrameIndex((MI.getOperand(1).getIndex()));
4595 }
4596 
4597 bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
4598   return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
4599 }
4600 
4601 bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
4602   return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
4603 }
4604 
4605 bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
4606   return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
4607 }
4608 
4609 bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
4610   return TII.isInlineConstant(Imm);
4611 }
4612