1 //===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the InstructionSelector class for
10 /// AMDGPU.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13 
14 #include "AMDGPUInstructionSelector.h"
15 #include "AMDGPU.h"
16 #include "AMDGPUGlobalISelUtils.h"
17 #include "AMDGPUInstrInfo.h"
18 #include "AMDGPURegisterBankInfo.h"
19 #include "AMDGPUTargetMachine.h"
20 #include "SIMachineFunctionInfo.h"
21 #include "Utils/AMDGPUBaseInfo.h"
22 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
23 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
24 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
25 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
26 #include "llvm/CodeGen/MachineFrameInfo.h"
27 #include "llvm/IR/DiagnosticInfo.h"
28 #include "llvm/IR/IntrinsicsAMDGPU.h"
29 
30 #define DEBUG_TYPE "amdgpu-isel"
31 
32 using namespace llvm;
33 using namespace MIPatternMatch;
34 
35 static cl::opt<bool> AllowRiskySelect(
36   "amdgpu-global-isel-risky-select",
37   cl::desc("Allow GlobalISel to select cases that are likely to not work yet"),
38   cl::init(false),
39   cl::ReallyHidden);
40 
41 #define GET_GLOBALISEL_IMPL
42 #define AMDGPUSubtarget GCNSubtarget
43 #include "AMDGPUGenGlobalISel.inc"
44 #undef GET_GLOBALISEL_IMPL
45 #undef AMDGPUSubtarget
46 
47 AMDGPUInstructionSelector::AMDGPUInstructionSelector(
48     const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
49     const AMDGPUTargetMachine &TM)
50     : TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
51       STI(STI),
52       EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
53 #define GET_GLOBALISEL_PREDICATES_INIT
54 #include "AMDGPUGenGlobalISel.inc"
55 #undef GET_GLOBALISEL_PREDICATES_INIT
56 #define GET_GLOBALISEL_TEMPORARIES_INIT
57 #include "AMDGPUGenGlobalISel.inc"
58 #undef GET_GLOBALISEL_TEMPORARIES_INIT
59 {
60 }
61 
62 const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
63 
64 void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
65                                         CodeGenCoverage &CoverageInfo,
66                                         ProfileSummaryInfo *PSI,
67                                         BlockFrequencyInfo *BFI) {
68   MRI = &MF.getRegInfo();
69   Subtarget = &MF.getSubtarget<GCNSubtarget>();
70   InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
71 }
72 
73 bool AMDGPUInstructionSelector::isVCC(Register Reg,
74                                       const MachineRegisterInfo &MRI) const {
75   // The verifier is oblivious to s1 being a valid value for wavesize registers.
76   if (Reg.isPhysical())
77     return false;
78 
79   auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
80   const TargetRegisterClass *RC =
81       RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
82   if (RC) {
83     const LLT Ty = MRI.getType(Reg);
84     return RC->hasSuperClassEq(TRI.getBoolRC()) &&
85            Ty.isValid() && Ty.getSizeInBits() == 1;
86   }
87 
88   const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
89   return RB->getID() == AMDGPU::VCCRegBankID;
90 }
91 
92 bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
93                                                         unsigned NewOpc) const {
94   MI.setDesc(TII.get(NewOpc));
95   MI.removeOperand(1); // Remove intrinsic ID.
96   MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
97 
98   MachineOperand &Dst = MI.getOperand(0);
99   MachineOperand &Src = MI.getOperand(1);
100 
101   // TODO: This should be legalized to s32 if needed
102   if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
103     return false;
104 
105   const TargetRegisterClass *DstRC
106     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
107   const TargetRegisterClass *SrcRC
108     = TRI.getConstrainedRegClassForOperand(Src, *MRI);
109   if (!DstRC || DstRC != SrcRC)
110     return false;
111 
112   return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
113          RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
114 }
115 
116 bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
117   const DebugLoc &DL = I.getDebugLoc();
118   MachineBasicBlock *BB = I.getParent();
119   I.setDesc(TII.get(TargetOpcode::COPY));
120 
121   const MachineOperand &Src = I.getOperand(1);
122   MachineOperand &Dst = I.getOperand(0);
123   Register DstReg = Dst.getReg();
124   Register SrcReg = Src.getReg();
125 
126   if (isVCC(DstReg, *MRI)) {
127     if (SrcReg == AMDGPU::SCC) {
128       const TargetRegisterClass *RC
129         = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
130       if (!RC)
131         return true;
132       return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
133     }
134 
135     if (!isVCC(SrcReg, *MRI)) {
136       // TODO: Should probably leave the copy and let copyPhysReg expand it.
137       if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
138         return false;
139 
140       const TargetRegisterClass *SrcRC
141         = TRI.getConstrainedRegClassForOperand(Src, *MRI);
142 
143       Optional<ValueAndVReg> ConstVal =
144           getIConstantVRegValWithLookThrough(SrcReg, *MRI, true);
145       if (ConstVal) {
146         unsigned MovOpc =
147             STI.isWave64() ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
148         BuildMI(*BB, &I, DL, TII.get(MovOpc), DstReg)
149             .addImm(ConstVal->Value.getBoolValue() ? -1 : 0);
150       } else {
151         Register MaskedReg = MRI->createVirtualRegister(SrcRC);
152 
153         // We can't trust the high bits at this point, so clear them.
154 
155         // TODO: Skip masking high bits if def is known boolean.
156 
157         unsigned AndOpc =
158             TRI.isSGPRClass(SrcRC) ? AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
159         BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
160             .addImm(1)
161             .addReg(SrcReg);
162         BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
163             .addImm(0)
164             .addReg(MaskedReg);
165       }
166 
167       if (!MRI->getRegClassOrNull(SrcReg))
168         MRI->setRegClass(SrcReg, SrcRC);
169       I.eraseFromParent();
170       return true;
171     }
172 
173     const TargetRegisterClass *RC =
174       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
175     if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
176       return false;
177 
178     return true;
179   }
180 
181   for (const MachineOperand &MO : I.operands()) {
182     if (MO.getReg().isPhysical())
183       continue;
184 
185     const TargetRegisterClass *RC =
186             TRI.getConstrainedRegClassForOperand(MO, *MRI);
187     if (!RC)
188       continue;
189     RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
190   }
191   return true;
192 }
193 
194 bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
195   const Register DefReg = I.getOperand(0).getReg();
196   const LLT DefTy = MRI->getType(DefReg);
197   if (DefTy == LLT::scalar(1)) {
198     if (!AllowRiskySelect) {
199       LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n");
200       return false;
201     }
202 
203     LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n");
204   }
205 
206   // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)
207 
208   const RegClassOrRegBank &RegClassOrBank =
209     MRI->getRegClassOrRegBank(DefReg);
210 
211   const TargetRegisterClass *DefRC
212     = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
213   if (!DefRC) {
214     if (!DefTy.isValid()) {
215       LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
216       return false;
217     }
218 
219     const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
220     DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB);
221     if (!DefRC) {
222       LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
223       return false;
224     }
225   }
226 
227   // TODO: Verify that all registers have the same bank
228   I.setDesc(TII.get(TargetOpcode::PHI));
229   return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
230 }
231 
232 MachineOperand
233 AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
234                                            const TargetRegisterClass &SubRC,
235                                            unsigned SubIdx) const {
236 
237   MachineInstr *MI = MO.getParent();
238   MachineBasicBlock *BB = MO.getParent()->getParent();
239   Register DstReg = MRI->createVirtualRegister(&SubRC);
240 
241   if (MO.isReg()) {
242     unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
243     Register Reg = MO.getReg();
244     BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
245             .addReg(Reg, 0, ComposedSubIdx);
246 
247     return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
248                                      MO.isKill(), MO.isDead(), MO.isUndef(),
249                                      MO.isEarlyClobber(), 0, MO.isDebug(),
250                                      MO.isInternalRead());
251   }
252 
253   assert(MO.isImm());
254 
255   APInt Imm(64, MO.getImm());
256 
257   switch (SubIdx) {
258   default:
259     llvm_unreachable("do not know to split immediate with this sub index.");
260   case AMDGPU::sub0:
261     return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
262   case AMDGPU::sub1:
263     return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
264   }
265 }
266 
267 static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
268   switch (Opc) {
269   case AMDGPU::G_AND:
270     return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
271   case AMDGPU::G_OR:
272     return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
273   case AMDGPU::G_XOR:
274     return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
275   default:
276     llvm_unreachable("not a bit op");
277   }
278 }
279 
280 bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
281   Register DstReg = I.getOperand(0).getReg();
282   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
283 
284   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
285   if (DstRB->getID() != AMDGPU::SGPRRegBankID &&
286       DstRB->getID() != AMDGPU::VCCRegBankID)
287     return false;
288 
289   bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID &&
290                             STI.isWave64());
291   I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64)));
292 
293   // Dead implicit-def of scc
294   I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
295                                          true, // isImp
296                                          false, // isKill
297                                          true)); // isDead
298   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
299 }
300 
301 bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
302   MachineBasicBlock *BB = I.getParent();
303   MachineFunction *MF = BB->getParent();
304   Register DstReg = I.getOperand(0).getReg();
305   const DebugLoc &DL = I.getDebugLoc();
306   LLT Ty = MRI->getType(DstReg);
307   if (Ty.isVector())
308     return false;
309 
310   unsigned Size = Ty.getSizeInBits();
311   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
312   const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
313   const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;
314 
315   if (Size == 32) {
316     if (IsSALU) {
317       const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
318       MachineInstr *Add =
319         BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
320         .add(I.getOperand(1))
321         .add(I.getOperand(2));
322       I.eraseFromParent();
323       return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
324     }
325 
326     if (STI.hasAddNoCarry()) {
327       const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
328       I.setDesc(TII.get(Opc));
329       I.addOperand(*MF, MachineOperand::CreateImm(0));
330       I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
331       return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
332     }
333 
334     const unsigned Opc = Sub ? AMDGPU::V_SUB_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e64;
335 
336     Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
337     MachineInstr *Add
338       = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
339       .addDef(UnusedCarry, RegState::Dead)
340       .add(I.getOperand(1))
341       .add(I.getOperand(2))
342       .addImm(0);
343     I.eraseFromParent();
344     return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
345   }
346 
347   assert(!Sub && "illegal sub should not reach here");
348 
349   const TargetRegisterClass &RC
350     = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
351   const TargetRegisterClass &HalfRC
352     = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;
353 
354   MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
355   MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
356   MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
357   MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));
358 
359   Register DstLo = MRI->createVirtualRegister(&HalfRC);
360   Register DstHi = MRI->createVirtualRegister(&HalfRC);
361 
362   if (IsSALU) {
363     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
364       .add(Lo1)
365       .add(Lo2);
366     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
367       .add(Hi1)
368       .add(Hi2);
369   } else {
370     const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
371     Register CarryReg = MRI->createVirtualRegister(CarryRC);
372     BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo)
373       .addDef(CarryReg)
374       .add(Lo1)
375       .add(Lo2)
376       .addImm(0);
377     MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
378       .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
379       .add(Hi1)
380       .add(Hi2)
381       .addReg(CarryReg, RegState::Kill)
382       .addImm(0);
383 
384     if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
385       return false;
386   }
387 
388   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
389     .addReg(DstLo)
390     .addImm(AMDGPU::sub0)
391     .addReg(DstHi)
392     .addImm(AMDGPU::sub1);
393 
394 
395   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
396     return false;
397 
398   I.eraseFromParent();
399   return true;
400 }
401 
402 bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
403   MachineInstr &I) const {
404   MachineBasicBlock *BB = I.getParent();
405   MachineFunction *MF = BB->getParent();
406   const DebugLoc &DL = I.getDebugLoc();
407   Register Dst0Reg = I.getOperand(0).getReg();
408   Register Dst1Reg = I.getOperand(1).getReg();
409   const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
410                      I.getOpcode() == AMDGPU::G_UADDE;
411   const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
412                           I.getOpcode() == AMDGPU::G_USUBE;
413 
414   if (isVCC(Dst1Reg, *MRI)) {
415     unsigned NoCarryOpc =
416         IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
417     unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
418     I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
419     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
420     I.addOperand(*MF, MachineOperand::CreateImm(0));
421     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
422   }
423 
424   Register Src0Reg = I.getOperand(2).getReg();
425   Register Src1Reg = I.getOperand(3).getReg();
426 
427   if (HasCarryIn) {
428     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
429       .addReg(I.getOperand(4).getReg());
430   }
431 
432   unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
433   unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
434 
435   BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
436     .add(I.getOperand(2))
437     .add(I.getOperand(3));
438   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
439     .addReg(AMDGPU::SCC);
440 
441   if (!MRI->getRegClassOrNull(Dst1Reg))
442     MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);
443 
444   if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
445       !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
446       !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
447     return false;
448 
449   if (HasCarryIn &&
450       !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
451                                     AMDGPU::SReg_32RegClass, *MRI))
452     return false;
453 
454   I.eraseFromParent();
455   return true;
456 }
457 
458 // TODO: We should probably legalize these to only using 32-bit results.
459 bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
460   MachineBasicBlock *BB = I.getParent();
461   Register DstReg = I.getOperand(0).getReg();
462   Register SrcReg = I.getOperand(1).getReg();
463   LLT DstTy = MRI->getType(DstReg);
464   LLT SrcTy = MRI->getType(SrcReg);
465   const unsigned SrcSize = SrcTy.getSizeInBits();
466   unsigned DstSize = DstTy.getSizeInBits();
467 
468   // TODO: Should handle any multiple of 32 offset.
469   unsigned Offset = I.getOperand(2).getImm();
470   if (Offset % 32 != 0 || DstSize > 128)
471     return false;
472 
473   // 16-bit operations really use 32-bit registers.
474   // FIXME: Probably should not allow 16-bit G_EXTRACT results.
475   if (DstSize == 16)
476     DstSize = 32;
477 
478   const TargetRegisterClass *DstRC =
479     TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
480   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
481     return false;
482 
483   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
484   const TargetRegisterClass *SrcRC =
485       TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank);
486   if (!SrcRC)
487     return false;
488   unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
489                                                          DstSize / 32);
490   SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
491   if (!SrcRC)
492     return false;
493 
494   SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
495                                     *SrcRC, I.getOperand(1));
496   const DebugLoc &DL = I.getDebugLoc();
497   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
498     .addReg(SrcReg, 0, SubReg);
499 
500   I.eraseFromParent();
501   return true;
502 }
503 
504 bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
505   MachineBasicBlock *BB = MI.getParent();
506   Register DstReg = MI.getOperand(0).getReg();
507   LLT DstTy = MRI->getType(DstReg);
508   LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());
509 
510   const unsigned SrcSize = SrcTy.getSizeInBits();
511   if (SrcSize < 32)
512     return selectImpl(MI, *CoverageInfo);
513 
514   const DebugLoc &DL = MI.getDebugLoc();
515   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
516   const unsigned DstSize = DstTy.getSizeInBits();
517   const TargetRegisterClass *DstRC =
518       TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
519   if (!DstRC)
520     return false;
521 
522   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
523   MachineInstrBuilder MIB =
524     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
525   for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
526     MachineOperand &Src = MI.getOperand(I + 1);
527     MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
528     MIB.addImm(SubRegs[I]);
529 
530     const TargetRegisterClass *SrcRC
531       = TRI.getConstrainedRegClassForOperand(Src, *MRI);
532     if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
533       return false;
534   }
535 
536   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
537     return false;
538 
539   MI.eraseFromParent();
540   return true;
541 }
542 
543 bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
544   MachineBasicBlock *BB = MI.getParent();
545   const int NumDst = MI.getNumOperands() - 1;
546 
547   MachineOperand &Src = MI.getOperand(NumDst);
548 
549   Register SrcReg = Src.getReg();
550   Register DstReg0 = MI.getOperand(0).getReg();
551   LLT DstTy = MRI->getType(DstReg0);
552   LLT SrcTy = MRI->getType(SrcReg);
553 
554   const unsigned DstSize = DstTy.getSizeInBits();
555   const unsigned SrcSize = SrcTy.getSizeInBits();
556   const DebugLoc &DL = MI.getDebugLoc();
557   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
558 
559   const TargetRegisterClass *SrcRC =
560       TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank);
561   if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
562     return false;
563 
564   // Note we could have mixed SGPR and VGPR destination banks for an SGPR
565   // source, and this relies on the fact that the same subregister indices are
566   // used for both.
567   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
568   for (int I = 0, E = NumDst; I != E; ++I) {
569     MachineOperand &Dst = MI.getOperand(I);
570     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
571       .addReg(SrcReg, 0, SubRegs[I]);
572 
573     // Make sure the subregister index is valid for the source register.
574     SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]);
575     if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
576       return false;
577 
578     const TargetRegisterClass *DstRC =
579       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
580     if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
581       return false;
582   }
583 
584   MI.eraseFromParent();
585   return true;
586 }
587 
588 bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC(
589   MachineInstr &MI) const {
590   if (selectImpl(MI, *CoverageInfo))
591     return true;
592 
593   const LLT S32 = LLT::scalar(32);
594   const LLT V2S16 = LLT::fixed_vector(2, 16);
595 
596   Register Dst = MI.getOperand(0).getReg();
597   if (MRI->getType(Dst) != V2S16)
598     return false;
599 
600   const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
601   if (DstBank->getID() != AMDGPU::SGPRRegBankID)
602     return false;
603 
604   Register Src0 = MI.getOperand(1).getReg();
605   Register Src1 = MI.getOperand(2).getReg();
606   if (MRI->getType(Src0) != S32)
607     return false;
608 
609   const DebugLoc &DL = MI.getDebugLoc();
610   MachineBasicBlock *BB = MI.getParent();
611 
612   auto ConstSrc1 = getAnyConstantVRegValWithLookThrough(Src1, *MRI, true, true);
613   if (ConstSrc1) {
614     auto ConstSrc0 =
615         getAnyConstantVRegValWithLookThrough(Src0, *MRI, true, true);
616     if (ConstSrc0) {
617       const int64_t K0 = ConstSrc0->Value.getSExtValue();
618       const int64_t K1 = ConstSrc1->Value.getSExtValue();
619       uint32_t Lo16 = static_cast<uint32_t>(K0) & 0xffff;
620       uint32_t Hi16 = static_cast<uint32_t>(K1) & 0xffff;
621 
622       BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst)
623         .addImm(Lo16 | (Hi16 << 16));
624       MI.eraseFromParent();
625       return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI);
626     }
627   }
628 
629   // TODO: This should probably be a combine somewhere
630   // (build_vector_trunc $src0, undef -> copy $src0
631   MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
632   if (Src1Def && Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
633     MI.setDesc(TII.get(AMDGPU::COPY));
634     MI.removeOperand(2);
635     return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI) &&
636            RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI);
637   }
638 
639   Register ShiftSrc0;
640   Register ShiftSrc1;
641 
642   // With multiple uses of the shift, this will duplicate the shift and
643   // increase register pressure.
644   //
645   // (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16)
646   //  => (S_PACK_HH_B32_B16 $src0, $src1)
647   // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16))
648   //  => (S_PACK_LH_B32_B16 $src0, $src1)
649   // (build_vector_trunc $src0, $src1)
650   //  => (S_PACK_LL_B32_B16 $src0, $src1)
651 
652   bool Shift0 = mi_match(
653       Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_SpecificICst(16))));
654 
655   bool Shift1 = mi_match(
656       Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_SpecificICst(16))));
657 
658   unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
659   if (Shift0 && Shift1) {
660     Opc = AMDGPU::S_PACK_HH_B32_B16;
661     MI.getOperand(1).setReg(ShiftSrc0);
662     MI.getOperand(2).setReg(ShiftSrc1);
663   } else if (Shift1) {
664     Opc = AMDGPU::S_PACK_LH_B32_B16;
665     MI.getOperand(2).setReg(ShiftSrc1);
666   } else if (Shift0 && ConstSrc1 && ConstSrc1->Value == 0) {
667     // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16
668     auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)
669       .addReg(ShiftSrc0)
670       .addImm(16);
671 
672     MI.eraseFromParent();
673     return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
674   }
675 
676   MI.setDesc(TII.get(Opc));
677   return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
678 }
679 
680 bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
681   return selectG_ADD_SUB(I);
682 }
683 
684 bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
685   const MachineOperand &MO = I.getOperand(0);
686 
687   // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
688   // regbank check here is to know why getConstrainedRegClassForOperand failed.
689   const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
690   if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
691       (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
692     I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
693     return true;
694   }
695 
696   return false;
697 }
698 
699 bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
700   MachineBasicBlock *BB = I.getParent();
701 
702   Register DstReg = I.getOperand(0).getReg();
703   Register Src0Reg = I.getOperand(1).getReg();
704   Register Src1Reg = I.getOperand(2).getReg();
705   LLT Src1Ty = MRI->getType(Src1Reg);
706 
707   unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
708   unsigned InsSize = Src1Ty.getSizeInBits();
709 
710   int64_t Offset = I.getOperand(3).getImm();
711 
712   // FIXME: These cases should have been illegal and unnecessary to check here.
713   if (Offset % 32 != 0 || InsSize % 32 != 0)
714     return false;
715 
716   // Currently not handled by getSubRegFromChannel.
717   if (InsSize > 128)
718     return false;
719 
720   unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
721   if (SubReg == AMDGPU::NoSubRegister)
722     return false;
723 
724   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
725   const TargetRegisterClass *DstRC =
726       TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
727   if (!DstRC)
728     return false;
729 
730   const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
731   const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
732   const TargetRegisterClass *Src0RC =
733       TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank);
734   const TargetRegisterClass *Src1RC =
735       TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank);
736 
737   // Deal with weird cases where the class only partially supports the subreg
738   // index.
739   Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
740   if (!Src0RC || !Src1RC)
741     return false;
742 
743   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
744       !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
745       !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
746     return false;
747 
748   const DebugLoc &DL = I.getDebugLoc();
749   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
750     .addReg(Src0Reg)
751     .addReg(Src1Reg)
752     .addImm(SubReg);
753 
754   I.eraseFromParent();
755   return true;
756 }
757 
758 bool AMDGPUInstructionSelector::selectG_SBFX_UBFX(MachineInstr &MI) const {
759   Register DstReg = MI.getOperand(0).getReg();
760   Register SrcReg = MI.getOperand(1).getReg();
761   Register OffsetReg = MI.getOperand(2).getReg();
762   Register WidthReg = MI.getOperand(3).getReg();
763 
764   assert(RBI.getRegBank(DstReg, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID &&
765          "scalar BFX instructions are expanded in regbankselect");
766   assert(MRI->getType(MI.getOperand(0).getReg()).getSizeInBits() == 32 &&
767          "64-bit vector BFX instructions are expanded in regbankselect");
768 
769   const DebugLoc &DL = MI.getDebugLoc();
770   MachineBasicBlock *MBB = MI.getParent();
771 
772   bool IsSigned = MI.getOpcode() == TargetOpcode::G_SBFX;
773   unsigned Opc = IsSigned ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
774   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), DstReg)
775                  .addReg(SrcReg)
776                  .addReg(OffsetReg)
777                  .addReg(WidthReg);
778   MI.eraseFromParent();
779   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
780 }
781 
782 bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
783   if (STI.getLDSBankCount() != 16)
784     return selectImpl(MI, *CoverageInfo);
785 
786   Register Dst = MI.getOperand(0).getReg();
787   Register Src0 = MI.getOperand(2).getReg();
788   Register M0Val = MI.getOperand(6).getReg();
789   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
790       !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
791       !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
792     return false;
793 
794   // This requires 2 instructions. It is possible to write a pattern to support
795   // this, but the generated isel emitter doesn't correctly deal with multiple
796   // output instructions using the same physical register input. The copy to m0
797   // is incorrectly placed before the second instruction.
798   //
799   // TODO: Match source modifiers.
800 
801   Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
802   const DebugLoc &DL = MI.getDebugLoc();
803   MachineBasicBlock *MBB = MI.getParent();
804 
805   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
806     .addReg(M0Val);
807   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
808     .addImm(2)
809     .addImm(MI.getOperand(4).getImm())  // $attr
810     .addImm(MI.getOperand(3).getImm()); // $attrchan
811 
812   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
813     .addImm(0)                          // $src0_modifiers
814     .addReg(Src0)                       // $src0
815     .addImm(MI.getOperand(4).getImm())  // $attr
816     .addImm(MI.getOperand(3).getImm())  // $attrchan
817     .addImm(0)                          // $src2_modifiers
818     .addReg(InterpMov)                  // $src2 - 2 f16 values selected by high
819     .addImm(MI.getOperand(5).getImm())  // $high
820     .addImm(0)                          // $clamp
821     .addImm(0);                         // $omod
822 
823   MI.eraseFromParent();
824   return true;
825 }
826 
827 // Writelane is special in that it can use SGPR and M0 (which would normally
828 // count as using the constant bus twice - but in this case it is allowed since
829 // the lane selector doesn't count as a use of the constant bus). However, it is
830 // still required to abide by the 1 SGPR rule. Fix this up if we might have
831 // multiple SGPRs.
832 bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const {
833   // With a constant bus limit of at least 2, there's no issue.
834   if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1)
835     return selectImpl(MI, *CoverageInfo);
836 
837   MachineBasicBlock *MBB = MI.getParent();
838   const DebugLoc &DL = MI.getDebugLoc();
839   Register VDst = MI.getOperand(0).getReg();
840   Register Val = MI.getOperand(2).getReg();
841   Register LaneSelect = MI.getOperand(3).getReg();
842   Register VDstIn = MI.getOperand(4).getReg();
843 
844   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst);
845 
846   Optional<ValueAndVReg> ConstSelect =
847       getIConstantVRegValWithLookThrough(LaneSelect, *MRI);
848   if (ConstSelect) {
849     // The selector has to be an inline immediate, so we can use whatever for
850     // the other operands.
851     MIB.addReg(Val);
852     MIB.addImm(ConstSelect->Value.getSExtValue() &
853                maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
854   } else {
855     Optional<ValueAndVReg> ConstVal =
856         getIConstantVRegValWithLookThrough(Val, *MRI);
857 
858     // If the value written is an inline immediate, we can get away without a
859     // copy to m0.
860     if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value.getSExtValue(),
861                                                  STI.hasInv2PiInlineImm())) {
862       MIB.addImm(ConstVal->Value.getSExtValue());
863       MIB.addReg(LaneSelect);
864     } else {
865       MIB.addReg(Val);
866 
867       // If the lane selector was originally in a VGPR and copied with
868       // readfirstlane, there's a hazard to read the same SGPR from the
869       // VALU. Constrain to a different SGPR to help avoid needing a nop later.
870       RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI);
871 
872       BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
873         .addReg(LaneSelect);
874       MIB.addReg(AMDGPU::M0);
875     }
876   }
877 
878   MIB.addReg(VDstIn);
879 
880   MI.eraseFromParent();
881   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
882 }
883 
884 // We need to handle this here because tablegen doesn't support matching
885 // instructions with multiple outputs.
886 bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
887   Register Dst0 = MI.getOperand(0).getReg();
888   Register Dst1 = MI.getOperand(1).getReg();
889 
890   LLT Ty = MRI->getType(Dst0);
891   unsigned Opc;
892   if (Ty == LLT::scalar(32))
893     Opc = AMDGPU::V_DIV_SCALE_F32_e64;
894   else if (Ty == LLT::scalar(64))
895     Opc = AMDGPU::V_DIV_SCALE_F64_e64;
896   else
897     return false;
898 
899   // TODO: Match source modifiers.
900 
901   const DebugLoc &DL = MI.getDebugLoc();
902   MachineBasicBlock *MBB = MI.getParent();
903 
904   Register Numer = MI.getOperand(3).getReg();
905   Register Denom = MI.getOperand(4).getReg();
906   unsigned ChooseDenom = MI.getOperand(5).getImm();
907 
908   Register Src0 = ChooseDenom != 0 ? Numer : Denom;
909 
910   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0)
911     .addDef(Dst1)
912     .addImm(0)     // $src0_modifiers
913     .addUse(Src0)  // $src0
914     .addImm(0)     // $src1_modifiers
915     .addUse(Denom) // $src1
916     .addImm(0)     // $src2_modifiers
917     .addUse(Numer) // $src2
918     .addImm(0)     // $clamp
919     .addImm(0);    // $omod
920 
921   MI.eraseFromParent();
922   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
923 }
924 
925 bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
926   unsigned IntrinsicID = I.getIntrinsicID();
927   switch (IntrinsicID) {
928   case Intrinsic::amdgcn_if_break: {
929     MachineBasicBlock *BB = I.getParent();
930 
931     // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
932     // SelectionDAG uses for wave32 vs wave64.
933     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
934       .add(I.getOperand(0))
935       .add(I.getOperand(2))
936       .add(I.getOperand(3));
937 
938     Register DstReg = I.getOperand(0).getReg();
939     Register Src0Reg = I.getOperand(2).getReg();
940     Register Src1Reg = I.getOperand(3).getReg();
941 
942     I.eraseFromParent();
943 
944     for (Register Reg : { DstReg, Src0Reg, Src1Reg })
945       MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
946 
947     return true;
948   }
949   case Intrinsic::amdgcn_interp_p1_f16:
950     return selectInterpP1F16(I);
951   case Intrinsic::amdgcn_wqm:
952     return constrainCopyLikeIntrin(I, AMDGPU::WQM);
953   case Intrinsic::amdgcn_softwqm:
954     return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
955   case Intrinsic::amdgcn_strict_wwm:
956   case Intrinsic::amdgcn_wwm:
957     return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WWM);
958   case Intrinsic::amdgcn_strict_wqm:
959     return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WQM);
960   case Intrinsic::amdgcn_writelane:
961     return selectWritelane(I);
962   case Intrinsic::amdgcn_div_scale:
963     return selectDivScale(I);
964   case Intrinsic::amdgcn_icmp:
965     return selectIntrinsicIcmp(I);
966   case Intrinsic::amdgcn_ballot:
967     return selectBallot(I);
968   case Intrinsic::amdgcn_reloc_constant:
969     return selectRelocConstant(I);
970   case Intrinsic::amdgcn_groupstaticsize:
971     return selectGroupStaticSize(I);
972   case Intrinsic::returnaddress:
973     return selectReturnAddress(I);
974   case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16:
975   case Intrinsic::amdgcn_smfmac_f32_32x32x16_f16:
976   case Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16:
977   case Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16:
978   case Intrinsic::amdgcn_smfmac_i32_16x16x64_i8:
979   case Intrinsic::amdgcn_smfmac_i32_32x32x32_i8:
980     return selectSMFMACIntrin(I);
981   default:
982     return selectImpl(I, *CoverageInfo);
983   }
984 }
985 
986 static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
987   if (Size != 32 && Size != 64)
988     return -1;
989   switch (P) {
990   default:
991     llvm_unreachable("Unknown condition code!");
992   case CmpInst::ICMP_NE:
993     return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
994   case CmpInst::ICMP_EQ:
995     return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
996   case CmpInst::ICMP_SGT:
997     return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
998   case CmpInst::ICMP_SGE:
999     return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
1000   case CmpInst::ICMP_SLT:
1001     return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
1002   case CmpInst::ICMP_SLE:
1003     return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
1004   case CmpInst::ICMP_UGT:
1005     return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
1006   case CmpInst::ICMP_UGE:
1007     return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
1008   case CmpInst::ICMP_ULT:
1009     return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
1010   case CmpInst::ICMP_ULE:
1011     return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
1012   }
1013 }
1014 
1015 int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
1016                                               unsigned Size) const {
1017   if (Size == 64) {
1018     if (!STI.hasScalarCompareEq64())
1019       return -1;
1020 
1021     switch (P) {
1022     case CmpInst::ICMP_NE:
1023       return AMDGPU::S_CMP_LG_U64;
1024     case CmpInst::ICMP_EQ:
1025       return AMDGPU::S_CMP_EQ_U64;
1026     default:
1027       return -1;
1028     }
1029   }
1030 
1031   if (Size != 32)
1032     return -1;
1033 
1034   switch (P) {
1035   case CmpInst::ICMP_NE:
1036     return AMDGPU::S_CMP_LG_U32;
1037   case CmpInst::ICMP_EQ:
1038     return AMDGPU::S_CMP_EQ_U32;
1039   case CmpInst::ICMP_SGT:
1040     return AMDGPU::S_CMP_GT_I32;
1041   case CmpInst::ICMP_SGE:
1042     return AMDGPU::S_CMP_GE_I32;
1043   case CmpInst::ICMP_SLT:
1044     return AMDGPU::S_CMP_LT_I32;
1045   case CmpInst::ICMP_SLE:
1046     return AMDGPU::S_CMP_LE_I32;
1047   case CmpInst::ICMP_UGT:
1048     return AMDGPU::S_CMP_GT_U32;
1049   case CmpInst::ICMP_UGE:
1050     return AMDGPU::S_CMP_GE_U32;
1051   case CmpInst::ICMP_ULT:
1052     return AMDGPU::S_CMP_LT_U32;
1053   case CmpInst::ICMP_ULE:
1054     return AMDGPU::S_CMP_LE_U32;
1055   default:
1056     llvm_unreachable("Unknown condition code!");
1057   }
1058 }
1059 
1060 bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
1061   MachineBasicBlock *BB = I.getParent();
1062   const DebugLoc &DL = I.getDebugLoc();
1063 
1064   Register SrcReg = I.getOperand(2).getReg();
1065   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1066 
1067   auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
1068 
1069   Register CCReg = I.getOperand(0).getReg();
1070   if (!isVCC(CCReg, *MRI)) {
1071     int Opcode = getS_CMPOpcode(Pred, Size);
1072     if (Opcode == -1)
1073       return false;
1074     MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
1075             .add(I.getOperand(2))
1076             .add(I.getOperand(3));
1077     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
1078       .addReg(AMDGPU::SCC);
1079     bool Ret =
1080         constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
1081         RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
1082     I.eraseFromParent();
1083     return Ret;
1084   }
1085 
1086   int Opcode = getV_CMPOpcode(Pred, Size);
1087   if (Opcode == -1)
1088     return false;
1089 
1090   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
1091             I.getOperand(0).getReg())
1092             .add(I.getOperand(2))
1093             .add(I.getOperand(3));
1094   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
1095                                *TRI.getBoolRC(), *MRI);
1096   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1097   I.eraseFromParent();
1098   return Ret;
1099 }
1100 
1101 bool AMDGPUInstructionSelector::selectIntrinsicIcmp(MachineInstr &I) const {
1102   Register Dst = I.getOperand(0).getReg();
1103   if (isVCC(Dst, *MRI))
1104     return false;
1105 
1106   if (MRI->getType(Dst).getSizeInBits() != STI.getWavefrontSize())
1107     return false;
1108 
1109   MachineBasicBlock *BB = I.getParent();
1110   const DebugLoc &DL = I.getDebugLoc();
1111   Register SrcReg = I.getOperand(2).getReg();
1112   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1113 
1114   auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm());
1115   if (!ICmpInst::isIntPredicate(static_cast<ICmpInst::Predicate>(Pred))) {
1116     MachineInstr *ICmp =
1117         BuildMI(*BB, &I, DL, TII.get(AMDGPU::IMPLICIT_DEF), Dst);
1118 
1119     if (!RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
1120                                       *TRI.getBoolRC(), *MRI))
1121       return false;
1122     I.eraseFromParent();
1123     return true;
1124   }
1125 
1126   int Opcode = getV_CMPOpcode(Pred, Size);
1127   if (Opcode == -1)
1128     return false;
1129 
1130   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst)
1131                            .add(I.getOperand(2))
1132                            .add(I.getOperand(3));
1133   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), *TRI.getBoolRC(),
1134                                *MRI);
1135   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1136   I.eraseFromParent();
1137   return Ret;
1138 }
1139 
1140 bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
1141   MachineBasicBlock *BB = I.getParent();
1142   const DebugLoc &DL = I.getDebugLoc();
1143   Register DstReg = I.getOperand(0).getReg();
1144   const unsigned Size = MRI->getType(DstReg).getSizeInBits();
1145   const bool Is64 = Size == 64;
1146 
1147   if (Size != STI.getWavefrontSize())
1148     return false;
1149 
1150   Optional<ValueAndVReg> Arg =
1151       getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI);
1152 
1153   if (Arg.hasValue()) {
1154     const int64_t Value = Arg.getValue().Value.getSExtValue();
1155     if (Value == 0) {
1156       unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
1157       BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
1158     } else if (Value == -1) { // all ones
1159       Register SrcReg = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
1160       BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1161     } else
1162       return false;
1163   } else {
1164     Register SrcReg = I.getOperand(2).getReg();
1165     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1166   }
1167 
1168   I.eraseFromParent();
1169   return true;
1170 }
1171 
1172 bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const {
1173   Register DstReg = I.getOperand(0).getReg();
1174   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
1175   const TargetRegisterClass *DstRC = TRI.getRegClassForSizeOnBank(32, *DstBank);
1176   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
1177     return false;
1178 
1179   const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID;
1180 
1181   Module *M = MF->getFunction().getParent();
1182   const MDNode *Metadata = I.getOperand(2).getMetadata();
1183   auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString();
1184   auto RelocSymbol = cast<GlobalVariable>(
1185     M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext())));
1186 
1187   MachineBasicBlock *BB = I.getParent();
1188   BuildMI(*BB, &I, I.getDebugLoc(),
1189           TII.get(IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg)
1190     .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO);
1191 
1192   I.eraseFromParent();
1193   return true;
1194 }
1195 
1196 bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const {
1197   Triple::OSType OS = MF->getTarget().getTargetTriple().getOS();
1198 
1199   Register DstReg = I.getOperand(0).getReg();
1200   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1201   unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ?
1202     AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1203 
1204   MachineBasicBlock *MBB = I.getParent();
1205   const DebugLoc &DL = I.getDebugLoc();
1206 
1207   auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg);
1208 
1209   if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) {
1210     const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1211     MIB.addImm(MFI->getLDSSize());
1212   } else {
1213     Module *M = MF->getFunction().getParent();
1214     const GlobalValue *GV
1215       = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize);
1216     MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO);
1217   }
1218 
1219   I.eraseFromParent();
1220   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1221 }
1222 
1223 bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const {
1224   MachineBasicBlock *MBB = I.getParent();
1225   MachineFunction &MF = *MBB->getParent();
1226   const DebugLoc &DL = I.getDebugLoc();
1227 
1228   MachineOperand &Dst = I.getOperand(0);
1229   Register DstReg = Dst.getReg();
1230   unsigned Depth = I.getOperand(2).getImm();
1231 
1232   const TargetRegisterClass *RC
1233     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
1234   if (!RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) ||
1235       !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
1236     return false;
1237 
1238   // Check for kernel and shader functions
1239   if (Depth != 0 ||
1240       MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
1241     BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
1242       .addImm(0);
1243     I.eraseFromParent();
1244     return true;
1245   }
1246 
1247   MachineFrameInfo &MFI = MF.getFrameInfo();
1248   // There is a call to @llvm.returnaddress in this function
1249   MFI.setReturnAddressIsTaken(true);
1250 
1251   // Get the return address reg and mark it as an implicit live-in
1252   Register ReturnAddrReg = TRI.getReturnAddressReg(MF);
1253   Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg,
1254                                              AMDGPU::SReg_64RegClass, DL);
1255   BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
1256     .addReg(LiveIn);
1257   I.eraseFromParent();
1258   return true;
1259 }
1260 
1261 bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
1262   // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
1263   // SelectionDAG uses for wave32 vs wave64.
1264   MachineBasicBlock *BB = MI.getParent();
1265   BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
1266       .add(MI.getOperand(1));
1267 
1268   Register Reg = MI.getOperand(1).getReg();
1269   MI.eraseFromParent();
1270 
1271   if (!MRI->getRegClassOrNull(Reg))
1272     MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
1273   return true;
1274 }
1275 
1276 bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
1277   MachineInstr &MI, Intrinsic::ID IntrID) const {
1278   MachineBasicBlock *MBB = MI.getParent();
1279   MachineFunction *MF = MBB->getParent();
1280   const DebugLoc &DL = MI.getDebugLoc();
1281 
1282   unsigned IndexOperand = MI.getOperand(7).getImm();
1283   bool WaveRelease = MI.getOperand(8).getImm() != 0;
1284   bool WaveDone = MI.getOperand(9).getImm() != 0;
1285 
1286   if (WaveDone && !WaveRelease)
1287     report_fatal_error("ds_ordered_count: wave_done requires wave_release");
1288 
1289   unsigned OrderedCountIndex = IndexOperand & 0x3f;
1290   IndexOperand &= ~0x3f;
1291   unsigned CountDw = 0;
1292 
1293   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
1294     CountDw = (IndexOperand >> 24) & 0xf;
1295     IndexOperand &= ~(0xf << 24);
1296 
1297     if (CountDw < 1 || CountDw > 4) {
1298       report_fatal_error(
1299         "ds_ordered_count: dword count must be between 1 and 4");
1300     }
1301   }
1302 
1303   if (IndexOperand)
1304     report_fatal_error("ds_ordered_count: bad index operand");
1305 
1306   unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
1307   unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF);
1308 
1309   unsigned Offset0 = OrderedCountIndex << 2;
1310   unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
1311                      (Instruction << 4);
1312 
1313   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
1314     Offset1 |= (CountDw - 1) << 6;
1315 
1316   unsigned Offset = Offset0 | (Offset1 << 8);
1317 
1318   Register M0Val = MI.getOperand(2).getReg();
1319   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1320     .addReg(M0Val);
1321 
1322   Register DstReg = MI.getOperand(0).getReg();
1323   Register ValReg = MI.getOperand(3).getReg();
1324   MachineInstrBuilder DS =
1325     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
1326       .addReg(ValReg)
1327       .addImm(Offset)
1328       .cloneMemRefs(MI);
1329 
1330   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
1331     return false;
1332 
1333   bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
1334   MI.eraseFromParent();
1335   return Ret;
1336 }
1337 
1338 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
1339   switch (IntrID) {
1340   case Intrinsic::amdgcn_ds_gws_init:
1341     return AMDGPU::DS_GWS_INIT;
1342   case Intrinsic::amdgcn_ds_gws_barrier:
1343     return AMDGPU::DS_GWS_BARRIER;
1344   case Intrinsic::amdgcn_ds_gws_sema_v:
1345     return AMDGPU::DS_GWS_SEMA_V;
1346   case Intrinsic::amdgcn_ds_gws_sema_br:
1347     return AMDGPU::DS_GWS_SEMA_BR;
1348   case Intrinsic::amdgcn_ds_gws_sema_p:
1349     return AMDGPU::DS_GWS_SEMA_P;
1350   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1351     return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
1352   default:
1353     llvm_unreachable("not a gws intrinsic");
1354   }
1355 }
1356 
1357 bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
1358                                                      Intrinsic::ID IID) const {
1359   if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
1360       !STI.hasGWSSemaReleaseAll())
1361     return false;
1362 
1363   // intrinsic ID, vsrc, offset
1364   const bool HasVSrc = MI.getNumOperands() == 3;
1365   assert(HasVSrc || MI.getNumOperands() == 2);
1366 
1367   Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
1368   const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
1369   if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
1370     return false;
1371 
1372   MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1373   assert(OffsetDef);
1374 
1375   unsigned ImmOffset;
1376 
1377   MachineBasicBlock *MBB = MI.getParent();
1378   const DebugLoc &DL = MI.getDebugLoc();
1379 
1380   MachineInstr *Readfirstlane = nullptr;
1381 
1382   // If we legalized the VGPR input, strip out the readfirstlane to analyze the
1383   // incoming offset, in case there's an add of a constant. We'll have to put it
1384   // back later.
1385   if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
1386     Readfirstlane = OffsetDef;
1387     BaseOffset = OffsetDef->getOperand(1).getReg();
1388     OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1389   }
1390 
1391   if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
1392     // If we have a constant offset, try to use the 0 in m0 as the base.
1393     // TODO: Look into changing the default m0 initialization value. If the
1394     // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
1395     // the immediate offset.
1396 
1397     ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
1398     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1399       .addImm(0);
1400   } else {
1401     std::tie(BaseOffset, ImmOffset) =
1402         AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset);
1403 
1404     if (Readfirstlane) {
1405       // We have the constant offset now, so put the readfirstlane back on the
1406       // variable component.
1407       if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
1408         return false;
1409 
1410       Readfirstlane->getOperand(1).setReg(BaseOffset);
1411       BaseOffset = Readfirstlane->getOperand(0).getReg();
1412     } else {
1413       if (!RBI.constrainGenericRegister(BaseOffset,
1414                                         AMDGPU::SReg_32RegClass, *MRI))
1415         return false;
1416     }
1417 
1418     Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1419     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
1420       .addReg(BaseOffset)
1421       .addImm(16);
1422 
1423     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1424       .addReg(M0Base);
1425   }
1426 
1427   // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
1428   // offset field) % 64. Some versions of the programming guide omit the m0
1429   // part, or claim it's from offset 0.
1430   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));
1431 
1432   if (HasVSrc) {
1433     Register VSrc = MI.getOperand(1).getReg();
1434 
1435     if (STI.needsAlignedVGPRs()) {
1436       // Add implicit aligned super-reg to force alignment on the data operand.
1437       Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1438       BuildMI(*MBB, &*MIB, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
1439       Register NewVR =
1440           MRI->createVirtualRegister(&AMDGPU::VReg_64_Align2RegClass);
1441       BuildMI(*MBB, &*MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), NewVR)
1442           .addReg(VSrc, 0, MI.getOperand(1).getSubReg())
1443           .addImm(AMDGPU::sub0)
1444           .addReg(Undef)
1445           .addImm(AMDGPU::sub1);
1446       MIB.addReg(NewVR, 0, AMDGPU::sub0);
1447       MIB.addReg(NewVR, RegState::Implicit);
1448     } else {
1449       MIB.addReg(VSrc);
1450     }
1451 
1452     if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
1453       return false;
1454   }
1455 
1456   MIB.addImm(ImmOffset)
1457      .cloneMemRefs(MI);
1458 
1459   MI.eraseFromParent();
1460   return true;
1461 }
1462 
1463 bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
1464                                                       bool IsAppend) const {
1465   Register PtrBase = MI.getOperand(2).getReg();
1466   LLT PtrTy = MRI->getType(PtrBase);
1467   bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
1468 
1469   unsigned Offset;
1470   std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));
1471 
1472   // TODO: Should this try to look through readfirstlane like GWS?
1473   if (!isDSOffsetLegal(PtrBase, Offset)) {
1474     PtrBase = MI.getOperand(2).getReg();
1475     Offset = 0;
1476   }
1477 
1478   MachineBasicBlock *MBB = MI.getParent();
1479   const DebugLoc &DL = MI.getDebugLoc();
1480   const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
1481 
1482   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1483     .addReg(PtrBase);
1484   if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI))
1485     return false;
1486 
1487   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
1488     .addImm(Offset)
1489     .addImm(IsGDS ? -1 : 0)
1490     .cloneMemRefs(MI);
1491   MI.eraseFromParent();
1492   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1493 }
1494 
1495 bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const {
1496   if (TM.getOptLevel() > CodeGenOpt::None) {
1497     unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second;
1498     if (WGSize <= STI.getWavefrontSize()) {
1499       MachineBasicBlock *MBB = MI.getParent();
1500       const DebugLoc &DL = MI.getDebugLoc();
1501       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER));
1502       MI.eraseFromParent();
1503       return true;
1504     }
1505   }
1506   return selectImpl(MI, *CoverageInfo);
1507 }
1508 
1509 static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
1510                          bool &IsTexFail) {
1511   if (TexFailCtrl)
1512     IsTexFail = true;
1513 
1514   TFE = (TexFailCtrl & 0x1) ? true : false;
1515   TexFailCtrl &= ~(uint64_t)0x1;
1516   LWE = (TexFailCtrl & 0x2) ? true : false;
1517   TexFailCtrl &= ~(uint64_t)0x2;
1518 
1519   return TexFailCtrl == 0;
1520 }
1521 
1522 bool AMDGPUInstructionSelector::selectImageIntrinsic(
1523   MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
1524   MachineBasicBlock *MBB = MI.getParent();
1525   const DebugLoc &DL = MI.getDebugLoc();
1526 
1527   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
1528     AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
1529 
1530   const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
1531   unsigned IntrOpcode = Intr->BaseOpcode;
1532   const bool IsGFX10Plus = AMDGPU::isGFX10Plus(STI);
1533 
1534   const unsigned ArgOffset = MI.getNumExplicitDefs() + 1;
1535 
1536   Register VDataIn, VDataOut;
1537   LLT VDataTy;
1538   int NumVDataDwords = -1;
1539   bool IsD16 = MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16 ||
1540                MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16;
1541 
1542   bool Unorm;
1543   if (!BaseOpcode->Sampler)
1544     Unorm = true;
1545   else
1546     Unorm = MI.getOperand(ArgOffset + Intr->UnormIndex).getImm() != 0;
1547 
1548   bool TFE;
1549   bool LWE;
1550   bool IsTexFail = false;
1551   if (!parseTexFail(MI.getOperand(ArgOffset + Intr->TexFailCtrlIndex).getImm(),
1552                     TFE, LWE, IsTexFail))
1553     return false;
1554 
1555   const int Flags = MI.getOperand(ArgOffset + Intr->NumArgs).getImm();
1556   const bool IsA16 = (Flags & 1) != 0;
1557   const bool IsG16 = (Flags & 2) != 0;
1558 
1559   // A16 implies 16 bit gradients if subtarget doesn't support G16
1560   if (IsA16 && !STI.hasG16() && !IsG16)
1561     return false;
1562 
1563   unsigned DMask = 0;
1564   unsigned DMaskLanes = 0;
1565 
1566   if (BaseOpcode->Atomic) {
1567     VDataOut = MI.getOperand(0).getReg();
1568     VDataIn = MI.getOperand(2).getReg();
1569     LLT Ty = MRI->getType(VDataIn);
1570 
1571     // Be careful to allow atomic swap on 16-bit element vectors.
1572     const bool Is64Bit = BaseOpcode->AtomicX2 ?
1573       Ty.getSizeInBits() == 128 :
1574       Ty.getSizeInBits() == 64;
1575 
1576     if (BaseOpcode->AtomicX2) {
1577       assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister);
1578 
1579       DMask = Is64Bit ? 0xf : 0x3;
1580       NumVDataDwords = Is64Bit ? 4 : 2;
1581     } else {
1582       DMask = Is64Bit ? 0x3 : 0x1;
1583       NumVDataDwords = Is64Bit ? 2 : 1;
1584     }
1585   } else {
1586     DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm();
1587     DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
1588 
1589     if (BaseOpcode->Store) {
1590       VDataIn = MI.getOperand(1).getReg();
1591       VDataTy = MRI->getType(VDataIn);
1592       NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
1593     } else {
1594       VDataOut = MI.getOperand(0).getReg();
1595       VDataTy = MRI->getType(VDataOut);
1596       NumVDataDwords = DMaskLanes;
1597 
1598       if (IsD16 && !STI.hasUnpackedD16VMem())
1599         NumVDataDwords = (DMaskLanes + 1) / 2;
1600     }
1601   }
1602 
1603   // Set G16 opcode
1604   if (IsG16 && !IsA16) {
1605     const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
1606         AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode);
1607     assert(G16MappingInfo);
1608     IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16
1609   }
1610 
1611   // TODO: Check this in verifier.
1612   assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");
1613 
1614   unsigned CPol = MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm();
1615   if (BaseOpcode->Atomic)
1616     CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization
1617   if (CPol & ~AMDGPU::CPol::ALL)
1618     return false;
1619 
1620   int NumVAddrRegs = 0;
1621   int NumVAddrDwords = 0;
1622   for (unsigned I = Intr->VAddrStart; I < Intr->VAddrEnd; I++) {
1623     // Skip the $noregs and 0s inserted during legalization.
1624     MachineOperand &AddrOp = MI.getOperand(ArgOffset + I);
1625     if (!AddrOp.isReg())
1626       continue; // XXX - Break?
1627 
1628     Register Addr = AddrOp.getReg();
1629     if (!Addr)
1630       break;
1631 
1632     ++NumVAddrRegs;
1633     NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32;
1634   }
1635 
1636   // The legalizer preprocessed the intrinsic arguments. If we aren't using
1637   // NSA, these should have been packed into a single value in the first
1638   // address register
1639   const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs;
1640   if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) {
1641     LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n");
1642     return false;
1643   }
1644 
1645   if (IsTexFail)
1646     ++NumVDataDwords;
1647 
1648   int Opcode = -1;
1649   if (IsGFX10Plus) {
1650     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1651                                    UseNSA ? AMDGPU::MIMGEncGfx10NSA
1652                                           : AMDGPU::MIMGEncGfx10Default,
1653                                    NumVDataDwords, NumVAddrDwords);
1654   } else {
1655     if (STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
1656       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
1657                                      NumVDataDwords, NumVAddrDwords);
1658     if (Opcode == -1)
1659       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
1660                                      NumVDataDwords, NumVAddrDwords);
1661   }
1662   assert(Opcode != -1);
1663 
1664   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode))
1665     .cloneMemRefs(MI);
1666 
1667   if (VDataOut) {
1668     if (BaseOpcode->AtomicX2) {
1669       const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64;
1670 
1671       Register TmpReg = MRI->createVirtualRegister(
1672         Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
1673       unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
1674 
1675       MIB.addDef(TmpReg);
1676       if (!MRI->use_empty(VDataOut)) {
1677         BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut)
1678             .addReg(TmpReg, RegState::Kill, SubReg);
1679       }
1680 
1681     } else {
1682       MIB.addDef(VDataOut); // vdata output
1683     }
1684   }
1685 
1686   if (VDataIn)
1687     MIB.addReg(VDataIn); // vdata input
1688 
1689   for (int I = 0; I != NumVAddrRegs; ++I) {
1690     MachineOperand &SrcOp = MI.getOperand(ArgOffset + Intr->VAddrStart + I);
1691     if (SrcOp.isReg()) {
1692       assert(SrcOp.getReg() != 0);
1693       MIB.addReg(SrcOp.getReg());
1694     }
1695   }
1696 
1697   MIB.addReg(MI.getOperand(ArgOffset + Intr->RsrcIndex).getReg());
1698   if (BaseOpcode->Sampler)
1699     MIB.addReg(MI.getOperand(ArgOffset + Intr->SampIndex).getReg());
1700 
1701   MIB.addImm(DMask); // dmask
1702 
1703   if (IsGFX10Plus)
1704     MIB.addImm(DimInfo->Encoding);
1705   MIB.addImm(Unorm);
1706 
1707   MIB.addImm(CPol);
1708   MIB.addImm(IsA16 &&  // a16 or r128
1709              STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0);
1710   if (IsGFX10Plus)
1711     MIB.addImm(IsA16 ? -1 : 0);
1712 
1713   MIB.addImm(TFE); // tfe
1714   MIB.addImm(LWE); // lwe
1715   if (!IsGFX10Plus)
1716     MIB.addImm(DimInfo->DA ? -1 : 0);
1717   if (BaseOpcode->HasD16)
1718     MIB.addImm(IsD16 ? -1 : 0);
1719 
1720   if (IsTexFail) {
1721     // An image load instruction with TFE/LWE only conditionally writes to its
1722     // result registers. Initialize them to zero so that we always get well
1723     // defined result values.
1724     assert(VDataOut && !VDataIn);
1725     Register Tied = MRI->cloneVirtualRegister(VDataOut);
1726     Register Zero = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1727     BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::V_MOV_B32_e32), Zero)
1728       .addImm(0);
1729     auto Parts = TRI.getRegSplitParts(MRI->getRegClass(Tied), 4);
1730     if (STI.usePRTStrictNull()) {
1731       // With enable-prt-strict-null enabled, initialize all result registers to
1732       // zero.
1733       auto RegSeq =
1734           BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1735       for (auto Sub : Parts)
1736         RegSeq.addReg(Zero).addImm(Sub);
1737     } else {
1738       // With enable-prt-strict-null disabled, only initialize the extra TFE/LWE
1739       // result register.
1740       Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1741       BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
1742       auto RegSeq =
1743           BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1744       for (auto Sub : Parts.drop_back(1))
1745         RegSeq.addReg(Undef).addImm(Sub);
1746       RegSeq.addReg(Zero).addImm(Parts.back());
1747     }
1748     MIB.addReg(Tied, RegState::Implicit);
1749     MIB->tieOperands(0, MIB->getNumOperands() - 1);
1750   }
1751 
1752   MI.eraseFromParent();
1753   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1754 }
1755 
1756 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
1757     MachineInstr &I) const {
1758   unsigned IntrinsicID = I.getIntrinsicID();
1759   switch (IntrinsicID) {
1760   case Intrinsic::amdgcn_end_cf:
1761     return selectEndCfIntrinsic(I);
1762   case Intrinsic::amdgcn_ds_ordered_add:
1763   case Intrinsic::amdgcn_ds_ordered_swap:
1764     return selectDSOrderedIntrinsic(I, IntrinsicID);
1765   case Intrinsic::amdgcn_ds_gws_init:
1766   case Intrinsic::amdgcn_ds_gws_barrier:
1767   case Intrinsic::amdgcn_ds_gws_sema_v:
1768   case Intrinsic::amdgcn_ds_gws_sema_br:
1769   case Intrinsic::amdgcn_ds_gws_sema_p:
1770   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1771     return selectDSGWSIntrinsic(I, IntrinsicID);
1772   case Intrinsic::amdgcn_ds_append:
1773     return selectDSAppendConsume(I, true);
1774   case Intrinsic::amdgcn_ds_consume:
1775     return selectDSAppendConsume(I, false);
1776   case Intrinsic::amdgcn_s_barrier:
1777     return selectSBarrier(I);
1778   case Intrinsic::amdgcn_global_atomic_fadd:
1779     return selectGlobalAtomicFadd(I, I.getOperand(2), I.getOperand(3));
1780   default: {
1781     return selectImpl(I, *CoverageInfo);
1782   }
1783   }
1784 }
1785 
1786 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
1787   if (selectImpl(I, *CoverageInfo))
1788     return true;
1789 
1790   MachineBasicBlock *BB = I.getParent();
1791   const DebugLoc &DL = I.getDebugLoc();
1792 
1793   Register DstReg = I.getOperand(0).getReg();
1794   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
1795   assert(Size <= 32 || Size == 64);
1796   const MachineOperand &CCOp = I.getOperand(1);
1797   Register CCReg = CCOp.getReg();
1798   if (!isVCC(CCReg, *MRI)) {
1799     unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
1800                                          AMDGPU::S_CSELECT_B32;
1801     MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
1802             .addReg(CCReg);
1803 
1804     // The generic constrainSelectedInstRegOperands doesn't work for the scc register
1805     // bank, because it does not cover the register class that we used to represent
1806     // for it.  So we need to manually set the register class here.
1807     if (!MRI->getRegClassOrNull(CCReg))
1808         MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
1809     MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
1810             .add(I.getOperand(2))
1811             .add(I.getOperand(3));
1812 
1813     bool Ret = false;
1814     Ret |= constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1815     Ret |= constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
1816     I.eraseFromParent();
1817     return Ret;
1818   }
1819 
1820   // Wide VGPR select should have been split in RegBankSelect.
1821   if (Size > 32)
1822     return false;
1823 
1824   MachineInstr *Select =
1825       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1826               .addImm(0)
1827               .add(I.getOperand(3))
1828               .addImm(0)
1829               .add(I.getOperand(2))
1830               .add(I.getOperand(1));
1831 
1832   bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1833   I.eraseFromParent();
1834   return Ret;
1835 }
1836 
1837 static int sizeToSubRegIndex(unsigned Size) {
1838   switch (Size) {
1839   case 32:
1840     return AMDGPU::sub0;
1841   case 64:
1842     return AMDGPU::sub0_sub1;
1843   case 96:
1844     return AMDGPU::sub0_sub1_sub2;
1845   case 128:
1846     return AMDGPU::sub0_sub1_sub2_sub3;
1847   case 256:
1848     return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
1849   default:
1850     if (Size < 32)
1851       return AMDGPU::sub0;
1852     if (Size > 256)
1853       return -1;
1854     return sizeToSubRegIndex(PowerOf2Ceil(Size));
1855   }
1856 }
1857 
1858 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
1859   Register DstReg = I.getOperand(0).getReg();
1860   Register SrcReg = I.getOperand(1).getReg();
1861   const LLT DstTy = MRI->getType(DstReg);
1862   const LLT SrcTy = MRI->getType(SrcReg);
1863   const LLT S1 = LLT::scalar(1);
1864 
1865   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1866   const RegisterBank *DstRB;
1867   if (DstTy == S1) {
1868     // This is a special case. We don't treat s1 for legalization artifacts as
1869     // vcc booleans.
1870     DstRB = SrcRB;
1871   } else {
1872     DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1873     if (SrcRB != DstRB)
1874       return false;
1875   }
1876 
1877   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
1878 
1879   unsigned DstSize = DstTy.getSizeInBits();
1880   unsigned SrcSize = SrcTy.getSizeInBits();
1881 
1882   const TargetRegisterClass *SrcRC =
1883       TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB);
1884   const TargetRegisterClass *DstRC =
1885       TRI.getRegClassForSizeOnBank(DstSize, *DstRB);
1886   if (!SrcRC || !DstRC)
1887     return false;
1888 
1889   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1890       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1891     LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
1892     return false;
1893   }
1894 
1895   if (DstTy == LLT::fixed_vector(2, 16) && SrcTy == LLT::fixed_vector(2, 32)) {
1896     MachineBasicBlock *MBB = I.getParent();
1897     const DebugLoc &DL = I.getDebugLoc();
1898 
1899     Register LoReg = MRI->createVirtualRegister(DstRC);
1900     Register HiReg = MRI->createVirtualRegister(DstRC);
1901     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg)
1902       .addReg(SrcReg, 0, AMDGPU::sub0);
1903     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg)
1904       .addReg(SrcReg, 0, AMDGPU::sub1);
1905 
1906     if (IsVALU && STI.hasSDWA()) {
1907       // Write the low 16-bits of the high element into the high 16-bits of the
1908       // low element.
1909       MachineInstr *MovSDWA =
1910         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
1911         .addImm(0)                             // $src0_modifiers
1912         .addReg(HiReg)                         // $src0
1913         .addImm(0)                             // $clamp
1914         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
1915         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
1916         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
1917         .addReg(LoReg, RegState::Implicit);
1918       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
1919     } else {
1920       Register TmpReg0 = MRI->createVirtualRegister(DstRC);
1921       Register TmpReg1 = MRI->createVirtualRegister(DstRC);
1922       Register ImmReg = MRI->createVirtualRegister(DstRC);
1923       if (IsVALU) {
1924         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0)
1925           .addImm(16)
1926           .addReg(HiReg);
1927       } else {
1928         BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0)
1929           .addReg(HiReg)
1930           .addImm(16);
1931       }
1932 
1933       unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1934       unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
1935       unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32;
1936 
1937       BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg)
1938         .addImm(0xffff);
1939       BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1)
1940         .addReg(LoReg)
1941         .addReg(ImmReg);
1942       BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg)
1943         .addReg(TmpReg0)
1944         .addReg(TmpReg1);
1945     }
1946 
1947     I.eraseFromParent();
1948     return true;
1949   }
1950 
1951   if (!DstTy.isScalar())
1952     return false;
1953 
1954   if (SrcSize > 32) {
1955     int SubRegIdx = sizeToSubRegIndex(DstSize);
1956     if (SubRegIdx == -1)
1957       return false;
1958 
1959     // Deal with weird cases where the class only partially supports the subreg
1960     // index.
1961     const TargetRegisterClass *SrcWithSubRC
1962       = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
1963     if (!SrcWithSubRC)
1964       return false;
1965 
1966     if (SrcWithSubRC != SrcRC) {
1967       if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI))
1968         return false;
1969     }
1970 
1971     I.getOperand(1).setSubReg(SubRegIdx);
1972   }
1973 
1974   I.setDesc(TII.get(TargetOpcode::COPY));
1975   return true;
1976 }
1977 
1978 /// \returns true if a bitmask for \p Size bits will be an inline immediate.
1979 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
1980   Mask = maskTrailingOnes<unsigned>(Size);
1981   int SignedMask = static_cast<int>(Mask);
1982   return SignedMask >= -16 && SignedMask <= 64;
1983 }
1984 
1985 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
1986 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
1987   Register Reg, const MachineRegisterInfo &MRI,
1988   const TargetRegisterInfo &TRI) const {
1989   const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
1990   if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
1991     return RB;
1992 
1993   // Ignore the type, since we don't use vcc in artifacts.
1994   if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
1995     return &RBI.getRegBankFromRegClass(*RC, LLT());
1996   return nullptr;
1997 }
1998 
1999 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
2000   bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG;
2001   bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg;
2002   const DebugLoc &DL = I.getDebugLoc();
2003   MachineBasicBlock &MBB = *I.getParent();
2004   const Register DstReg = I.getOperand(0).getReg();
2005   const Register SrcReg = I.getOperand(1).getReg();
2006 
2007   const LLT DstTy = MRI->getType(DstReg);
2008   const LLT SrcTy = MRI->getType(SrcReg);
2009   const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ?
2010     I.getOperand(2).getImm() : SrcTy.getSizeInBits();
2011   const unsigned DstSize = DstTy.getSizeInBits();
2012   if (!DstTy.isScalar())
2013     return false;
2014 
2015   // Artifact casts should never use vcc.
2016   const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
2017 
2018   // FIXME: This should probably be illegal and split earlier.
2019   if (I.getOpcode() == AMDGPU::G_ANYEXT) {
2020     if (DstSize <= 32)
2021       return selectCOPY(I);
2022 
2023     const TargetRegisterClass *SrcRC =
2024         TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank);
2025     const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
2026     const TargetRegisterClass *DstRC =
2027         TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
2028 
2029     Register UndefReg = MRI->createVirtualRegister(SrcRC);
2030     BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2031     BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2032       .addReg(SrcReg)
2033       .addImm(AMDGPU::sub0)
2034       .addReg(UndefReg)
2035       .addImm(AMDGPU::sub1);
2036     I.eraseFromParent();
2037 
2038     return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) &&
2039            RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI);
2040   }
2041 
2042   if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
2043     // 64-bit should have been split up in RegBankSelect
2044 
2045     // Try to use an and with a mask if it will save code size.
2046     unsigned Mask;
2047     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2048       MachineInstr *ExtI =
2049       BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
2050         .addImm(Mask)
2051         .addReg(SrcReg);
2052       I.eraseFromParent();
2053       return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2054     }
2055 
2056     const unsigned BFE = Signed ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
2057     MachineInstr *ExtI =
2058       BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
2059       .addReg(SrcReg)
2060       .addImm(0) // Offset
2061       .addImm(SrcSize); // Width
2062     I.eraseFromParent();
2063     return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2064   }
2065 
2066   if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
2067     const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ?
2068       AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass;
2069     if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI))
2070       return false;
2071 
2072     if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
2073       const unsigned SextOpc = SrcSize == 8 ?
2074         AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
2075       BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
2076         .addReg(SrcReg);
2077       I.eraseFromParent();
2078       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2079     }
2080 
2081     const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
2082     const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
2083 
2084     // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16]= width.
2085     if (DstSize > 32 && (SrcSize <= 32 || InReg)) {
2086       // We need a 64-bit register source, but the high bits don't matter.
2087       Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
2088       Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2089       unsigned SubReg = InReg ? AMDGPU::sub0 : 0;
2090 
2091       BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2092       BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
2093         .addReg(SrcReg, 0, SubReg)
2094         .addImm(AMDGPU::sub0)
2095         .addReg(UndefReg)
2096         .addImm(AMDGPU::sub1);
2097 
2098       BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
2099         .addReg(ExtReg)
2100         .addImm(SrcSize << 16);
2101 
2102       I.eraseFromParent();
2103       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
2104     }
2105 
2106     unsigned Mask;
2107     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2108       BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
2109         .addReg(SrcReg)
2110         .addImm(Mask);
2111     } else {
2112       BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
2113         .addReg(SrcReg)
2114         .addImm(SrcSize << 16);
2115     }
2116 
2117     I.eraseFromParent();
2118     return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2119   }
2120 
2121   return false;
2122 }
2123 
2124 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
2125   MachineBasicBlock *BB = I.getParent();
2126   MachineOperand &ImmOp = I.getOperand(1);
2127   Register DstReg = I.getOperand(0).getReg();
2128   unsigned Size = MRI->getType(DstReg).getSizeInBits();
2129 
2130   // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
2131   if (ImmOp.isFPImm()) {
2132     const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
2133     ImmOp.ChangeToImmediate(Imm.getZExtValue());
2134   } else if (ImmOp.isCImm()) {
2135     ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue());
2136   } else {
2137     llvm_unreachable("Not supported by g_constants");
2138   }
2139 
2140   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2141   const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID;
2142 
2143   unsigned Opcode;
2144   if (DstRB->getID() == AMDGPU::VCCRegBankID) {
2145     Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
2146   } else {
2147     Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
2148 
2149     // We should never produce s1 values on banks other than VCC. If the user of
2150     // this already constrained the register, we may incorrectly think it's VCC
2151     // if it wasn't originally.
2152     if (Size == 1)
2153       return false;
2154   }
2155 
2156   if (Size != 64) {
2157     I.setDesc(TII.get(Opcode));
2158     I.addImplicitDefUseOperands(*MF);
2159     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2160   }
2161 
2162   const DebugLoc &DL = I.getDebugLoc();
2163 
2164   APInt Imm(Size, I.getOperand(1).getImm());
2165 
2166   MachineInstr *ResInst;
2167   if (IsSgpr && TII.isInlineConstant(Imm)) {
2168     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
2169       .addImm(I.getOperand(1).getImm());
2170   } else {
2171     const TargetRegisterClass *RC = IsSgpr ?
2172       &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
2173     Register LoReg = MRI->createVirtualRegister(RC);
2174     Register HiReg = MRI->createVirtualRegister(RC);
2175 
2176     BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
2177       .addImm(Imm.trunc(32).getZExtValue());
2178 
2179     BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
2180       .addImm(Imm.ashr(32).getZExtValue());
2181 
2182     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2183       .addReg(LoReg)
2184       .addImm(AMDGPU::sub0)
2185       .addReg(HiReg)
2186       .addImm(AMDGPU::sub1);
2187   }
2188 
2189   // We can't call constrainSelectedInstRegOperands here, because it doesn't
2190   // work for target independent opcodes
2191   I.eraseFromParent();
2192   const TargetRegisterClass *DstRC =
2193     TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
2194   if (!DstRC)
2195     return true;
2196   return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
2197 }
2198 
2199 bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
2200   // Only manually handle the f64 SGPR case.
2201   //
2202   // FIXME: This is a workaround for 2.5 different tablegen problems. Because
2203   // the bit ops theoretically have a second result due to the implicit def of
2204   // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing
2205   // that is easy by disabling the check. The result works, but uses a
2206   // nonsensical sreg32orlds_and_sreg_1 regclass.
2207   //
2208   // The DAG emitter is more problematic, and incorrectly adds both S_XOR_B32 to
2209   // the variadic REG_SEQUENCE operands.
2210 
2211   Register Dst = MI.getOperand(0).getReg();
2212   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2213   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2214       MRI->getType(Dst) != LLT::scalar(64))
2215     return false;
2216 
2217   Register Src = MI.getOperand(1).getReg();
2218   MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI);
2219   if (Fabs)
2220     Src = Fabs->getOperand(1).getReg();
2221 
2222   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2223       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2224     return false;
2225 
2226   MachineBasicBlock *BB = MI.getParent();
2227   const DebugLoc &DL = MI.getDebugLoc();
2228   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2229   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2230   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2231   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2232 
2233   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2234     .addReg(Src, 0, AMDGPU::sub0);
2235   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2236     .addReg(Src, 0, AMDGPU::sub1);
2237   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2238     .addImm(0x80000000);
2239 
2240   // Set or toggle sign bit.
2241   unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
2242   BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
2243     .addReg(HiReg)
2244     .addReg(ConstReg);
2245   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2246     .addReg(LoReg)
2247     .addImm(AMDGPU::sub0)
2248     .addReg(OpReg)
2249     .addImm(AMDGPU::sub1);
2250   MI.eraseFromParent();
2251   return true;
2252 }
2253 
2254 // FIXME: This is a workaround for the same tablegen problems as G_FNEG
2255 bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {
2256   Register Dst = MI.getOperand(0).getReg();
2257   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2258   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2259       MRI->getType(Dst) != LLT::scalar(64))
2260     return false;
2261 
2262   Register Src = MI.getOperand(1).getReg();
2263   MachineBasicBlock *BB = MI.getParent();
2264   const DebugLoc &DL = MI.getDebugLoc();
2265   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2266   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2267   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2268   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2269 
2270   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2271       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2272     return false;
2273 
2274   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2275     .addReg(Src, 0, AMDGPU::sub0);
2276   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2277     .addReg(Src, 0, AMDGPU::sub1);
2278   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2279     .addImm(0x7fffffff);
2280 
2281   // Clear sign bit.
2282   // TODO: Should this used S_BITSET0_*?
2283   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg)
2284     .addReg(HiReg)
2285     .addReg(ConstReg);
2286   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2287     .addReg(LoReg)
2288     .addImm(AMDGPU::sub0)
2289     .addReg(OpReg)
2290     .addImm(AMDGPU::sub1);
2291 
2292   MI.eraseFromParent();
2293   return true;
2294 }
2295 
2296 static bool isConstant(const MachineInstr &MI) {
2297   return MI.getOpcode() == TargetOpcode::G_CONSTANT;
2298 }
2299 
2300 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
2301     const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
2302 
2303   const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
2304 
2305   assert(PtrMI);
2306 
2307   if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
2308     return;
2309 
2310   GEPInfo GEPInfo(*PtrMI);
2311 
2312   for (unsigned i = 1; i != 3; ++i) {
2313     const MachineOperand &GEPOp = PtrMI->getOperand(i);
2314     const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
2315     assert(OpDef);
2316     if (i == 2 && isConstant(*OpDef)) {
2317       // TODO: Could handle constant base + variable offset, but a combine
2318       // probably should have commuted it.
2319       assert(GEPInfo.Imm == 0);
2320       GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
2321       continue;
2322     }
2323     const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
2324     if (OpBank->getID() == AMDGPU::SGPRRegBankID)
2325       GEPInfo.SgprParts.push_back(GEPOp.getReg());
2326     else
2327       GEPInfo.VgprParts.push_back(GEPOp.getReg());
2328   }
2329 
2330   AddrInfo.push_back(GEPInfo);
2331   getAddrModeInfo(*PtrMI, MRI, AddrInfo);
2332 }
2333 
2334 bool AMDGPUInstructionSelector::isSGPR(Register Reg) const {
2335   return RBI.getRegBank(Reg, *MRI, TRI)->getID() == AMDGPU::SGPRRegBankID;
2336 }
2337 
2338 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
2339   if (!MI.hasOneMemOperand())
2340     return false;
2341 
2342   const MachineMemOperand *MMO = *MI.memoperands_begin();
2343   const Value *Ptr = MMO->getValue();
2344 
2345   // UndefValue means this is a load of a kernel input.  These are uniform.
2346   // Sometimes LDS instructions have constant pointers.
2347   // If Ptr is null, then that means this mem operand contains a
2348   // PseudoSourceValue like GOT.
2349   if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
2350       isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
2351     return true;
2352 
2353   if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
2354     return true;
2355 
2356   const Instruction *I = dyn_cast<Instruction>(Ptr);
2357   return I && I->getMetadata("amdgpu.uniform");
2358 }
2359 
2360 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
2361   for (const GEPInfo &GEPInfo : AddrInfo) {
2362     if (!GEPInfo.VgprParts.empty())
2363       return true;
2364   }
2365   return false;
2366 }
2367 
2368 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
2369   const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2370   unsigned AS = PtrTy.getAddressSpace();
2371   if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
2372       STI.ldsRequiresM0Init()) {
2373     MachineBasicBlock *BB = I.getParent();
2374 
2375     // If DS instructions require M0 initialization, insert it before selecting.
2376     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2377       .addImm(-1);
2378   }
2379 }
2380 
2381 bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW(
2382   MachineInstr &I) const {
2383   if (I.getOpcode() == TargetOpcode::G_ATOMICRMW_FADD) {
2384     const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2385     unsigned AS = PtrTy.getAddressSpace();
2386     if (AS == AMDGPUAS::GLOBAL_ADDRESS)
2387       return selectGlobalAtomicFadd(I, I.getOperand(1), I.getOperand(2));
2388   }
2389 
2390   initM0(I);
2391   return selectImpl(I, *CoverageInfo);
2392 }
2393 
2394 static bool isVCmpResult(Register Reg, MachineRegisterInfo &MRI) {
2395   if (Reg.isPhysical())
2396     return false;
2397 
2398   MachineInstr &MI = *MRI.getUniqueVRegDef(Reg);
2399   const unsigned Opcode = MI.getOpcode();
2400 
2401   if (Opcode == AMDGPU::COPY)
2402     return isVCmpResult(MI.getOperand(1).getReg(), MRI);
2403 
2404   if (Opcode == AMDGPU::G_AND || Opcode == AMDGPU::G_OR ||
2405       Opcode == AMDGPU::G_XOR)
2406     return isVCmpResult(MI.getOperand(1).getReg(), MRI) &&
2407            isVCmpResult(MI.getOperand(2).getReg(), MRI);
2408 
2409   if (Opcode == TargetOpcode::G_INTRINSIC)
2410     return MI.getIntrinsicID() == Intrinsic::amdgcn_class;
2411 
2412   return Opcode == AMDGPU::G_ICMP || Opcode == AMDGPU::G_FCMP;
2413 }
2414 
2415 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
2416   MachineBasicBlock *BB = I.getParent();
2417   MachineOperand &CondOp = I.getOperand(0);
2418   Register CondReg = CondOp.getReg();
2419   const DebugLoc &DL = I.getDebugLoc();
2420 
2421   unsigned BrOpcode;
2422   Register CondPhysReg;
2423   const TargetRegisterClass *ConstrainRC;
2424 
2425   // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
2426   // whether the branch is uniform when selecting the instruction. In
2427   // GlobalISel, we should push that decision into RegBankSelect. Assume for now
2428   // RegBankSelect knows what it's doing if the branch condition is scc, even
2429   // though it currently does not.
2430   if (!isVCC(CondReg, *MRI)) {
2431     if (MRI->getType(CondReg) != LLT::scalar(32))
2432       return false;
2433 
2434     CondPhysReg = AMDGPU::SCC;
2435     BrOpcode = AMDGPU::S_CBRANCH_SCC1;
2436     ConstrainRC = &AMDGPU::SReg_32RegClass;
2437   } else {
2438     // FIXME: Should scc->vcc copies and with exec?
2439 
2440     // Unless the value of CondReg is a result of a V_CMP* instruction then we
2441     // need to insert an and with exec.
2442     if (!isVCmpResult(CondReg, *MRI)) {
2443       const bool Is64 = STI.isWave64();
2444       const unsigned Opcode = Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
2445       const Register Exec = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
2446 
2447       Register TmpReg = MRI->createVirtualRegister(TRI.getBoolRC());
2448       BuildMI(*BB, &I, DL, TII.get(Opcode), TmpReg)
2449           .addReg(CondReg)
2450           .addReg(Exec);
2451       CondReg = TmpReg;
2452     }
2453 
2454     CondPhysReg = TRI.getVCC();
2455     BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
2456     ConstrainRC = TRI.getBoolRC();
2457   }
2458 
2459   if (!MRI->getRegClassOrNull(CondReg))
2460     MRI->setRegClass(CondReg, ConstrainRC);
2461 
2462   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
2463     .addReg(CondReg);
2464   BuildMI(*BB, &I, DL, TII.get(BrOpcode))
2465     .addMBB(I.getOperand(1).getMBB());
2466 
2467   I.eraseFromParent();
2468   return true;
2469 }
2470 
2471 bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE(
2472   MachineInstr &I) const {
2473   Register DstReg = I.getOperand(0).getReg();
2474   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2475   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2476   I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
2477   if (IsVGPR)
2478     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
2479 
2480   return RBI.constrainGenericRegister(
2481     DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
2482 }
2483 
2484 bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
2485   Register DstReg = I.getOperand(0).getReg();
2486   Register SrcReg = I.getOperand(1).getReg();
2487   Register MaskReg = I.getOperand(2).getReg();
2488   LLT Ty = MRI->getType(DstReg);
2489   LLT MaskTy = MRI->getType(MaskReg);
2490   MachineBasicBlock *BB = I.getParent();
2491   const DebugLoc &DL = I.getDebugLoc();
2492 
2493   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2494   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2495   const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI);
2496   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2497   if (DstRB != SrcRB) // Should only happen for hand written MIR.
2498     return false;
2499 
2500   // Try to avoid emitting a bit operation when we only need to touch half of
2501   // the 64-bit pointer.
2502   APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zextOrSelf(64);
2503   const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
2504   const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
2505 
2506   const bool CanCopyLow32 = (MaskOnes & MaskLo32) == MaskLo32;
2507   const bool CanCopyHi32 = (MaskOnes & MaskHi32) == MaskHi32;
2508 
2509   if (!IsVGPR && Ty.getSizeInBits() == 64 &&
2510       !CanCopyLow32 && !CanCopyHi32) {
2511     auto MIB = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_AND_B64), DstReg)
2512       .addReg(SrcReg)
2513       .addReg(MaskReg);
2514     I.eraseFromParent();
2515     return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2516   }
2517 
2518   unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
2519   const TargetRegisterClass &RegRC
2520     = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2521 
2522   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB);
2523   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB);
2524   const TargetRegisterClass *MaskRC =
2525       TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB);
2526 
2527   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2528       !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2529       !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI))
2530     return false;
2531 
2532   if (Ty.getSizeInBits() == 32) {
2533     assert(MaskTy.getSizeInBits() == 32 &&
2534            "ptrmask should have been narrowed during legalize");
2535 
2536     BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
2537       .addReg(SrcReg)
2538       .addReg(MaskReg);
2539     I.eraseFromParent();
2540     return true;
2541   }
2542 
2543   Register HiReg = MRI->createVirtualRegister(&RegRC);
2544   Register LoReg = MRI->createVirtualRegister(&RegRC);
2545 
2546   // Extract the subregisters from the source pointer.
2547   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
2548     .addReg(SrcReg, 0, AMDGPU::sub0);
2549   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
2550     .addReg(SrcReg, 0, AMDGPU::sub1);
2551 
2552   Register MaskedLo, MaskedHi;
2553 
2554   if (CanCopyLow32) {
2555     // If all the bits in the low half are 1, we only need a copy for it.
2556     MaskedLo = LoReg;
2557   } else {
2558     // Extract the mask subregister and apply the and.
2559     Register MaskLo = MRI->createVirtualRegister(&RegRC);
2560     MaskedLo = MRI->createVirtualRegister(&RegRC);
2561 
2562     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo)
2563       .addReg(MaskReg, 0, AMDGPU::sub0);
2564     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo)
2565       .addReg(LoReg)
2566       .addReg(MaskLo);
2567   }
2568 
2569   if (CanCopyHi32) {
2570     // If all the bits in the high half are 1, we only need a copy for it.
2571     MaskedHi = HiReg;
2572   } else {
2573     Register MaskHi = MRI->createVirtualRegister(&RegRC);
2574     MaskedHi = MRI->createVirtualRegister(&RegRC);
2575 
2576     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi)
2577       .addReg(MaskReg, 0, AMDGPU::sub1);
2578     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi)
2579       .addReg(HiReg)
2580       .addReg(MaskHi);
2581   }
2582 
2583   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2584     .addReg(MaskedLo)
2585     .addImm(AMDGPU::sub0)
2586     .addReg(MaskedHi)
2587     .addImm(AMDGPU::sub1);
2588   I.eraseFromParent();
2589   return true;
2590 }
2591 
2592 /// Return the register to use for the index value, and the subregister to use
2593 /// for the indirectly accessed register.
2594 static std::pair<Register, unsigned>
2595 computeIndirectRegIndex(MachineRegisterInfo &MRI,
2596                         const SIRegisterInfo &TRI,
2597                         const TargetRegisterClass *SuperRC,
2598                         Register IdxReg,
2599                         unsigned EltSize) {
2600   Register IdxBaseReg;
2601   int Offset;
2602 
2603   std::tie(IdxBaseReg, Offset) = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg);
2604   if (IdxBaseReg == AMDGPU::NoRegister) {
2605     // This will happen if the index is a known constant. This should ordinarily
2606     // be legalized out, but handle it as a register just in case.
2607     assert(Offset == 0);
2608     IdxBaseReg = IdxReg;
2609   }
2610 
2611   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);
2612 
2613   // Skip out of bounds offsets, or else we would end up using an undefined
2614   // register.
2615   if (static_cast<unsigned>(Offset) >= SubRegs.size())
2616     return std::make_pair(IdxReg, SubRegs[0]);
2617   return std::make_pair(IdxBaseReg, SubRegs[Offset]);
2618 }
2619 
2620 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
2621   MachineInstr &MI) const {
2622   Register DstReg = MI.getOperand(0).getReg();
2623   Register SrcReg = MI.getOperand(1).getReg();
2624   Register IdxReg = MI.getOperand(2).getReg();
2625 
2626   LLT DstTy = MRI->getType(DstReg);
2627   LLT SrcTy = MRI->getType(SrcReg);
2628 
2629   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2630   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2631   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2632 
2633   // The index must be scalar. If it wasn't RegBankSelect should have moved this
2634   // into a waterfall loop.
2635   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2636     return false;
2637 
2638   const TargetRegisterClass *SrcRC =
2639       TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB);
2640   const TargetRegisterClass *DstRC =
2641       TRI.getRegClassForTypeOnBank(DstTy, *DstRB);
2642   if (!SrcRC || !DstRC)
2643     return false;
2644   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2645       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2646       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2647     return false;
2648 
2649   MachineBasicBlock *BB = MI.getParent();
2650   const DebugLoc &DL = MI.getDebugLoc();
2651   const bool Is64 = DstTy.getSizeInBits() == 64;
2652 
2653   unsigned SubReg;
2654   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg,
2655                                                      DstTy.getSizeInBits() / 8);
2656 
2657   if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
2658     if (DstTy.getSizeInBits() != 32 && !Is64)
2659       return false;
2660 
2661     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2662       .addReg(IdxReg);
2663 
2664     unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
2665     BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
2666       .addReg(SrcReg, 0, SubReg)
2667       .addReg(SrcReg, RegState::Implicit);
2668     MI.eraseFromParent();
2669     return true;
2670   }
2671 
2672   if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
2673     return false;
2674 
2675   if (!STI.useVGPRIndexMode()) {
2676     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2677       .addReg(IdxReg);
2678     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
2679       .addReg(SrcReg, 0, SubReg)
2680       .addReg(SrcReg, RegState::Implicit);
2681     MI.eraseFromParent();
2682     return true;
2683   }
2684 
2685   const MCInstrDesc &GPRIDXDesc =
2686       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*SrcRC), true);
2687   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2688       .addReg(SrcReg)
2689       .addReg(IdxReg)
2690       .addImm(SubReg);
2691 
2692   MI.eraseFromParent();
2693   return true;
2694 }
2695 
2696 // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
2697 bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
2698   MachineInstr &MI) const {
2699   Register DstReg = MI.getOperand(0).getReg();
2700   Register VecReg = MI.getOperand(1).getReg();
2701   Register ValReg = MI.getOperand(2).getReg();
2702   Register IdxReg = MI.getOperand(3).getReg();
2703 
2704   LLT VecTy = MRI->getType(DstReg);
2705   LLT ValTy = MRI->getType(ValReg);
2706   unsigned VecSize = VecTy.getSizeInBits();
2707   unsigned ValSize = ValTy.getSizeInBits();
2708 
2709   const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
2710   const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
2711   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2712 
2713   assert(VecTy.getElementType() == ValTy);
2714 
2715   // The index must be scalar. If it wasn't RegBankSelect should have moved this
2716   // into a waterfall loop.
2717   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2718     return false;
2719 
2720   const TargetRegisterClass *VecRC =
2721       TRI.getRegClassForTypeOnBank(VecTy, *VecRB);
2722   const TargetRegisterClass *ValRC =
2723       TRI.getRegClassForTypeOnBank(ValTy, *ValRB);
2724 
2725   if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
2726       !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
2727       !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
2728       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2729     return false;
2730 
2731   if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
2732     return false;
2733 
2734   unsigned SubReg;
2735   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg,
2736                                                      ValSize / 8);
2737 
2738   const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
2739                          STI.useVGPRIndexMode();
2740 
2741   MachineBasicBlock *BB = MI.getParent();
2742   const DebugLoc &DL = MI.getDebugLoc();
2743 
2744   if (!IndexMode) {
2745     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2746       .addReg(IdxReg);
2747 
2748     const MCInstrDesc &RegWriteOp = TII.getIndirectRegWriteMovRelPseudo(
2749         VecSize, ValSize, VecRB->getID() == AMDGPU::SGPRRegBankID);
2750     BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
2751         .addReg(VecReg)
2752         .addReg(ValReg)
2753         .addImm(SubReg);
2754     MI.eraseFromParent();
2755     return true;
2756   }
2757 
2758   const MCInstrDesc &GPRIDXDesc =
2759       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false);
2760   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2761       .addReg(VecReg)
2762       .addReg(ValReg)
2763       .addReg(IdxReg)
2764       .addImm(SubReg);
2765 
2766   MI.eraseFromParent();
2767   return true;
2768 }
2769 
2770 static bool isZeroOrUndef(int X) {
2771   return X == 0 || X == -1;
2772 }
2773 
2774 static bool isOneOrUndef(int X) {
2775   return X == 1 || X == -1;
2776 }
2777 
2778 static bool isZeroOrOneOrUndef(int X) {
2779   return X == 0 || X == 1 || X == -1;
2780 }
2781 
2782 // Normalize a VOP3P shuffle mask to refer to the low/high half of a single
2783 // 32-bit register.
2784 static Register normalizeVOP3PMask(int NewMask[2], Register Src0, Register Src1,
2785                                    ArrayRef<int> Mask) {
2786   NewMask[0] = Mask[0];
2787   NewMask[1] = Mask[1];
2788   if (isZeroOrOneOrUndef(Mask[0]) && isZeroOrOneOrUndef(Mask[1]))
2789     return Src0;
2790 
2791   assert(NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1);
2792   assert(NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1);
2793 
2794   // Shift the mask inputs to be 0/1;
2795   NewMask[0] = NewMask[0] == -1 ? -1 : NewMask[0] - 2;
2796   NewMask[1] = NewMask[1] == -1 ? -1 : NewMask[1] - 2;
2797   return Src1;
2798 }
2799 
2800 // This is only legal with VOP3P instructions as an aid to op_sel matching.
2801 bool AMDGPUInstructionSelector::selectG_SHUFFLE_VECTOR(
2802   MachineInstr &MI) const {
2803   Register DstReg = MI.getOperand(0).getReg();
2804   Register Src0Reg = MI.getOperand(1).getReg();
2805   Register Src1Reg = MI.getOperand(2).getReg();
2806   ArrayRef<int> ShufMask = MI.getOperand(3).getShuffleMask();
2807 
2808   const LLT V2S16 = LLT::fixed_vector(2, 16);
2809   if (MRI->getType(DstReg) != V2S16 || MRI->getType(Src0Reg) != V2S16)
2810     return false;
2811 
2812   if (!AMDGPU::isLegalVOP3PShuffleMask(ShufMask))
2813     return false;
2814 
2815   assert(ShufMask.size() == 2);
2816   assert(STI.hasSDWA() && "no target has VOP3P but not SDWA");
2817 
2818   MachineBasicBlock *MBB = MI.getParent();
2819   const DebugLoc &DL = MI.getDebugLoc();
2820 
2821   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2822   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
2823   const TargetRegisterClass &RC = IsVALU ?
2824     AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2825 
2826   // Handle the degenerate case which should have folded out.
2827   if (ShufMask[0] == -1 && ShufMask[1] == -1) {
2828     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::IMPLICIT_DEF), DstReg);
2829 
2830     MI.eraseFromParent();
2831     return RBI.constrainGenericRegister(DstReg, RC, *MRI);
2832   }
2833 
2834   // A legal VOP3P mask only reads one of the sources.
2835   int Mask[2];
2836   Register SrcVec = normalizeVOP3PMask(Mask, Src0Reg, Src1Reg, ShufMask);
2837 
2838   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI) ||
2839       !RBI.constrainGenericRegister(SrcVec, RC, *MRI))
2840     return false;
2841 
2842   // TODO: This also should have been folded out
2843   if (isZeroOrUndef(Mask[0]) && isOneOrUndef(Mask[1])) {
2844     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::COPY), DstReg)
2845       .addReg(SrcVec);
2846 
2847     MI.eraseFromParent();
2848     return true;
2849   }
2850 
2851   if (Mask[0] == 1 && Mask[1] == -1) {
2852     if (IsVALU) {
2853       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
2854         .addImm(16)
2855         .addReg(SrcVec);
2856     } else {
2857       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
2858         .addReg(SrcVec)
2859         .addImm(16);
2860     }
2861   } else if (Mask[0] == -1 && Mask[1] == 0) {
2862     if (IsVALU) {
2863       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), DstReg)
2864         .addImm(16)
2865         .addReg(SrcVec);
2866     } else {
2867       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHL_B32), DstReg)
2868         .addReg(SrcVec)
2869         .addImm(16);
2870     }
2871   } else if (Mask[0] == 0 && Mask[1] == 0) {
2872     if (IsVALU) {
2873       // Write low half of the register into the high half.
2874       MachineInstr *MovSDWA =
2875         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2876         .addImm(0)                             // $src0_modifiers
2877         .addReg(SrcVec)                        // $src0
2878         .addImm(0)                             // $clamp
2879         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
2880         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2881         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
2882         .addReg(SrcVec, RegState::Implicit);
2883       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2884     } else {
2885       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2886         .addReg(SrcVec)
2887         .addReg(SrcVec);
2888     }
2889   } else if (Mask[0] == 1 && Mask[1] == 1) {
2890     if (IsVALU) {
2891       // Write high half of the register into the low half.
2892       MachineInstr *MovSDWA =
2893         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2894         .addImm(0)                             // $src0_modifiers
2895         .addReg(SrcVec)                        // $src0
2896         .addImm(0)                             // $clamp
2897         .addImm(AMDGPU::SDWA::WORD_0)          // $dst_sel
2898         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2899         .addImm(AMDGPU::SDWA::WORD_1)          // $src0_sel
2900         .addReg(SrcVec, RegState::Implicit);
2901       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2902     } else {
2903       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HH_B32_B16), DstReg)
2904         .addReg(SrcVec)
2905         .addReg(SrcVec);
2906     }
2907   } else if (Mask[0] == 1 && Mask[1] == 0) {
2908     if (IsVALU) {
2909       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_ALIGNBIT_B32_e64), DstReg)
2910         .addReg(SrcVec)
2911         .addReg(SrcVec)
2912         .addImm(16);
2913     } else {
2914       Register TmpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2915       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), TmpReg)
2916         .addReg(SrcVec)
2917         .addImm(16);
2918       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2919         .addReg(TmpReg)
2920         .addReg(SrcVec);
2921     }
2922   } else
2923     llvm_unreachable("all shuffle masks should be handled");
2924 
2925   MI.eraseFromParent();
2926   return true;
2927 }
2928 
2929 bool AMDGPUInstructionSelector::selectAMDGPU_BUFFER_ATOMIC_FADD(
2930   MachineInstr &MI) const {
2931   if (STI.hasGFX90AInsts())
2932     return selectImpl(MI, *CoverageInfo);
2933 
2934   MachineBasicBlock *MBB = MI.getParent();
2935   const DebugLoc &DL = MI.getDebugLoc();
2936 
2937   if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
2938     Function &F = MBB->getParent()->getFunction();
2939     DiagnosticInfoUnsupported
2940       NoFpRet(F, "return versions of fp atomics not supported",
2941               MI.getDebugLoc(), DS_Error);
2942     F.getContext().diagnose(NoFpRet);
2943     return false;
2944   }
2945 
2946   // FIXME: This is only needed because tablegen requires number of dst operands
2947   // in match and replace pattern to be the same. Otherwise patterns can be
2948   // exported from SDag path.
2949   MachineOperand &VDataIn = MI.getOperand(1);
2950   MachineOperand &VIndex = MI.getOperand(3);
2951   MachineOperand &VOffset = MI.getOperand(4);
2952   MachineOperand &SOffset = MI.getOperand(5);
2953   int16_t Offset = MI.getOperand(6).getImm();
2954 
2955   bool HasVOffset = !isOperandImmEqual(VOffset, 0, *MRI);
2956   bool HasVIndex = !isOperandImmEqual(VIndex, 0, *MRI);
2957 
2958   unsigned Opcode;
2959   if (HasVOffset) {
2960     Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN
2961                        : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN;
2962   } else {
2963     Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN
2964                        : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET;
2965   }
2966 
2967   if (MRI->getType(VDataIn.getReg()).isVector()) {
2968     switch (Opcode) {
2969     case AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN:
2970       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN;
2971       break;
2972     case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN:
2973       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFEN;
2974       break;
2975     case AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN:
2976       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_IDXEN;
2977       break;
2978     case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET:
2979       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFSET;
2980       break;
2981     }
2982   }
2983 
2984   auto I = BuildMI(*MBB, MI, DL, TII.get(Opcode));
2985   I.add(VDataIn);
2986 
2987   if (Opcode == AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN ||
2988       Opcode == AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN) {
2989     Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class());
2990     BuildMI(*MBB, &*I, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
2991       .addReg(VIndex.getReg())
2992       .addImm(AMDGPU::sub0)
2993       .addReg(VOffset.getReg())
2994       .addImm(AMDGPU::sub1);
2995 
2996     I.addReg(IdxReg);
2997   } else if (HasVIndex) {
2998     I.add(VIndex);
2999   } else if (HasVOffset) {
3000     I.add(VOffset);
3001   }
3002 
3003   I.add(MI.getOperand(2)); // rsrc
3004   I.add(SOffset);
3005   I.addImm(Offset);
3006   I.addImm(MI.getOperand(7).getImm()); // cpol
3007   I.cloneMemRefs(MI);
3008 
3009   MI.eraseFromParent();
3010 
3011   return true;
3012 }
3013 
3014 bool AMDGPUInstructionSelector::selectGlobalAtomicFadd(
3015   MachineInstr &MI, MachineOperand &AddrOp, MachineOperand &DataOp) const {
3016 
3017   if (STI.hasGFX90AInsts()) {
3018     // gfx90a adds return versions of the global atomic fadd instructions so no
3019     // special handling is required.
3020     return selectImpl(MI, *CoverageInfo);
3021   }
3022 
3023   MachineBasicBlock *MBB = MI.getParent();
3024   const DebugLoc &DL = MI.getDebugLoc();
3025 
3026   if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
3027     Function &F = MBB->getParent()->getFunction();
3028     DiagnosticInfoUnsupported
3029       NoFpRet(F, "return versions of fp atomics not supported",
3030               MI.getDebugLoc(), DS_Error);
3031     F.getContext().diagnose(NoFpRet);
3032     return false;
3033   }
3034 
3035   // FIXME: This is only needed because tablegen requires number of dst operands
3036   // in match and replace pattern to be the same. Otherwise patterns can be
3037   // exported from SDag path.
3038   auto Addr = selectFlatOffsetImpl(AddrOp, SIInstrFlags::FlatGlobal);
3039 
3040   Register Data = DataOp.getReg();
3041   const unsigned Opc = MRI->getType(Data).isVector() ?
3042     AMDGPU::GLOBAL_ATOMIC_PK_ADD_F16 : AMDGPU::GLOBAL_ATOMIC_ADD_F32;
3043   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
3044     .addReg(Addr.first)
3045     .addReg(Data)
3046     .addImm(Addr.second)
3047     .addImm(0) // cpol
3048     .cloneMemRefs(MI);
3049 
3050   MI.eraseFromParent();
3051   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3052 }
3053 
3054 bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const{
3055   MI.setDesc(TII.get(MI.getOperand(1).getImm()));
3056   MI.removeOperand(1);
3057   MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3058   return true;
3059 }
3060 
3061 bool AMDGPUInstructionSelector::selectSMFMACIntrin(MachineInstr &MI) const {
3062   unsigned Opc;
3063   switch (MI.getIntrinsicID()) {
3064   case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16:
3065     Opc = AMDGPU::V_SMFMAC_F32_16X16X32_F16_e64;
3066     break;
3067   case Intrinsic::amdgcn_smfmac_f32_32x32x16_f16:
3068     Opc = AMDGPU::V_SMFMAC_F32_32X32X16_F16_e64;
3069     break;
3070   case Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16:
3071     Opc = AMDGPU::V_SMFMAC_F32_16X16X32_BF16_e64;
3072     break;
3073   case Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16:
3074     Opc = AMDGPU::V_SMFMAC_F32_32X32X16_BF16_e64;
3075     break;
3076   case Intrinsic::amdgcn_smfmac_i32_16x16x64_i8:
3077     Opc = AMDGPU::V_SMFMAC_I32_16X16X64_I8_e64;
3078     break;
3079   case Intrinsic::amdgcn_smfmac_i32_32x32x32_i8:
3080     Opc = AMDGPU::V_SMFMAC_I32_32X32X32_I8_e64;
3081     break;
3082   default:
3083     llvm_unreachable("unhandled smfmac intrinsic");
3084   }
3085 
3086   auto VDst_In = MI.getOperand(4);
3087 
3088   MI.setDesc(TII.get(Opc));
3089   MI.removeOperand(4); // VDst_In
3090   MI.removeOperand(1); // Intrinsic ID
3091   MI.addOperand(VDst_In); // Readd VDst_In to the end
3092   MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3093   return true;
3094 }
3095 
3096 bool AMDGPUInstructionSelector::selectWaveAddress(MachineInstr &MI) const {
3097   Register DstReg = MI.getOperand(0).getReg();
3098   Register SrcReg = MI.getOperand(1).getReg();
3099   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
3100   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
3101   MachineBasicBlock *MBB = MI.getParent();
3102   const DebugLoc &DL = MI.getDebugLoc();
3103 
3104   if (IsVALU) {
3105     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
3106       .addImm(Subtarget->getWavefrontSizeLog2())
3107       .addReg(SrcReg);
3108   } else {
3109     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
3110       .addReg(SrcReg)
3111       .addImm(Subtarget->getWavefrontSizeLog2());
3112   }
3113 
3114   const TargetRegisterClass &RC =
3115       IsVALU ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
3116   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
3117     return false;
3118 
3119   MI.eraseFromParent();
3120   return true;
3121 }
3122 
3123 bool AMDGPUInstructionSelector::select(MachineInstr &I) {
3124   if (I.isPHI())
3125     return selectPHI(I);
3126 
3127   if (!I.isPreISelOpcode()) {
3128     if (I.isCopy())
3129       return selectCOPY(I);
3130     return true;
3131   }
3132 
3133   switch (I.getOpcode()) {
3134   case TargetOpcode::G_AND:
3135   case TargetOpcode::G_OR:
3136   case TargetOpcode::G_XOR:
3137     if (selectImpl(I, *CoverageInfo))
3138       return true;
3139     return selectG_AND_OR_XOR(I);
3140   case TargetOpcode::G_ADD:
3141   case TargetOpcode::G_SUB:
3142     if (selectImpl(I, *CoverageInfo))
3143       return true;
3144     return selectG_ADD_SUB(I);
3145   case TargetOpcode::G_UADDO:
3146   case TargetOpcode::G_USUBO:
3147   case TargetOpcode::G_UADDE:
3148   case TargetOpcode::G_USUBE:
3149     return selectG_UADDO_USUBO_UADDE_USUBE(I);
3150   case TargetOpcode::G_INTTOPTR:
3151   case TargetOpcode::G_BITCAST:
3152   case TargetOpcode::G_PTRTOINT:
3153     return selectCOPY(I);
3154   case TargetOpcode::G_CONSTANT:
3155   case TargetOpcode::G_FCONSTANT:
3156     return selectG_CONSTANT(I);
3157   case TargetOpcode::G_FNEG:
3158     if (selectImpl(I, *CoverageInfo))
3159       return true;
3160     return selectG_FNEG(I);
3161   case TargetOpcode::G_FABS:
3162     if (selectImpl(I, *CoverageInfo))
3163       return true;
3164     return selectG_FABS(I);
3165   case TargetOpcode::G_EXTRACT:
3166     return selectG_EXTRACT(I);
3167   case TargetOpcode::G_MERGE_VALUES:
3168   case TargetOpcode::G_BUILD_VECTOR:
3169   case TargetOpcode::G_CONCAT_VECTORS:
3170     return selectG_MERGE_VALUES(I);
3171   case TargetOpcode::G_UNMERGE_VALUES:
3172     return selectG_UNMERGE_VALUES(I);
3173   case TargetOpcode::G_BUILD_VECTOR_TRUNC:
3174     return selectG_BUILD_VECTOR_TRUNC(I);
3175   case TargetOpcode::G_PTR_ADD:
3176     return selectG_PTR_ADD(I);
3177   case TargetOpcode::G_IMPLICIT_DEF:
3178     return selectG_IMPLICIT_DEF(I);
3179   case TargetOpcode::G_FREEZE:
3180     return selectCOPY(I);
3181   case TargetOpcode::G_INSERT:
3182     return selectG_INSERT(I);
3183   case TargetOpcode::G_INTRINSIC:
3184     return selectG_INTRINSIC(I);
3185   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
3186     return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
3187   case TargetOpcode::G_ICMP:
3188     if (selectG_ICMP(I))
3189       return true;
3190     return selectImpl(I, *CoverageInfo);
3191   case TargetOpcode::G_LOAD:
3192   case TargetOpcode::G_STORE:
3193   case TargetOpcode::G_ATOMIC_CMPXCHG:
3194   case TargetOpcode::G_ATOMICRMW_XCHG:
3195   case TargetOpcode::G_ATOMICRMW_ADD:
3196   case TargetOpcode::G_ATOMICRMW_SUB:
3197   case TargetOpcode::G_ATOMICRMW_AND:
3198   case TargetOpcode::G_ATOMICRMW_OR:
3199   case TargetOpcode::G_ATOMICRMW_XOR:
3200   case TargetOpcode::G_ATOMICRMW_MIN:
3201   case TargetOpcode::G_ATOMICRMW_MAX:
3202   case TargetOpcode::G_ATOMICRMW_UMIN:
3203   case TargetOpcode::G_ATOMICRMW_UMAX:
3204   case TargetOpcode::G_ATOMICRMW_FADD:
3205   case AMDGPU::G_AMDGPU_ATOMIC_INC:
3206   case AMDGPU::G_AMDGPU_ATOMIC_DEC:
3207   case AMDGPU::G_AMDGPU_ATOMIC_FMIN:
3208   case AMDGPU::G_AMDGPU_ATOMIC_FMAX:
3209     return selectG_LOAD_STORE_ATOMICRMW(I);
3210   case TargetOpcode::G_SELECT:
3211     return selectG_SELECT(I);
3212   case TargetOpcode::G_TRUNC:
3213     return selectG_TRUNC(I);
3214   case TargetOpcode::G_SEXT:
3215   case TargetOpcode::G_ZEXT:
3216   case TargetOpcode::G_ANYEXT:
3217   case TargetOpcode::G_SEXT_INREG:
3218     if (selectImpl(I, *CoverageInfo))
3219       return true;
3220     return selectG_SZA_EXT(I);
3221   case TargetOpcode::G_BRCOND:
3222     return selectG_BRCOND(I);
3223   case TargetOpcode::G_GLOBAL_VALUE:
3224     return selectG_GLOBAL_VALUE(I);
3225   case TargetOpcode::G_PTRMASK:
3226     return selectG_PTRMASK(I);
3227   case TargetOpcode::G_EXTRACT_VECTOR_ELT:
3228     return selectG_EXTRACT_VECTOR_ELT(I);
3229   case TargetOpcode::G_INSERT_VECTOR_ELT:
3230     return selectG_INSERT_VECTOR_ELT(I);
3231   case TargetOpcode::G_SHUFFLE_VECTOR:
3232     return selectG_SHUFFLE_VECTOR(I);
3233   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
3234   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16:
3235   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE:
3236   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: {
3237     const AMDGPU::ImageDimIntrinsicInfo *Intr
3238       = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID());
3239     assert(Intr && "not an image intrinsic with image pseudo");
3240     return selectImageIntrinsic(I, Intr);
3241   }
3242   case AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY:
3243     return selectBVHIntrinsic(I);
3244   case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD:
3245     return selectAMDGPU_BUFFER_ATOMIC_FADD(I);
3246   case AMDGPU::G_SBFX:
3247   case AMDGPU::G_UBFX:
3248     return selectG_SBFX_UBFX(I);
3249   case AMDGPU::G_SI_CALL:
3250     I.setDesc(TII.get(AMDGPU::SI_CALL));
3251     return true;
3252   case AMDGPU::G_AMDGPU_WAVE_ADDRESS:
3253     return selectWaveAddress(I);
3254   default:
3255     return selectImpl(I, *CoverageInfo);
3256   }
3257   return false;
3258 }
3259 
3260 InstructionSelector::ComplexRendererFns
3261 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
3262   return {{
3263       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3264   }};
3265 
3266 }
3267 
3268 std::pair<Register, unsigned>
3269 AMDGPUInstructionSelector::selectVOP3ModsImpl(MachineOperand &Root,
3270                                               bool AllowAbs) const {
3271   Register Src = Root.getReg();
3272   Register OrigSrc = Src;
3273   unsigned Mods = 0;
3274   MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
3275 
3276   if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
3277     Src = MI->getOperand(1).getReg();
3278     Mods |= SISrcMods::NEG;
3279     MI = getDefIgnoringCopies(Src, *MRI);
3280   }
3281 
3282   if (AllowAbs && MI && MI->getOpcode() == AMDGPU::G_FABS) {
3283     Src = MI->getOperand(1).getReg();
3284     Mods |= SISrcMods::ABS;
3285   }
3286 
3287   if (Mods != 0 &&
3288       RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) {
3289     MachineInstr *UseMI = Root.getParent();
3290 
3291     // If we looked through copies to find source modifiers on an SGPR operand,
3292     // we now have an SGPR register source. To avoid potentially violating the
3293     // constant bus restriction, we need to insert a copy to a VGPR.
3294     Register VGPRSrc = MRI->cloneVirtualRegister(OrigSrc);
3295     BuildMI(*UseMI->getParent(), UseMI, UseMI->getDebugLoc(),
3296             TII.get(AMDGPU::COPY), VGPRSrc)
3297       .addReg(Src);
3298     Src = VGPRSrc;
3299   }
3300 
3301   return std::make_pair(Src, Mods);
3302 }
3303 
3304 ///
3305 /// This will select either an SGPR or VGPR operand and will save us from
3306 /// having to write an extra tablegen pattern.
3307 InstructionSelector::ComplexRendererFns
3308 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
3309   return {{
3310       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3311   }};
3312 }
3313 
3314 InstructionSelector::ComplexRendererFns
3315 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
3316   Register Src;
3317   unsigned Mods;
3318   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3319 
3320   return {{
3321       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3322       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3323       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3324       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3325   }};
3326 }
3327 
3328 InstructionSelector::ComplexRendererFns
3329 AMDGPUInstructionSelector::selectVOP3BMods0(MachineOperand &Root) const {
3330   Register Src;
3331   unsigned Mods;
3332   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3333 
3334   return {{
3335       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3336       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3337       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3338       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3339   }};
3340 }
3341 
3342 InstructionSelector::ComplexRendererFns
3343 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
3344   return {{
3345       [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
3346       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3347       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
3348   }};
3349 }
3350 
3351 InstructionSelector::ComplexRendererFns
3352 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
3353   Register Src;
3354   unsigned Mods;
3355   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3356 
3357   return {{
3358       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3359       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3360   }};
3361 }
3362 
3363 InstructionSelector::ComplexRendererFns
3364 AMDGPUInstructionSelector::selectVOP3BMods(MachineOperand &Root) const {
3365   Register Src;
3366   unsigned Mods;
3367   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3368 
3369   return {{
3370       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3371       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3372   }};
3373 }
3374 
3375 InstructionSelector::ComplexRendererFns
3376 AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
3377   Register Reg = Root.getReg();
3378   const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI);
3379   if (Def && (Def->getOpcode() == AMDGPU::G_FNEG ||
3380               Def->getOpcode() == AMDGPU::G_FABS))
3381     return {};
3382   return {{
3383       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3384   }};
3385 }
3386 
3387 std::pair<Register, unsigned>
3388 AMDGPUInstructionSelector::selectVOP3PModsImpl(
3389   Register Src, const MachineRegisterInfo &MRI, bool IsDOT) const {
3390   unsigned Mods = 0;
3391   MachineInstr *MI = MRI.getVRegDef(Src);
3392 
3393   if (MI && MI->getOpcode() == AMDGPU::G_FNEG &&
3394       // It's possible to see an f32 fneg here, but unlikely.
3395       // TODO: Treat f32 fneg as only high bit.
3396       MRI.getType(Src) == LLT::fixed_vector(2, 16)) {
3397     Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
3398     Src = MI->getOperand(1).getReg();
3399     MI = MRI.getVRegDef(Src);
3400   }
3401 
3402   // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.
3403   (void)IsDOT; // DOTs do not use OPSEL on gfx940+, check ST.hasDOTOpSelHazard()
3404 
3405   // Packed instructions do not have abs modifiers.
3406   Mods |= SISrcMods::OP_SEL_1;
3407 
3408   return std::make_pair(Src, Mods);
3409 }
3410 
3411 InstructionSelector::ComplexRendererFns
3412 AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
3413   MachineRegisterInfo &MRI
3414     = Root.getParent()->getParent()->getParent()->getRegInfo();
3415 
3416   Register Src;
3417   unsigned Mods;
3418   std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);
3419 
3420   return {{
3421       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3422       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3423   }};
3424 }
3425 
3426 InstructionSelector::ComplexRendererFns
3427 AMDGPUInstructionSelector::selectVOP3PModsDOT(MachineOperand &Root) const {
3428   MachineRegisterInfo &MRI
3429     = Root.getParent()->getParent()->getParent()->getRegInfo();
3430 
3431   Register Src;
3432   unsigned Mods;
3433   std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI, true);
3434 
3435   return {{
3436       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3437       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3438   }};
3439 }
3440 
3441 InstructionSelector::ComplexRendererFns
3442 AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
3443   Register Src;
3444   unsigned Mods;
3445   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3446   if (!isKnownNeverNaN(Src, *MRI))
3447     return None;
3448 
3449   return {{
3450       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3451       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3452   }};
3453 }
3454 
3455 InstructionSelector::ComplexRendererFns
3456 AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
3457   // FIXME: Handle op_sel
3458   return {{
3459       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
3460       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
3461   }};
3462 }
3463 
3464 InstructionSelector::ComplexRendererFns
3465 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
3466   SmallVector<GEPInfo, 4> AddrInfo;
3467   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3468 
3469   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3470     return None;
3471 
3472   const GEPInfo &GEPInfo = AddrInfo[0];
3473   Optional<int64_t> EncodedImm =
3474       AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm, false);
3475   if (!EncodedImm)
3476     return None;
3477 
3478   unsigned PtrReg = GEPInfo.SgprParts[0];
3479   return {{
3480     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3481     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3482   }};
3483 }
3484 
3485 InstructionSelector::ComplexRendererFns
3486 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
3487   SmallVector<GEPInfo, 4> AddrInfo;
3488   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3489 
3490   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3491     return None;
3492 
3493   const GEPInfo &GEPInfo = AddrInfo[0];
3494   Register PtrReg = GEPInfo.SgprParts[0];
3495   Optional<int64_t> EncodedImm =
3496       AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
3497   if (!EncodedImm)
3498     return None;
3499 
3500   return {{
3501     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3502     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3503   }};
3504 }
3505 
3506 InstructionSelector::ComplexRendererFns
3507 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
3508   MachineInstr *MI = Root.getParent();
3509   MachineBasicBlock *MBB = MI->getParent();
3510 
3511   SmallVector<GEPInfo, 4> AddrInfo;
3512   getAddrModeInfo(*MI, *MRI, AddrInfo);
3513 
3514   // FIXME: We should shrink the GEP if the offset is known to be <= 32-bits,
3515   // then we can select all ptr + 32-bit offsets not just immediate offsets.
3516   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3517     return None;
3518 
3519   const GEPInfo &GEPInfo = AddrInfo[0];
3520   // SGPR offset is unsigned.
3521   if (!GEPInfo.Imm || GEPInfo.Imm < 0 || !isUInt<32>(GEPInfo.Imm))
3522     return None;
3523 
3524   // If we make it this far we have a load with an 32-bit immediate offset.
3525   // It is OK to select this using a sgpr offset, because we have already
3526   // failed trying to select this load into one of the _IMM variants since
3527   // the _IMM Patterns are considered before the _SGPR patterns.
3528   Register PtrReg = GEPInfo.SgprParts[0];
3529   Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3530   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
3531           .addImm(GEPInfo.Imm);
3532   return {{
3533     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3534     [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
3535   }};
3536 }
3537 
3538 std::pair<Register, int>
3539 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root,
3540                                                 uint64_t FlatVariant) const {
3541   MachineInstr *MI = Root.getParent();
3542 
3543   auto Default = std::make_pair(Root.getReg(), 0);
3544 
3545   if (!STI.hasFlatInstOffsets())
3546     return Default;
3547 
3548   Register PtrBase;
3549   int64_t ConstOffset;
3550   std::tie(PtrBase, ConstOffset) =
3551       getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3552   if (ConstOffset == 0)
3553     return Default;
3554 
3555   unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
3556   if (!TII.isLegalFLATOffset(ConstOffset, AddrSpace, FlatVariant))
3557     return Default;
3558 
3559   return std::make_pair(PtrBase, ConstOffset);
3560 }
3561 
3562 InstructionSelector::ComplexRendererFns
3563 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
3564   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FLAT);
3565 
3566   return {{
3567       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3568       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3569     }};
3570 }
3571 
3572 InstructionSelector::ComplexRendererFns
3573 AMDGPUInstructionSelector::selectGlobalOffset(MachineOperand &Root) const {
3574   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatGlobal);
3575 
3576   return {{
3577       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3578       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3579   }};
3580 }
3581 
3582 InstructionSelector::ComplexRendererFns
3583 AMDGPUInstructionSelector::selectScratchOffset(MachineOperand &Root) const {
3584   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatScratch);
3585 
3586   return {{
3587       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3588       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3589     }};
3590 }
3591 
3592 /// Match a zero extend from a 32-bit value to 64-bits.
3593 static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) {
3594   Register ZExtSrc;
3595   if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))
3596     return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register();
3597 
3598   // Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0)
3599   const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
3600   if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES)
3601     return false;
3602 
3603   if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt())) {
3604     return Def->getOperand(1).getReg();
3605   }
3606 
3607   return Register();
3608 }
3609 
3610 // Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
3611 InstructionSelector::ComplexRendererFns
3612 AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
3613   Register Addr = Root.getReg();
3614   Register PtrBase;
3615   int64_t ConstOffset;
3616   int64_t ImmOffset = 0;
3617 
3618   // Match the immediate offset first, which canonically is moved as low as
3619   // possible.
3620   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3621 
3622   if (ConstOffset != 0) {
3623     if (TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS,
3624                               SIInstrFlags::FlatGlobal)) {
3625       Addr = PtrBase;
3626       ImmOffset = ConstOffset;
3627     } else {
3628       auto PtrBaseDef = getDefSrcRegIgnoringCopies(PtrBase, *MRI);
3629       if (!PtrBaseDef)
3630         return None;
3631 
3632       if (isSGPR(PtrBaseDef->Reg)) {
3633         if (ConstOffset > 0) {
3634           // Offset is too large.
3635           //
3636           // saddr + large_offset -> saddr +
3637           //                         (voffset = large_offset & ~MaxOffset) +
3638           //                         (large_offset & MaxOffset);
3639           int64_t SplitImmOffset, RemainderOffset;
3640           std::tie(SplitImmOffset, RemainderOffset) = TII.splitFlatOffset(
3641               ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, SIInstrFlags::FlatGlobal);
3642 
3643           if (isUInt<32>(RemainderOffset)) {
3644             MachineInstr *MI = Root.getParent();
3645             MachineBasicBlock *MBB = MI->getParent();
3646             Register HighBits =
3647                 MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3648 
3649             BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3650                     HighBits)
3651                 .addImm(RemainderOffset);
3652 
3653             return {{
3654                 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrBase); }, // saddr
3655                 [=](MachineInstrBuilder &MIB) {
3656                   MIB.addReg(HighBits);
3657                 }, // voffset
3658                 [=](MachineInstrBuilder &MIB) { MIB.addImm(SplitImmOffset); },
3659             }};
3660           }
3661         }
3662 
3663         // We are adding a 64 bit SGPR and a constant. If constant bus limit
3664         // is 1 we would need to perform 1 or 2 extra moves for each half of
3665         // the constant and it is better to do a scalar add and then issue a
3666         // single VALU instruction to materialize zero. Otherwise it is less
3667         // instructions to perform VALU adds with immediates or inline literals.
3668         unsigned NumLiterals =
3669             !TII.isInlineConstant(APInt(32, ConstOffset & 0xffffffff)) +
3670             !TII.isInlineConstant(APInt(32, ConstOffset >> 32));
3671         if (STI.getConstantBusLimit(AMDGPU::V_ADD_U32_e64) > NumLiterals)
3672           return None;
3673       }
3674     }
3675   }
3676 
3677   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3678   if (!AddrDef)
3679     return None;
3680 
3681   // Match the variable offset.
3682   if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3683     // Look through the SGPR->VGPR copy.
3684     Register SAddr =
3685         getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
3686 
3687     if (SAddr && isSGPR(SAddr)) {
3688       Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
3689 
3690       // It's possible voffset is an SGPR here, but the copy to VGPR will be
3691       // inserted later.
3692       if (Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) {
3693         return {{[=](MachineInstrBuilder &MIB) { // saddr
3694                    MIB.addReg(SAddr);
3695                  },
3696                  [=](MachineInstrBuilder &MIB) { // voffset
3697                    MIB.addReg(VOffset);
3698                  },
3699                  [=](MachineInstrBuilder &MIB) { // offset
3700                    MIB.addImm(ImmOffset);
3701                  }}};
3702       }
3703     }
3704   }
3705 
3706   // FIXME: We should probably have folded COPY (G_IMPLICIT_DEF) earlier, and
3707   // drop this.
3708   if (AddrDef->MI->getOpcode() == AMDGPU::G_IMPLICIT_DEF ||
3709       AddrDef->MI->getOpcode() == AMDGPU::G_CONSTANT || !isSGPR(AddrDef->Reg))
3710     return None;
3711 
3712   // It's cheaper to materialize a single 32-bit zero for vaddr than the two
3713   // moves required to copy a 64-bit SGPR to VGPR.
3714   MachineInstr *MI = Root.getParent();
3715   MachineBasicBlock *MBB = MI->getParent();
3716   Register VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3717 
3718   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32), VOffset)
3719       .addImm(0);
3720 
3721   return {{
3722       [=](MachineInstrBuilder &MIB) { MIB.addReg(AddrDef->Reg); }, // saddr
3723       [=](MachineInstrBuilder &MIB) { MIB.addReg(VOffset); },      // voffset
3724       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); }     // offset
3725   }};
3726 }
3727 
3728 InstructionSelector::ComplexRendererFns
3729 AMDGPUInstructionSelector::selectScratchSAddr(MachineOperand &Root) const {
3730   Register Addr = Root.getReg();
3731   Register PtrBase;
3732   int64_t ConstOffset;
3733   int64_t ImmOffset = 0;
3734 
3735   // Match the immediate offset first, which canonically is moved as low as
3736   // possible.
3737   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3738 
3739   if (ConstOffset != 0 &&
3740       TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS,
3741                             SIInstrFlags::FlatScratch)) {
3742     Addr = PtrBase;
3743     ImmOffset = ConstOffset;
3744   }
3745 
3746   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3747   if (!AddrDef)
3748     return None;
3749 
3750   if (AddrDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
3751     int FI = AddrDef->MI->getOperand(1).getIndex();
3752     return {{
3753         [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
3754         [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
3755     }};
3756   }
3757 
3758   Register SAddr = AddrDef->Reg;
3759 
3760   if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3761     Register LHS = AddrDef->MI->getOperand(1).getReg();
3762     Register RHS = AddrDef->MI->getOperand(2).getReg();
3763     auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
3764     auto RHSDef = getDefSrcRegIgnoringCopies(RHS, *MRI);
3765 
3766     if (LHSDef && RHSDef &&
3767         LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX &&
3768         isSGPR(RHSDef->Reg)) {
3769       int FI = LHSDef->MI->getOperand(1).getIndex();
3770       MachineInstr &I = *Root.getParent();
3771       MachineBasicBlock *BB = I.getParent();
3772       const DebugLoc &DL = I.getDebugLoc();
3773       SAddr = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3774 
3775       BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_I32), SAddr)
3776           .addFrameIndex(FI)
3777           .addReg(RHSDef->Reg);
3778     }
3779   }
3780 
3781   if (!isSGPR(SAddr))
3782     return None;
3783 
3784   return {{
3785       [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); }, // saddr
3786       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
3787   }};
3788 }
3789 
3790 InstructionSelector::ComplexRendererFns
3791 AMDGPUInstructionSelector::selectScratchSVAddr(MachineOperand &Root) const {
3792   Register Addr = Root.getReg();
3793   Register PtrBase;
3794   int64_t ConstOffset;
3795   int64_t ImmOffset = 0;
3796 
3797   // Match the immediate offset first, which canonically is moved as low as
3798   // possible.
3799   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3800 
3801   if (ConstOffset != 0 &&
3802       TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS, true)) {
3803     Addr = PtrBase;
3804     ImmOffset = ConstOffset;
3805   }
3806 
3807   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3808   if (!AddrDef)
3809     return None;
3810 
3811   if (AddrDef->MI->getOpcode() != AMDGPU::G_PTR_ADD)
3812     return None;
3813 
3814   Register RHS = AddrDef->MI->getOperand(2).getReg();
3815   if (RBI.getRegBank(RHS, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID)
3816     return None;
3817 
3818   Register LHS = AddrDef->MI->getOperand(1).getReg();
3819   auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
3820 
3821   if (LHSDef && LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
3822     int FI = LHSDef->MI->getOperand(1).getIndex();
3823     return {{
3824         [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr
3825         [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
3826         [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
3827     }};
3828   }
3829 
3830   if (!isSGPR(LHS))
3831     return None;
3832 
3833   return {{
3834       [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr
3835       [=](MachineInstrBuilder &MIB) { MIB.addReg(LHS); }, // saddr
3836       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
3837   }};
3838 }
3839 
3840 InstructionSelector::ComplexRendererFns
3841 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
3842   MachineInstr *MI = Root.getParent();
3843   MachineBasicBlock *MBB = MI->getParent();
3844   MachineFunction *MF = MBB->getParent();
3845   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3846 
3847   int64_t Offset = 0;
3848   if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) &&
3849       Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) {
3850     Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3851 
3852     // TODO: Should this be inside the render function? The iterator seems to
3853     // move.
3854     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3855             HighBits)
3856       .addImm(Offset & ~4095);
3857 
3858     return {{[=](MachineInstrBuilder &MIB) { // rsrc
3859                MIB.addReg(Info->getScratchRSrcReg());
3860              },
3861              [=](MachineInstrBuilder &MIB) { // vaddr
3862                MIB.addReg(HighBits);
3863              },
3864              [=](MachineInstrBuilder &MIB) { // soffset
3865                // Use constant zero for soffset and rely on eliminateFrameIndex
3866                // to choose the appropriate frame register if need be.
3867                MIB.addImm(0);
3868              },
3869              [=](MachineInstrBuilder &MIB) { // offset
3870                MIB.addImm(Offset & 4095);
3871              }}};
3872   }
3873 
3874   assert(Offset == 0 || Offset == -1);
3875 
3876   // Try to fold a frame index directly into the MUBUF vaddr field, and any
3877   // offsets.
3878   Optional<int> FI;
3879   Register VAddr = Root.getReg();
3880   if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
3881     Register PtrBase;
3882     int64_t ConstOffset;
3883     std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(VAddr, *MRI);
3884     if (ConstOffset != 0) {
3885       if (SIInstrInfo::isLegalMUBUFImmOffset(ConstOffset) &&
3886           (!STI.privateMemoryResourceIsRangeChecked() ||
3887            KnownBits->signBitIsZero(PtrBase))) {
3888         const MachineInstr *PtrBaseDef = MRI->getVRegDef(PtrBase);
3889         if (PtrBaseDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
3890           FI = PtrBaseDef->getOperand(1).getIndex();
3891         else
3892           VAddr = PtrBase;
3893         Offset = ConstOffset;
3894       }
3895     } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
3896       FI = RootDef->getOperand(1).getIndex();
3897     }
3898   }
3899 
3900   return {{[=](MachineInstrBuilder &MIB) { // rsrc
3901              MIB.addReg(Info->getScratchRSrcReg());
3902            },
3903            [=](MachineInstrBuilder &MIB) { // vaddr
3904              if (FI.hasValue())
3905                MIB.addFrameIndex(FI.getValue());
3906              else
3907                MIB.addReg(VAddr);
3908            },
3909            [=](MachineInstrBuilder &MIB) { // soffset
3910              // Use constant zero for soffset and rely on eliminateFrameIndex
3911              // to choose the appropriate frame register if need be.
3912              MIB.addImm(0);
3913            },
3914            [=](MachineInstrBuilder &MIB) { // offset
3915              MIB.addImm(Offset);
3916            }}};
3917 }
3918 
3919 bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
3920                                                 int64_t Offset) const {
3921   if (!isUInt<16>(Offset))
3922     return false;
3923 
3924   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
3925     return true;
3926 
3927   // On Southern Islands instruction with a negative base value and an offset
3928   // don't seem to work.
3929   return KnownBits->signBitIsZero(Base);
3930 }
3931 
3932 bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0,
3933                                                  int64_t Offset1,
3934                                                  unsigned Size) const {
3935   if (Offset0 % Size != 0 || Offset1 % Size != 0)
3936     return false;
3937   if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
3938     return false;
3939 
3940   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
3941     return true;
3942 
3943   // On Southern Islands instruction with a negative base value and an offset
3944   // don't seem to work.
3945   return KnownBits->signBitIsZero(Base);
3946 }
3947 
3948 bool AMDGPUInstructionSelector::isUnneededShiftMask(const MachineInstr &MI,
3949                                                     unsigned ShAmtBits) const {
3950   assert(MI.getOpcode() == TargetOpcode::G_AND);
3951 
3952   Optional<APInt> RHS = getIConstantVRegVal(MI.getOperand(2).getReg(), *MRI);
3953   if (!RHS)
3954     return false;
3955 
3956   if (RHS->countTrailingOnes() >= ShAmtBits)
3957     return true;
3958 
3959   const APInt &LHSKnownZeros =
3960       KnownBits->getKnownZeroes(MI.getOperand(1).getReg());
3961   return (LHSKnownZeros | *RHS).countTrailingOnes() >= ShAmtBits;
3962 }
3963 
3964 // Return the wave level SGPR base address if this is a wave address.
3965 static Register getWaveAddress(const MachineInstr *Def) {
3966   return Def->getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS
3967              ? Def->getOperand(1).getReg()
3968              : Register();
3969 }
3970 
3971 InstructionSelector::ComplexRendererFns
3972 AMDGPUInstructionSelector::selectMUBUFScratchOffset(
3973     MachineOperand &Root) const {
3974   Register Reg = Root.getReg();
3975   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3976 
3977   const MachineInstr *Def = MRI->getVRegDef(Reg);
3978   if (Register WaveBase = getWaveAddress(Def)) {
3979     return {{
3980         [=](MachineInstrBuilder &MIB) { // rsrc
3981           MIB.addReg(Info->getScratchRSrcReg());
3982         },
3983         [=](MachineInstrBuilder &MIB) { // soffset
3984           MIB.addReg(WaveBase);
3985         },
3986         [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // offset
3987     }};
3988   }
3989 
3990   int64_t Offset = 0;
3991 
3992   // FIXME: Copy check is a hack
3993   Register BasePtr;
3994   if (mi_match(Reg, *MRI, m_GPtrAdd(m_Reg(BasePtr), m_Copy(m_ICst(Offset))))) {
3995     if (!SIInstrInfo::isLegalMUBUFImmOffset(Offset))
3996       return {};
3997     const MachineInstr *BasePtrDef = MRI->getVRegDef(BasePtr);
3998     Register WaveBase = getWaveAddress(BasePtrDef);
3999     if (!WaveBase)
4000       return {};
4001 
4002     return {{
4003         [=](MachineInstrBuilder &MIB) { // rsrc
4004           MIB.addReg(Info->getScratchRSrcReg());
4005         },
4006         [=](MachineInstrBuilder &MIB) { // soffset
4007           MIB.addReg(WaveBase);
4008         },
4009         [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
4010     }};
4011   }
4012 
4013   if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
4014       !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
4015     return {};
4016 
4017   return {{
4018       [=](MachineInstrBuilder &MIB) { // rsrc
4019         MIB.addReg(Info->getScratchRSrcReg());
4020       },
4021       [=](MachineInstrBuilder &MIB) { // soffset
4022         MIB.addImm(0);
4023       },
4024       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
4025   }};
4026 }
4027 
4028 std::pair<Register, unsigned>
4029 AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
4030   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
4031   if (!RootDef)
4032     return std::make_pair(Root.getReg(), 0);
4033 
4034   int64_t ConstAddr = 0;
4035 
4036   Register PtrBase;
4037   int64_t Offset;
4038   std::tie(PtrBase, Offset) =
4039     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
4040 
4041   if (Offset) {
4042     if (isDSOffsetLegal(PtrBase, Offset)) {
4043       // (add n0, c0)
4044       return std::make_pair(PtrBase, Offset);
4045     }
4046   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
4047     // TODO
4048 
4049 
4050   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
4051     // TODO
4052 
4053   }
4054 
4055   return std::make_pair(Root.getReg(), 0);
4056 }
4057 
4058 InstructionSelector::ComplexRendererFns
4059 AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
4060   Register Reg;
4061   unsigned Offset;
4062   std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
4063   return {{
4064       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
4065       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
4066     }};
4067 }
4068 
4069 InstructionSelector::ComplexRendererFns
4070 AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
4071   return selectDSReadWrite2(Root, 4);
4072 }
4073 
4074 InstructionSelector::ComplexRendererFns
4075 AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const {
4076   return selectDSReadWrite2(Root, 8);
4077 }
4078 
4079 InstructionSelector::ComplexRendererFns
4080 AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root,
4081                                               unsigned Size) const {
4082   Register Reg;
4083   unsigned Offset;
4084   std::tie(Reg, Offset) = selectDSReadWrite2Impl(Root, Size);
4085   return {{
4086       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
4087       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
4088       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); }
4089     }};
4090 }
4091 
4092 std::pair<Register, unsigned>
4093 AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root,
4094                                                   unsigned Size) const {
4095   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
4096   if (!RootDef)
4097     return std::make_pair(Root.getReg(), 0);
4098 
4099   int64_t ConstAddr = 0;
4100 
4101   Register PtrBase;
4102   int64_t Offset;
4103   std::tie(PtrBase, Offset) =
4104     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
4105 
4106   if (Offset) {
4107     int64_t OffsetValue0 = Offset;
4108     int64_t OffsetValue1 = Offset + Size;
4109     if (isDSOffset2Legal(PtrBase, OffsetValue0, OffsetValue1, Size)) {
4110       // (add n0, c0)
4111       return std::make_pair(PtrBase, OffsetValue0 / Size);
4112     }
4113   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
4114     // TODO
4115 
4116   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
4117     // TODO
4118 
4119   }
4120 
4121   return std::make_pair(Root.getReg(), 0);
4122 }
4123 
4124 /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
4125 /// the base value with the constant offset. There may be intervening copies
4126 /// between \p Root and the identified constant. Returns \p Root, 0 if this does
4127 /// not match the pattern.
4128 std::pair<Register, int64_t>
4129 AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
4130   Register Root, const MachineRegisterInfo &MRI) const {
4131   MachineInstr *RootI = getDefIgnoringCopies(Root, MRI);
4132   if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
4133     return {Root, 0};
4134 
4135   MachineOperand &RHS = RootI->getOperand(2);
4136   Optional<ValueAndVReg> MaybeOffset =
4137       getIConstantVRegValWithLookThrough(RHS.getReg(), MRI);
4138   if (!MaybeOffset)
4139     return {Root, 0};
4140   return {RootI->getOperand(1).getReg(), MaybeOffset->Value.getSExtValue()};
4141 }
4142 
4143 static void addZeroImm(MachineInstrBuilder &MIB) {
4144   MIB.addImm(0);
4145 }
4146 
4147 /// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p
4148 /// BasePtr is not valid, a null base pointer will be used.
4149 static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4150                           uint32_t FormatLo, uint32_t FormatHi,
4151                           Register BasePtr) {
4152   Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4153   Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4154   Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4155   Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
4156 
4157   B.buildInstr(AMDGPU::S_MOV_B32)
4158     .addDef(RSrc2)
4159     .addImm(FormatLo);
4160   B.buildInstr(AMDGPU::S_MOV_B32)
4161     .addDef(RSrc3)
4162     .addImm(FormatHi);
4163 
4164   // Build the half of the subregister with the constants before building the
4165   // full 128-bit register. If we are building multiple resource descriptors,
4166   // this will allow CSEing of the 2-component register.
4167   B.buildInstr(AMDGPU::REG_SEQUENCE)
4168     .addDef(RSrcHi)
4169     .addReg(RSrc2)
4170     .addImm(AMDGPU::sub0)
4171     .addReg(RSrc3)
4172     .addImm(AMDGPU::sub1);
4173 
4174   Register RSrcLo = BasePtr;
4175   if (!BasePtr) {
4176     RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4177     B.buildInstr(AMDGPU::S_MOV_B64)
4178       .addDef(RSrcLo)
4179       .addImm(0);
4180   }
4181 
4182   B.buildInstr(AMDGPU::REG_SEQUENCE)
4183     .addDef(RSrc)
4184     .addReg(RSrcLo)
4185     .addImm(AMDGPU::sub0_sub1)
4186     .addReg(RSrcHi)
4187     .addImm(AMDGPU::sub2_sub3);
4188 
4189   return RSrc;
4190 }
4191 
4192 static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4193                                 const SIInstrInfo &TII, Register BasePtr) {
4194   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
4195 
4196   // FIXME: Why are half the "default" bits ignored based on the addressing
4197   // mode?
4198   return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
4199 }
4200 
4201 static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4202                                const SIInstrInfo &TII, Register BasePtr) {
4203   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
4204 
4205   // FIXME: Why are half the "default" bits ignored based on the addressing
4206   // mode?
4207   return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
4208 }
4209 
4210 AMDGPUInstructionSelector::MUBUFAddressData
4211 AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
4212   MUBUFAddressData Data;
4213   Data.N0 = Src;
4214 
4215   Register PtrBase;
4216   int64_t Offset;
4217 
4218   std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
4219   if (isUInt<32>(Offset)) {
4220     Data.N0 = PtrBase;
4221     Data.Offset = Offset;
4222   }
4223 
4224   if (MachineInstr *InputAdd
4225       = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
4226     Data.N2 = InputAdd->getOperand(1).getReg();
4227     Data.N3 = InputAdd->getOperand(2).getReg();
4228 
4229     // FIXME: Need to fix extra SGPR->VGPRcopies inserted
4230     // FIXME: Don't know this was defined by operand 0
4231     //
4232     // TODO: Remove this when we have copy folding optimizations after
4233     // RegBankSelect.
4234     Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
4235     Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
4236   }
4237 
4238   return Data;
4239 }
4240 
4241 /// Return if the addr64 mubuf mode should be used for the given address.
4242 bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
4243   // (ptr_add N2, N3) -> addr64, or
4244   // (ptr_add (ptr_add N2, N3), C1) -> addr64
4245   if (Addr.N2)
4246     return true;
4247 
4248   const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
4249   return N0Bank->getID() == AMDGPU::VGPRRegBankID;
4250 }
4251 
4252 /// Split an immediate offset \p ImmOffset depending on whether it fits in the
4253 /// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
4254 /// component.
4255 void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
4256   MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
4257   if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
4258     return;
4259 
4260   // Illegal offset, store it in soffset.
4261   SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
4262   B.buildInstr(AMDGPU::S_MOV_B32)
4263     .addDef(SOffset)
4264     .addImm(ImmOffset);
4265   ImmOffset = 0;
4266 }
4267 
4268 bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
4269   MachineOperand &Root, Register &VAddr, Register &RSrcReg,
4270   Register &SOffset, int64_t &Offset) const {
4271   // FIXME: Predicates should stop this from reaching here.
4272   // addr64 bit was removed for volcanic islands.
4273   if (!STI.hasAddr64() || STI.useFlatForGlobal())
4274     return false;
4275 
4276   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4277   if (!shouldUseAddr64(AddrData))
4278     return false;
4279 
4280   Register N0 = AddrData.N0;
4281   Register N2 = AddrData.N2;
4282   Register N3 = AddrData.N3;
4283   Offset = AddrData.Offset;
4284 
4285   // Base pointer for the SRD.
4286   Register SRDPtr;
4287 
4288   if (N2) {
4289     if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4290       assert(N3);
4291       if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4292         // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
4293         // addr64, and construct the default resource from a 0 address.
4294         VAddr = N0;
4295       } else {
4296         SRDPtr = N3;
4297         VAddr = N2;
4298       }
4299     } else {
4300       // N2 is not divergent.
4301       SRDPtr = N2;
4302       VAddr = N3;
4303     }
4304   } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4305     // Use the default null pointer in the resource
4306     VAddr = N0;
4307   } else {
4308     // N0 -> offset, or
4309     // (N0 + C1) -> offset
4310     SRDPtr = N0;
4311   }
4312 
4313   MachineIRBuilder B(*Root.getParent());
4314   RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
4315   splitIllegalMUBUFOffset(B, SOffset, Offset);
4316   return true;
4317 }
4318 
4319 bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
4320   MachineOperand &Root, Register &RSrcReg, Register &SOffset,
4321   int64_t &Offset) const {
4322 
4323   // FIXME: Pattern should not reach here.
4324   if (STI.useFlatForGlobal())
4325     return false;
4326 
4327   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4328   if (shouldUseAddr64(AddrData))
4329     return false;
4330 
4331   // N0 -> offset, or
4332   // (N0 + C1) -> offset
4333   Register SRDPtr = AddrData.N0;
4334   Offset = AddrData.Offset;
4335 
4336   // TODO: Look through extensions for 32-bit soffset.
4337   MachineIRBuilder B(*Root.getParent());
4338 
4339   RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
4340   splitIllegalMUBUFOffset(B, SOffset, Offset);
4341   return true;
4342 }
4343 
4344 InstructionSelector::ComplexRendererFns
4345 AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
4346   Register VAddr;
4347   Register RSrcReg;
4348   Register SOffset;
4349   int64_t Offset = 0;
4350 
4351   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
4352     return {};
4353 
4354   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
4355   // pattern.
4356   return {{
4357       [=](MachineInstrBuilder &MIB) {  // rsrc
4358         MIB.addReg(RSrcReg);
4359       },
4360       [=](MachineInstrBuilder &MIB) { // vaddr
4361         MIB.addReg(VAddr);
4362       },
4363       [=](MachineInstrBuilder &MIB) { // soffset
4364         if (SOffset)
4365           MIB.addReg(SOffset);
4366         else
4367           MIB.addImm(0);
4368       },
4369       [=](MachineInstrBuilder &MIB) { // offset
4370         MIB.addImm(Offset);
4371       },
4372       addZeroImm, //  cpol
4373       addZeroImm, //  tfe
4374       addZeroImm  //  swz
4375     }};
4376 }
4377 
4378 InstructionSelector::ComplexRendererFns
4379 AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
4380   Register RSrcReg;
4381   Register SOffset;
4382   int64_t Offset = 0;
4383 
4384   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4385     return {};
4386 
4387   return {{
4388       [=](MachineInstrBuilder &MIB) {  // rsrc
4389         MIB.addReg(RSrcReg);
4390       },
4391       [=](MachineInstrBuilder &MIB) { // soffset
4392         if (SOffset)
4393           MIB.addReg(SOffset);
4394         else
4395           MIB.addImm(0);
4396       },
4397       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4398       addZeroImm, //  cpol
4399       addZeroImm, //  tfe
4400       addZeroImm, //  swz
4401     }};
4402 }
4403 
4404 InstructionSelector::ComplexRendererFns
4405 AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const {
4406   Register VAddr;
4407   Register RSrcReg;
4408   Register SOffset;
4409   int64_t Offset = 0;
4410 
4411   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
4412     return {};
4413 
4414   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
4415   // pattern.
4416   return {{
4417       [=](MachineInstrBuilder &MIB) {  // rsrc
4418         MIB.addReg(RSrcReg);
4419       },
4420       [=](MachineInstrBuilder &MIB) { // vaddr
4421         MIB.addReg(VAddr);
4422       },
4423       [=](MachineInstrBuilder &MIB) { // soffset
4424         if (SOffset)
4425           MIB.addReg(SOffset);
4426         else
4427           MIB.addImm(0);
4428       },
4429       [=](MachineInstrBuilder &MIB) { // offset
4430         MIB.addImm(Offset);
4431       },
4432       [=](MachineInstrBuilder &MIB) {
4433         MIB.addImm(AMDGPU::CPol::GLC); // cpol
4434       }
4435     }};
4436 }
4437 
4438 InstructionSelector::ComplexRendererFns
4439 AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
4440   Register RSrcReg;
4441   Register SOffset;
4442   int64_t Offset = 0;
4443 
4444   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4445     return {};
4446 
4447   return {{
4448       [=](MachineInstrBuilder &MIB) {  // rsrc
4449         MIB.addReg(RSrcReg);
4450       },
4451       [=](MachineInstrBuilder &MIB) { // soffset
4452         if (SOffset)
4453           MIB.addReg(SOffset);
4454         else
4455           MIB.addImm(0);
4456       },
4457       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4458       [=](MachineInstrBuilder &MIB) { MIB.addImm(AMDGPU::CPol::GLC); } // cpol
4459     }};
4460 }
4461 
4462 /// Get an immediate that must be 32-bits, and treated as zero extended.
4463 static Optional<uint64_t> getConstantZext32Val(Register Reg,
4464                                                const MachineRegisterInfo &MRI) {
4465   // getIConstantVRegVal sexts any values, so see if that matters.
4466   Optional<int64_t> OffsetVal = getIConstantVRegSExtVal(Reg, MRI);
4467   if (!OffsetVal || !isInt<32>(*OffsetVal))
4468     return None;
4469   return Lo_32(*OffsetVal);
4470 }
4471 
4472 InstructionSelector::ComplexRendererFns
4473 AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
4474   Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
4475   if (!OffsetVal)
4476     return {};
4477 
4478   Optional<int64_t> EncodedImm =
4479       AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
4480   if (!EncodedImm)
4481     return {};
4482 
4483   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
4484 }
4485 
4486 InstructionSelector::ComplexRendererFns
4487 AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
4488   assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
4489 
4490   Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
4491   if (!OffsetVal)
4492     return {};
4493 
4494   Optional<int64_t> EncodedImm
4495     = AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
4496   if (!EncodedImm)
4497     return {};
4498 
4499   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
4500 }
4501 
4502 void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
4503                                                  const MachineInstr &MI,
4504                                                  int OpIdx) const {
4505   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4506          "Expected G_CONSTANT");
4507   MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
4508 }
4509 
4510 void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
4511                                                 const MachineInstr &MI,
4512                                                 int OpIdx) const {
4513   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4514          "Expected G_CONSTANT");
4515   MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
4516 }
4517 
4518 void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
4519                                                  const MachineInstr &MI,
4520                                                  int OpIdx) const {
4521   assert(OpIdx == -1);
4522 
4523   const MachineOperand &Op = MI.getOperand(1);
4524   if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
4525     MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
4526   else {
4527     assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
4528     MIB.addImm(Op.getCImm()->getSExtValue());
4529   }
4530 }
4531 
4532 void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
4533                                                 const MachineInstr &MI,
4534                                                 int OpIdx) const {
4535   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4536          "Expected G_CONSTANT");
4537   MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
4538 }
4539 
4540 /// This only really exists to satisfy DAG type checking machinery, so is a
4541 /// no-op here.
4542 void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
4543                                                 const MachineInstr &MI,
4544                                                 int OpIdx) const {
4545   MIB.addImm(MI.getOperand(OpIdx).getImm());
4546 }
4547 
4548 void AMDGPUInstructionSelector::renderExtractCPol(MachineInstrBuilder &MIB,
4549                                                   const MachineInstr &MI,
4550                                                   int OpIdx) const {
4551   assert(OpIdx >= 0 && "expected to match an immediate operand");
4552   MIB.addImm(MI.getOperand(OpIdx).getImm() & AMDGPU::CPol::ALL);
4553 }
4554 
4555 void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
4556                                                  const MachineInstr &MI,
4557                                                  int OpIdx) const {
4558   assert(OpIdx >= 0 && "expected to match an immediate operand");
4559   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1);
4560 }
4561 
4562 void AMDGPUInstructionSelector::renderSetGLC(MachineInstrBuilder &MIB,
4563                                              const MachineInstr &MI,
4564                                              int OpIdx) const {
4565   assert(OpIdx >= 0 && "expected to match an immediate operand");
4566   MIB.addImm(MI.getOperand(OpIdx).getImm() | AMDGPU::CPol::GLC);
4567 }
4568 
4569 void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
4570                                                  const MachineInstr &MI,
4571                                                  int OpIdx) const {
4572   MIB.addFrameIndex((MI.getOperand(1).getIndex()));
4573 }
4574 
4575 bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
4576   return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
4577 }
4578 
4579 bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
4580   return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
4581 }
4582 
4583 bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
4584   return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
4585 }
4586 
4587 bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
4588   return TII.isInlineConstant(Imm);
4589 }
4590