1 //===- SIInstrInfo.cpp - SI Instruction Information  ----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// SI Implementation of TargetInstrInfo.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "SIInstrInfo.h"
15 #include "AMDGPU.h"
16 #include "AMDGPUInstrInfo.h"
17 #include "GCNHazardRecognizer.h"
18 #include "GCNSubtarget.h"
19 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
20 #include "SIMachineFunctionInfo.h"
21 #include "llvm/Analysis/ValueTracking.h"
22 #include "llvm/CodeGen/LiveVariables.h"
23 #include "llvm/CodeGen/MachineDominators.h"
24 #include "llvm/CodeGen/RegisterScavenging.h"
25 #include "llvm/CodeGen/ScheduleDAG.h"
26 #include "llvm/IR/DiagnosticInfo.h"
27 #include "llvm/IR/IntrinsicsAMDGPU.h"
28 #include "llvm/Support/CommandLine.h"
29 #include "llvm/Target/TargetMachine.h"
30 
31 using namespace llvm;
32 
33 #define DEBUG_TYPE "si-instr-info"
34 
35 #define GET_INSTRINFO_CTOR_DTOR
36 #include "AMDGPUGenInstrInfo.inc"
37 
38 namespace llvm {
39 
40 class AAResults;
41 
42 namespace AMDGPU {
43 #define GET_D16ImageDimIntrinsics_IMPL
44 #define GET_ImageDimIntrinsicTable_IMPL
45 #define GET_RsrcIntrinsics_IMPL
46 #include "AMDGPUGenSearchableTables.inc"
47 }
48 }
49 
50 
51 // Must be at least 4 to be able to branch over minimum unconditional branch
52 // code. This is only for making it possible to write reasonably small tests for
53 // long branches.
54 static cl::opt<unsigned>
55 BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16),
56                  cl::desc("Restrict range of branch instructions (DEBUG)"));
57 
58 static cl::opt<bool> Fix16BitCopies(
59   "amdgpu-fix-16-bit-physreg-copies",
60   cl::desc("Fix copies between 32 and 16 bit registers by extending to 32 bit"),
61   cl::init(true),
62   cl::ReallyHidden);
63 
64 SIInstrInfo::SIInstrInfo(const GCNSubtarget &ST)
65   : AMDGPUGenInstrInfo(AMDGPU::ADJCALLSTACKUP, AMDGPU::ADJCALLSTACKDOWN),
66     RI(ST), ST(ST) {
67   SchedModel.init(&ST);
68 }
69 
70 //===----------------------------------------------------------------------===//
71 // TargetInstrInfo callbacks
72 //===----------------------------------------------------------------------===//
73 
74 static unsigned getNumOperandsNoGlue(SDNode *Node) {
75   unsigned N = Node->getNumOperands();
76   while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
77     --N;
78   return N;
79 }
80 
81 /// Returns true if both nodes have the same value for the given
82 ///        operand \p Op, or if both nodes do not have this operand.
83 static bool nodesHaveSameOperandValue(SDNode *N0, SDNode* N1, unsigned OpName) {
84   unsigned Opc0 = N0->getMachineOpcode();
85   unsigned Opc1 = N1->getMachineOpcode();
86 
87   int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
88   int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);
89 
90   if (Op0Idx == -1 && Op1Idx == -1)
91     return true;
92 
93 
94   if ((Op0Idx == -1 && Op1Idx != -1) ||
95       (Op1Idx == -1 && Op0Idx != -1))
96     return false;
97 
98   // getNamedOperandIdx returns the index for the MachineInstr's operands,
99   // which includes the result as the first operand. We are indexing into the
100   // MachineSDNode's operands, so we need to skip the result operand to get
101   // the real index.
102   --Op0Idx;
103   --Op1Idx;
104 
105   return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
106 }
107 
108 bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
109                                                     AAResults *AA) const {
110   // TODO: The generic check fails for VALU instructions that should be
111   // rematerializable due to implicit reads of exec. We really want all of the
112   // generic logic for this except for this.
113   switch (MI.getOpcode()) {
114   case AMDGPU::V_MOV_B32_e32:
115   case AMDGPU::V_MOV_B32_e64:
116   case AMDGPU::V_MOV_B64_PSEUDO:
117   case AMDGPU::V_ACCVGPR_READ_B32_e64:
118   case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
119     // No non-standard implicit operands.
120     assert(MI.getDesc().getNumOperands() == 2);
121     assert(MI.getDesc().getNumImplicitDefs() == 0);
122     assert(MI.getDesc().getNumImplicitUses() == 1);
123     return MI.getNumOperands() == 3;
124   default:
125     return false;
126   }
127 }
128 
129 bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
130                                           int64_t &Offset0,
131                                           int64_t &Offset1) const {
132   if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
133     return false;
134 
135   unsigned Opc0 = Load0->getMachineOpcode();
136   unsigned Opc1 = Load1->getMachineOpcode();
137 
138   // Make sure both are actually loads.
139   if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
140     return false;
141 
142   if (isDS(Opc0) && isDS(Opc1)) {
143 
144     // FIXME: Handle this case:
145     if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1))
146       return false;
147 
148     // Check base reg.
149     if (Load0->getOperand(0) != Load1->getOperand(0))
150       return false;
151 
152     // Skip read2 / write2 variants for simplicity.
153     // TODO: We should report true if the used offsets are adjacent (excluded
154     // st64 versions).
155     int Offset0Idx = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
156     int Offset1Idx = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);
157     if (Offset0Idx == -1 || Offset1Idx == -1)
158       return false;
159 
160     // XXX - be careful of datalesss loads
161     // getNamedOperandIdx returns the index for MachineInstrs.  Since they
162     // include the output in the operand list, but SDNodes don't, we need to
163     // subtract the index by one.
164     Offset0Idx -= get(Opc0).NumDefs;
165     Offset1Idx -= get(Opc1).NumDefs;
166     Offset0 = cast<ConstantSDNode>(Load0->getOperand(Offset0Idx))->getZExtValue();
167     Offset1 = cast<ConstantSDNode>(Load1->getOperand(Offset1Idx))->getZExtValue();
168     return true;
169   }
170 
171   if (isSMRD(Opc0) && isSMRD(Opc1)) {
172     // Skip time and cache invalidation instructions.
173     if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::sbase) == -1 ||
174         AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::sbase) == -1)
175       return false;
176 
177     assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1));
178 
179     // Check base reg.
180     if (Load0->getOperand(0) != Load1->getOperand(0))
181       return false;
182 
183     const ConstantSDNode *Load0Offset =
184         dyn_cast<ConstantSDNode>(Load0->getOperand(1));
185     const ConstantSDNode *Load1Offset =
186         dyn_cast<ConstantSDNode>(Load1->getOperand(1));
187 
188     if (!Load0Offset || !Load1Offset)
189       return false;
190 
191     Offset0 = Load0Offset->getZExtValue();
192     Offset1 = Load1Offset->getZExtValue();
193     return true;
194   }
195 
196   // MUBUF and MTBUF can access the same addresses.
197   if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) {
198 
199     // MUBUF and MTBUF have vaddr at different indices.
200     if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||
201         !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||
202         !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))
203       return false;
204 
205     int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
206     int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);
207 
208     if (OffIdx0 == -1 || OffIdx1 == -1)
209       return false;
210 
211     // getNamedOperandIdx returns the index for MachineInstrs.  Since they
212     // include the output in the operand list, but SDNodes don't, we need to
213     // subtract the index by one.
214     OffIdx0 -= get(Opc0).NumDefs;
215     OffIdx1 -= get(Opc1).NumDefs;
216 
217     SDValue Off0 = Load0->getOperand(OffIdx0);
218     SDValue Off1 = Load1->getOperand(OffIdx1);
219 
220     // The offset might be a FrameIndexSDNode.
221     if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
222       return false;
223 
224     Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
225     Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
226     return true;
227   }
228 
229   return false;
230 }
231 
232 static bool isStride64(unsigned Opc) {
233   switch (Opc) {
234   case AMDGPU::DS_READ2ST64_B32:
235   case AMDGPU::DS_READ2ST64_B64:
236   case AMDGPU::DS_WRITE2ST64_B32:
237   case AMDGPU::DS_WRITE2ST64_B64:
238     return true;
239   default:
240     return false;
241   }
242 }
243 
244 bool SIInstrInfo::getMemOperandsWithOffsetWidth(
245     const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
246     int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
247     const TargetRegisterInfo *TRI) const {
248   if (!LdSt.mayLoadOrStore())
249     return false;
250 
251   unsigned Opc = LdSt.getOpcode();
252   OffsetIsScalable = false;
253   const MachineOperand *BaseOp, *OffsetOp;
254   int DataOpIdx;
255 
256   if (isDS(LdSt)) {
257     BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr);
258     OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset);
259     if (OffsetOp) {
260       // Normal, single offset LDS instruction.
261       if (!BaseOp) {
262         // DS_CONSUME/DS_APPEND use M0 for the base address.
263         // TODO: find the implicit use operand for M0 and use that as BaseOp?
264         return false;
265       }
266       BaseOps.push_back(BaseOp);
267       Offset = OffsetOp->getImm();
268       // Get appropriate operand, and compute width accordingly.
269       DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
270       if (DataOpIdx == -1)
271         DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
272       Width = getOpSize(LdSt, DataOpIdx);
273     } else {
274       // The 2 offset instructions use offset0 and offset1 instead. We can treat
275       // these as a load with a single offset if the 2 offsets are consecutive.
276       // We will use this for some partially aligned loads.
277       const MachineOperand *Offset0Op =
278           getNamedOperand(LdSt, AMDGPU::OpName::offset0);
279       const MachineOperand *Offset1Op =
280           getNamedOperand(LdSt, AMDGPU::OpName::offset1);
281 
282       unsigned Offset0 = Offset0Op->getImm();
283       unsigned Offset1 = Offset1Op->getImm();
284       if (Offset0 + 1 != Offset1)
285         return false;
286 
287       // Each of these offsets is in element sized units, so we need to convert
288       // to bytes of the individual reads.
289 
290       unsigned EltSize;
291       if (LdSt.mayLoad())
292         EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, 0)) / 16;
293       else {
294         assert(LdSt.mayStore());
295         int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
296         EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, Data0Idx)) / 8;
297       }
298 
299       if (isStride64(Opc))
300         EltSize *= 64;
301 
302       BaseOps.push_back(BaseOp);
303       Offset = EltSize * Offset0;
304       // Get appropriate operand(s), and compute width accordingly.
305       DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
306       if (DataOpIdx == -1) {
307         DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
308         Width = getOpSize(LdSt, DataOpIdx);
309         DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data1);
310         Width += getOpSize(LdSt, DataOpIdx);
311       } else {
312         Width = getOpSize(LdSt, DataOpIdx);
313       }
314     }
315     return true;
316   }
317 
318   if (isMUBUF(LdSt) || isMTBUF(LdSt)) {
319     const MachineOperand *SOffset = getNamedOperand(LdSt, AMDGPU::OpName::soffset);
320     if (SOffset && SOffset->isReg()) {
321       // We can only handle this if it's a stack access, as any other resource
322       // would require reporting multiple base registers.
323       const MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
324       if (AddrReg && !AddrReg->isFI())
325         return false;
326 
327       const MachineOperand *RSrc = getNamedOperand(LdSt, AMDGPU::OpName::srsrc);
328       const SIMachineFunctionInfo *MFI
329         = LdSt.getParent()->getParent()->getInfo<SIMachineFunctionInfo>();
330       if (RSrc->getReg() != MFI->getScratchRSrcReg())
331         return false;
332 
333       const MachineOperand *OffsetImm =
334         getNamedOperand(LdSt, AMDGPU::OpName::offset);
335       BaseOps.push_back(RSrc);
336       BaseOps.push_back(SOffset);
337       Offset = OffsetImm->getImm();
338     } else {
339       BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::srsrc);
340       if (!BaseOp) // e.g. BUFFER_WBINVL1_VOL
341         return false;
342       BaseOps.push_back(BaseOp);
343 
344       BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
345       if (BaseOp)
346         BaseOps.push_back(BaseOp);
347 
348       const MachineOperand *OffsetImm =
349           getNamedOperand(LdSt, AMDGPU::OpName::offset);
350       Offset = OffsetImm->getImm();
351       if (SOffset) // soffset can be an inline immediate.
352         Offset += SOffset->getImm();
353     }
354     // Get appropriate operand, and compute width accordingly.
355     DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
356     if (DataOpIdx == -1)
357       DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
358     Width = getOpSize(LdSt, DataOpIdx);
359     return true;
360   }
361 
362   if (isMIMG(LdSt)) {
363     int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc);
364     BaseOps.push_back(&LdSt.getOperand(SRsrcIdx));
365     int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0);
366     if (VAddr0Idx >= 0) {
367       // GFX10 possible NSA encoding.
368       for (int I = VAddr0Idx; I < SRsrcIdx; ++I)
369         BaseOps.push_back(&LdSt.getOperand(I));
370     } else {
371       BaseOps.push_back(getNamedOperand(LdSt, AMDGPU::OpName::vaddr));
372     }
373     Offset = 0;
374     // Get appropriate operand, and compute width accordingly.
375     DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
376     Width = getOpSize(LdSt, DataOpIdx);
377     return true;
378   }
379 
380   if (isSMRD(LdSt)) {
381     BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::sbase);
382     if (!BaseOp) // e.g. S_MEMTIME
383       return false;
384     BaseOps.push_back(BaseOp);
385     OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset);
386     Offset = OffsetOp ? OffsetOp->getImm() : 0;
387     // Get appropriate operand, and compute width accordingly.
388     DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::sdst);
389     Width = getOpSize(LdSt, DataOpIdx);
390     return true;
391   }
392 
393   if (isFLAT(LdSt)) {
394     // Instructions have either vaddr or saddr or both or none.
395     BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
396     if (BaseOp)
397       BaseOps.push_back(BaseOp);
398     BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::saddr);
399     if (BaseOp)
400       BaseOps.push_back(BaseOp);
401     Offset = getNamedOperand(LdSt, AMDGPU::OpName::offset)->getImm();
402     // Get appropriate operand, and compute width accordingly.
403     DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
404     if (DataOpIdx == -1)
405       DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
406     Width = getOpSize(LdSt, DataOpIdx);
407     return true;
408   }
409 
410   return false;
411 }
412 
413 static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
414                                   ArrayRef<const MachineOperand *> BaseOps1,
415                                   const MachineInstr &MI2,
416                                   ArrayRef<const MachineOperand *> BaseOps2) {
417   // Only examine the first "base" operand of each instruction, on the
418   // assumption that it represents the real base address of the memory access.
419   // Other operands are typically offsets or indices from this base address.
420   if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))
421     return true;
422 
423   if (!MI1.hasOneMemOperand() || !MI2.hasOneMemOperand())
424     return false;
425 
426   auto MO1 = *MI1.memoperands_begin();
427   auto MO2 = *MI2.memoperands_begin();
428   if (MO1->getAddrSpace() != MO2->getAddrSpace())
429     return false;
430 
431   auto Base1 = MO1->getValue();
432   auto Base2 = MO2->getValue();
433   if (!Base1 || !Base2)
434     return false;
435   Base1 = getUnderlyingObject(Base1);
436   Base2 = getUnderlyingObject(Base2);
437 
438   if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2))
439     return false;
440 
441   return Base1 == Base2;
442 }
443 
444 bool SIInstrInfo::shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
445                                       ArrayRef<const MachineOperand *> BaseOps2,
446                                       unsigned NumLoads,
447                                       unsigned NumBytes) const {
448   // If the mem ops (to be clustered) do not have the same base ptr, then they
449   // should not be clustered
450   if (!BaseOps1.empty() && !BaseOps2.empty()) {
451     const MachineInstr &FirstLdSt = *BaseOps1.front()->getParent();
452     const MachineInstr &SecondLdSt = *BaseOps2.front()->getParent();
453     if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOps1, SecondLdSt, BaseOps2))
454       return false;
455   } else if (!BaseOps1.empty() || !BaseOps2.empty()) {
456     // If only one base op is empty, they do not have the same base ptr
457     return false;
458   }
459 
460   // In order to avoid regester pressure, on an average, the number of DWORDS
461   // loaded together by all clustered mem ops should not exceed 8. This is an
462   // empirical value based on certain observations and performance related
463   // experiments.
464   // The good thing about this heuristic is - it avoids clustering of too many
465   // sub-word loads, and also avoids clustering of wide loads. Below is the
466   // brief summary of how the heuristic behaves for various `LoadSize`.
467   // (1) 1 <= LoadSize <= 4: cluster at max 8 mem ops
468   // (2) 5 <= LoadSize <= 8: cluster at max 4 mem ops
469   // (3) 9 <= LoadSize <= 12: cluster at max 2 mem ops
470   // (4) 13 <= LoadSize <= 16: cluster at max 2 mem ops
471   // (5) LoadSize >= 17: do not cluster
472   const unsigned LoadSize = NumBytes / NumLoads;
473   const unsigned NumDWORDs = ((LoadSize + 3) / 4) * NumLoads;
474   return NumDWORDs <= 8;
475 }
476 
477 // FIXME: This behaves strangely. If, for example, you have 32 load + stores,
478 // the first 16 loads will be interleaved with the stores, and the next 16 will
479 // be clustered as expected. It should really split into 2 16 store batches.
480 //
481 // Loads are clustered until this returns false, rather than trying to schedule
482 // groups of stores. This also means we have to deal with saying different
483 // address space loads should be clustered, and ones which might cause bank
484 // conflicts.
485 //
486 // This might be deprecated so it might not be worth that much effort to fix.
487 bool SIInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1,
488                                           int64_t Offset0, int64_t Offset1,
489                                           unsigned NumLoads) const {
490   assert(Offset1 > Offset0 &&
491          "Second offset should be larger than first offset!");
492   // If we have less than 16 loads in a row, and the offsets are within 64
493   // bytes, then schedule together.
494 
495   // A cacheline is 64 bytes (for global memory).
496   return (NumLoads <= 16 && (Offset1 - Offset0) < 64);
497 }
498 
499 static void reportIllegalCopy(const SIInstrInfo *TII, MachineBasicBlock &MBB,
500                               MachineBasicBlock::iterator MI,
501                               const DebugLoc &DL, MCRegister DestReg,
502                               MCRegister SrcReg, bool KillSrc,
503                               const char *Msg = "illegal SGPR to VGPR copy") {
504   MachineFunction *MF = MBB.getParent();
505   DiagnosticInfoUnsupported IllegalCopy(MF->getFunction(), Msg, DL, DS_Error);
506   LLVMContext &C = MF->getFunction().getContext();
507   C.diagnose(IllegalCopy);
508 
509   BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_ILLEGAL_COPY), DestReg)
510     .addReg(SrcReg, getKillRegState(KillSrc));
511 }
512 
513 /// Handle copying from SGPR to AGPR, or from AGPR to AGPR. It is not possible
514 /// to directly copy, so an intermediate VGPR needs to be used.
515 static void indirectCopyToAGPR(const SIInstrInfo &TII,
516                                MachineBasicBlock &MBB,
517                                MachineBasicBlock::iterator MI,
518                                const DebugLoc &DL, MCRegister DestReg,
519                                MCRegister SrcReg, bool KillSrc,
520                                RegScavenger &RS,
521                                Register ImpDefSuperReg = Register(),
522                                Register ImpUseSuperReg = Register()) {
523   const SIRegisterInfo &RI = TII.getRegisterInfo();
524 
525   assert(AMDGPU::SReg_32RegClass.contains(SrcReg) ||
526          AMDGPU::AGPR_32RegClass.contains(SrcReg));
527 
528   // First try to find defining accvgpr_write to avoid temporary registers.
529   for (auto Def = MI, E = MBB.begin(); Def != E; ) {
530     --Def;
531     if (!Def->definesRegister(SrcReg, &RI))
532       continue;
533     if (Def->getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32_e64)
534       break;
535 
536     MachineOperand &DefOp = Def->getOperand(1);
537     assert(DefOp.isReg() || DefOp.isImm());
538 
539     if (DefOp.isReg()) {
540       // Check that register source operand if not clobbered before MI.
541       // Immediate operands are always safe to propagate.
542       bool SafeToPropagate = true;
543       for (auto I = Def; I != MI && SafeToPropagate; ++I)
544         if (I->modifiesRegister(DefOp.getReg(), &RI))
545           SafeToPropagate = false;
546 
547       if (!SafeToPropagate)
548         break;
549 
550       DefOp.setIsKill(false);
551     }
552 
553     MachineInstrBuilder Builder =
554       BuildMI(MBB, MI, DL, TII.get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DestReg)
555       .add(DefOp);
556     if (ImpDefSuperReg)
557       Builder.addReg(ImpDefSuperReg, RegState::Define | RegState::Implicit);
558 
559     if (ImpUseSuperReg) {
560       Builder.addReg(ImpUseSuperReg,
561                      getKillRegState(KillSrc) | RegState::Implicit);
562     }
563 
564     return;
565   }
566 
567   RS.enterBasicBlock(MBB);
568   RS.forward(MI);
569 
570   // Ideally we want to have three registers for a long reg_sequence copy
571   // to hide 2 waitstates between v_mov_b32 and accvgpr_write.
572   unsigned MaxVGPRs = RI.getRegPressureLimit(&AMDGPU::VGPR_32RegClass,
573                                              *MBB.getParent());
574 
575   // Registers in the sequence are allocated contiguously so we can just
576   // use register number to pick one of three round-robin temps.
577   unsigned RegNo = DestReg % 3;
578   Register Tmp = RS.scavengeRegister(&AMDGPU::VGPR_32RegClass, 0);
579   if (!Tmp)
580     report_fatal_error("Cannot scavenge VGPR to copy to AGPR");
581   RS.setRegUsed(Tmp);
582 
583   if (!TII.getSubtarget().hasGFX90AInsts()) {
584     // Only loop through if there are any free registers left, otherwise
585     // scavenger may report a fatal error without emergency spill slot
586     // or spill with the slot.
587     while (RegNo-- && RS.FindUnusedReg(&AMDGPU::VGPR_32RegClass)) {
588       Register Tmp2 = RS.scavengeRegister(&AMDGPU::VGPR_32RegClass, 0);
589       if (!Tmp2 || RI.getHWRegIndex(Tmp2) >= MaxVGPRs)
590         break;
591       Tmp = Tmp2;
592       RS.setRegUsed(Tmp);
593     }
594   }
595 
596   // Insert copy to temporary VGPR.
597   unsigned TmpCopyOp = AMDGPU::V_MOV_B32_e32;
598   if (AMDGPU::AGPR_32RegClass.contains(SrcReg)) {
599     TmpCopyOp = AMDGPU::V_ACCVGPR_READ_B32_e64;
600   } else {
601     assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
602   }
603 
604   MachineInstrBuilder UseBuilder = BuildMI(MBB, MI, DL, TII.get(TmpCopyOp), Tmp)
605     .addReg(SrcReg, getKillRegState(KillSrc));
606   if (ImpUseSuperReg) {
607     UseBuilder.addReg(ImpUseSuperReg,
608                       getKillRegState(KillSrc) | RegState::Implicit);
609   }
610 
611   MachineInstrBuilder DefBuilder
612     = BuildMI(MBB, MI, DL, TII.get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DestReg)
613     .addReg(Tmp, RegState::Kill);
614 
615   if (ImpDefSuperReg)
616     DefBuilder.addReg(ImpDefSuperReg, RegState::Define | RegState::Implicit);
617 }
618 
619 static void expandSGPRCopy(const SIInstrInfo &TII, MachineBasicBlock &MBB,
620                            MachineBasicBlock::iterator MI, const DebugLoc &DL,
621                            MCRegister DestReg, MCRegister SrcReg, bool KillSrc,
622                            const TargetRegisterClass *RC, bool Forward) {
623   const SIRegisterInfo &RI = TII.getRegisterInfo();
624   ArrayRef<int16_t> BaseIndices = RI.getRegSplitParts(RC, 4);
625   MachineBasicBlock::iterator I = MI;
626   MachineInstr *FirstMI = nullptr, *LastMI = nullptr;
627 
628   for (unsigned Idx = 0; Idx < BaseIndices.size(); ++Idx) {
629     int16_t SubIdx = BaseIndices[Idx];
630     Register Reg = RI.getSubReg(DestReg, SubIdx);
631     unsigned Opcode = AMDGPU::S_MOV_B32;
632 
633     // Is SGPR aligned? If so try to combine with next.
634     Register Src = RI.getSubReg(SrcReg, SubIdx);
635     bool AlignedDest = ((Reg - AMDGPU::SGPR0) % 2) == 0;
636     bool AlignedSrc = ((Src - AMDGPU::SGPR0) % 2) == 0;
637     if (AlignedDest && AlignedSrc && (Idx + 1 < BaseIndices.size())) {
638       // Can use SGPR64 copy
639       unsigned Channel = RI.getChannelFromSubReg(SubIdx);
640       SubIdx = RI.getSubRegFromChannel(Channel, 2);
641       Opcode = AMDGPU::S_MOV_B64;
642       Idx++;
643     }
644 
645     LastMI = BuildMI(MBB, I, DL, TII.get(Opcode), RI.getSubReg(DestReg, SubIdx))
646                  .addReg(RI.getSubReg(SrcReg, SubIdx))
647                  .addReg(SrcReg, RegState::Implicit);
648 
649     if (!FirstMI)
650       FirstMI = LastMI;
651 
652     if (!Forward)
653       I--;
654   }
655 
656   assert(FirstMI && LastMI);
657   if (!Forward)
658     std::swap(FirstMI, LastMI);
659 
660   FirstMI->addOperand(
661       MachineOperand::CreateReg(DestReg, true /*IsDef*/, true /*IsImp*/));
662 
663   if (KillSrc)
664     LastMI->addRegisterKilled(SrcReg, &RI);
665 }
666 
667 void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
668                               MachineBasicBlock::iterator MI,
669                               const DebugLoc &DL, MCRegister DestReg,
670                               MCRegister SrcReg, bool KillSrc) const {
671   const TargetRegisterClass *RC = RI.getPhysRegClass(DestReg);
672 
673   // FIXME: This is hack to resolve copies between 16 bit and 32 bit
674   // registers until all patterns are fixed.
675   if (Fix16BitCopies &&
676       ((RI.getRegSizeInBits(*RC) == 16) ^
677        (RI.getRegSizeInBits(*RI.getPhysRegClass(SrcReg)) == 16))) {
678     MCRegister &RegToFix = (RI.getRegSizeInBits(*RC) == 16) ? DestReg : SrcReg;
679     MCRegister Super = RI.get32BitRegister(RegToFix);
680     assert(RI.getSubReg(Super, AMDGPU::lo16) == RegToFix);
681     RegToFix = Super;
682 
683     if (DestReg == SrcReg) {
684       // Insert empty bundle since ExpandPostRA expects an instruction here.
685       BuildMI(MBB, MI, DL, get(AMDGPU::BUNDLE));
686       return;
687     }
688 
689     RC = RI.getPhysRegClass(DestReg);
690   }
691 
692   if (RC == &AMDGPU::VGPR_32RegClass) {
693     assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
694            AMDGPU::SReg_32RegClass.contains(SrcReg) ||
695            AMDGPU::AGPR_32RegClass.contains(SrcReg));
696     unsigned Opc = AMDGPU::AGPR_32RegClass.contains(SrcReg) ?
697                      AMDGPU::V_ACCVGPR_READ_B32_e64 : AMDGPU::V_MOV_B32_e32;
698     BuildMI(MBB, MI, DL, get(Opc), DestReg)
699       .addReg(SrcReg, getKillRegState(KillSrc));
700     return;
701   }
702 
703   if (RC == &AMDGPU::SReg_32_XM0RegClass ||
704       RC == &AMDGPU::SReg_32RegClass) {
705     if (SrcReg == AMDGPU::SCC) {
706       BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B32), DestReg)
707           .addImm(1)
708           .addImm(0);
709       return;
710     }
711 
712     if (DestReg == AMDGPU::VCC_LO) {
713       if (AMDGPU::SReg_32RegClass.contains(SrcReg)) {
714         BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), AMDGPU::VCC_LO)
715           .addReg(SrcReg, getKillRegState(KillSrc));
716       } else {
717         // FIXME: Hack until VReg_1 removed.
718         assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
719         BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
720           .addImm(0)
721           .addReg(SrcReg, getKillRegState(KillSrc));
722       }
723 
724       return;
725     }
726 
727     if (!AMDGPU::SReg_32RegClass.contains(SrcReg)) {
728       reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
729       return;
730     }
731 
732     BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
733             .addReg(SrcReg, getKillRegState(KillSrc));
734     return;
735   }
736 
737   if (RC == &AMDGPU::SReg_64RegClass) {
738     if (SrcReg == AMDGPU::SCC) {
739       BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B64), DestReg)
740           .addImm(1)
741           .addImm(0);
742       return;
743     }
744 
745     if (DestReg == AMDGPU::VCC) {
746       if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
747         BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
748           .addReg(SrcReg, getKillRegState(KillSrc));
749       } else {
750         // FIXME: Hack until VReg_1 removed.
751         assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
752         BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
753           .addImm(0)
754           .addReg(SrcReg, getKillRegState(KillSrc));
755       }
756 
757       return;
758     }
759 
760     if (!AMDGPU::SReg_64RegClass.contains(SrcReg)) {
761       reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
762       return;
763     }
764 
765     BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
766             .addReg(SrcReg, getKillRegState(KillSrc));
767     return;
768   }
769 
770   if (DestReg == AMDGPU::SCC) {
771     // Copying 64-bit or 32-bit sources to SCC barely makes sense,
772     // but SelectionDAG emits such copies for i1 sources.
773     if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
774       // This copy can only be produced by patterns
775       // with explicit SCC, which are known to be enabled
776       // only for subtargets with S_CMP_LG_U64 present.
777       assert(ST.hasScalarCompareEq64());
778       BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U64))
779           .addReg(SrcReg, getKillRegState(KillSrc))
780           .addImm(0);
781     } else {
782       assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
783       BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32))
784           .addReg(SrcReg, getKillRegState(KillSrc))
785           .addImm(0);
786     }
787 
788     return;
789   }
790 
791   if (RC == &AMDGPU::AGPR_32RegClass) {
792     if (AMDGPU::VGPR_32RegClass.contains(SrcReg)) {
793       BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DestReg)
794         .addReg(SrcReg, getKillRegState(KillSrc));
795       return;
796     }
797 
798     if (AMDGPU::AGPR_32RegClass.contains(SrcReg) && ST.hasGFX90AInsts()) {
799       BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_MOV_B32), DestReg)
800         .addReg(SrcReg, getKillRegState(KillSrc));
801       return;
802     }
803 
804     // FIXME: Pass should maintain scavenger to avoid scan through the block on
805     // every AGPR spill.
806     RegScavenger RS;
807     indirectCopyToAGPR(*this, MBB, MI, DL, DestReg, SrcReg, KillSrc, RS);
808     return;
809   }
810 
811   const unsigned Size = RI.getRegSizeInBits(*RC);
812   if (Size == 16) {
813     assert(AMDGPU::VGPR_LO16RegClass.contains(SrcReg) ||
814            AMDGPU::VGPR_HI16RegClass.contains(SrcReg) ||
815            AMDGPU::SReg_LO16RegClass.contains(SrcReg) ||
816            AMDGPU::AGPR_LO16RegClass.contains(SrcReg));
817 
818     bool IsSGPRDst = AMDGPU::SReg_LO16RegClass.contains(DestReg);
819     bool IsSGPRSrc = AMDGPU::SReg_LO16RegClass.contains(SrcReg);
820     bool IsAGPRDst = AMDGPU::AGPR_LO16RegClass.contains(DestReg);
821     bool IsAGPRSrc = AMDGPU::AGPR_LO16RegClass.contains(SrcReg);
822     bool DstLow = AMDGPU::VGPR_LO16RegClass.contains(DestReg) ||
823                   AMDGPU::SReg_LO16RegClass.contains(DestReg) ||
824                   AMDGPU::AGPR_LO16RegClass.contains(DestReg);
825     bool SrcLow = AMDGPU::VGPR_LO16RegClass.contains(SrcReg) ||
826                   AMDGPU::SReg_LO16RegClass.contains(SrcReg) ||
827                   AMDGPU::AGPR_LO16RegClass.contains(SrcReg);
828     MCRegister NewDestReg = RI.get32BitRegister(DestReg);
829     MCRegister NewSrcReg = RI.get32BitRegister(SrcReg);
830 
831     if (IsSGPRDst) {
832       if (!IsSGPRSrc) {
833         reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
834         return;
835       }
836 
837       BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), NewDestReg)
838         .addReg(NewSrcReg, getKillRegState(KillSrc));
839       return;
840     }
841 
842     if (IsAGPRDst || IsAGPRSrc) {
843       if (!DstLow || !SrcLow) {
844         reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc,
845                           "Cannot use hi16 subreg with an AGPR!");
846       }
847 
848       copyPhysReg(MBB, MI, DL, NewDestReg, NewSrcReg, KillSrc);
849       return;
850     }
851 
852     if (IsSGPRSrc && !ST.hasSDWAScalar()) {
853       if (!DstLow || !SrcLow) {
854         reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc,
855                           "Cannot use hi16 subreg on VI!");
856       }
857 
858       BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), NewDestReg)
859         .addReg(NewSrcReg, getKillRegState(KillSrc));
860       return;
861     }
862 
863     auto MIB = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_sdwa), NewDestReg)
864       .addImm(0) // src0_modifiers
865       .addReg(NewSrcReg)
866       .addImm(0) // clamp
867       .addImm(DstLow ? AMDGPU::SDWA::SdwaSel::WORD_0
868                      : AMDGPU::SDWA::SdwaSel::WORD_1)
869       .addImm(AMDGPU::SDWA::DstUnused::UNUSED_PRESERVE)
870       .addImm(SrcLow ? AMDGPU::SDWA::SdwaSel::WORD_0
871                      : AMDGPU::SDWA::SdwaSel::WORD_1)
872       .addReg(NewDestReg, RegState::Implicit | RegState::Undef);
873     // First implicit operand is $exec.
874     MIB->tieOperands(0, MIB->getNumOperands() - 1);
875     return;
876   }
877 
878   if (RC->hasSuperClassEq(&AMDGPU::VReg_64RegClass) &&
879       !RI.hasAGPRs(RI.getPhysRegClass(SrcReg))) {
880     if (ST.hasPackedFP32Ops()) {
881       BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), DestReg)
882         .addImm(SISrcMods::OP_SEL_1)
883         .addReg(SrcReg)
884         .addImm(SISrcMods::OP_SEL_0 | SISrcMods::OP_SEL_1)
885         .addReg(SrcReg)
886         .addImm(0) // op_sel_lo
887         .addImm(0) // op_sel_hi
888         .addImm(0) // neg_lo
889         .addImm(0) // neg_hi
890         .addImm(0) // clamp
891         .addReg(SrcReg, getKillRegState(KillSrc) | RegState::Implicit);
892       return;
893     }
894   }
895 
896   const bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg);
897   if (RI.isSGPRClass(RC)) {
898     if (!RI.isSGPRClass(RI.getPhysRegClass(SrcReg))) {
899       reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
900       return;
901     }
902     expandSGPRCopy(*this, MBB, MI, DL, DestReg, SrcReg, KillSrc, RC, Forward);
903     return;
904   }
905 
906   unsigned EltSize = 4;
907   unsigned Opcode = AMDGPU::V_MOV_B32_e32;
908   if (RI.hasAGPRs(RC)) {
909     Opcode = (RI.hasVGPRs(RI.getPhysRegClass(SrcReg))) ?
910       AMDGPU::V_ACCVGPR_WRITE_B32_e64 : AMDGPU::INSTRUCTION_LIST_END;
911   } else if (RI.hasVGPRs(RC) && RI.hasAGPRs(RI.getPhysRegClass(SrcReg))) {
912     Opcode = AMDGPU::V_ACCVGPR_READ_B32_e64;
913   } else if ((Size % 64 == 0) && RI.hasVGPRs(RC) &&
914              !RI.hasAGPRs(RI.getPhysRegClass(SrcReg))) {
915     // TODO: In 96-bit case, could do a 64-bit mov and then a 32-bit mov.
916     if (ST.hasPackedFP32Ops()) {
917       Opcode = AMDGPU::V_PK_MOV_B32;
918       EltSize = 8;
919     }
920   }
921 
922   // For the cases where we need an intermediate instruction/temporary register
923   // (destination is an AGPR), we need a scavenger.
924   //
925   // FIXME: The pass should maintain this for us so we don't have to re-scan the
926   // whole block for every handled copy.
927   std::unique_ptr<RegScavenger> RS;
928   if (Opcode == AMDGPU::INSTRUCTION_LIST_END)
929     RS.reset(new RegScavenger());
930 
931   ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize);
932 
933   // If there is an overlap, we can't kill the super-register on the last
934   // instruction, since it will also kill the components made live by this def.
935   const bool CanKillSuperReg = KillSrc && !RI.regsOverlap(SrcReg, DestReg);
936 
937   for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
938     unsigned SubIdx;
939     if (Forward)
940       SubIdx = SubIndices[Idx];
941     else
942       SubIdx = SubIndices[SubIndices.size() - Idx - 1];
943 
944     bool UseKill = CanKillSuperReg && Idx == SubIndices.size() - 1;
945 
946     if (Opcode == AMDGPU::INSTRUCTION_LIST_END) {
947       Register ImpDefSuper = Idx == 0 ? Register(DestReg) : Register();
948       Register ImpUseSuper = SrcReg;
949       indirectCopyToAGPR(*this, MBB, MI, DL, RI.getSubReg(DestReg, SubIdx),
950                          RI.getSubReg(SrcReg, SubIdx), UseKill, *RS,
951                          ImpDefSuper, ImpUseSuper);
952     } else if (Opcode == AMDGPU::V_PK_MOV_B32) {
953       Register DstSubReg = RI.getSubReg(DestReg, SubIdx);
954       Register SrcSubReg = RI.getSubReg(SrcReg, SubIdx);
955       MachineInstrBuilder MIB =
956         BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), DstSubReg)
957         .addImm(SISrcMods::OP_SEL_1)
958         .addReg(SrcSubReg)
959         .addImm(SISrcMods::OP_SEL_0 | SISrcMods::OP_SEL_1)
960         .addReg(SrcSubReg)
961         .addImm(0) // op_sel_lo
962         .addImm(0) // op_sel_hi
963         .addImm(0) // neg_lo
964         .addImm(0) // neg_hi
965         .addImm(0) // clamp
966         .addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit);
967       if (Idx == 0)
968         MIB.addReg(DestReg, RegState::Define | RegState::Implicit);
969     } else {
970       MachineInstrBuilder Builder =
971         BuildMI(MBB, MI, DL, get(Opcode), RI.getSubReg(DestReg, SubIdx))
972         .addReg(RI.getSubReg(SrcReg, SubIdx));
973       if (Idx == 0)
974         Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
975 
976       Builder.addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit);
977     }
978   }
979 }
980 
981 int SIInstrInfo::commuteOpcode(unsigned Opcode) const {
982   int NewOpc;
983 
984   // Try to map original to commuted opcode
985   NewOpc = AMDGPU::getCommuteRev(Opcode);
986   if (NewOpc != -1)
987     // Check if the commuted (REV) opcode exists on the target.
988     return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;
989 
990   // Try to map commuted to original opcode
991   NewOpc = AMDGPU::getCommuteOrig(Opcode);
992   if (NewOpc != -1)
993     // Check if the original (non-REV) opcode exists on the target.
994     return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;
995 
996   return Opcode;
997 }
998 
999 void SIInstrInfo::materializeImmediate(MachineBasicBlock &MBB,
1000                                        MachineBasicBlock::iterator MI,
1001                                        const DebugLoc &DL, unsigned DestReg,
1002                                        int64_t Value) const {
1003   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
1004   const TargetRegisterClass *RegClass = MRI.getRegClass(DestReg);
1005   if (RegClass == &AMDGPU::SReg_32RegClass ||
1006       RegClass == &AMDGPU::SGPR_32RegClass ||
1007       RegClass == &AMDGPU::SReg_32_XM0RegClass ||
1008       RegClass == &AMDGPU::SReg_32_XM0_XEXECRegClass) {
1009     BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
1010       .addImm(Value);
1011     return;
1012   }
1013 
1014   if (RegClass == &AMDGPU::SReg_64RegClass ||
1015       RegClass == &AMDGPU::SGPR_64RegClass ||
1016       RegClass == &AMDGPU::SReg_64_XEXECRegClass) {
1017     BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
1018       .addImm(Value);
1019     return;
1020   }
1021 
1022   if (RegClass == &AMDGPU::VGPR_32RegClass) {
1023     BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
1024       .addImm(Value);
1025     return;
1026   }
1027   if (RegClass->hasSuperClassEq(&AMDGPU::VReg_64RegClass)) {
1028     BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), DestReg)
1029       .addImm(Value);
1030     return;
1031   }
1032 
1033   unsigned EltSize = 4;
1034   unsigned Opcode = AMDGPU::V_MOV_B32_e32;
1035   if (RI.isSGPRClass(RegClass)) {
1036     if (RI.getRegSizeInBits(*RegClass) > 32) {
1037       Opcode =  AMDGPU::S_MOV_B64;
1038       EltSize = 8;
1039     } else {
1040       Opcode = AMDGPU::S_MOV_B32;
1041       EltSize = 4;
1042     }
1043   }
1044 
1045   ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RegClass, EltSize);
1046   for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
1047     int64_t IdxValue = Idx == 0 ? Value : 0;
1048 
1049     MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
1050       get(Opcode), RI.getSubReg(DestReg, SubIndices[Idx]));
1051     Builder.addImm(IdxValue);
1052   }
1053 }
1054 
1055 const TargetRegisterClass *
1056 SIInstrInfo::getPreferredSelectRegClass(unsigned Size) const {
1057   return &AMDGPU::VGPR_32RegClass;
1058 }
1059 
1060 void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
1061                                      MachineBasicBlock::iterator I,
1062                                      const DebugLoc &DL, Register DstReg,
1063                                      ArrayRef<MachineOperand> Cond,
1064                                      Register TrueReg,
1065                                      Register FalseReg) const {
1066   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
1067   const TargetRegisterClass *BoolXExecRC =
1068     RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
1069   assert(MRI.getRegClass(DstReg) == &AMDGPU::VGPR_32RegClass &&
1070          "Not a VGPR32 reg");
1071 
1072   if (Cond.size() == 1) {
1073     Register SReg = MRI.createVirtualRegister(BoolXExecRC);
1074     BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
1075       .add(Cond[0]);
1076     BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1077       .addImm(0)
1078       .addReg(FalseReg)
1079       .addImm(0)
1080       .addReg(TrueReg)
1081       .addReg(SReg);
1082   } else if (Cond.size() == 2) {
1083     assert(Cond[0].isImm() && "Cond[0] is not an immediate");
1084     switch (Cond[0].getImm()) {
1085     case SIInstrInfo::SCC_TRUE: {
1086       Register SReg = MRI.createVirtualRegister(BoolXExecRC);
1087       BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
1088                                             : AMDGPU::S_CSELECT_B64), SReg)
1089         .addImm(1)
1090         .addImm(0);
1091       BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1092         .addImm(0)
1093         .addReg(FalseReg)
1094         .addImm(0)
1095         .addReg(TrueReg)
1096         .addReg(SReg);
1097       break;
1098     }
1099     case SIInstrInfo::SCC_FALSE: {
1100       Register SReg = MRI.createVirtualRegister(BoolXExecRC);
1101       BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
1102                                             : AMDGPU::S_CSELECT_B64), SReg)
1103         .addImm(0)
1104         .addImm(1);
1105       BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1106         .addImm(0)
1107         .addReg(FalseReg)
1108         .addImm(0)
1109         .addReg(TrueReg)
1110         .addReg(SReg);
1111       break;
1112     }
1113     case SIInstrInfo::VCCNZ: {
1114       MachineOperand RegOp = Cond[1];
1115       RegOp.setImplicit(false);
1116       Register SReg = MRI.createVirtualRegister(BoolXExecRC);
1117       BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
1118         .add(RegOp);
1119       BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1120           .addImm(0)
1121           .addReg(FalseReg)
1122           .addImm(0)
1123           .addReg(TrueReg)
1124           .addReg(SReg);
1125       break;
1126     }
1127     case SIInstrInfo::VCCZ: {
1128       MachineOperand RegOp = Cond[1];
1129       RegOp.setImplicit(false);
1130       Register SReg = MRI.createVirtualRegister(BoolXExecRC);
1131       BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
1132         .add(RegOp);
1133       BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1134           .addImm(0)
1135           .addReg(TrueReg)
1136           .addImm(0)
1137           .addReg(FalseReg)
1138           .addReg(SReg);
1139       break;
1140     }
1141     case SIInstrInfo::EXECNZ: {
1142       Register SReg = MRI.createVirtualRegister(BoolXExecRC);
1143       Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
1144       BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
1145                                             : AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
1146         .addImm(0);
1147       BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
1148                                             : AMDGPU::S_CSELECT_B64), SReg)
1149         .addImm(1)
1150         .addImm(0);
1151       BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1152         .addImm(0)
1153         .addReg(FalseReg)
1154         .addImm(0)
1155         .addReg(TrueReg)
1156         .addReg(SReg);
1157       break;
1158     }
1159     case SIInstrInfo::EXECZ: {
1160       Register SReg = MRI.createVirtualRegister(BoolXExecRC);
1161       Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
1162       BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
1163                                             : AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
1164         .addImm(0);
1165       BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
1166                                             : AMDGPU::S_CSELECT_B64), SReg)
1167         .addImm(0)
1168         .addImm(1);
1169       BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1170         .addImm(0)
1171         .addReg(FalseReg)
1172         .addImm(0)
1173         .addReg(TrueReg)
1174         .addReg(SReg);
1175       llvm_unreachable("Unhandled branch predicate EXECZ");
1176       break;
1177     }
1178     default:
1179       llvm_unreachable("invalid branch predicate");
1180     }
1181   } else {
1182     llvm_unreachable("Can only handle Cond size 1 or 2");
1183   }
1184 }
1185 
1186 Register SIInstrInfo::insertEQ(MachineBasicBlock *MBB,
1187                                MachineBasicBlock::iterator I,
1188                                const DebugLoc &DL,
1189                                Register SrcReg, int Value) const {
1190   MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
1191   Register Reg = MRI.createVirtualRegister(RI.getBoolRC());
1192   BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_EQ_I32_e64), Reg)
1193     .addImm(Value)
1194     .addReg(SrcReg);
1195 
1196   return Reg;
1197 }
1198 
1199 Register SIInstrInfo::insertNE(MachineBasicBlock *MBB,
1200                                MachineBasicBlock::iterator I,
1201                                const DebugLoc &DL,
1202                                Register SrcReg, int Value) const {
1203   MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
1204   Register Reg = MRI.createVirtualRegister(RI.getBoolRC());
1205   BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_NE_I32_e64), Reg)
1206     .addImm(Value)
1207     .addReg(SrcReg);
1208 
1209   return Reg;
1210 }
1211 
1212 unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {
1213 
1214   if (RI.hasAGPRs(DstRC))
1215     return AMDGPU::COPY;
1216   if (RI.getRegSizeInBits(*DstRC) == 32) {
1217     return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1218   } else if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC)) {
1219     return AMDGPU::S_MOV_B64;
1220   } else if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC)) {
1221     return  AMDGPU::V_MOV_B64_PSEUDO;
1222   }
1223   return AMDGPU::COPY;
1224 }
1225 
1226 const MCInstrDesc &
1227 SIInstrInfo::getIndirectGPRIDXPseudo(unsigned VecSize,
1228                                      bool IsIndirectSrc) const {
1229   if (IsIndirectSrc) {
1230     if (VecSize <= 32) // 4 bytes
1231       return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V1);
1232     if (VecSize <= 64) // 8 bytes
1233       return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V2);
1234     if (VecSize <= 96) // 12 bytes
1235       return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V3);
1236     if (VecSize <= 128) // 16 bytes
1237       return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V4);
1238     if (VecSize <= 160) // 20 bytes
1239       return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V5);
1240     if (VecSize <= 256) // 32 bytes
1241       return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V8);
1242     if (VecSize <= 512) // 64 bytes
1243       return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V16);
1244     if (VecSize <= 1024) // 128 bytes
1245       return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V32);
1246 
1247     llvm_unreachable("unsupported size for IndirectRegReadGPRIDX pseudos");
1248   }
1249 
1250   if (VecSize <= 32) // 4 bytes
1251     return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V1);
1252   if (VecSize <= 64) // 8 bytes
1253     return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V2);
1254   if (VecSize <= 96) // 12 bytes
1255     return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V3);
1256   if (VecSize <= 128) // 16 bytes
1257     return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V4);
1258   if (VecSize <= 160) // 20 bytes
1259     return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V5);
1260   if (VecSize <= 256) // 32 bytes
1261     return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V8);
1262   if (VecSize <= 512) // 64 bytes
1263     return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V16);
1264   if (VecSize <= 1024) // 128 bytes
1265     return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V32);
1266 
1267   llvm_unreachable("unsupported size for IndirectRegWriteGPRIDX pseudos");
1268 }
1269 
1270 static unsigned getIndirectVGPRWriteMovRelPseudoOpc(unsigned VecSize) {
1271   if (VecSize <= 32) // 4 bytes
1272     return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V1;
1273   if (VecSize <= 64) // 8 bytes
1274     return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V2;
1275   if (VecSize <= 96) // 12 bytes
1276     return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V3;
1277   if (VecSize <= 128) // 16 bytes
1278     return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V4;
1279   if (VecSize <= 160) // 20 bytes
1280     return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V5;
1281   if (VecSize <= 256) // 32 bytes
1282     return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V8;
1283   if (VecSize <= 512) // 64 bytes
1284     return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V16;
1285   if (VecSize <= 1024) // 128 bytes
1286     return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V32;
1287 
1288   llvm_unreachable("unsupported size for IndirectRegWrite pseudos");
1289 }
1290 
1291 static unsigned getIndirectSGPRWriteMovRelPseudo32(unsigned VecSize) {
1292   if (VecSize <= 32) // 4 bytes
1293     return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V1;
1294   if (VecSize <= 64) // 8 bytes
1295     return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V2;
1296   if (VecSize <= 96) // 12 bytes
1297     return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V3;
1298   if (VecSize <= 128) // 16 bytes
1299     return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V4;
1300   if (VecSize <= 160) // 20 bytes
1301     return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V5;
1302   if (VecSize <= 256) // 32 bytes
1303     return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V8;
1304   if (VecSize <= 512) // 64 bytes
1305     return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V16;
1306   if (VecSize <= 1024) // 128 bytes
1307     return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V32;
1308 
1309   llvm_unreachable("unsupported size for IndirectRegWrite pseudos");
1310 }
1311 
1312 static unsigned getIndirectSGPRWriteMovRelPseudo64(unsigned VecSize) {
1313   if (VecSize <= 64) // 8 bytes
1314     return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V1;
1315   if (VecSize <= 128) // 16 bytes
1316     return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V2;
1317   if (VecSize <= 256) // 32 bytes
1318     return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V4;
1319   if (VecSize <= 512) // 64 bytes
1320     return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V8;
1321   if (VecSize <= 1024) // 128 bytes
1322     return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V16;
1323 
1324   llvm_unreachable("unsupported size for IndirectRegWrite pseudos");
1325 }
1326 
1327 const MCInstrDesc &
1328 SIInstrInfo::getIndirectRegWriteMovRelPseudo(unsigned VecSize, unsigned EltSize,
1329                                              bool IsSGPR) const {
1330   if (IsSGPR) {
1331     switch (EltSize) {
1332     case 32:
1333       return get(getIndirectSGPRWriteMovRelPseudo32(VecSize));
1334     case 64:
1335       return get(getIndirectSGPRWriteMovRelPseudo64(VecSize));
1336     default:
1337       llvm_unreachable("invalid reg indexing elt size");
1338     }
1339   }
1340 
1341   assert(EltSize == 32 && "invalid reg indexing elt size");
1342   return get(getIndirectVGPRWriteMovRelPseudoOpc(VecSize));
1343 }
1344 
1345 static unsigned getSGPRSpillSaveOpcode(unsigned Size) {
1346   switch (Size) {
1347   case 4:
1348     return AMDGPU::SI_SPILL_S32_SAVE;
1349   case 8:
1350     return AMDGPU::SI_SPILL_S64_SAVE;
1351   case 12:
1352     return AMDGPU::SI_SPILL_S96_SAVE;
1353   case 16:
1354     return AMDGPU::SI_SPILL_S128_SAVE;
1355   case 20:
1356     return AMDGPU::SI_SPILL_S160_SAVE;
1357   case 24:
1358     return AMDGPU::SI_SPILL_S192_SAVE;
1359   case 32:
1360     return AMDGPU::SI_SPILL_S256_SAVE;
1361   case 64:
1362     return AMDGPU::SI_SPILL_S512_SAVE;
1363   case 128:
1364     return AMDGPU::SI_SPILL_S1024_SAVE;
1365   default:
1366     llvm_unreachable("unknown register size");
1367   }
1368 }
1369 
1370 static unsigned getVGPRSpillSaveOpcode(unsigned Size) {
1371   switch (Size) {
1372   case 4:
1373     return AMDGPU::SI_SPILL_V32_SAVE;
1374   case 8:
1375     return AMDGPU::SI_SPILL_V64_SAVE;
1376   case 12:
1377     return AMDGPU::SI_SPILL_V96_SAVE;
1378   case 16:
1379     return AMDGPU::SI_SPILL_V128_SAVE;
1380   case 20:
1381     return AMDGPU::SI_SPILL_V160_SAVE;
1382   case 24:
1383     return AMDGPU::SI_SPILL_V192_SAVE;
1384   case 32:
1385     return AMDGPU::SI_SPILL_V256_SAVE;
1386   case 64:
1387     return AMDGPU::SI_SPILL_V512_SAVE;
1388   case 128:
1389     return AMDGPU::SI_SPILL_V1024_SAVE;
1390   default:
1391     llvm_unreachable("unknown register size");
1392   }
1393 }
1394 
1395 static unsigned getAGPRSpillSaveOpcode(unsigned Size) {
1396   switch (Size) {
1397   case 4:
1398     return AMDGPU::SI_SPILL_A32_SAVE;
1399   case 8:
1400     return AMDGPU::SI_SPILL_A64_SAVE;
1401   case 12:
1402     return AMDGPU::SI_SPILL_A96_SAVE;
1403   case 16:
1404     return AMDGPU::SI_SPILL_A128_SAVE;
1405   case 20:
1406     return AMDGPU::SI_SPILL_A160_SAVE;
1407   case 24:
1408     return AMDGPU::SI_SPILL_A192_SAVE;
1409   case 32:
1410     return AMDGPU::SI_SPILL_A256_SAVE;
1411   case 64:
1412     return AMDGPU::SI_SPILL_A512_SAVE;
1413   case 128:
1414     return AMDGPU::SI_SPILL_A1024_SAVE;
1415   default:
1416     llvm_unreachable("unknown register size");
1417   }
1418 }
1419 
1420 void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
1421                                       MachineBasicBlock::iterator MI,
1422                                       Register SrcReg, bool isKill,
1423                                       int FrameIndex,
1424                                       const TargetRegisterClass *RC,
1425                                       const TargetRegisterInfo *TRI) const {
1426   MachineFunction *MF = MBB.getParent();
1427   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1428   MachineFrameInfo &FrameInfo = MF->getFrameInfo();
1429   const DebugLoc &DL = MBB.findDebugLoc(MI);
1430 
1431   MachinePointerInfo PtrInfo
1432     = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
1433   MachineMemOperand *MMO = MF->getMachineMemOperand(
1434       PtrInfo, MachineMemOperand::MOStore, FrameInfo.getObjectSize(FrameIndex),
1435       FrameInfo.getObjectAlign(FrameIndex));
1436   unsigned SpillSize = TRI->getSpillSize(*RC);
1437 
1438   if (RI.isSGPRClass(RC)) {
1439     MFI->setHasSpilledSGPRs();
1440     assert(SrcReg != AMDGPU::M0 && "m0 should not be spilled");
1441     assert(SrcReg != AMDGPU::EXEC_LO && SrcReg != AMDGPU::EXEC_HI &&
1442            SrcReg != AMDGPU::EXEC && "exec should not be spilled");
1443 
1444     // We are only allowed to create one new instruction when spilling
1445     // registers, so we need to use pseudo instruction for spilling SGPRs.
1446     const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(SpillSize));
1447 
1448     // The SGPR spill/restore instructions only work on number sgprs, so we need
1449     // to make sure we are using the correct register class.
1450     if (SrcReg.isVirtual() && SpillSize == 4) {
1451       MachineRegisterInfo &MRI = MF->getRegInfo();
1452       MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
1453     }
1454 
1455     BuildMI(MBB, MI, DL, OpDesc)
1456       .addReg(SrcReg, getKillRegState(isKill)) // data
1457       .addFrameIndex(FrameIndex)               // addr
1458       .addMemOperand(MMO)
1459       .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit);
1460 
1461     if (RI.spillSGPRToVGPR())
1462       FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill);
1463     return;
1464   }
1465 
1466   unsigned Opcode = RI.hasAGPRs(RC) ? getAGPRSpillSaveOpcode(SpillSize)
1467                                     : getVGPRSpillSaveOpcode(SpillSize);
1468   MFI->setHasSpilledVGPRs();
1469 
1470   BuildMI(MBB, MI, DL, get(Opcode))
1471     .addReg(SrcReg, getKillRegState(isKill)) // data
1472     .addFrameIndex(FrameIndex)               // addr
1473     .addReg(MFI->getStackPtrOffsetReg())     // scratch_offset
1474     .addImm(0)                               // offset
1475     .addMemOperand(MMO);
1476 }
1477 
1478 static unsigned getSGPRSpillRestoreOpcode(unsigned Size) {
1479   switch (Size) {
1480   case 4:
1481     return AMDGPU::SI_SPILL_S32_RESTORE;
1482   case 8:
1483     return AMDGPU::SI_SPILL_S64_RESTORE;
1484   case 12:
1485     return AMDGPU::SI_SPILL_S96_RESTORE;
1486   case 16:
1487     return AMDGPU::SI_SPILL_S128_RESTORE;
1488   case 20:
1489     return AMDGPU::SI_SPILL_S160_RESTORE;
1490   case 24:
1491     return AMDGPU::SI_SPILL_S192_RESTORE;
1492   case 32:
1493     return AMDGPU::SI_SPILL_S256_RESTORE;
1494   case 64:
1495     return AMDGPU::SI_SPILL_S512_RESTORE;
1496   case 128:
1497     return AMDGPU::SI_SPILL_S1024_RESTORE;
1498   default:
1499     llvm_unreachable("unknown register size");
1500   }
1501 }
1502 
1503 static unsigned getVGPRSpillRestoreOpcode(unsigned Size) {
1504   switch (Size) {
1505   case 4:
1506     return AMDGPU::SI_SPILL_V32_RESTORE;
1507   case 8:
1508     return AMDGPU::SI_SPILL_V64_RESTORE;
1509   case 12:
1510     return AMDGPU::SI_SPILL_V96_RESTORE;
1511   case 16:
1512     return AMDGPU::SI_SPILL_V128_RESTORE;
1513   case 20:
1514     return AMDGPU::SI_SPILL_V160_RESTORE;
1515   case 24:
1516     return AMDGPU::SI_SPILL_V192_RESTORE;
1517   case 32:
1518     return AMDGPU::SI_SPILL_V256_RESTORE;
1519   case 64:
1520     return AMDGPU::SI_SPILL_V512_RESTORE;
1521   case 128:
1522     return AMDGPU::SI_SPILL_V1024_RESTORE;
1523   default:
1524     llvm_unreachable("unknown register size");
1525   }
1526 }
1527 
1528 static unsigned getAGPRSpillRestoreOpcode(unsigned Size) {
1529   switch (Size) {
1530   case 4:
1531     return AMDGPU::SI_SPILL_A32_RESTORE;
1532   case 8:
1533     return AMDGPU::SI_SPILL_A64_RESTORE;
1534   case 12:
1535     return AMDGPU::SI_SPILL_A96_RESTORE;
1536   case 16:
1537     return AMDGPU::SI_SPILL_A128_RESTORE;
1538   case 20:
1539     return AMDGPU::SI_SPILL_A160_RESTORE;
1540   case 24:
1541     return AMDGPU::SI_SPILL_A192_RESTORE;
1542   case 32:
1543     return AMDGPU::SI_SPILL_A256_RESTORE;
1544   case 64:
1545     return AMDGPU::SI_SPILL_A512_RESTORE;
1546   case 128:
1547     return AMDGPU::SI_SPILL_A1024_RESTORE;
1548   default:
1549     llvm_unreachable("unknown register size");
1550   }
1551 }
1552 
1553 void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
1554                                        MachineBasicBlock::iterator MI,
1555                                        Register DestReg, int FrameIndex,
1556                                        const TargetRegisterClass *RC,
1557                                        const TargetRegisterInfo *TRI) const {
1558   MachineFunction *MF = MBB.getParent();
1559   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1560   MachineFrameInfo &FrameInfo = MF->getFrameInfo();
1561   const DebugLoc &DL = MBB.findDebugLoc(MI);
1562   unsigned SpillSize = TRI->getSpillSize(*RC);
1563 
1564   MachinePointerInfo PtrInfo
1565     = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
1566 
1567   MachineMemOperand *MMO = MF->getMachineMemOperand(
1568       PtrInfo, MachineMemOperand::MOLoad, FrameInfo.getObjectSize(FrameIndex),
1569       FrameInfo.getObjectAlign(FrameIndex));
1570 
1571   if (RI.isSGPRClass(RC)) {
1572     MFI->setHasSpilledSGPRs();
1573     assert(DestReg != AMDGPU::M0 && "m0 should not be reloaded into");
1574     assert(DestReg != AMDGPU::EXEC_LO && DestReg != AMDGPU::EXEC_HI &&
1575            DestReg != AMDGPU::EXEC && "exec should not be spilled");
1576 
1577     // FIXME: Maybe this should not include a memoperand because it will be
1578     // lowered to non-memory instructions.
1579     const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(SpillSize));
1580     if (DestReg.isVirtual() && SpillSize == 4) {
1581       MachineRegisterInfo &MRI = MF->getRegInfo();
1582       MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
1583     }
1584 
1585     if (RI.spillSGPRToVGPR())
1586       FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill);
1587     BuildMI(MBB, MI, DL, OpDesc, DestReg)
1588       .addFrameIndex(FrameIndex) // addr
1589       .addMemOperand(MMO)
1590       .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit);
1591 
1592     return;
1593   }
1594 
1595   unsigned Opcode = RI.hasAGPRs(RC) ? getAGPRSpillRestoreOpcode(SpillSize)
1596                                     : getVGPRSpillRestoreOpcode(SpillSize);
1597   BuildMI(MBB, MI, DL, get(Opcode), DestReg)
1598     .addFrameIndex(FrameIndex)        // vaddr
1599     .addReg(MFI->getStackPtrOffsetReg()) // scratch_offset
1600     .addImm(0)                           // offset
1601     .addMemOperand(MMO);
1602 }
1603 
1604 void SIInstrInfo::insertNoop(MachineBasicBlock &MBB,
1605                              MachineBasicBlock::iterator MI) const {
1606   insertNoops(MBB, MI, 1);
1607 }
1608 
1609 void SIInstrInfo::insertNoops(MachineBasicBlock &MBB,
1610                               MachineBasicBlock::iterator MI,
1611                               unsigned Quantity) const {
1612   DebugLoc DL = MBB.findDebugLoc(MI);
1613   while (Quantity > 0) {
1614     unsigned Arg = std::min(Quantity, 8u);
1615     Quantity -= Arg;
1616     BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP)).addImm(Arg - 1);
1617   }
1618 }
1619 
1620 void SIInstrInfo::insertReturn(MachineBasicBlock &MBB) const {
1621   auto MF = MBB.getParent();
1622   SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
1623 
1624   assert(Info->isEntryFunction());
1625 
1626   if (MBB.succ_empty()) {
1627     bool HasNoTerminator = MBB.getFirstTerminator() == MBB.end();
1628     if (HasNoTerminator) {
1629       if (Info->returnsVoid()) {
1630         BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::S_ENDPGM)).addImm(0);
1631       } else {
1632         BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::SI_RETURN_TO_EPILOG));
1633       }
1634     }
1635   }
1636 }
1637 
1638 unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) {
1639   switch (MI.getOpcode()) {
1640   default: return 1; // FIXME: Do wait states equal cycles?
1641 
1642   case AMDGPU::S_NOP:
1643     return MI.getOperand(0).getImm() + 1;
1644   }
1645 }
1646 
1647 bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
1648   const SIRegisterInfo *TRI = ST.getRegisterInfo();
1649   MachineBasicBlock &MBB = *MI.getParent();
1650   DebugLoc DL = MBB.findDebugLoc(MI);
1651   switch (MI.getOpcode()) {
1652   default: return TargetInstrInfo::expandPostRAPseudo(MI);
1653   case AMDGPU::S_MOV_B64_term:
1654     // This is only a terminator to get the correct spill code placement during
1655     // register allocation.
1656     MI.setDesc(get(AMDGPU::S_MOV_B64));
1657     break;
1658 
1659   case AMDGPU::S_MOV_B32_term:
1660     // This is only a terminator to get the correct spill code placement during
1661     // register allocation.
1662     MI.setDesc(get(AMDGPU::S_MOV_B32));
1663     break;
1664 
1665   case AMDGPU::S_XOR_B64_term:
1666     // This is only a terminator to get the correct spill code placement during
1667     // register allocation.
1668     MI.setDesc(get(AMDGPU::S_XOR_B64));
1669     break;
1670 
1671   case AMDGPU::S_XOR_B32_term:
1672     // This is only a terminator to get the correct spill code placement during
1673     // register allocation.
1674     MI.setDesc(get(AMDGPU::S_XOR_B32));
1675     break;
1676   case AMDGPU::S_OR_B64_term:
1677     // This is only a terminator to get the correct spill code placement during
1678     // register allocation.
1679     MI.setDesc(get(AMDGPU::S_OR_B64));
1680     break;
1681   case AMDGPU::S_OR_B32_term:
1682     // This is only a terminator to get the correct spill code placement during
1683     // register allocation.
1684     MI.setDesc(get(AMDGPU::S_OR_B32));
1685     break;
1686 
1687   case AMDGPU::S_ANDN2_B64_term:
1688     // This is only a terminator to get the correct spill code placement during
1689     // register allocation.
1690     MI.setDesc(get(AMDGPU::S_ANDN2_B64));
1691     break;
1692 
1693   case AMDGPU::S_ANDN2_B32_term:
1694     // This is only a terminator to get the correct spill code placement during
1695     // register allocation.
1696     MI.setDesc(get(AMDGPU::S_ANDN2_B32));
1697     break;
1698 
1699   case AMDGPU::S_AND_B64_term:
1700     // This is only a terminator to get the correct spill code placement during
1701     // register allocation.
1702     MI.setDesc(get(AMDGPU::S_AND_B64));
1703     break;
1704 
1705   case AMDGPU::S_AND_B32_term:
1706     // This is only a terminator to get the correct spill code placement during
1707     // register allocation.
1708     MI.setDesc(get(AMDGPU::S_AND_B32));
1709     break;
1710 
1711   case AMDGPU::V_MOV_B64_PSEUDO: {
1712     Register Dst = MI.getOperand(0).getReg();
1713     Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
1714     Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
1715 
1716     const MachineOperand &SrcOp = MI.getOperand(1);
1717     // FIXME: Will this work for 64-bit floating point immediates?
1718     assert(!SrcOp.isFPImm());
1719     if (SrcOp.isImm()) {
1720       APInt Imm(64, SrcOp.getImm());
1721       APInt Lo(32, Imm.getLoBits(32).getZExtValue());
1722       APInt Hi(32, Imm.getHiBits(32).getZExtValue());
1723       if (ST.hasPackedFP32Ops() && Lo == Hi && isInlineConstant(Lo)) {
1724         BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), Dst)
1725           .addImm(SISrcMods::OP_SEL_1)
1726           .addImm(Lo.getSExtValue())
1727           .addImm(SISrcMods::OP_SEL_1)
1728           .addImm(Lo.getSExtValue())
1729           .addImm(0)  // op_sel_lo
1730           .addImm(0)  // op_sel_hi
1731           .addImm(0)  // neg_lo
1732           .addImm(0)  // neg_hi
1733           .addImm(0); // clamp
1734       } else {
1735         BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
1736           .addImm(Lo.getZExtValue())
1737           .addReg(Dst, RegState::Implicit | RegState::Define);
1738         BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
1739           .addImm(Hi.getZExtValue())
1740           .addReg(Dst, RegState::Implicit | RegState::Define);
1741       }
1742     } else {
1743       assert(SrcOp.isReg());
1744       if (ST.hasPackedFP32Ops() &&
1745           !RI.isAGPR(MBB.getParent()->getRegInfo(), SrcOp.getReg())) {
1746         BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), Dst)
1747           .addImm(SISrcMods::OP_SEL_1) // src0_mod
1748           .addReg(SrcOp.getReg())
1749           .addImm(SISrcMods::OP_SEL_0 | SISrcMods::OP_SEL_1) // src1_mod
1750           .addReg(SrcOp.getReg())
1751           .addImm(0)  // op_sel_lo
1752           .addImm(0)  // op_sel_hi
1753           .addImm(0)  // neg_lo
1754           .addImm(0)  // neg_hi
1755           .addImm(0); // clamp
1756       } else {
1757         BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
1758           .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
1759           .addReg(Dst, RegState::Implicit | RegState::Define);
1760         BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
1761           .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
1762           .addReg(Dst, RegState::Implicit | RegState::Define);
1763       }
1764     }
1765     MI.eraseFromParent();
1766     break;
1767   }
1768   case AMDGPU::V_MOV_B64_DPP_PSEUDO: {
1769     expandMovDPP64(MI);
1770     break;
1771   }
1772   case AMDGPU::V_SET_INACTIVE_B32: {
1773     unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64;
1774     unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
1775     auto FirstNot = BuildMI(MBB, MI, DL, get(NotOpc), Exec).addReg(Exec);
1776     FirstNot->addRegisterDead(AMDGPU::SCC, TRI); // SCC is overwritten
1777     BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), MI.getOperand(0).getReg())
1778       .add(MI.getOperand(2));
1779     BuildMI(MBB, MI, DL, get(NotOpc), Exec)
1780       .addReg(Exec);
1781     MI.eraseFromParent();
1782     break;
1783   }
1784   case AMDGPU::V_SET_INACTIVE_B64: {
1785     unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64;
1786     unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
1787     auto FirstNot = BuildMI(MBB, MI, DL, get(NotOpc), Exec).addReg(Exec);
1788     FirstNot->addRegisterDead(AMDGPU::SCC, TRI); // SCC is overwritten
1789     MachineInstr *Copy = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO),
1790                                  MI.getOperand(0).getReg())
1791       .add(MI.getOperand(2));
1792     expandPostRAPseudo(*Copy);
1793     BuildMI(MBB, MI, DL, get(NotOpc), Exec)
1794       .addReg(Exec);
1795     MI.eraseFromParent();
1796     break;
1797   }
1798   case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V1:
1799   case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V2:
1800   case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V3:
1801   case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V4:
1802   case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V5:
1803   case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V8:
1804   case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V16:
1805   case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V32:
1806   case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V1:
1807   case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V2:
1808   case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V3:
1809   case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V4:
1810   case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V5:
1811   case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V8:
1812   case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V16:
1813   case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V32:
1814   case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V1:
1815   case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V2:
1816   case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V4:
1817   case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V8:
1818   case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V16: {
1819     const TargetRegisterClass *EltRC = getOpRegClass(MI, 2);
1820 
1821     unsigned Opc;
1822     if (RI.hasVGPRs(EltRC)) {
1823       Opc = AMDGPU::V_MOVRELD_B32_e32;
1824     } else {
1825       Opc = RI.getRegSizeInBits(*EltRC) == 64 ? AMDGPU::S_MOVRELD_B64
1826                                               : AMDGPU::S_MOVRELD_B32;
1827     }
1828 
1829     const MCInstrDesc &OpDesc = get(Opc);
1830     Register VecReg = MI.getOperand(0).getReg();
1831     bool IsUndef = MI.getOperand(1).isUndef();
1832     unsigned SubReg = MI.getOperand(3).getImm();
1833     assert(VecReg == MI.getOperand(1).getReg());
1834 
1835     MachineInstrBuilder MIB =
1836       BuildMI(MBB, MI, DL, OpDesc)
1837         .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
1838         .add(MI.getOperand(2))
1839         .addReg(VecReg, RegState::ImplicitDefine)
1840         .addReg(VecReg, RegState::Implicit | (IsUndef ? RegState::Undef : 0));
1841 
1842     const int ImpDefIdx =
1843       OpDesc.getNumOperands() + OpDesc.getNumImplicitUses();
1844     const int ImpUseIdx = ImpDefIdx + 1;
1845     MIB->tieOperands(ImpDefIdx, ImpUseIdx);
1846     MI.eraseFromParent();
1847     break;
1848   }
1849   case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V1:
1850   case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V2:
1851   case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V3:
1852   case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V4:
1853   case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V5:
1854   case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V8:
1855   case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V16:
1856   case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V32: {
1857     assert(ST.useVGPRIndexMode());
1858     Register VecReg = MI.getOperand(0).getReg();
1859     bool IsUndef = MI.getOperand(1).isUndef();
1860     Register Idx = MI.getOperand(3).getReg();
1861     Register SubReg = MI.getOperand(4).getImm();
1862 
1863     MachineInstr *SetOn = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_ON))
1864                               .addReg(Idx)
1865                               .addImm(AMDGPU::VGPRIndexMode::DST_ENABLE);
1866     SetOn->getOperand(3).setIsUndef();
1867 
1868     const MCInstrDesc &OpDesc = get(AMDGPU::V_MOV_B32_indirect);
1869     MachineInstrBuilder MIB =
1870         BuildMI(MBB, MI, DL, OpDesc)
1871             .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
1872             .add(MI.getOperand(2))
1873             .addReg(VecReg, RegState::ImplicitDefine)
1874             .addReg(VecReg,
1875                     RegState::Implicit | (IsUndef ? RegState::Undef : 0));
1876 
1877     const int ImpDefIdx = OpDesc.getNumOperands() + OpDesc.getNumImplicitUses();
1878     const int ImpUseIdx = ImpDefIdx + 1;
1879     MIB->tieOperands(ImpDefIdx, ImpUseIdx);
1880 
1881     MachineInstr *SetOff = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_OFF));
1882 
1883     finalizeBundle(MBB, SetOn->getIterator(), std::next(SetOff->getIterator()));
1884 
1885     MI.eraseFromParent();
1886     break;
1887   }
1888   case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V1:
1889   case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V2:
1890   case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V3:
1891   case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V4:
1892   case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V5:
1893   case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V8:
1894   case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V16:
1895   case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V32: {
1896     assert(ST.useVGPRIndexMode());
1897     Register Dst = MI.getOperand(0).getReg();
1898     Register VecReg = MI.getOperand(1).getReg();
1899     bool IsUndef = MI.getOperand(1).isUndef();
1900     Register Idx = MI.getOperand(2).getReg();
1901     Register SubReg = MI.getOperand(3).getImm();
1902 
1903     MachineInstr *SetOn = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_ON))
1904                               .addReg(Idx)
1905                               .addImm(AMDGPU::VGPRIndexMode::SRC0_ENABLE);
1906     SetOn->getOperand(3).setIsUndef();
1907 
1908     BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32))
1909         .addDef(Dst)
1910         .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
1911         .addReg(VecReg, RegState::Implicit | (IsUndef ? RegState::Undef : 0))
1912         .addReg(AMDGPU::M0, RegState::Implicit);
1913 
1914     MachineInstr *SetOff = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_OFF));
1915 
1916     finalizeBundle(MBB, SetOn->getIterator(), std::next(SetOff->getIterator()));
1917 
1918     MI.eraseFromParent();
1919     break;
1920   }
1921   case AMDGPU::SI_PC_ADD_REL_OFFSET: {
1922     MachineFunction &MF = *MBB.getParent();
1923     Register Reg = MI.getOperand(0).getReg();
1924     Register RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
1925     Register RegHi = RI.getSubReg(Reg, AMDGPU::sub1);
1926 
1927     // Create a bundle so these instructions won't be re-ordered by the
1928     // post-RA scheduler.
1929     MIBundleBuilder Bundler(MBB, MI);
1930     Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg));
1931 
1932     // Add 32-bit offset from this instruction to the start of the
1933     // constant data.
1934     Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo)
1935                        .addReg(RegLo)
1936                        .add(MI.getOperand(1)));
1937 
1938     MachineInstrBuilder MIB = BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi)
1939                                   .addReg(RegHi);
1940     MIB.add(MI.getOperand(2));
1941 
1942     Bundler.append(MIB);
1943     finalizeBundle(MBB, Bundler.begin());
1944 
1945     MI.eraseFromParent();
1946     break;
1947   }
1948   case AMDGPU::ENTER_STRICT_WWM: {
1949     // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when
1950     // Whole Wave Mode is entered.
1951     MI.setDesc(get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
1952                                  : AMDGPU::S_OR_SAVEEXEC_B64));
1953     break;
1954   }
1955   case AMDGPU::ENTER_STRICT_WQM: {
1956     // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when
1957     // STRICT_WQM is entered.
1958     const unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
1959     const unsigned WQMOp = ST.isWave32() ? AMDGPU::S_WQM_B32 : AMDGPU::S_WQM_B64;
1960     const unsigned MovOp = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
1961     BuildMI(MBB, MI, DL, get(MovOp), MI.getOperand(0).getReg()).addReg(Exec);
1962     BuildMI(MBB, MI, DL, get(WQMOp), Exec).addReg(Exec);
1963 
1964     MI.eraseFromParent();
1965     break;
1966   }
1967   case AMDGPU::EXIT_STRICT_WWM:
1968   case AMDGPU::EXIT_STRICT_WQM: {
1969     // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when
1970     // WWM/STICT_WQM is exited.
1971     MI.setDesc(get(ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64));
1972     break;
1973   }
1974   }
1975   return true;
1976 }
1977 
1978 std::pair<MachineInstr*, MachineInstr*>
1979 SIInstrInfo::expandMovDPP64(MachineInstr &MI) const {
1980   assert (MI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO);
1981 
1982   MachineBasicBlock &MBB = *MI.getParent();
1983   DebugLoc DL = MBB.findDebugLoc(MI);
1984   MachineFunction *MF = MBB.getParent();
1985   MachineRegisterInfo &MRI = MF->getRegInfo();
1986   Register Dst = MI.getOperand(0).getReg();
1987   unsigned Part = 0;
1988   MachineInstr *Split[2];
1989 
1990   for (auto Sub : { AMDGPU::sub0, AMDGPU::sub1 }) {
1991     auto MovDPP = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_dpp));
1992     if (Dst.isPhysical()) {
1993       MovDPP.addDef(RI.getSubReg(Dst, Sub));
1994     } else {
1995       assert(MRI.isSSA());
1996       auto Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1997       MovDPP.addDef(Tmp);
1998     }
1999 
2000     for (unsigned I = 1; I <= 2; ++I) { // old and src operands.
2001       const MachineOperand &SrcOp = MI.getOperand(I);
2002       assert(!SrcOp.isFPImm());
2003       if (SrcOp.isImm()) {
2004         APInt Imm(64, SrcOp.getImm());
2005         Imm.ashrInPlace(Part * 32);
2006         MovDPP.addImm(Imm.getLoBits(32).getZExtValue());
2007       } else {
2008         assert(SrcOp.isReg());
2009         Register Src = SrcOp.getReg();
2010         if (Src.isPhysical())
2011           MovDPP.addReg(RI.getSubReg(Src, Sub));
2012         else
2013           MovDPP.addReg(Src, SrcOp.isUndef() ? RegState::Undef : 0, Sub);
2014       }
2015     }
2016 
2017     for (unsigned I = 3; I < MI.getNumExplicitOperands(); ++I)
2018       MovDPP.addImm(MI.getOperand(I).getImm());
2019 
2020     Split[Part] = MovDPP;
2021     ++Part;
2022   }
2023 
2024   if (Dst.isVirtual())
2025     BuildMI(MBB, MI, DL, get(AMDGPU::REG_SEQUENCE), Dst)
2026       .addReg(Split[0]->getOperand(0).getReg())
2027       .addImm(AMDGPU::sub0)
2028       .addReg(Split[1]->getOperand(0).getReg())
2029       .addImm(AMDGPU::sub1);
2030 
2031   MI.eraseFromParent();
2032   return std::make_pair(Split[0], Split[1]);
2033 }
2034 
2035 bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI,
2036                                       MachineOperand &Src0,
2037                                       unsigned Src0OpName,
2038                                       MachineOperand &Src1,
2039                                       unsigned Src1OpName) const {
2040   MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName);
2041   if (!Src0Mods)
2042     return false;
2043 
2044   MachineOperand *Src1Mods = getNamedOperand(MI, Src1OpName);
2045   assert(Src1Mods &&
2046          "All commutable instructions have both src0 and src1 modifiers");
2047 
2048   int Src0ModsVal = Src0Mods->getImm();
2049   int Src1ModsVal = Src1Mods->getImm();
2050 
2051   Src1Mods->setImm(Src0ModsVal);
2052   Src0Mods->setImm(Src1ModsVal);
2053   return true;
2054 }
2055 
2056 static MachineInstr *swapRegAndNonRegOperand(MachineInstr &MI,
2057                                              MachineOperand &RegOp,
2058                                              MachineOperand &NonRegOp) {
2059   Register Reg = RegOp.getReg();
2060   unsigned SubReg = RegOp.getSubReg();
2061   bool IsKill = RegOp.isKill();
2062   bool IsDead = RegOp.isDead();
2063   bool IsUndef = RegOp.isUndef();
2064   bool IsDebug = RegOp.isDebug();
2065 
2066   if (NonRegOp.isImm())
2067     RegOp.ChangeToImmediate(NonRegOp.getImm());
2068   else if (NonRegOp.isFI())
2069     RegOp.ChangeToFrameIndex(NonRegOp.getIndex());
2070   else if (NonRegOp.isGlobal()) {
2071     RegOp.ChangeToGA(NonRegOp.getGlobal(), NonRegOp.getOffset(),
2072                      NonRegOp.getTargetFlags());
2073   } else
2074     return nullptr;
2075 
2076   // Make sure we don't reinterpret a subreg index in the target flags.
2077   RegOp.setTargetFlags(NonRegOp.getTargetFlags());
2078 
2079   NonRegOp.ChangeToRegister(Reg, false, false, IsKill, IsDead, IsUndef, IsDebug);
2080   NonRegOp.setSubReg(SubReg);
2081 
2082   return &MI;
2083 }
2084 
2085 MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
2086                                                   unsigned Src0Idx,
2087                                                   unsigned Src1Idx) const {
2088   assert(!NewMI && "this should never be used");
2089 
2090   unsigned Opc = MI.getOpcode();
2091   int CommutedOpcode = commuteOpcode(Opc);
2092   if (CommutedOpcode == -1)
2093     return nullptr;
2094 
2095   assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) ==
2096            static_cast<int>(Src0Idx) &&
2097          AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) ==
2098            static_cast<int>(Src1Idx) &&
2099          "inconsistency with findCommutedOpIndices");
2100 
2101   MachineOperand &Src0 = MI.getOperand(Src0Idx);
2102   MachineOperand &Src1 = MI.getOperand(Src1Idx);
2103 
2104   MachineInstr *CommutedMI = nullptr;
2105   if (Src0.isReg() && Src1.isReg()) {
2106     if (isOperandLegal(MI, Src1Idx, &Src0)) {
2107       // Be sure to copy the source modifiers to the right place.
2108       CommutedMI
2109         = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, Src0Idx, Src1Idx);
2110     }
2111 
2112   } else if (Src0.isReg() && !Src1.isReg()) {
2113     // src0 should always be able to support any operand type, so no need to
2114     // check operand legality.
2115     CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1);
2116   } else if (!Src0.isReg() && Src1.isReg()) {
2117     if (isOperandLegal(MI, Src1Idx, &Src0))
2118       CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0);
2119   } else {
2120     // FIXME: Found two non registers to commute. This does happen.
2121     return nullptr;
2122   }
2123 
2124   if (CommutedMI) {
2125     swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers,
2126                         Src1, AMDGPU::OpName::src1_modifiers);
2127 
2128     CommutedMI->setDesc(get(CommutedOpcode));
2129   }
2130 
2131   return CommutedMI;
2132 }
2133 
2134 // This needs to be implemented because the source modifiers may be inserted
2135 // between the true commutable operands, and the base
2136 // TargetInstrInfo::commuteInstruction uses it.
2137 bool SIInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
2138                                         unsigned &SrcOpIdx0,
2139                                         unsigned &SrcOpIdx1) const {
2140   return findCommutedOpIndices(MI.getDesc(), SrcOpIdx0, SrcOpIdx1);
2141 }
2142 
2143 bool SIInstrInfo::findCommutedOpIndices(MCInstrDesc Desc, unsigned &SrcOpIdx0,
2144                                         unsigned &SrcOpIdx1) const {
2145   if (!Desc.isCommutable())
2146     return false;
2147 
2148   unsigned Opc = Desc.getOpcode();
2149   int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
2150   if (Src0Idx == -1)
2151     return false;
2152 
2153   int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
2154   if (Src1Idx == -1)
2155     return false;
2156 
2157   return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx);
2158 }
2159 
2160 bool SIInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
2161                                         int64_t BrOffset) const {
2162   // BranchRelaxation should never have to check s_setpc_b64 because its dest
2163   // block is unanalyzable.
2164   assert(BranchOp != AMDGPU::S_SETPC_B64);
2165 
2166   // Convert to dwords.
2167   BrOffset /= 4;
2168 
2169   // The branch instructions do PC += signext(SIMM16 * 4) + 4, so the offset is
2170   // from the next instruction.
2171   BrOffset -= 1;
2172 
2173   return isIntN(BranchOffsetBits, BrOffset);
2174 }
2175 
2176 MachineBasicBlock *SIInstrInfo::getBranchDestBlock(
2177   const MachineInstr &MI) const {
2178   if (MI.getOpcode() == AMDGPU::S_SETPC_B64) {
2179     // This would be a difficult analysis to perform, but can always be legal so
2180     // there's no need to analyze it.
2181     return nullptr;
2182   }
2183 
2184   return MI.getOperand(0).getMBB();
2185 }
2186 
2187 unsigned SIInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
2188                                            MachineBasicBlock &DestBB,
2189                                            const DebugLoc &DL,
2190                                            int64_t BrOffset,
2191                                            RegScavenger *RS) const {
2192   assert(RS && "RegScavenger required for long branching");
2193   assert(MBB.empty() &&
2194          "new block should be inserted for expanding unconditional branch");
2195   assert(MBB.pred_size() == 1);
2196 
2197   MachineFunction *MF = MBB.getParent();
2198   MachineRegisterInfo &MRI = MF->getRegInfo();
2199 
2200   // FIXME: Virtual register workaround for RegScavenger not working with empty
2201   // blocks.
2202   Register PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2203 
2204   auto I = MBB.end();
2205 
2206   // We need to compute the offset relative to the instruction immediately after
2207   // s_getpc_b64. Insert pc arithmetic code before last terminator.
2208   MachineInstr *GetPC = BuildMI(MBB, I, DL, get(AMDGPU::S_GETPC_B64), PCReg);
2209 
2210   // TODO: Handle > 32-bit block address.
2211   if (BrOffset >= 0) {
2212     BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_U32))
2213       .addReg(PCReg, RegState::Define, AMDGPU::sub0)
2214       .addReg(PCReg, 0, AMDGPU::sub0)
2215       .addMBB(&DestBB, MO_LONG_BRANCH_FORWARD);
2216     BuildMI(MBB, I, DL, get(AMDGPU::S_ADDC_U32))
2217       .addReg(PCReg, RegState::Define, AMDGPU::sub1)
2218       .addReg(PCReg, 0, AMDGPU::sub1)
2219       .addImm(0);
2220   } else {
2221     // Backwards branch.
2222     BuildMI(MBB, I, DL, get(AMDGPU::S_SUB_U32))
2223       .addReg(PCReg, RegState::Define, AMDGPU::sub0)
2224       .addReg(PCReg, 0, AMDGPU::sub0)
2225       .addMBB(&DestBB, MO_LONG_BRANCH_BACKWARD);
2226     BuildMI(MBB, I, DL, get(AMDGPU::S_SUBB_U32))
2227       .addReg(PCReg, RegState::Define, AMDGPU::sub1)
2228       .addReg(PCReg, 0, AMDGPU::sub1)
2229       .addImm(0);
2230   }
2231 
2232   // Insert the indirect branch after the other terminator.
2233   BuildMI(&MBB, DL, get(AMDGPU::S_SETPC_B64))
2234     .addReg(PCReg);
2235 
2236   // FIXME: If spilling is necessary, this will fail because this scavenger has
2237   // no emergency stack slots. It is non-trivial to spill in this situation,
2238   // because the restore code needs to be specially placed after the
2239   // jump. BranchRelaxation then needs to be made aware of the newly inserted
2240   // block.
2241   //
2242   // If a spill is needed for the pc register pair, we need to insert a spill
2243   // restore block right before the destination block, and insert a short branch
2244   // into the old destination block's fallthrough predecessor.
2245   // e.g.:
2246   //
2247   // s_cbranch_scc0 skip_long_branch:
2248   //
2249   // long_branch_bb:
2250   //   spill s[8:9]
2251   //   s_getpc_b64 s[8:9]
2252   //   s_add_u32 s8, s8, restore_bb
2253   //   s_addc_u32 s9, s9, 0
2254   //   s_setpc_b64 s[8:9]
2255   //
2256   // skip_long_branch:
2257   //   foo;
2258   //
2259   // .....
2260   //
2261   // dest_bb_fallthrough_predecessor:
2262   // bar;
2263   // s_branch dest_bb
2264   //
2265   // restore_bb:
2266   //  restore s[8:9]
2267   //  fallthrough dest_bb
2268   ///
2269   // dest_bb:
2270   //   buzz;
2271 
2272   RS->enterBasicBlockEnd(MBB);
2273   Register Scav = RS->scavengeRegisterBackwards(
2274     AMDGPU::SReg_64RegClass,
2275     MachineBasicBlock::iterator(GetPC), false, 0);
2276   MRI.replaceRegWith(PCReg, Scav);
2277   MRI.clearVirtRegs();
2278   RS->setRegUsed(Scav);
2279 
2280   return 4 + 8 + 4 + 4;
2281 }
2282 
2283 unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) {
2284   switch (Cond) {
2285   case SIInstrInfo::SCC_TRUE:
2286     return AMDGPU::S_CBRANCH_SCC1;
2287   case SIInstrInfo::SCC_FALSE:
2288     return AMDGPU::S_CBRANCH_SCC0;
2289   case SIInstrInfo::VCCNZ:
2290     return AMDGPU::S_CBRANCH_VCCNZ;
2291   case SIInstrInfo::VCCZ:
2292     return AMDGPU::S_CBRANCH_VCCZ;
2293   case SIInstrInfo::EXECNZ:
2294     return AMDGPU::S_CBRANCH_EXECNZ;
2295   case SIInstrInfo::EXECZ:
2296     return AMDGPU::S_CBRANCH_EXECZ;
2297   default:
2298     llvm_unreachable("invalid branch predicate");
2299   }
2300 }
2301 
2302 SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) {
2303   switch (Opcode) {
2304   case AMDGPU::S_CBRANCH_SCC0:
2305     return SCC_FALSE;
2306   case AMDGPU::S_CBRANCH_SCC1:
2307     return SCC_TRUE;
2308   case AMDGPU::S_CBRANCH_VCCNZ:
2309     return VCCNZ;
2310   case AMDGPU::S_CBRANCH_VCCZ:
2311     return VCCZ;
2312   case AMDGPU::S_CBRANCH_EXECNZ:
2313     return EXECNZ;
2314   case AMDGPU::S_CBRANCH_EXECZ:
2315     return EXECZ;
2316   default:
2317     return INVALID_BR;
2318   }
2319 }
2320 
2321 bool SIInstrInfo::analyzeBranchImpl(MachineBasicBlock &MBB,
2322                                     MachineBasicBlock::iterator I,
2323                                     MachineBasicBlock *&TBB,
2324                                     MachineBasicBlock *&FBB,
2325                                     SmallVectorImpl<MachineOperand> &Cond,
2326                                     bool AllowModify) const {
2327   if (I->getOpcode() == AMDGPU::S_BRANCH) {
2328     // Unconditional Branch
2329     TBB = I->getOperand(0).getMBB();
2330     return false;
2331   }
2332 
2333   MachineBasicBlock *CondBB = nullptr;
2334 
2335   if (I->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
2336     CondBB = I->getOperand(1).getMBB();
2337     Cond.push_back(I->getOperand(0));
2338   } else {
2339     BranchPredicate Pred = getBranchPredicate(I->getOpcode());
2340     if (Pred == INVALID_BR)
2341       return true;
2342 
2343     CondBB = I->getOperand(0).getMBB();
2344     Cond.push_back(MachineOperand::CreateImm(Pred));
2345     Cond.push_back(I->getOperand(1)); // Save the branch register.
2346   }
2347   ++I;
2348 
2349   if (I == MBB.end()) {
2350     // Conditional branch followed by fall-through.
2351     TBB = CondBB;
2352     return false;
2353   }
2354 
2355   if (I->getOpcode() == AMDGPU::S_BRANCH) {
2356     TBB = CondBB;
2357     FBB = I->getOperand(0).getMBB();
2358     return false;
2359   }
2360 
2361   return true;
2362 }
2363 
2364 bool SIInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
2365                                 MachineBasicBlock *&FBB,
2366                                 SmallVectorImpl<MachineOperand> &Cond,
2367                                 bool AllowModify) const {
2368   MachineBasicBlock::iterator I = MBB.getFirstTerminator();
2369   auto E = MBB.end();
2370   if (I == E)
2371     return false;
2372 
2373   // Skip over the instructions that are artificially terminators for special
2374   // exec management.
2375   while (I != E && !I->isBranch() && !I->isReturn()) {
2376     switch (I->getOpcode()) {
2377     case AMDGPU::S_MOV_B64_term:
2378     case AMDGPU::S_XOR_B64_term:
2379     case AMDGPU::S_OR_B64_term:
2380     case AMDGPU::S_ANDN2_B64_term:
2381     case AMDGPU::S_AND_B64_term:
2382     case AMDGPU::S_MOV_B32_term:
2383     case AMDGPU::S_XOR_B32_term:
2384     case AMDGPU::S_OR_B32_term:
2385     case AMDGPU::S_ANDN2_B32_term:
2386     case AMDGPU::S_AND_B32_term:
2387       break;
2388     case AMDGPU::SI_IF:
2389     case AMDGPU::SI_ELSE:
2390     case AMDGPU::SI_KILL_I1_TERMINATOR:
2391     case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
2392       // FIXME: It's messy that these need to be considered here at all.
2393       return true;
2394     default:
2395       llvm_unreachable("unexpected non-branch terminator inst");
2396     }
2397 
2398     ++I;
2399   }
2400 
2401   if (I == E)
2402     return false;
2403 
2404   return analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify);
2405 }
2406 
2407 unsigned SIInstrInfo::removeBranch(MachineBasicBlock &MBB,
2408                                    int *BytesRemoved) const {
2409   MachineBasicBlock::iterator I = MBB.getFirstTerminator();
2410 
2411   unsigned Count = 0;
2412   unsigned RemovedSize = 0;
2413   while (I != MBB.end()) {
2414     MachineBasicBlock::iterator Next = std::next(I);
2415     RemovedSize += getInstSizeInBytes(*I);
2416     I->eraseFromParent();
2417     ++Count;
2418     I = Next;
2419   }
2420 
2421   if (BytesRemoved)
2422     *BytesRemoved = RemovedSize;
2423 
2424   return Count;
2425 }
2426 
2427 // Copy the flags onto the implicit condition register operand.
2428 static void preserveCondRegFlags(MachineOperand &CondReg,
2429                                  const MachineOperand &OrigCond) {
2430   CondReg.setIsUndef(OrigCond.isUndef());
2431   CondReg.setIsKill(OrigCond.isKill());
2432 }
2433 
2434 unsigned SIInstrInfo::insertBranch(MachineBasicBlock &MBB,
2435                                    MachineBasicBlock *TBB,
2436                                    MachineBasicBlock *FBB,
2437                                    ArrayRef<MachineOperand> Cond,
2438                                    const DebugLoc &DL,
2439                                    int *BytesAdded) const {
2440   if (!FBB && Cond.empty()) {
2441     BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
2442       .addMBB(TBB);
2443     if (BytesAdded)
2444       *BytesAdded = ST.hasOffset3fBug() ? 8 : 4;
2445     return 1;
2446   }
2447 
2448   if(Cond.size() == 1 && Cond[0].isReg()) {
2449      BuildMI(&MBB, DL, get(AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO))
2450        .add(Cond[0])
2451        .addMBB(TBB);
2452      return 1;
2453   }
2454 
2455   assert(TBB && Cond[0].isImm());
2456 
2457   unsigned Opcode
2458     = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm()));
2459 
2460   if (!FBB) {
2461     Cond[1].isUndef();
2462     MachineInstr *CondBr =
2463       BuildMI(&MBB, DL, get(Opcode))
2464       .addMBB(TBB);
2465 
2466     // Copy the flags onto the implicit condition register operand.
2467     preserveCondRegFlags(CondBr->getOperand(1), Cond[1]);
2468     fixImplicitOperands(*CondBr);
2469 
2470     if (BytesAdded)
2471       *BytesAdded = ST.hasOffset3fBug() ? 8 : 4;
2472     return 1;
2473   }
2474 
2475   assert(TBB && FBB);
2476 
2477   MachineInstr *CondBr =
2478     BuildMI(&MBB, DL, get(Opcode))
2479     .addMBB(TBB);
2480   fixImplicitOperands(*CondBr);
2481   BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
2482     .addMBB(FBB);
2483 
2484   MachineOperand &CondReg = CondBr->getOperand(1);
2485   CondReg.setIsUndef(Cond[1].isUndef());
2486   CondReg.setIsKill(Cond[1].isKill());
2487 
2488   if (BytesAdded)
2489     *BytesAdded = ST.hasOffset3fBug() ? 16 : 8;
2490 
2491   return 2;
2492 }
2493 
2494 bool SIInstrInfo::reverseBranchCondition(
2495   SmallVectorImpl<MachineOperand> &Cond) const {
2496   if (Cond.size() != 2) {
2497     return true;
2498   }
2499 
2500   if (Cond[0].isImm()) {
2501     Cond[0].setImm(-Cond[0].getImm());
2502     return false;
2503   }
2504 
2505   return true;
2506 }
2507 
2508 bool SIInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
2509                                   ArrayRef<MachineOperand> Cond,
2510                                   Register DstReg, Register TrueReg,
2511                                   Register FalseReg, int &CondCycles,
2512                                   int &TrueCycles, int &FalseCycles) const {
2513   switch (Cond[0].getImm()) {
2514   case VCCNZ:
2515   case VCCZ: {
2516     const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2517     const TargetRegisterClass *RC = MRI.getRegClass(TrueReg);
2518     if (MRI.getRegClass(FalseReg) != RC)
2519       return false;
2520 
2521     int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32;
2522     CondCycles = TrueCycles = FalseCycles = NumInsts; // ???
2523 
2524     // Limit to equal cost for branch vs. N v_cndmask_b32s.
2525     return RI.hasVGPRs(RC) && NumInsts <= 6;
2526   }
2527   case SCC_TRUE:
2528   case SCC_FALSE: {
2529     // FIXME: We could insert for VGPRs if we could replace the original compare
2530     // with a vector one.
2531     const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2532     const TargetRegisterClass *RC = MRI.getRegClass(TrueReg);
2533     if (MRI.getRegClass(FalseReg) != RC)
2534       return false;
2535 
2536     int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32;
2537 
2538     // Multiples of 8 can do s_cselect_b64
2539     if (NumInsts % 2 == 0)
2540       NumInsts /= 2;
2541 
2542     CondCycles = TrueCycles = FalseCycles = NumInsts; // ???
2543     return RI.isSGPRClass(RC);
2544   }
2545   default:
2546     return false;
2547   }
2548 }
2549 
2550 void SIInstrInfo::insertSelect(MachineBasicBlock &MBB,
2551                                MachineBasicBlock::iterator I, const DebugLoc &DL,
2552                                Register DstReg, ArrayRef<MachineOperand> Cond,
2553                                Register TrueReg, Register FalseReg) const {
2554   BranchPredicate Pred = static_cast<BranchPredicate>(Cond[0].getImm());
2555   if (Pred == VCCZ || Pred == SCC_FALSE) {
2556     Pred = static_cast<BranchPredicate>(-Pred);
2557     std::swap(TrueReg, FalseReg);
2558   }
2559 
2560   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2561   const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg);
2562   unsigned DstSize = RI.getRegSizeInBits(*DstRC);
2563 
2564   if (DstSize == 32) {
2565     MachineInstr *Select;
2566     if (Pred == SCC_TRUE) {
2567       Select = BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B32), DstReg)
2568         .addReg(TrueReg)
2569         .addReg(FalseReg);
2570     } else {
2571       // Instruction's operands are backwards from what is expected.
2572       Select = BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e32), DstReg)
2573         .addReg(FalseReg)
2574         .addReg(TrueReg);
2575     }
2576 
2577     preserveCondRegFlags(Select->getOperand(3), Cond[1]);
2578     return;
2579   }
2580 
2581   if (DstSize == 64 && Pred == SCC_TRUE) {
2582     MachineInstr *Select =
2583       BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), DstReg)
2584       .addReg(TrueReg)
2585       .addReg(FalseReg);
2586 
2587     preserveCondRegFlags(Select->getOperand(3), Cond[1]);
2588     return;
2589   }
2590 
2591   static const int16_t Sub0_15[] = {
2592     AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
2593     AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
2594     AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
2595     AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15,
2596   };
2597 
2598   static const int16_t Sub0_15_64[] = {
2599     AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
2600     AMDGPU::sub4_sub5, AMDGPU::sub6_sub7,
2601     AMDGPU::sub8_sub9, AMDGPU::sub10_sub11,
2602     AMDGPU::sub12_sub13, AMDGPU::sub14_sub15,
2603   };
2604 
2605   unsigned SelOp = AMDGPU::V_CNDMASK_B32_e32;
2606   const TargetRegisterClass *EltRC = &AMDGPU::VGPR_32RegClass;
2607   const int16_t *SubIndices = Sub0_15;
2608   int NElts = DstSize / 32;
2609 
2610   // 64-bit select is only available for SALU.
2611   // TODO: Split 96-bit into 64-bit and 32-bit, not 3x 32-bit.
2612   if (Pred == SCC_TRUE) {
2613     if (NElts % 2) {
2614       SelOp = AMDGPU::S_CSELECT_B32;
2615       EltRC = &AMDGPU::SGPR_32RegClass;
2616     } else {
2617       SelOp = AMDGPU::S_CSELECT_B64;
2618       EltRC = &AMDGPU::SGPR_64RegClass;
2619       SubIndices = Sub0_15_64;
2620       NElts /= 2;
2621     }
2622   }
2623 
2624   MachineInstrBuilder MIB = BuildMI(
2625     MBB, I, DL, get(AMDGPU::REG_SEQUENCE), DstReg);
2626 
2627   I = MIB->getIterator();
2628 
2629   SmallVector<Register, 8> Regs;
2630   for (int Idx = 0; Idx != NElts; ++Idx) {
2631     Register DstElt = MRI.createVirtualRegister(EltRC);
2632     Regs.push_back(DstElt);
2633 
2634     unsigned SubIdx = SubIndices[Idx];
2635 
2636     MachineInstr *Select;
2637     if (SelOp == AMDGPU::V_CNDMASK_B32_e32) {
2638       Select =
2639         BuildMI(MBB, I, DL, get(SelOp), DstElt)
2640         .addReg(FalseReg, 0, SubIdx)
2641         .addReg(TrueReg, 0, SubIdx);
2642     } else {
2643       Select =
2644         BuildMI(MBB, I, DL, get(SelOp), DstElt)
2645         .addReg(TrueReg, 0, SubIdx)
2646         .addReg(FalseReg, 0, SubIdx);
2647     }
2648 
2649     preserveCondRegFlags(Select->getOperand(3), Cond[1]);
2650     fixImplicitOperands(*Select);
2651 
2652     MIB.addReg(DstElt)
2653        .addImm(SubIdx);
2654   }
2655 }
2656 
2657 bool SIInstrInfo::isFoldableCopy(const MachineInstr &MI) const {
2658   switch (MI.getOpcode()) {
2659   case AMDGPU::V_MOV_B32_e32:
2660   case AMDGPU::V_MOV_B32_e64:
2661   case AMDGPU::V_MOV_B64_PSEUDO: {
2662     // If there are additional implicit register operands, this may be used for
2663     // register indexing so the source register operand isn't simply copied.
2664     unsigned NumOps = MI.getDesc().getNumOperands() +
2665       MI.getDesc().getNumImplicitUses();
2666 
2667     return MI.getNumOperands() == NumOps;
2668   }
2669   case AMDGPU::S_MOV_B32:
2670   case AMDGPU::S_MOV_B64:
2671   case AMDGPU::COPY:
2672   case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
2673   case AMDGPU::V_ACCVGPR_READ_B32_e64:
2674   case AMDGPU::V_ACCVGPR_MOV_B32:
2675     return true;
2676   default:
2677     return false;
2678   }
2679 }
2680 
2681 unsigned SIInstrInfo::getAddressSpaceForPseudoSourceKind(
2682     unsigned Kind) const {
2683   switch(Kind) {
2684   case PseudoSourceValue::Stack:
2685   case PseudoSourceValue::FixedStack:
2686     return AMDGPUAS::PRIVATE_ADDRESS;
2687   case PseudoSourceValue::ConstantPool:
2688   case PseudoSourceValue::GOT:
2689   case PseudoSourceValue::JumpTable:
2690   case PseudoSourceValue::GlobalValueCallEntry:
2691   case PseudoSourceValue::ExternalSymbolCallEntry:
2692   case PseudoSourceValue::TargetCustom:
2693     return AMDGPUAS::CONSTANT_ADDRESS;
2694   }
2695   return AMDGPUAS::FLAT_ADDRESS;
2696 }
2697 
2698 static void removeModOperands(MachineInstr &MI) {
2699   unsigned Opc = MI.getOpcode();
2700   int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc,
2701                                               AMDGPU::OpName::src0_modifiers);
2702   int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc,
2703                                               AMDGPU::OpName::src1_modifiers);
2704   int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc,
2705                                               AMDGPU::OpName::src2_modifiers);
2706 
2707   MI.RemoveOperand(Src2ModIdx);
2708   MI.RemoveOperand(Src1ModIdx);
2709   MI.RemoveOperand(Src0ModIdx);
2710 }
2711 
2712 bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
2713                                 Register Reg, MachineRegisterInfo *MRI) const {
2714   if (!MRI->hasOneNonDBGUse(Reg))
2715     return false;
2716 
2717   switch (DefMI.getOpcode()) {
2718   default:
2719     return false;
2720   case AMDGPU::S_MOV_B64:
2721     // TODO: We could fold 64-bit immediates, but this get compilicated
2722     // when there are sub-registers.
2723     return false;
2724 
2725   case AMDGPU::V_MOV_B32_e32:
2726   case AMDGPU::S_MOV_B32:
2727   case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
2728     break;
2729   }
2730 
2731   const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0);
2732   assert(ImmOp);
2733   // FIXME: We could handle FrameIndex values here.
2734   if (!ImmOp->isImm())
2735     return false;
2736 
2737   unsigned Opc = UseMI.getOpcode();
2738   if (Opc == AMDGPU::COPY) {
2739     Register DstReg = UseMI.getOperand(0).getReg();
2740     bool Is16Bit = getOpSize(UseMI, 0) == 2;
2741     bool isVGPRCopy = RI.isVGPR(*MRI, DstReg);
2742     unsigned NewOpc = isVGPRCopy ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
2743     APInt Imm(32, ImmOp->getImm());
2744 
2745     if (UseMI.getOperand(1).getSubReg() == AMDGPU::hi16)
2746       Imm = Imm.ashr(16);
2747 
2748     if (RI.isAGPR(*MRI, DstReg)) {
2749       if (!isInlineConstant(Imm))
2750         return false;
2751       NewOpc = AMDGPU::V_ACCVGPR_WRITE_B32_e64;
2752     }
2753 
2754     if (Is16Bit) {
2755        if (isVGPRCopy)
2756          return false; // Do not clobber vgpr_hi16
2757 
2758        if (DstReg.isVirtual() &&
2759            UseMI.getOperand(0).getSubReg() != AMDGPU::lo16)
2760          return false;
2761 
2762       UseMI.getOperand(0).setSubReg(0);
2763       if (DstReg.isPhysical()) {
2764         DstReg = RI.get32BitRegister(DstReg);
2765         UseMI.getOperand(0).setReg(DstReg);
2766       }
2767       assert(UseMI.getOperand(1).getReg().isVirtual());
2768     }
2769 
2770     UseMI.setDesc(get(NewOpc));
2771     UseMI.getOperand(1).ChangeToImmediate(Imm.getSExtValue());
2772     UseMI.addImplicitDefUseOperands(*UseMI.getParent()->getParent());
2773     return true;
2774   }
2775 
2776   if (Opc == AMDGPU::V_MAD_F32_e64 || Opc == AMDGPU::V_MAC_F32_e64 ||
2777       Opc == AMDGPU::V_MAD_F16_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
2778       Opc == AMDGPU::V_FMA_F32_e64 || Opc == AMDGPU::V_FMAC_F32_e64 ||
2779       Opc == AMDGPU::V_FMA_F16_e64 || Opc == AMDGPU::V_FMAC_F16_e64) {
2780     // Don't fold if we are using source or output modifiers. The new VOP2
2781     // instructions don't have them.
2782     if (hasAnyModifiersSet(UseMI))
2783       return false;
2784 
2785     // If this is a free constant, there's no reason to do this.
2786     // TODO: We could fold this here instead of letting SIFoldOperands do it
2787     // later.
2788     MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0);
2789 
2790     // Any src operand can be used for the legality check.
2791     if (isInlineConstant(UseMI, *Src0, *ImmOp))
2792       return false;
2793 
2794     bool IsF32 = Opc == AMDGPU::V_MAD_F32_e64 || Opc == AMDGPU::V_MAC_F32_e64 ||
2795                  Opc == AMDGPU::V_FMA_F32_e64 || Opc == AMDGPU::V_FMAC_F32_e64;
2796     bool IsFMA = Opc == AMDGPU::V_FMA_F32_e64 || Opc == AMDGPU::V_FMAC_F32_e64 ||
2797                  Opc == AMDGPU::V_FMA_F16_e64 || Opc == AMDGPU::V_FMAC_F16_e64;
2798     MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1);
2799     MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2);
2800 
2801     // Multiplied part is the constant: Use v_madmk_{f16, f32}.
2802     // We should only expect these to be on src0 due to canonicalizations.
2803     if (Src0->isReg() && Src0->getReg() == Reg) {
2804       if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))
2805         return false;
2806 
2807       if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg())))
2808         return false;
2809 
2810       unsigned NewOpc =
2811         IsFMA ? (IsF32 ? AMDGPU::V_FMAMK_F32 : AMDGPU::V_FMAMK_F16)
2812               : (IsF32 ? AMDGPU::V_MADMK_F32 : AMDGPU::V_MADMK_F16);
2813       if (pseudoToMCOpcode(NewOpc) == -1)
2814         return false;
2815 
2816       // We need to swap operands 0 and 1 since madmk constant is at operand 1.
2817 
2818       const int64_t Imm = ImmOp->getImm();
2819 
2820       // FIXME: This would be a lot easier if we could return a new instruction
2821       // instead of having to modify in place.
2822 
2823       // Remove these first since they are at the end.
2824       UseMI.RemoveOperand(
2825           AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod));
2826       UseMI.RemoveOperand(
2827           AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp));
2828 
2829       Register Src1Reg = Src1->getReg();
2830       unsigned Src1SubReg = Src1->getSubReg();
2831       Src0->setReg(Src1Reg);
2832       Src0->setSubReg(Src1SubReg);
2833       Src0->setIsKill(Src1->isKill());
2834 
2835       if (Opc == AMDGPU::V_MAC_F32_e64 ||
2836           Opc == AMDGPU::V_MAC_F16_e64 ||
2837           Opc == AMDGPU::V_FMAC_F32_e64 ||
2838           Opc == AMDGPU::V_FMAC_F16_e64)
2839         UseMI.untieRegOperand(
2840             AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
2841 
2842       Src1->ChangeToImmediate(Imm);
2843 
2844       removeModOperands(UseMI);
2845       UseMI.setDesc(get(NewOpc));
2846 
2847       bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
2848       if (DeleteDef)
2849         DefMI.eraseFromParent();
2850 
2851       return true;
2852     }
2853 
2854     // Added part is the constant: Use v_madak_{f16, f32}.
2855     if (Src2->isReg() && Src2->getReg() == Reg) {
2856       // Not allowed to use constant bus for another operand.
2857       // We can however allow an inline immediate as src0.
2858       bool Src0Inlined = false;
2859       if (Src0->isReg()) {
2860         // Try to inline constant if possible.
2861         // If the Def moves immediate and the use is single
2862         // We are saving VGPR here.
2863         MachineInstr *Def = MRI->getUniqueVRegDef(Src0->getReg());
2864         if (Def && Def->isMoveImmediate() &&
2865           isInlineConstant(Def->getOperand(1)) &&
2866           MRI->hasOneUse(Src0->getReg())) {
2867           Src0->ChangeToImmediate(Def->getOperand(1).getImm());
2868           Src0Inlined = true;
2869         } else if ((Src0->getReg().isPhysical() &&
2870                     (ST.getConstantBusLimit(Opc) <= 1 &&
2871                      RI.isSGPRClass(RI.getPhysRegClass(Src0->getReg())))) ||
2872                    (Src0->getReg().isVirtual() &&
2873                     (ST.getConstantBusLimit(Opc) <= 1 &&
2874                      RI.isSGPRClass(MRI->getRegClass(Src0->getReg())))))
2875           return false;
2876           // VGPR is okay as Src0 - fallthrough
2877       }
2878 
2879       if (Src1->isReg() && !Src0Inlined ) {
2880         // We have one slot for inlinable constant so far - try to fill it
2881         MachineInstr *Def = MRI->getUniqueVRegDef(Src1->getReg());
2882         if (Def && Def->isMoveImmediate() &&
2883             isInlineConstant(Def->getOperand(1)) &&
2884             MRI->hasOneUse(Src1->getReg()) &&
2885             commuteInstruction(UseMI)) {
2886             Src0->ChangeToImmediate(Def->getOperand(1).getImm());
2887         } else if ((Src1->getReg().isPhysical() &&
2888                     RI.isSGPRClass(RI.getPhysRegClass(Src1->getReg()))) ||
2889                    (Src1->getReg().isVirtual() &&
2890                     RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))))
2891           return false;
2892           // VGPR is okay as Src1 - fallthrough
2893       }
2894 
2895       unsigned NewOpc =
2896         IsFMA ? (IsF32 ? AMDGPU::V_FMAAK_F32 : AMDGPU::V_FMAAK_F16)
2897               : (IsF32 ? AMDGPU::V_MADAK_F32 : AMDGPU::V_MADAK_F16);
2898       if (pseudoToMCOpcode(NewOpc) == -1)
2899         return false;
2900 
2901       const int64_t Imm = ImmOp->getImm();
2902 
2903       // FIXME: This would be a lot easier if we could return a new instruction
2904       // instead of having to modify in place.
2905 
2906       // Remove these first since they are at the end.
2907       UseMI.RemoveOperand(
2908           AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod));
2909       UseMI.RemoveOperand(
2910           AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp));
2911 
2912       if (Opc == AMDGPU::V_MAC_F32_e64 ||
2913           Opc == AMDGPU::V_MAC_F16_e64 ||
2914           Opc == AMDGPU::V_FMAC_F32_e64 ||
2915           Opc == AMDGPU::V_FMAC_F16_e64)
2916         UseMI.untieRegOperand(
2917             AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
2918 
2919       // ChangingToImmediate adds Src2 back to the instruction.
2920       Src2->ChangeToImmediate(Imm);
2921 
2922       // These come before src2.
2923       removeModOperands(UseMI);
2924       UseMI.setDesc(get(NewOpc));
2925       // It might happen that UseMI was commuted
2926       // and we now have SGPR as SRC1. If so 2 inlined
2927       // constant and SGPR are illegal.
2928       legalizeOperands(UseMI);
2929 
2930       bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
2931       if (DeleteDef)
2932         DefMI.eraseFromParent();
2933 
2934       return true;
2935     }
2936   }
2937 
2938   return false;
2939 }
2940 
2941 static bool
2942 memOpsHaveSameBaseOperands(ArrayRef<const MachineOperand *> BaseOps1,
2943                            ArrayRef<const MachineOperand *> BaseOps2) {
2944   if (BaseOps1.size() != BaseOps2.size())
2945     return false;
2946   for (size_t I = 0, E = BaseOps1.size(); I < E; ++I) {
2947     if (!BaseOps1[I]->isIdenticalTo(*BaseOps2[I]))
2948       return false;
2949   }
2950   return true;
2951 }
2952 
2953 static bool offsetsDoNotOverlap(int WidthA, int OffsetA,
2954                                 int WidthB, int OffsetB) {
2955   int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
2956   int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
2957   int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
2958   return LowOffset + LowWidth <= HighOffset;
2959 }
2960 
2961 bool SIInstrInfo::checkInstOffsetsDoNotOverlap(const MachineInstr &MIa,
2962                                                const MachineInstr &MIb) const {
2963   SmallVector<const MachineOperand *, 4> BaseOps0, BaseOps1;
2964   int64_t Offset0, Offset1;
2965   unsigned Dummy0, Dummy1;
2966   bool Offset0IsScalable, Offset1IsScalable;
2967   if (!getMemOperandsWithOffsetWidth(MIa, BaseOps0, Offset0, Offset0IsScalable,
2968                                      Dummy0, &RI) ||
2969       !getMemOperandsWithOffsetWidth(MIb, BaseOps1, Offset1, Offset1IsScalable,
2970                                      Dummy1, &RI))
2971     return false;
2972 
2973   if (!memOpsHaveSameBaseOperands(BaseOps0, BaseOps1))
2974     return false;
2975 
2976   if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) {
2977     // FIXME: Handle ds_read2 / ds_write2.
2978     return false;
2979   }
2980   unsigned Width0 = MIa.memoperands().front()->getSize();
2981   unsigned Width1 = MIb.memoperands().front()->getSize();
2982   return offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1);
2983 }
2984 
2985 bool SIInstrInfo::areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
2986                                                   const MachineInstr &MIb) const {
2987   assert(MIa.mayLoadOrStore() &&
2988          "MIa must load from or modify a memory location");
2989   assert(MIb.mayLoadOrStore() &&
2990          "MIb must load from or modify a memory location");
2991 
2992   if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects())
2993     return false;
2994 
2995   // XXX - Can we relax this between address spaces?
2996   if (MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
2997     return false;
2998 
2999   // TODO: Should we check the address space from the MachineMemOperand? That
3000   // would allow us to distinguish objects we know don't alias based on the
3001   // underlying address space, even if it was lowered to a different one,
3002   // e.g. private accesses lowered to use MUBUF instructions on a scratch
3003   // buffer.
3004   if (isDS(MIa)) {
3005     if (isDS(MIb))
3006       return checkInstOffsetsDoNotOverlap(MIa, MIb);
3007 
3008     return !isFLAT(MIb) || isSegmentSpecificFLAT(MIb);
3009   }
3010 
3011   if (isMUBUF(MIa) || isMTBUF(MIa)) {
3012     if (isMUBUF(MIb) || isMTBUF(MIb))
3013       return checkInstOffsetsDoNotOverlap(MIa, MIb);
3014 
3015     return !isFLAT(MIb) && !isSMRD(MIb);
3016   }
3017 
3018   if (isSMRD(MIa)) {
3019     if (isSMRD(MIb))
3020       return checkInstOffsetsDoNotOverlap(MIa, MIb);
3021 
3022     return !isFLAT(MIb) && !isMUBUF(MIb) && !isMTBUF(MIb);
3023   }
3024 
3025   if (isFLAT(MIa)) {
3026     if (isFLAT(MIb))
3027       return checkInstOffsetsDoNotOverlap(MIa, MIb);
3028 
3029     return false;
3030   }
3031 
3032   return false;
3033 }
3034 
3035 static int64_t getFoldableImm(const MachineOperand* MO) {
3036   if (!MO->isReg())
3037     return false;
3038   const MachineFunction *MF = MO->getParent()->getParent()->getParent();
3039   const MachineRegisterInfo &MRI = MF->getRegInfo();
3040   auto Def = MRI.getUniqueVRegDef(MO->getReg());
3041   if (Def && Def->getOpcode() == AMDGPU::V_MOV_B32_e32 &&
3042       Def->getOperand(1).isImm())
3043     return Def->getOperand(1).getImm();
3044   return AMDGPU::NoRegister;
3045 }
3046 
3047 static void updateLiveVariables(LiveVariables *LV, MachineInstr &MI,
3048                                 MachineInstr &NewMI) {
3049   if (LV) {
3050     unsigned NumOps = MI.getNumOperands();
3051     for (unsigned I = 1; I < NumOps; ++I) {
3052       MachineOperand &Op = MI.getOperand(I);
3053       if (Op.isReg() && Op.isKill())
3054         LV->replaceKillInstruction(Op.getReg(), MI, NewMI);
3055     }
3056   }
3057 }
3058 
3059 MachineInstr *SIInstrInfo::convertToThreeAddress(MachineFunction::iterator &MBB,
3060                                                  MachineInstr &MI,
3061                                                  LiveVariables *LV) const {
3062   unsigned Opc = MI.getOpcode();
3063   bool IsF16 = false;
3064   bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e32 || Opc == AMDGPU::V_FMAC_F32_e64 ||
3065                Opc == AMDGPU::V_FMAC_F16_e32 || Opc == AMDGPU::V_FMAC_F16_e64 ||
3066                Opc == AMDGPU::V_FMAC_F64_e32 || Opc == AMDGPU::V_FMAC_F64_e64;
3067   bool IsF64 = Opc == AMDGPU::V_FMAC_F64_e32 || Opc == AMDGPU::V_FMAC_F64_e64;
3068 
3069   switch (Opc) {
3070   default:
3071     return nullptr;
3072   case AMDGPU::V_MAC_F16_e64:
3073   case AMDGPU::V_FMAC_F16_e64:
3074     IsF16 = true;
3075     LLVM_FALLTHROUGH;
3076   case AMDGPU::V_MAC_F32_e64:
3077   case AMDGPU::V_FMAC_F32_e64:
3078   case AMDGPU::V_FMAC_F64_e64:
3079     break;
3080   case AMDGPU::V_MAC_F16_e32:
3081   case AMDGPU::V_FMAC_F16_e32:
3082     IsF16 = true;
3083     LLVM_FALLTHROUGH;
3084   case AMDGPU::V_MAC_F32_e32:
3085   case AMDGPU::V_FMAC_F32_e32:
3086   case AMDGPU::V_FMAC_F64_e32: {
3087     int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
3088                                              AMDGPU::OpName::src0);
3089     const MachineOperand *Src0 = &MI.getOperand(Src0Idx);
3090     if (!Src0->isReg() && !Src0->isImm())
3091       return nullptr;
3092 
3093     if (Src0->isImm() && !isInlineConstant(MI, Src0Idx, *Src0))
3094       return nullptr;
3095 
3096     break;
3097   }
3098   }
3099 
3100   const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
3101   const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0);
3102   const MachineOperand *Src0Mods =
3103     getNamedOperand(MI, AMDGPU::OpName::src0_modifiers);
3104   const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
3105   const MachineOperand *Src1Mods =
3106     getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);
3107   const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
3108   const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp);
3109   const MachineOperand *Omod = getNamedOperand(MI, AMDGPU::OpName::omod);
3110   MachineInstrBuilder MIB;
3111 
3112   if (!Src0Mods && !Src1Mods && !Clamp && !Omod && !IsF64 &&
3113       // If we have an SGPR input, we will violate the constant bus restriction.
3114       (ST.getConstantBusLimit(Opc) > 1 || !Src0->isReg() ||
3115        !RI.isSGPRReg(MBB->getParent()->getRegInfo(), Src0->getReg()))) {
3116     if (auto Imm = getFoldableImm(Src2)) {
3117       unsigned NewOpc =
3118           IsFMA ? (IsF16 ? AMDGPU::V_FMAAK_F16 : AMDGPU::V_FMAAK_F32)
3119                 : (IsF16 ? AMDGPU::V_MADAK_F16 : AMDGPU::V_MADAK_F32);
3120       if (pseudoToMCOpcode(NewOpc) != -1) {
3121         MIB = BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc))
3122                   .add(*Dst)
3123                   .add(*Src0)
3124                   .add(*Src1)
3125                   .addImm(Imm);
3126         updateLiveVariables(LV, MI, *MIB);
3127         return MIB;
3128       }
3129     }
3130     unsigned NewOpc = IsFMA
3131                           ? (IsF16 ? AMDGPU::V_FMAMK_F16 : AMDGPU::V_FMAMK_F32)
3132                           : (IsF16 ? AMDGPU::V_MADMK_F16 : AMDGPU::V_MADMK_F32);
3133     if (auto Imm = getFoldableImm(Src1)) {
3134       if (pseudoToMCOpcode(NewOpc) != -1) {
3135         MIB = BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc))
3136                   .add(*Dst)
3137                   .add(*Src0)
3138                   .addImm(Imm)
3139                   .add(*Src2);
3140         updateLiveVariables(LV, MI, *MIB);
3141         return MIB;
3142       }
3143     }
3144     if (auto Imm = getFoldableImm(Src0)) {
3145       if (pseudoToMCOpcode(NewOpc) != -1 &&
3146           isOperandLegal(
3147               MI, AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::src0),
3148               Src1)) {
3149         MIB = BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc))
3150                   .add(*Dst)
3151                   .add(*Src1)
3152                   .addImm(Imm)
3153                   .add(*Src2);
3154         updateLiveVariables(LV, MI, *MIB);
3155         return MIB;
3156       }
3157     }
3158   }
3159 
3160   unsigned NewOpc = IsFMA ? (IsF16 ? AMDGPU::V_FMA_F16_e64
3161                                    : IsF64 ? AMDGPU::V_FMA_F64_e64
3162                                            : AMDGPU::V_FMA_F32_e64)
3163                           : (IsF16 ? AMDGPU::V_MAD_F16_e64 : AMDGPU::V_MAD_F32_e64);
3164   if (pseudoToMCOpcode(NewOpc) == -1)
3165     return nullptr;
3166 
3167   MIB = BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc))
3168             .add(*Dst)
3169             .addImm(Src0Mods ? Src0Mods->getImm() : 0)
3170             .add(*Src0)
3171             .addImm(Src1Mods ? Src1Mods->getImm() : 0)
3172             .add(*Src1)
3173             .addImm(0) // Src mods
3174             .add(*Src2)
3175             .addImm(Clamp ? Clamp->getImm() : 0)
3176             .addImm(Omod ? Omod->getImm() : 0);
3177   updateLiveVariables(LV, MI, *MIB);
3178   return MIB;
3179 }
3180 
3181 // It's not generally safe to move VALU instructions across these since it will
3182 // start using the register as a base index rather than directly.
3183 // XXX - Why isn't hasSideEffects sufficient for these?
3184 static bool changesVGPRIndexingMode(const MachineInstr &MI) {
3185   switch (MI.getOpcode()) {
3186   case AMDGPU::S_SET_GPR_IDX_ON:
3187   case AMDGPU::S_SET_GPR_IDX_MODE:
3188   case AMDGPU::S_SET_GPR_IDX_OFF:
3189     return true;
3190   default:
3191     return false;
3192   }
3193 }
3194 
3195 bool SIInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
3196                                        const MachineBasicBlock *MBB,
3197                                        const MachineFunction &MF) const {
3198   // Skipping the check for SP writes in the base implementation. The reason it
3199   // was added was apparently due to compile time concerns.
3200   //
3201   // TODO: Do we really want this barrier? It triggers unnecessary hazard nops
3202   // but is probably avoidable.
3203 
3204   // Copied from base implementation.
3205   // Terminators and labels can't be scheduled around.
3206   if (MI.isTerminator() || MI.isPosition())
3207     return true;
3208 
3209   // INLINEASM_BR can jump to another block
3210   if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
3211     return true;
3212 
3213   // Target-independent instructions do not have an implicit-use of EXEC, even
3214   // when they operate on VGPRs. Treating EXEC modifications as scheduling
3215   // boundaries prevents incorrect movements of such instructions.
3216   return MI.modifiesRegister(AMDGPU::EXEC, &RI) ||
3217          MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 ||
3218          MI.getOpcode() == AMDGPU::S_SETREG_B32 ||
3219          changesVGPRIndexingMode(MI);
3220 }
3221 
3222 bool SIInstrInfo::isAlwaysGDS(uint16_t Opcode) const {
3223   return Opcode == AMDGPU::DS_ORDERED_COUNT ||
3224          Opcode == AMDGPU::DS_GWS_INIT ||
3225          Opcode == AMDGPU::DS_GWS_SEMA_V ||
3226          Opcode == AMDGPU::DS_GWS_SEMA_BR ||
3227          Opcode == AMDGPU::DS_GWS_SEMA_P ||
3228          Opcode == AMDGPU::DS_GWS_SEMA_RELEASE_ALL ||
3229          Opcode == AMDGPU::DS_GWS_BARRIER;
3230 }
3231 
3232 bool SIInstrInfo::modifiesModeRegister(const MachineInstr &MI) {
3233   // Skip the full operand and register alias search modifiesRegister
3234   // does. There's only a handful of instructions that touch this, it's only an
3235   // implicit def, and doesn't alias any other registers.
3236   if (const MCPhysReg *ImpDef = MI.getDesc().getImplicitDefs()) {
3237     for (; ImpDef && *ImpDef; ++ImpDef) {
3238       if (*ImpDef == AMDGPU::MODE)
3239         return true;
3240     }
3241   }
3242 
3243   return false;
3244 }
3245 
3246 bool SIInstrInfo::hasUnwantedEffectsWhenEXECEmpty(const MachineInstr &MI) const {
3247   unsigned Opcode = MI.getOpcode();
3248 
3249   if (MI.mayStore() && isSMRD(MI))
3250     return true; // scalar store or atomic
3251 
3252   // This will terminate the function when other lanes may need to continue.
3253   if (MI.isReturn())
3254     return true;
3255 
3256   // These instructions cause shader I/O that may cause hardware lockups
3257   // when executed with an empty EXEC mask.
3258   //
3259   // Note: exp with VM = DONE = 0 is automatically skipped by hardware when
3260   //       EXEC = 0, but checking for that case here seems not worth it
3261   //       given the typical code patterns.
3262   if (Opcode == AMDGPU::S_SENDMSG || Opcode == AMDGPU::S_SENDMSGHALT ||
3263       isEXP(Opcode) ||
3264       Opcode == AMDGPU::DS_ORDERED_COUNT || Opcode == AMDGPU::S_TRAP ||
3265       Opcode == AMDGPU::DS_GWS_INIT || Opcode == AMDGPU::DS_GWS_BARRIER)
3266     return true;
3267 
3268   if (MI.isCall() || MI.isInlineAsm())
3269     return true; // conservative assumption
3270 
3271   // A mode change is a scalar operation that influences vector instructions.
3272   if (modifiesModeRegister(MI))
3273     return true;
3274 
3275   // These are like SALU instructions in terms of effects, so it's questionable
3276   // whether we should return true for those.
3277   //
3278   // However, executing them with EXEC = 0 causes them to operate on undefined
3279   // data, which we avoid by returning true here.
3280   if (Opcode == AMDGPU::V_READFIRSTLANE_B32 ||
3281       Opcode == AMDGPU::V_READLANE_B32 || Opcode == AMDGPU::V_WRITELANE_B32)
3282     return true;
3283 
3284   return false;
3285 }
3286 
3287 bool SIInstrInfo::mayReadEXEC(const MachineRegisterInfo &MRI,
3288                               const MachineInstr &MI) const {
3289   if (MI.isMetaInstruction())
3290     return false;
3291 
3292   // This won't read exec if this is an SGPR->SGPR copy.
3293   if (MI.isCopyLike()) {
3294     if (!RI.isSGPRReg(MRI, MI.getOperand(0).getReg()))
3295       return true;
3296 
3297     // Make sure this isn't copying exec as a normal operand
3298     return MI.readsRegister(AMDGPU::EXEC, &RI);
3299   }
3300 
3301   // Make a conservative assumption about the callee.
3302   if (MI.isCall())
3303     return true;
3304 
3305   // Be conservative with any unhandled generic opcodes.
3306   if (!isTargetSpecificOpcode(MI.getOpcode()))
3307     return true;
3308 
3309   return !isSALU(MI) || MI.readsRegister(AMDGPU::EXEC, &RI);
3310 }
3311 
3312 bool SIInstrInfo::isInlineConstant(const APInt &Imm) const {
3313   switch (Imm.getBitWidth()) {
3314   case 1: // This likely will be a condition code mask.
3315     return true;
3316 
3317   case 32:
3318     return AMDGPU::isInlinableLiteral32(Imm.getSExtValue(),
3319                                         ST.hasInv2PiInlineImm());
3320   case 64:
3321     return AMDGPU::isInlinableLiteral64(Imm.getSExtValue(),
3322                                         ST.hasInv2PiInlineImm());
3323   case 16:
3324     return ST.has16BitInsts() &&
3325            AMDGPU::isInlinableLiteral16(Imm.getSExtValue(),
3326                                         ST.hasInv2PiInlineImm());
3327   default:
3328     llvm_unreachable("invalid bitwidth");
3329   }
3330 }
3331 
3332 bool SIInstrInfo::isInlineConstant(const MachineOperand &MO,
3333                                    uint8_t OperandType) const {
3334   if (!MO.isImm() ||
3335       OperandType < AMDGPU::OPERAND_SRC_FIRST ||
3336       OperandType > AMDGPU::OPERAND_SRC_LAST)
3337     return false;
3338 
3339   // MachineOperand provides no way to tell the true operand size, since it only
3340   // records a 64-bit value. We need to know the size to determine if a 32-bit
3341   // floating point immediate bit pattern is legal for an integer immediate. It
3342   // would be for any 32-bit integer operand, but would not be for a 64-bit one.
3343 
3344   int64_t Imm = MO.getImm();
3345   switch (OperandType) {
3346   case AMDGPU::OPERAND_REG_IMM_INT32:
3347   case AMDGPU::OPERAND_REG_IMM_FP32:
3348   case AMDGPU::OPERAND_REG_INLINE_C_INT32:
3349   case AMDGPU::OPERAND_REG_INLINE_C_FP32:
3350   case AMDGPU::OPERAND_REG_IMM_V2FP32:
3351   case AMDGPU::OPERAND_REG_INLINE_C_V2FP32:
3352   case AMDGPU::OPERAND_REG_IMM_V2INT32:
3353   case AMDGPU::OPERAND_REG_INLINE_C_V2INT32:
3354   case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
3355   case AMDGPU::OPERAND_REG_INLINE_AC_FP32: {
3356     int32_t Trunc = static_cast<int32_t>(Imm);
3357     return AMDGPU::isInlinableLiteral32(Trunc, ST.hasInv2PiInlineImm());
3358   }
3359   case AMDGPU::OPERAND_REG_IMM_INT64:
3360   case AMDGPU::OPERAND_REG_IMM_FP64:
3361   case AMDGPU::OPERAND_REG_INLINE_C_INT64:
3362   case AMDGPU::OPERAND_REG_INLINE_C_FP64:
3363   case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
3364     return AMDGPU::isInlinableLiteral64(MO.getImm(),
3365                                         ST.hasInv2PiInlineImm());
3366   case AMDGPU::OPERAND_REG_IMM_INT16:
3367   case AMDGPU::OPERAND_REG_INLINE_C_INT16:
3368   case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
3369     // We would expect inline immediates to not be concerned with an integer/fp
3370     // distinction. However, in the case of 16-bit integer operations, the
3371     // "floating point" values appear to not work. It seems read the low 16-bits
3372     // of 32-bit immediates, which happens to always work for the integer
3373     // values.
3374     //
3375     // See llvm bugzilla 46302.
3376     //
3377     // TODO: Theoretically we could use op-sel to use the high bits of the
3378     // 32-bit FP values.
3379     return AMDGPU::isInlinableIntLiteral(Imm);
3380   case AMDGPU::OPERAND_REG_IMM_V2INT16:
3381   case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
3382   case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
3383     // This suffers the same problem as the scalar 16-bit cases.
3384     return AMDGPU::isInlinableIntLiteralV216(Imm);
3385   case AMDGPU::OPERAND_REG_IMM_FP16:
3386   case AMDGPU::OPERAND_REG_INLINE_C_FP16:
3387   case AMDGPU::OPERAND_REG_INLINE_AC_FP16: {
3388     if (isInt<16>(Imm) || isUInt<16>(Imm)) {
3389       // A few special case instructions have 16-bit operands on subtargets
3390       // where 16-bit instructions are not legal.
3391       // TODO: Do the 32-bit immediates work? We shouldn't really need to handle
3392       // constants in these cases
3393       int16_t Trunc = static_cast<int16_t>(Imm);
3394       return ST.has16BitInsts() &&
3395              AMDGPU::isInlinableLiteral16(Trunc, ST.hasInv2PiInlineImm());
3396     }
3397 
3398     return false;
3399   }
3400   case AMDGPU::OPERAND_REG_IMM_V2FP16:
3401   case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
3402   case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16: {
3403     uint32_t Trunc = static_cast<uint32_t>(Imm);
3404     return AMDGPU::isInlinableLiteralV216(Trunc, ST.hasInv2PiInlineImm());
3405   }
3406   default:
3407     llvm_unreachable("invalid bitwidth");
3408   }
3409 }
3410 
3411 bool SIInstrInfo::isLiteralConstantLike(const MachineOperand &MO,
3412                                         const MCOperandInfo &OpInfo) const {
3413   switch (MO.getType()) {
3414   case MachineOperand::MO_Register:
3415     return false;
3416   case MachineOperand::MO_Immediate:
3417     return !isInlineConstant(MO, OpInfo);
3418   case MachineOperand::MO_FrameIndex:
3419   case MachineOperand::MO_MachineBasicBlock:
3420   case MachineOperand::MO_ExternalSymbol:
3421   case MachineOperand::MO_GlobalAddress:
3422   case MachineOperand::MO_MCSymbol:
3423     return true;
3424   default:
3425     llvm_unreachable("unexpected operand type");
3426   }
3427 }
3428 
3429 static bool compareMachineOp(const MachineOperand &Op0,
3430                              const MachineOperand &Op1) {
3431   if (Op0.getType() != Op1.getType())
3432     return false;
3433 
3434   switch (Op0.getType()) {
3435   case MachineOperand::MO_Register:
3436     return Op0.getReg() == Op1.getReg();
3437   case MachineOperand::MO_Immediate:
3438     return Op0.getImm() == Op1.getImm();
3439   default:
3440     llvm_unreachable("Didn't expect to be comparing these operand types");
3441   }
3442 }
3443 
3444 bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo,
3445                                     const MachineOperand &MO) const {
3446   const MCInstrDesc &InstDesc = MI.getDesc();
3447   const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo];
3448 
3449   assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal());
3450 
3451   if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE)
3452     return true;
3453 
3454   if (OpInfo.RegClass < 0)
3455     return false;
3456 
3457   if (MO.isImm() && isInlineConstant(MO, OpInfo)) {
3458     if (isMAI(MI) && ST.hasMFMAInlineLiteralBug() &&
3459         OpNo ==(unsigned)AMDGPU::getNamedOperandIdx(MI.getOpcode(),
3460                                                     AMDGPU::OpName::src2))
3461       return false;
3462     return RI.opCanUseInlineConstant(OpInfo.OperandType);
3463   }
3464 
3465   if (!RI.opCanUseLiteralConstant(OpInfo.OperandType))
3466     return false;
3467 
3468   if (!isVOP3(MI) || !AMDGPU::isSISrcOperand(InstDesc, OpNo))
3469     return true;
3470 
3471   return ST.hasVOP3Literal();
3472 }
3473 
3474 bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const {
3475   // GFX90A does not have V_MUL_LEGACY_F32_e32.
3476   if (Opcode == AMDGPU::V_MUL_LEGACY_F32_e64 && ST.hasGFX90AInsts())
3477     return false;
3478 
3479   int Op32 = AMDGPU::getVOPe32(Opcode);
3480   if (Op32 == -1)
3481     return false;
3482 
3483   return pseudoToMCOpcode(Op32) != -1;
3484 }
3485 
3486 bool SIInstrInfo::hasModifiers(unsigned Opcode) const {
3487   // The src0_modifier operand is present on all instructions
3488   // that have modifiers.
3489 
3490   return AMDGPU::getNamedOperandIdx(Opcode,
3491                                     AMDGPU::OpName::src0_modifiers) != -1;
3492 }
3493 
3494 bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI,
3495                                   unsigned OpName) const {
3496   const MachineOperand *Mods = getNamedOperand(MI, OpName);
3497   return Mods && Mods->getImm();
3498 }
3499 
3500 bool SIInstrInfo::hasAnyModifiersSet(const MachineInstr &MI) const {
3501   return hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
3502          hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
3503          hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers) ||
3504          hasModifiersSet(MI, AMDGPU::OpName::clamp) ||
3505          hasModifiersSet(MI, AMDGPU::OpName::omod);
3506 }
3507 
3508 bool SIInstrInfo::canShrink(const MachineInstr &MI,
3509                             const MachineRegisterInfo &MRI) const {
3510   const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
3511   // Can't shrink instruction with three operands.
3512   // FIXME: v_cndmask_b32 has 3 operands and is shrinkable, but we need to add
3513   // a special case for it.  It can only be shrunk if the third operand
3514   // is vcc, and src0_modifiers and src1_modifiers are not set.
3515   // We should handle this the same way we handle vopc, by addding
3516   // a register allocation hint pre-regalloc and then do the shrinking
3517   // post-regalloc.
3518   if (Src2) {
3519     switch (MI.getOpcode()) {
3520       default: return false;
3521 
3522       case AMDGPU::V_ADDC_U32_e64:
3523       case AMDGPU::V_SUBB_U32_e64:
3524       case AMDGPU::V_SUBBREV_U32_e64: {
3525         const MachineOperand *Src1
3526           = getNamedOperand(MI, AMDGPU::OpName::src1);
3527         if (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()))
3528           return false;
3529         // Additional verification is needed for sdst/src2.
3530         return true;
3531       }
3532       case AMDGPU::V_MAC_F32_e64:
3533       case AMDGPU::V_MAC_F16_e64:
3534       case AMDGPU::V_FMAC_F32_e64:
3535       case AMDGPU::V_FMAC_F16_e64:
3536       case AMDGPU::V_FMAC_F64_e64:
3537         if (!Src2->isReg() || !RI.isVGPR(MRI, Src2->getReg()) ||
3538             hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers))
3539           return false;
3540         break;
3541 
3542       case AMDGPU::V_CNDMASK_B32_e64:
3543         break;
3544     }
3545   }
3546 
3547   const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
3548   if (Src1 && (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()) ||
3549                hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers)))
3550     return false;
3551 
3552   // We don't need to check src0, all input types are legal, so just make sure
3553   // src0 isn't using any modifiers.
3554   if (hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers))
3555     return false;
3556 
3557   // Can it be shrunk to a valid 32 bit opcode?
3558   if (!hasVALU32BitEncoding(MI.getOpcode()))
3559     return false;
3560 
3561   // Check output modifiers
3562   return !hasModifiersSet(MI, AMDGPU::OpName::omod) &&
3563          !hasModifiersSet(MI, AMDGPU::OpName::clamp);
3564 }
3565 
3566 // Set VCC operand with all flags from \p Orig, except for setting it as
3567 // implicit.
3568 static void copyFlagsToImplicitVCC(MachineInstr &MI,
3569                                    const MachineOperand &Orig) {
3570 
3571   for (MachineOperand &Use : MI.implicit_operands()) {
3572     if (Use.isUse() &&
3573         (Use.getReg() == AMDGPU::VCC || Use.getReg() == AMDGPU::VCC_LO)) {
3574       Use.setIsUndef(Orig.isUndef());
3575       Use.setIsKill(Orig.isKill());
3576       return;
3577     }
3578   }
3579 }
3580 
3581 MachineInstr *SIInstrInfo::buildShrunkInst(MachineInstr &MI,
3582                                            unsigned Op32) const {
3583   MachineBasicBlock *MBB = MI.getParent();;
3584   MachineInstrBuilder Inst32 =
3585     BuildMI(*MBB, MI, MI.getDebugLoc(), get(Op32))
3586     .setMIFlags(MI.getFlags());
3587 
3588   // Add the dst operand if the 32-bit encoding also has an explicit $vdst.
3589   // For VOPC instructions, this is replaced by an implicit def of vcc.
3590   int Op32DstIdx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst);
3591   if (Op32DstIdx != -1) {
3592     // dst
3593     Inst32.add(MI.getOperand(0));
3594   } else {
3595     assert(((MI.getOperand(0).getReg() == AMDGPU::VCC) ||
3596             (MI.getOperand(0).getReg() == AMDGPU::VCC_LO)) &&
3597            "Unexpected case");
3598   }
3599 
3600   Inst32.add(*getNamedOperand(MI, AMDGPU::OpName::src0));
3601 
3602   const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
3603   if (Src1)
3604     Inst32.add(*Src1);
3605 
3606   const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
3607 
3608   if (Src2) {
3609     int Op32Src2Idx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2);
3610     if (Op32Src2Idx != -1) {
3611       Inst32.add(*Src2);
3612     } else {
3613       // In the case of V_CNDMASK_B32_e32, the explicit operand src2 is
3614       // replaced with an implicit read of vcc or vcc_lo. The implicit read
3615       // of vcc was already added during the initial BuildMI, but we
3616       // 1) may need to change vcc to vcc_lo to preserve the original register
3617       // 2) have to preserve the original flags.
3618       fixImplicitOperands(*Inst32);
3619       copyFlagsToImplicitVCC(*Inst32, *Src2);
3620     }
3621   }
3622 
3623   return Inst32;
3624 }
3625 
3626 bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI,
3627                                   const MachineOperand &MO,
3628                                   const MCOperandInfo &OpInfo) const {
3629   // Literal constants use the constant bus.
3630   //if (isLiteralConstantLike(MO, OpInfo))
3631   // return true;
3632   if (MO.isImm())
3633     return !isInlineConstant(MO, OpInfo);
3634 
3635   if (!MO.isReg())
3636     return true; // Misc other operands like FrameIndex
3637 
3638   if (!MO.isUse())
3639     return false;
3640 
3641   if (MO.getReg().isVirtual())
3642     return RI.isSGPRClass(MRI.getRegClass(MO.getReg()));
3643 
3644   // Null is free
3645   if (MO.getReg() == AMDGPU::SGPR_NULL)
3646     return false;
3647 
3648   // SGPRs use the constant bus
3649   if (MO.isImplicit()) {
3650     return MO.getReg() == AMDGPU::M0 ||
3651            MO.getReg() == AMDGPU::VCC ||
3652            MO.getReg() == AMDGPU::VCC_LO;
3653   } else {
3654     return AMDGPU::SReg_32RegClass.contains(MO.getReg()) ||
3655            AMDGPU::SReg_64RegClass.contains(MO.getReg());
3656   }
3657 }
3658 
3659 static Register findImplicitSGPRRead(const MachineInstr &MI) {
3660   for (const MachineOperand &MO : MI.implicit_operands()) {
3661     // We only care about reads.
3662     if (MO.isDef())
3663       continue;
3664 
3665     switch (MO.getReg()) {
3666     case AMDGPU::VCC:
3667     case AMDGPU::VCC_LO:
3668     case AMDGPU::VCC_HI:
3669     case AMDGPU::M0:
3670     case AMDGPU::FLAT_SCR:
3671       return MO.getReg();
3672 
3673     default:
3674       break;
3675     }
3676   }
3677 
3678   return AMDGPU::NoRegister;
3679 }
3680 
3681 static bool shouldReadExec(const MachineInstr &MI) {
3682   if (SIInstrInfo::isVALU(MI)) {
3683     switch (MI.getOpcode()) {
3684     case AMDGPU::V_READLANE_B32:
3685     case AMDGPU::V_WRITELANE_B32:
3686       return false;
3687     }
3688 
3689     return true;
3690   }
3691 
3692   if (MI.isPreISelOpcode() ||
3693       SIInstrInfo::isGenericOpcode(MI.getOpcode()) ||
3694       SIInstrInfo::isSALU(MI) ||
3695       SIInstrInfo::isSMRD(MI))
3696     return false;
3697 
3698   return true;
3699 }
3700 
3701 static bool isSubRegOf(const SIRegisterInfo &TRI,
3702                        const MachineOperand &SuperVec,
3703                        const MachineOperand &SubReg) {
3704   if (SubReg.getReg().isPhysical())
3705     return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg());
3706 
3707   return SubReg.getSubReg() != AMDGPU::NoSubRegister &&
3708          SubReg.getReg() == SuperVec.getReg();
3709 }
3710 
3711 bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
3712                                     StringRef &ErrInfo) const {
3713   uint16_t Opcode = MI.getOpcode();
3714   if (SIInstrInfo::isGenericOpcode(MI.getOpcode()))
3715     return true;
3716 
3717   const MachineFunction *MF = MI.getParent()->getParent();
3718   const MachineRegisterInfo &MRI = MF->getRegInfo();
3719 
3720   int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
3721   int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
3722   int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
3723 
3724   // Make sure the number of operands is correct.
3725   const MCInstrDesc &Desc = get(Opcode);
3726   if (!Desc.isVariadic() &&
3727       Desc.getNumOperands() != MI.getNumExplicitOperands()) {
3728     ErrInfo = "Instruction has wrong number of operands.";
3729     return false;
3730   }
3731 
3732   if (MI.isInlineAsm()) {
3733     // Verify register classes for inlineasm constraints.
3734     for (unsigned I = InlineAsm::MIOp_FirstOperand, E = MI.getNumOperands();
3735          I != E; ++I) {
3736       const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI);
3737       if (!RC)
3738         continue;
3739 
3740       const MachineOperand &Op = MI.getOperand(I);
3741       if (!Op.isReg())
3742         continue;
3743 
3744       Register Reg = Op.getReg();
3745       if (!Reg.isVirtual() && !RC->contains(Reg)) {
3746         ErrInfo = "inlineasm operand has incorrect register class.";
3747         return false;
3748       }
3749     }
3750 
3751     return true;
3752   }
3753 
3754   if (isMIMG(MI) && MI.memoperands_empty() && MI.mayLoadOrStore()) {
3755     ErrInfo = "missing memory operand from MIMG instruction.";
3756     return false;
3757   }
3758 
3759   // Make sure the register classes are correct.
3760   for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) {
3761     const MachineOperand &MO = MI.getOperand(i);
3762     if (MO.isFPImm()) {
3763       ErrInfo = "FPImm Machine Operands are not supported. ISel should bitcast "
3764                 "all fp values to integers.";
3765       return false;
3766     }
3767 
3768     int RegClass = Desc.OpInfo[i].RegClass;
3769 
3770     switch (Desc.OpInfo[i].OperandType) {
3771     case MCOI::OPERAND_REGISTER:
3772       if (MI.getOperand(i).isImm() || MI.getOperand(i).isGlobal()) {
3773         ErrInfo = "Illegal immediate value for operand.";
3774         return false;
3775       }
3776       break;
3777     case AMDGPU::OPERAND_REG_IMM_INT32:
3778     case AMDGPU::OPERAND_REG_IMM_FP32:
3779       break;
3780     case AMDGPU::OPERAND_REG_INLINE_C_INT32:
3781     case AMDGPU::OPERAND_REG_INLINE_C_FP32:
3782     case AMDGPU::OPERAND_REG_INLINE_C_INT64:
3783     case AMDGPU::OPERAND_REG_INLINE_C_FP64:
3784     case AMDGPU::OPERAND_REG_INLINE_C_INT16:
3785     case AMDGPU::OPERAND_REG_INLINE_C_FP16:
3786     case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
3787     case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
3788     case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
3789     case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
3790     case AMDGPU::OPERAND_REG_INLINE_AC_FP64: {
3791       if (!MO.isReg() && (!MO.isImm() || !isInlineConstant(MI, i))) {
3792         ErrInfo = "Illegal immediate value for operand.";
3793         return false;
3794       }
3795       break;
3796     }
3797     case MCOI::OPERAND_IMMEDIATE:
3798     case AMDGPU::OPERAND_KIMM32:
3799       // Check if this operand is an immediate.
3800       // FrameIndex operands will be replaced by immediates, so they are
3801       // allowed.
3802       if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) {
3803         ErrInfo = "Expected immediate, but got non-immediate";
3804         return false;
3805       }
3806       LLVM_FALLTHROUGH;
3807     default:
3808       continue;
3809     }
3810 
3811     if (!MO.isReg())
3812       continue;
3813     Register Reg = MO.getReg();
3814     if (!Reg)
3815       continue;
3816 
3817     // FIXME: Ideally we would have separate instruction definitions with the
3818     // aligned register constraint.
3819     // FIXME: We do not verify inline asm operands, but custom inline asm
3820     // verification is broken anyway
3821     if (ST.needsAlignedVGPRs()) {
3822       const TargetRegisterClass *RC = RI.getRegClassForReg(MRI, Reg);
3823       const bool IsVGPR = RI.hasVGPRs(RC);
3824       const bool IsAGPR = !IsVGPR && RI.hasAGPRs(RC);
3825       if ((IsVGPR || IsAGPR) && MO.getSubReg()) {
3826         const TargetRegisterClass *SubRC =
3827             RI.getSubRegClass(RC, MO.getSubReg());
3828         RC = RI.getCompatibleSubRegClass(RC, SubRC, MO.getSubReg());
3829         if (RC)
3830           RC = SubRC;
3831       }
3832 
3833       // Check that this is the aligned version of the class.
3834       if (!RC || ((IsVGPR && !RC->hasSuperClassEq(RI.getVGPRClassForBitWidth(
3835                                  RI.getRegSizeInBits(*RC)))) ||
3836                   (IsAGPR && !RC->hasSuperClassEq(RI.getAGPRClassForBitWidth(
3837                                  RI.getRegSizeInBits(*RC)))))) {
3838         ErrInfo = "Subtarget requires even aligned vector registers";
3839         return false;
3840       }
3841     }
3842 
3843     if (RegClass != -1) {
3844       if (Reg.isVirtual())
3845         continue;
3846 
3847       const TargetRegisterClass *RC = RI.getRegClass(RegClass);
3848       if (!RC->contains(Reg)) {
3849         ErrInfo = "Operand has incorrect register class.";
3850         return false;
3851       }
3852     }
3853   }
3854 
3855   // Verify SDWA
3856   if (isSDWA(MI)) {
3857     if (!ST.hasSDWA()) {
3858       ErrInfo = "SDWA is not supported on this target";
3859       return false;
3860     }
3861 
3862     int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);
3863 
3864     const int OpIndicies[] = { DstIdx, Src0Idx, Src1Idx, Src2Idx };
3865 
3866     for (int OpIdx: OpIndicies) {
3867       if (OpIdx == -1)
3868         continue;
3869       const MachineOperand &MO = MI.getOperand(OpIdx);
3870 
3871       if (!ST.hasSDWAScalar()) {
3872         // Only VGPRS on VI
3873         if (!MO.isReg() || !RI.hasVGPRs(RI.getRegClassForReg(MRI, MO.getReg()))) {
3874           ErrInfo = "Only VGPRs allowed as operands in SDWA instructions on VI";
3875           return false;
3876         }
3877       } else {
3878         // No immediates on GFX9
3879         if (!MO.isReg()) {
3880           ErrInfo =
3881             "Only reg allowed as operands in SDWA instructions on GFX9+";
3882           return false;
3883         }
3884       }
3885     }
3886 
3887     if (!ST.hasSDWAOmod()) {
3888       // No omod allowed on VI
3889       const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod);
3890       if (OMod != nullptr &&
3891         (!OMod->isImm() || OMod->getImm() != 0)) {
3892         ErrInfo = "OMod not allowed in SDWA instructions on VI";
3893         return false;
3894       }
3895     }
3896 
3897     uint16_t BasicOpcode = AMDGPU::getBasicFromSDWAOp(Opcode);
3898     if (isVOPC(BasicOpcode)) {
3899       if (!ST.hasSDWASdst() && DstIdx != -1) {
3900         // Only vcc allowed as dst on VI for VOPC
3901         const MachineOperand &Dst = MI.getOperand(DstIdx);
3902         if (!Dst.isReg() || Dst.getReg() != AMDGPU::VCC) {
3903           ErrInfo = "Only VCC allowed as dst in SDWA instructions on VI";
3904           return false;
3905         }
3906       } else if (!ST.hasSDWAOutModsVOPC()) {
3907         // No clamp allowed on GFX9 for VOPC
3908         const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp);
3909         if (Clamp && (!Clamp->isImm() || Clamp->getImm() != 0)) {
3910           ErrInfo = "Clamp not allowed in VOPC SDWA instructions on VI";
3911           return false;
3912         }
3913 
3914         // No omod allowed on GFX9 for VOPC
3915         const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod);
3916         if (OMod && (!OMod->isImm() || OMod->getImm() != 0)) {
3917           ErrInfo = "OMod not allowed in VOPC SDWA instructions on VI";
3918           return false;
3919         }
3920       }
3921     }
3922 
3923     const MachineOperand *DstUnused = getNamedOperand(MI, AMDGPU::OpName::dst_unused);
3924     if (DstUnused && DstUnused->isImm() &&
3925         DstUnused->getImm() == AMDGPU::SDWA::UNUSED_PRESERVE) {
3926       const MachineOperand &Dst = MI.getOperand(DstIdx);
3927       if (!Dst.isReg() || !Dst.isTied()) {
3928         ErrInfo = "Dst register should have tied register";
3929         return false;
3930       }
3931 
3932       const MachineOperand &TiedMO =
3933           MI.getOperand(MI.findTiedOperandIdx(DstIdx));
3934       if (!TiedMO.isReg() || !TiedMO.isImplicit() || !TiedMO.isUse()) {
3935         ErrInfo =
3936             "Dst register should be tied to implicit use of preserved register";
3937         return false;
3938       } else if (TiedMO.getReg().isPhysical() &&
3939                  Dst.getReg() != TiedMO.getReg()) {
3940         ErrInfo = "Dst register should use same physical register as preserved";
3941         return false;
3942       }
3943     }
3944   }
3945 
3946   // Verify MIMG
3947   if (isMIMG(MI.getOpcode()) && !MI.mayStore()) {
3948     // Ensure that the return type used is large enough for all the options
3949     // being used TFE/LWE require an extra result register.
3950     const MachineOperand *DMask = getNamedOperand(MI, AMDGPU::OpName::dmask);
3951     if (DMask) {
3952       uint64_t DMaskImm = DMask->getImm();
3953       uint32_t RegCount =
3954           isGather4(MI.getOpcode()) ? 4 : countPopulation(DMaskImm);
3955       const MachineOperand *TFE = getNamedOperand(MI, AMDGPU::OpName::tfe);
3956       const MachineOperand *LWE = getNamedOperand(MI, AMDGPU::OpName::lwe);
3957       const MachineOperand *D16 = getNamedOperand(MI, AMDGPU::OpName::d16);
3958 
3959       // Adjust for packed 16 bit values
3960       if (D16 && D16->getImm() && !ST.hasUnpackedD16VMem())
3961         RegCount >>= 1;
3962 
3963       // Adjust if using LWE or TFE
3964       if ((LWE && LWE->getImm()) || (TFE && TFE->getImm()))
3965         RegCount += 1;
3966 
3967       const uint32_t DstIdx =
3968           AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata);
3969       const MachineOperand &Dst = MI.getOperand(DstIdx);
3970       if (Dst.isReg()) {
3971         const TargetRegisterClass *DstRC = getOpRegClass(MI, DstIdx);
3972         uint32_t DstSize = RI.getRegSizeInBits(*DstRC) / 32;
3973         if (RegCount > DstSize) {
3974           ErrInfo = "MIMG instruction returns too many registers for dst "
3975                     "register class";
3976           return false;
3977         }
3978       }
3979     }
3980   }
3981 
3982   // Verify VOP*. Ignore multiple sgpr operands on writelane.
3983   if (Desc.getOpcode() != AMDGPU::V_WRITELANE_B32
3984       && (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI) || isSDWA(MI))) {
3985     // Only look at the true operands. Only a real operand can use the constant
3986     // bus, and we don't want to check pseudo-operands like the source modifier
3987     // flags.
3988     const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };
3989 
3990     unsigned ConstantBusCount = 0;
3991     unsigned LiteralCount = 0;
3992 
3993     if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1)
3994       ++ConstantBusCount;
3995 
3996     SmallVector<Register, 2> SGPRsUsed;
3997     Register SGPRUsed;
3998 
3999     for (int OpIdx : OpIndices) {
4000       if (OpIdx == -1)
4001         break;
4002       const MachineOperand &MO = MI.getOperand(OpIdx);
4003       if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) {
4004         if (MO.isReg()) {
4005           SGPRUsed = MO.getReg();
4006           if (llvm::all_of(SGPRsUsed, [SGPRUsed](unsigned SGPR) {
4007                 return SGPRUsed != SGPR;
4008               })) {
4009             ++ConstantBusCount;
4010             SGPRsUsed.push_back(SGPRUsed);
4011           }
4012         } else {
4013           ++ConstantBusCount;
4014           ++LiteralCount;
4015         }
4016       }
4017     }
4018 
4019     SGPRUsed = findImplicitSGPRRead(MI);
4020     if (SGPRUsed != AMDGPU::NoRegister) {
4021       // Implicit uses may safely overlap true overands
4022       if (llvm::all_of(SGPRsUsed, [this, SGPRUsed](unsigned SGPR) {
4023             return !RI.regsOverlap(SGPRUsed, SGPR);
4024           })) {
4025         ++ConstantBusCount;
4026         SGPRsUsed.push_back(SGPRUsed);
4027       }
4028     }
4029 
4030     // v_writelane_b32 is an exception from constant bus restriction:
4031     // vsrc0 can be sgpr, const or m0 and lane select sgpr, m0 or inline-const
4032     if (ConstantBusCount > ST.getConstantBusLimit(Opcode) &&
4033         Opcode != AMDGPU::V_WRITELANE_B32) {
4034       ErrInfo = "VOP* instruction violates constant bus restriction";
4035       return false;
4036     }
4037 
4038     if (isVOP3(MI) && LiteralCount) {
4039       if (!ST.hasVOP3Literal()) {
4040         ErrInfo = "VOP3 instruction uses literal";
4041         return false;
4042       }
4043       if (LiteralCount > 1) {
4044         ErrInfo = "VOP3 instruction uses more than one literal";
4045         return false;
4046       }
4047     }
4048   }
4049 
4050   // Special case for writelane - this can break the multiple constant bus rule,
4051   // but still can't use more than one SGPR register
4052   if (Desc.getOpcode() == AMDGPU::V_WRITELANE_B32) {
4053     unsigned SGPRCount = 0;
4054     Register SGPRUsed = AMDGPU::NoRegister;
4055 
4056     for (int OpIdx : {Src0Idx, Src1Idx, Src2Idx}) {
4057       if (OpIdx == -1)
4058         break;
4059 
4060       const MachineOperand &MO = MI.getOperand(OpIdx);
4061 
4062       if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) {
4063         if (MO.isReg() && MO.getReg() != AMDGPU::M0) {
4064           if (MO.getReg() != SGPRUsed)
4065             ++SGPRCount;
4066           SGPRUsed = MO.getReg();
4067         }
4068       }
4069       if (SGPRCount > ST.getConstantBusLimit(Opcode)) {
4070         ErrInfo = "WRITELANE instruction violates constant bus restriction";
4071         return false;
4072       }
4073     }
4074   }
4075 
4076   // Verify misc. restrictions on specific instructions.
4077   if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32_e64 ||
4078       Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64_e64) {
4079     const MachineOperand &Src0 = MI.getOperand(Src0Idx);
4080     const MachineOperand &Src1 = MI.getOperand(Src1Idx);
4081     const MachineOperand &Src2 = MI.getOperand(Src2Idx);
4082     if (Src0.isReg() && Src1.isReg() && Src2.isReg()) {
4083       if (!compareMachineOp(Src0, Src1) &&
4084           !compareMachineOp(Src0, Src2)) {
4085         ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2";
4086         return false;
4087       }
4088     }
4089     if ((getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm() &
4090          SISrcMods::ABS) ||
4091         (getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm() &
4092          SISrcMods::ABS) ||
4093         (getNamedOperand(MI, AMDGPU::OpName::src2_modifiers)->getImm() &
4094          SISrcMods::ABS)) {
4095       ErrInfo = "ABS not allowed in VOP3B instructions";
4096       return false;
4097     }
4098   }
4099 
4100   if (isSOP2(MI) || isSOPC(MI)) {
4101     const MachineOperand &Src0 = MI.getOperand(Src0Idx);
4102     const MachineOperand &Src1 = MI.getOperand(Src1Idx);
4103     unsigned Immediates = 0;
4104 
4105     if (!Src0.isReg() &&
4106         !isInlineConstant(Src0, Desc.OpInfo[Src0Idx].OperandType))
4107       Immediates++;
4108     if (!Src1.isReg() &&
4109         !isInlineConstant(Src1, Desc.OpInfo[Src1Idx].OperandType))
4110       Immediates++;
4111 
4112     if (Immediates > 1) {
4113       ErrInfo = "SOP2/SOPC instruction requires too many immediate constants";
4114       return false;
4115     }
4116   }
4117 
4118   if (isSOPK(MI)) {
4119     auto Op = getNamedOperand(MI, AMDGPU::OpName::simm16);
4120     if (Desc.isBranch()) {
4121       if (!Op->isMBB()) {
4122         ErrInfo = "invalid branch target for SOPK instruction";
4123         return false;
4124       }
4125     } else {
4126       uint64_t Imm = Op->getImm();
4127       if (sopkIsZext(MI)) {
4128         if (!isUInt<16>(Imm)) {
4129           ErrInfo = "invalid immediate for SOPK instruction";
4130           return false;
4131         }
4132       } else {
4133         if (!isInt<16>(Imm)) {
4134           ErrInfo = "invalid immediate for SOPK instruction";
4135           return false;
4136         }
4137       }
4138     }
4139   }
4140 
4141   if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 ||
4142       Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 ||
4143       Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
4144       Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) {
4145     const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
4146                        Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64;
4147 
4148     const unsigned StaticNumOps = Desc.getNumOperands() +
4149       Desc.getNumImplicitUses();
4150     const unsigned NumImplicitOps = IsDst ? 2 : 1;
4151 
4152     // Allow additional implicit operands. This allows a fixup done by the post
4153     // RA scheduler where the main implicit operand is killed and implicit-defs
4154     // are added for sub-registers that remain live after this instruction.
4155     if (MI.getNumOperands() < StaticNumOps + NumImplicitOps) {
4156       ErrInfo = "missing implicit register operands";
4157       return false;
4158     }
4159 
4160     const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
4161     if (IsDst) {
4162       if (!Dst->isUse()) {
4163         ErrInfo = "v_movreld_b32 vdst should be a use operand";
4164         return false;
4165       }
4166 
4167       unsigned UseOpIdx;
4168       if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) ||
4169           UseOpIdx != StaticNumOps + 1) {
4170         ErrInfo = "movrel implicit operands should be tied";
4171         return false;
4172       }
4173     }
4174 
4175     const MachineOperand &Src0 = MI.getOperand(Src0Idx);
4176     const MachineOperand &ImpUse
4177       = MI.getOperand(StaticNumOps + NumImplicitOps - 1);
4178     if (!ImpUse.isReg() || !ImpUse.isUse() ||
4179         !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) {
4180       ErrInfo = "src0 should be subreg of implicit vector use";
4181       return false;
4182     }
4183   }
4184 
4185   // Make sure we aren't losing exec uses in the td files. This mostly requires
4186   // being careful when using let Uses to try to add other use registers.
4187   if (shouldReadExec(MI)) {
4188     if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) {
4189       ErrInfo = "VALU instruction does not implicitly read exec mask";
4190       return false;
4191     }
4192   }
4193 
4194   if (isSMRD(MI)) {
4195     if (MI.mayStore()) {
4196       // The register offset form of scalar stores may only use m0 as the
4197       // soffset register.
4198       const MachineOperand *Soff = getNamedOperand(MI, AMDGPU::OpName::soff);
4199       if (Soff && Soff->getReg() != AMDGPU::M0) {
4200         ErrInfo = "scalar stores must use m0 as offset register";
4201         return false;
4202       }
4203     }
4204   }
4205 
4206   if (isFLAT(MI) && !ST.hasFlatInstOffsets()) {
4207     const MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
4208     if (Offset->getImm() != 0) {
4209       ErrInfo = "subtarget does not support offsets in flat instructions";
4210       return false;
4211     }
4212   }
4213 
4214   if (isMIMG(MI)) {
4215     const MachineOperand *DimOp = getNamedOperand(MI, AMDGPU::OpName::dim);
4216     if (DimOp) {
4217       int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opcode,
4218                                                  AMDGPU::OpName::vaddr0);
4219       int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::srsrc);
4220       const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opcode);
4221       const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
4222           AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
4223       const AMDGPU::MIMGDimInfo *Dim =
4224           AMDGPU::getMIMGDimInfoByEncoding(DimOp->getImm());
4225 
4226       if (!Dim) {
4227         ErrInfo = "dim is out of range";
4228         return false;
4229       }
4230 
4231       bool IsA16 = false;
4232       if (ST.hasR128A16()) {
4233         const MachineOperand *R128A16 = getNamedOperand(MI, AMDGPU::OpName::r128);
4234         IsA16 = R128A16->getImm() != 0;
4235       } else if (ST.hasGFX10A16()) {
4236         const MachineOperand *A16 = getNamedOperand(MI, AMDGPU::OpName::a16);
4237         IsA16 = A16->getImm() != 0;
4238       }
4239 
4240       bool PackDerivatives = IsA16 || BaseOpcode->G16;
4241       bool IsNSA = SRsrcIdx - VAddr0Idx > 1;
4242 
4243       unsigned AddrWords = BaseOpcode->NumExtraArgs;
4244       unsigned AddrComponents = (BaseOpcode->Coordinates ? Dim->NumCoords : 0) +
4245                                 (BaseOpcode->LodOrClampOrMip ? 1 : 0);
4246       if (IsA16)
4247         AddrWords += (AddrComponents + 1) / 2;
4248       else
4249         AddrWords += AddrComponents;
4250 
4251       if (BaseOpcode->Gradients) {
4252         if (PackDerivatives)
4253           // There are two gradients per coordinate, we pack them separately.
4254           // For the 3d case, we get (dy/du, dx/du) (-, dz/du) (dy/dv, dx/dv) (-, dz/dv)
4255           AddrWords += (Dim->NumGradients / 2 + 1) / 2 * 2;
4256         else
4257           AddrWords += Dim->NumGradients;
4258       }
4259 
4260       unsigned VAddrWords;
4261       if (IsNSA) {
4262         VAddrWords = SRsrcIdx - VAddr0Idx;
4263       } else {
4264         const TargetRegisterClass *RC = getOpRegClass(MI, VAddr0Idx);
4265         VAddrWords = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC) / 32;
4266         if (AddrWords > 8)
4267           AddrWords = 16;
4268         else if (AddrWords > 4)
4269           AddrWords = 8;
4270         else if (AddrWords == 4)
4271           AddrWords = 4;
4272         else if (AddrWords == 3)
4273           AddrWords = 3;
4274       }
4275 
4276       if (VAddrWords != AddrWords) {
4277         LLVM_DEBUG(dbgs() << "bad vaddr size, expected " << AddrWords
4278                           << " but got " << VAddrWords << "\n");
4279         ErrInfo = "bad vaddr size";
4280         return false;
4281       }
4282     }
4283   }
4284 
4285   const MachineOperand *DppCt = getNamedOperand(MI, AMDGPU::OpName::dpp_ctrl);
4286   if (DppCt) {
4287     using namespace AMDGPU::DPP;
4288 
4289     unsigned DC = DppCt->getImm();
4290     if (DC == DppCtrl::DPP_UNUSED1 || DC == DppCtrl::DPP_UNUSED2 ||
4291         DC == DppCtrl::DPP_UNUSED3 || DC > DppCtrl::DPP_LAST ||
4292         (DC >= DppCtrl::DPP_UNUSED4_FIRST && DC <= DppCtrl::DPP_UNUSED4_LAST) ||
4293         (DC >= DppCtrl::DPP_UNUSED5_FIRST && DC <= DppCtrl::DPP_UNUSED5_LAST) ||
4294         (DC >= DppCtrl::DPP_UNUSED6_FIRST && DC <= DppCtrl::DPP_UNUSED6_LAST) ||
4295         (DC >= DppCtrl::DPP_UNUSED7_FIRST && DC <= DppCtrl::DPP_UNUSED7_LAST) ||
4296         (DC >= DppCtrl::DPP_UNUSED8_FIRST && DC <= DppCtrl::DPP_UNUSED8_LAST)) {
4297       ErrInfo = "Invalid dpp_ctrl value";
4298       return false;
4299     }
4300     if (DC >= DppCtrl::WAVE_SHL1 && DC <= DppCtrl::WAVE_ROR1 &&
4301         ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
4302       ErrInfo = "Invalid dpp_ctrl value: "
4303                 "wavefront shifts are not supported on GFX10+";
4304       return false;
4305     }
4306     if (DC >= DppCtrl::BCAST15 && DC <= DppCtrl::BCAST31 &&
4307         ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
4308       ErrInfo = "Invalid dpp_ctrl value: "
4309                 "broadcasts are not supported on GFX10+";
4310       return false;
4311     }
4312     if (DC >= DppCtrl::ROW_SHARE_FIRST && DC <= DppCtrl::ROW_XMASK_LAST &&
4313         ST.getGeneration() < AMDGPUSubtarget::GFX10) {
4314       if (DC >= DppCtrl::ROW_NEWBCAST_FIRST &&
4315           DC <= DppCtrl::ROW_NEWBCAST_LAST &&
4316           !ST.hasGFX90AInsts()) {
4317         ErrInfo = "Invalid dpp_ctrl value: "
4318                   "row_newbroadcast/row_share is not supported before "
4319                   "GFX90A/GFX10";
4320         return false;
4321       } else if (DC > DppCtrl::ROW_NEWBCAST_LAST || !ST.hasGFX90AInsts()) {
4322         ErrInfo = "Invalid dpp_ctrl value: "
4323                   "row_share and row_xmask are not supported before GFX10";
4324         return false;
4325       }
4326     }
4327 
4328     int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);
4329     int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
4330 
4331     if (Opcode != AMDGPU::V_MOV_B64_DPP_PSEUDO &&
4332         ((DstIdx >= 0 &&
4333           (Desc.OpInfo[DstIdx].RegClass == AMDGPU::VReg_64RegClassID ||
4334            Desc.OpInfo[DstIdx].RegClass == AMDGPU::VReg_64_Align2RegClassID)) ||
4335          ((Src0Idx >= 0 &&
4336            (Desc.OpInfo[Src0Idx].RegClass == AMDGPU::VReg_64RegClassID ||
4337             Desc.OpInfo[Src0Idx].RegClass ==
4338                 AMDGPU::VReg_64_Align2RegClassID)))) &&
4339         !AMDGPU::isLegal64BitDPPControl(DC)) {
4340       ErrInfo = "Invalid dpp_ctrl value: "
4341                 "64 bit dpp only support row_newbcast";
4342       return false;
4343     }
4344   }
4345 
4346   if ((MI.mayStore() || MI.mayLoad()) && !isVGPRSpill(MI)) {
4347     const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
4348     uint16_t DataNameIdx = isDS(Opcode) ? AMDGPU::OpName::data0
4349                                         : AMDGPU::OpName::vdata;
4350     const MachineOperand *Data = getNamedOperand(MI, DataNameIdx);
4351     const MachineOperand *Data2 = getNamedOperand(MI, AMDGPU::OpName::data1);
4352     if (Data && !Data->isReg())
4353       Data = nullptr;
4354 
4355     if (ST.hasGFX90AInsts()) {
4356       if (Dst && Data &&
4357           (RI.isAGPR(MRI, Dst->getReg()) != RI.isAGPR(MRI, Data->getReg()))) {
4358         ErrInfo = "Invalid register class: "
4359                   "vdata and vdst should be both VGPR or AGPR";
4360         return false;
4361       }
4362       if (Data && Data2 &&
4363           (RI.isAGPR(MRI, Data->getReg()) != RI.isAGPR(MRI, Data2->getReg()))) {
4364         ErrInfo = "Invalid register class: "
4365                   "both data operands should be VGPR or AGPR";
4366         return false;
4367       }
4368     } else {
4369       if ((Dst && RI.isAGPR(MRI, Dst->getReg())) ||
4370           (Data && RI.isAGPR(MRI, Data->getReg())) ||
4371           (Data2 && RI.isAGPR(MRI, Data2->getReg()))) {
4372         ErrInfo = "Invalid register class: "
4373                   "agpr loads and stores not supported on this GPU";
4374         return false;
4375       }
4376     }
4377   }
4378 
4379   return true;
4380 }
4381 
4382 unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) const {
4383   switch (MI.getOpcode()) {
4384   default: return AMDGPU::INSTRUCTION_LIST_END;
4385   case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
4386   case AMDGPU::COPY: return AMDGPU::COPY;
4387   case AMDGPU::PHI: return AMDGPU::PHI;
4388   case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG;
4389   case AMDGPU::WQM: return AMDGPU::WQM;
4390   case AMDGPU::SOFT_WQM: return AMDGPU::SOFT_WQM;
4391   case AMDGPU::STRICT_WWM: return AMDGPU::STRICT_WWM;
4392   case AMDGPU::STRICT_WQM: return AMDGPU::STRICT_WQM;
4393   case AMDGPU::S_MOV_B32: {
4394     const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
4395     return MI.getOperand(1).isReg() ||
4396            RI.isAGPR(MRI, MI.getOperand(0).getReg()) ?
4397            AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
4398   }
4399   case AMDGPU::S_ADD_I32:
4400     return ST.hasAddNoCarry() ? AMDGPU::V_ADD_U32_e64 : AMDGPU::V_ADD_CO_U32_e32;
4401   case AMDGPU::S_ADDC_U32:
4402     return AMDGPU::V_ADDC_U32_e32;
4403   case AMDGPU::S_SUB_I32:
4404     return ST.hasAddNoCarry() ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_SUB_CO_U32_e32;
4405     // FIXME: These are not consistently handled, and selected when the carry is
4406     // used.
4407   case AMDGPU::S_ADD_U32:
4408     return AMDGPU::V_ADD_CO_U32_e32;
4409   case AMDGPU::S_SUB_U32:
4410     return AMDGPU::V_SUB_CO_U32_e32;
4411   case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
4412   case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_U32_e64;
4413   case AMDGPU::S_MUL_HI_U32: return AMDGPU::V_MUL_HI_U32_e64;
4414   case AMDGPU::S_MUL_HI_I32: return AMDGPU::V_MUL_HI_I32_e64;
4415   case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64;
4416   case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64;
4417   case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64;
4418   case AMDGPU::S_XNOR_B32:
4419     return ST.hasDLInsts() ? AMDGPU::V_XNOR_B32_e64 : AMDGPU::INSTRUCTION_LIST_END;
4420   case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64;
4421   case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64;
4422   case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64;
4423   case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64;
4424   case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
4425   case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64_e64;
4426   case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
4427   case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64_e64;
4428   case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
4429   case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64_e64;
4430   case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32_e64;
4431   case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32_e64;
4432   case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32_e64;
4433   case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32_e64;
4434   case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64;
4435   case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32;
4436   case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32;
4437   case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32;
4438   case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32;
4439   case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32;
4440   case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32;
4441   case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32;
4442   case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32;
4443   case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32;
4444   case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e32;
4445   case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e32;
4446   case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e32;
4447   case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e32;
4448   case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e32;
4449   case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e32;
4450   case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e32;
4451   case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e32;
4452   case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64;
4453   case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32;
4454   case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32;
4455   case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64;
4456   case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ;
4457   case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ;
4458   }
4459   llvm_unreachable(
4460       "Unexpected scalar opcode without corresponding vector one!");
4461 }
4462 
4463 static unsigned adjustAllocatableRegClass(const GCNSubtarget &ST,
4464                                           const MachineRegisterInfo &MRI,
4465                                           const MCInstrDesc &TID,
4466                                           unsigned RCID,
4467                                           bool IsAllocatable) {
4468   if ((IsAllocatable || !ST.hasGFX90AInsts() || !MRI.reservedRegsFrozen()) &&
4469       (TID.mayLoad() || TID.mayStore() ||
4470       (TID.TSFlags & (SIInstrFlags::DS | SIInstrFlags::MIMG)))) {
4471     switch (RCID) {
4472     case AMDGPU::AV_32RegClassID: return AMDGPU::VGPR_32RegClassID;
4473     case AMDGPU::AV_64RegClassID: return AMDGPU::VReg_64RegClassID;
4474     case AMDGPU::AV_96RegClassID: return AMDGPU::VReg_96RegClassID;
4475     case AMDGPU::AV_128RegClassID: return AMDGPU::VReg_128RegClassID;
4476     case AMDGPU::AV_160RegClassID: return AMDGPU::VReg_160RegClassID;
4477     default:
4478       break;
4479     }
4480   }
4481   return RCID;
4482 }
4483 
4484 const TargetRegisterClass *SIInstrInfo::getRegClass(const MCInstrDesc &TID,
4485     unsigned OpNum, const TargetRegisterInfo *TRI,
4486     const MachineFunction &MF)
4487   const {
4488   if (OpNum >= TID.getNumOperands())
4489     return nullptr;
4490   auto RegClass = TID.OpInfo[OpNum].RegClass;
4491   bool IsAllocatable = false;
4492   if (TID.TSFlags & (SIInstrFlags::DS | SIInstrFlags::FLAT)) {
4493     // vdst and vdata should be both VGPR or AGPR, same for the DS instructions
4494     // with two data operands. Request register class constainted to VGPR only
4495     // of both operands present as Machine Copy Propagation can not check this
4496     // constraint and possibly other passes too.
4497     //
4498     // The check is limited to FLAT and DS because atomics in non-flat encoding
4499     // have their vdst and vdata tied to be the same register.
4500     const int VDstIdx = AMDGPU::getNamedOperandIdx(TID.Opcode,
4501                                                    AMDGPU::OpName::vdst);
4502     const int DataIdx = AMDGPU::getNamedOperandIdx(TID.Opcode,
4503         (TID.TSFlags & SIInstrFlags::DS) ? AMDGPU::OpName::data0
4504                                          : AMDGPU::OpName::vdata);
4505     if (DataIdx != -1) {
4506       IsAllocatable = VDstIdx != -1 ||
4507                       AMDGPU::getNamedOperandIdx(TID.Opcode,
4508                                                  AMDGPU::OpName::data1) != -1;
4509     }
4510   }
4511   RegClass = adjustAllocatableRegClass(ST, MF.getRegInfo(), TID, RegClass,
4512                                        IsAllocatable);
4513   return RI.getRegClass(RegClass);
4514 }
4515 
4516 const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
4517                                                       unsigned OpNo) const {
4518   const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
4519   const MCInstrDesc &Desc = get(MI.getOpcode());
4520   if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
4521       Desc.OpInfo[OpNo].RegClass == -1) {
4522     Register Reg = MI.getOperand(OpNo).getReg();
4523 
4524     if (Reg.isVirtual())
4525       return MRI.getRegClass(Reg);
4526     return RI.getPhysRegClass(Reg);
4527   }
4528 
4529   unsigned RCID = Desc.OpInfo[OpNo].RegClass;
4530   RCID = adjustAllocatableRegClass(ST, MRI, Desc, RCID, true);
4531   return RI.getRegClass(RCID);
4532 }
4533 
4534 void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const {
4535   MachineBasicBlock::iterator I = MI;
4536   MachineBasicBlock *MBB = MI.getParent();
4537   MachineOperand &MO = MI.getOperand(OpIdx);
4538   MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
4539   unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass;
4540   const TargetRegisterClass *RC = RI.getRegClass(RCID);
4541   unsigned Size = RI.getRegSizeInBits(*RC);
4542   unsigned Opcode = (Size == 64) ? AMDGPU::V_MOV_B64_PSEUDO : AMDGPU::V_MOV_B32_e32;
4543   if (MO.isReg())
4544     Opcode = AMDGPU::COPY;
4545   else if (RI.isSGPRClass(RC))
4546     Opcode = (Size == 64) ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
4547 
4548   const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
4549   const TargetRegisterClass *VRC64 = RI.getVGPR64Class();
4550   if (RI.getCommonSubClass(VRC64, VRC))
4551     VRC = VRC64;
4552   else
4553     VRC = &AMDGPU::VGPR_32RegClass;
4554 
4555   Register Reg = MRI.createVirtualRegister(VRC);
4556   DebugLoc DL = MBB->findDebugLoc(I);
4557   BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).add(MO);
4558   MO.ChangeToRegister(Reg, false);
4559 }
4560 
4561 unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI,
4562                                          MachineRegisterInfo &MRI,
4563                                          MachineOperand &SuperReg,
4564                                          const TargetRegisterClass *SuperRC,
4565                                          unsigned SubIdx,
4566                                          const TargetRegisterClass *SubRC)
4567                                          const {
4568   MachineBasicBlock *MBB = MI->getParent();
4569   DebugLoc DL = MI->getDebugLoc();
4570   Register SubReg = MRI.createVirtualRegister(SubRC);
4571 
4572   if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) {
4573     BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
4574       .addReg(SuperReg.getReg(), 0, SubIdx);
4575     return SubReg;
4576   }
4577 
4578   // Just in case the super register is itself a sub-register, copy it to a new
4579   // value so we don't need to worry about merging its subreg index with the
4580   // SubIdx passed to this function. The register coalescer should be able to
4581   // eliminate this extra copy.
4582   Register NewSuperReg = MRI.createVirtualRegister(SuperRC);
4583 
4584   BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg)
4585     .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg());
4586 
4587   BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
4588     .addReg(NewSuperReg, 0, SubIdx);
4589 
4590   return SubReg;
4591 }
4592 
4593 MachineOperand SIInstrInfo::buildExtractSubRegOrImm(
4594   MachineBasicBlock::iterator MII,
4595   MachineRegisterInfo &MRI,
4596   MachineOperand &Op,
4597   const TargetRegisterClass *SuperRC,
4598   unsigned SubIdx,
4599   const TargetRegisterClass *SubRC) const {
4600   if (Op.isImm()) {
4601     if (SubIdx == AMDGPU::sub0)
4602       return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm()));
4603     if (SubIdx == AMDGPU::sub1)
4604       return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm() >> 32));
4605 
4606     llvm_unreachable("Unhandled register index for immediate");
4607   }
4608 
4609   unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC,
4610                                        SubIdx, SubRC);
4611   return MachineOperand::CreateReg(SubReg, false);
4612 }
4613 
4614 // Change the order of operands from (0, 1, 2) to (0, 2, 1)
4615 void SIInstrInfo::swapOperands(MachineInstr &Inst) const {
4616   assert(Inst.getNumExplicitOperands() == 3);
4617   MachineOperand Op1 = Inst.getOperand(1);
4618   Inst.RemoveOperand(1);
4619   Inst.addOperand(Op1);
4620 }
4621 
4622 bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI,
4623                                     const MCOperandInfo &OpInfo,
4624                                     const MachineOperand &MO) const {
4625   if (!MO.isReg())
4626     return false;
4627 
4628   Register Reg = MO.getReg();
4629 
4630   const TargetRegisterClass *DRC = RI.getRegClass(OpInfo.RegClass);
4631   if (Reg.isPhysical())
4632     return DRC->contains(Reg);
4633 
4634   const TargetRegisterClass *RC = MRI.getRegClass(Reg);
4635 
4636   if (MO.getSubReg()) {
4637     const MachineFunction *MF = MO.getParent()->getParent()->getParent();
4638     const TargetRegisterClass *SuperRC = RI.getLargestLegalSuperClass(RC, *MF);
4639     if (!SuperRC)
4640       return false;
4641 
4642     DRC = RI.getMatchingSuperRegClass(SuperRC, DRC, MO.getSubReg());
4643     if (!DRC)
4644       return false;
4645   }
4646   return RC->hasSuperClassEq(DRC);
4647 }
4648 
4649 bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI,
4650                                      const MCOperandInfo &OpInfo,
4651                                      const MachineOperand &MO) const {
4652   if (MO.isReg())
4653     return isLegalRegOperand(MRI, OpInfo, MO);
4654 
4655   // Handle non-register types that are treated like immediates.
4656   assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal());
4657   return true;
4658 }
4659 
4660 bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx,
4661                                  const MachineOperand *MO) const {
4662   const MachineFunction &MF = *MI.getParent()->getParent();
4663   const MachineRegisterInfo &MRI = MF.getRegInfo();
4664   const MCInstrDesc &InstDesc = MI.getDesc();
4665   const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx];
4666   const TargetRegisterClass *DefinedRC =
4667       OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr;
4668   if (!MO)
4669     MO = &MI.getOperand(OpIdx);
4670 
4671   int ConstantBusLimit = ST.getConstantBusLimit(MI.getOpcode());
4672   int VOP3LiteralLimit = ST.hasVOP3Literal() ? 1 : 0;
4673   if (isVALU(MI) && usesConstantBus(MRI, *MO, OpInfo)) {
4674     if (isVOP3(MI) && isLiteralConstantLike(*MO, OpInfo) && !VOP3LiteralLimit--)
4675       return false;
4676 
4677     SmallDenseSet<RegSubRegPair> SGPRsUsed;
4678     if (MO->isReg())
4679       SGPRsUsed.insert(RegSubRegPair(MO->getReg(), MO->getSubReg()));
4680 
4681     for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
4682       if (i == OpIdx)
4683         continue;
4684       const MachineOperand &Op = MI.getOperand(i);
4685       if (Op.isReg()) {
4686         RegSubRegPair SGPR(Op.getReg(), Op.getSubReg());
4687         if (!SGPRsUsed.count(SGPR) &&
4688             usesConstantBus(MRI, Op, InstDesc.OpInfo[i])) {
4689           if (--ConstantBusLimit <= 0)
4690             return false;
4691           SGPRsUsed.insert(SGPR);
4692         }
4693       } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) {
4694         if (--ConstantBusLimit <= 0)
4695           return false;
4696       } else if (isVOP3(MI) && AMDGPU::isSISrcOperand(InstDesc, i) &&
4697                  isLiteralConstantLike(Op, InstDesc.OpInfo[i])) {
4698         if (!VOP3LiteralLimit--)
4699           return false;
4700         if (--ConstantBusLimit <= 0)
4701           return false;
4702       }
4703     }
4704   }
4705 
4706   if (MO->isReg()) {
4707     assert(DefinedRC);
4708     if (!isLegalRegOperand(MRI, OpInfo, *MO))
4709       return false;
4710     bool IsAGPR = RI.isAGPR(MRI, MO->getReg());
4711     if (IsAGPR && !ST.hasMAIInsts())
4712       return false;
4713     unsigned Opc = MI.getOpcode();
4714     if (IsAGPR &&
4715         (!ST.hasGFX90AInsts() || !MRI.reservedRegsFrozen()) &&
4716         (MI.mayLoad() || MI.mayStore() || isDS(Opc) || isMIMG(Opc)))
4717       return false;
4718     // Atomics should have both vdst and vdata either vgpr or agpr.
4719     const int VDstIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
4720     const int DataIdx = AMDGPU::getNamedOperandIdx(Opc,
4721         isDS(Opc) ? AMDGPU::OpName::data0 : AMDGPU::OpName::vdata);
4722     if ((int)OpIdx == VDstIdx && DataIdx != -1 &&
4723         MI.getOperand(DataIdx).isReg() &&
4724         RI.isAGPR(MRI, MI.getOperand(DataIdx).getReg()) != IsAGPR)
4725       return false;
4726     if ((int)OpIdx == DataIdx) {
4727       if (VDstIdx != -1 &&
4728           RI.isAGPR(MRI, MI.getOperand(VDstIdx).getReg()) != IsAGPR)
4729         return false;
4730       // DS instructions with 2 src operands also must have tied RC.
4731       const int Data1Idx = AMDGPU::getNamedOperandIdx(Opc,
4732                                                       AMDGPU::OpName::data1);
4733       if (Data1Idx != -1 && MI.getOperand(Data1Idx).isReg() &&
4734           RI.isAGPR(MRI, MI.getOperand(Data1Idx).getReg()) != IsAGPR)
4735         return false;
4736     }
4737     if (Opc == AMDGPU::V_ACCVGPR_WRITE_B32_e64 &&
4738         (int)OpIdx == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) &&
4739         RI.isSGPRReg(MRI, MO->getReg()))
4740       return false;
4741     return true;
4742   }
4743 
4744   // Handle non-register types that are treated like immediates.
4745   assert(MO->isImm() || MO->isTargetIndex() || MO->isFI() || MO->isGlobal());
4746 
4747   if (!DefinedRC) {
4748     // This operand expects an immediate.
4749     return true;
4750   }
4751 
4752   return isImmOperandLegal(MI, OpIdx, *MO);
4753 }
4754 
4755 void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI,
4756                                        MachineInstr &MI) const {
4757   unsigned Opc = MI.getOpcode();
4758   const MCInstrDesc &InstrDesc = get(Opc);
4759 
4760   int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
4761   MachineOperand &Src0 = MI.getOperand(Src0Idx);
4762 
4763   int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
4764   MachineOperand &Src1 = MI.getOperand(Src1Idx);
4765 
4766   // If there is an implicit SGPR use such as VCC use for v_addc_u32/v_subb_u32
4767   // we need to only have one constant bus use before GFX10.
4768   bool HasImplicitSGPR = findImplicitSGPRRead(MI) != AMDGPU::NoRegister;
4769   if (HasImplicitSGPR && ST.getConstantBusLimit(Opc) <= 1 &&
4770       Src0.isReg() && (RI.isSGPRReg(MRI, Src0.getReg()) ||
4771        isLiteralConstantLike(Src0, InstrDesc.OpInfo[Src0Idx])))
4772     legalizeOpWithMove(MI, Src0Idx);
4773 
4774   // Special case: V_WRITELANE_B32 accepts only immediate or SGPR operands for
4775   // both the value to write (src0) and lane select (src1).  Fix up non-SGPR
4776   // src0/src1 with V_READFIRSTLANE.
4777   if (Opc == AMDGPU::V_WRITELANE_B32) {
4778     const DebugLoc &DL = MI.getDebugLoc();
4779     if (Src0.isReg() && RI.isVGPR(MRI, Src0.getReg())) {
4780       Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
4781       BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
4782           .add(Src0);
4783       Src0.ChangeToRegister(Reg, false);
4784     }
4785     if (Src1.isReg() && RI.isVGPR(MRI, Src1.getReg())) {
4786       Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
4787       const DebugLoc &DL = MI.getDebugLoc();
4788       BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
4789           .add(Src1);
4790       Src1.ChangeToRegister(Reg, false);
4791     }
4792     return;
4793   }
4794 
4795   // No VOP2 instructions support AGPRs.
4796   if (Src0.isReg() && RI.isAGPR(MRI, Src0.getReg()))
4797     legalizeOpWithMove(MI, Src0Idx);
4798 
4799   if (Src1.isReg() && RI.isAGPR(MRI, Src1.getReg()))
4800     legalizeOpWithMove(MI, Src1Idx);
4801 
4802   // VOP2 src0 instructions support all operand types, so we don't need to check
4803   // their legality. If src1 is already legal, we don't need to do anything.
4804   if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1))
4805     return;
4806 
4807   // Special case: V_READLANE_B32 accepts only immediate or SGPR operands for
4808   // lane select. Fix up using V_READFIRSTLANE, since we assume that the lane
4809   // select is uniform.
4810   if (Opc == AMDGPU::V_READLANE_B32 && Src1.isReg() &&
4811       RI.isVGPR(MRI, Src1.getReg())) {
4812     Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
4813     const DebugLoc &DL = MI.getDebugLoc();
4814     BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
4815         .add(Src1);
4816     Src1.ChangeToRegister(Reg, false);
4817     return;
4818   }
4819 
4820   // We do not use commuteInstruction here because it is too aggressive and will
4821   // commute if it is possible. We only want to commute here if it improves
4822   // legality. This can be called a fairly large number of times so don't waste
4823   // compile time pointlessly swapping and checking legality again.
4824   if (HasImplicitSGPR || !MI.isCommutable()) {
4825     legalizeOpWithMove(MI, Src1Idx);
4826     return;
4827   }
4828 
4829   // If src0 can be used as src1, commuting will make the operands legal.
4830   // Otherwise we have to give up and insert a move.
4831   //
4832   // TODO: Other immediate-like operand kinds could be commuted if there was a
4833   // MachineOperand::ChangeTo* for them.
4834   if ((!Src1.isImm() && !Src1.isReg()) ||
4835       !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) {
4836     legalizeOpWithMove(MI, Src1Idx);
4837     return;
4838   }
4839 
4840   int CommutedOpc = commuteOpcode(MI);
4841   if (CommutedOpc == -1) {
4842     legalizeOpWithMove(MI, Src1Idx);
4843     return;
4844   }
4845 
4846   MI.setDesc(get(CommutedOpc));
4847 
4848   Register Src0Reg = Src0.getReg();
4849   unsigned Src0SubReg = Src0.getSubReg();
4850   bool Src0Kill = Src0.isKill();
4851 
4852   if (Src1.isImm())
4853     Src0.ChangeToImmediate(Src1.getImm());
4854   else if (Src1.isReg()) {
4855     Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill());
4856     Src0.setSubReg(Src1.getSubReg());
4857   } else
4858     llvm_unreachable("Should only have register or immediate operands");
4859 
4860   Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill);
4861   Src1.setSubReg(Src0SubReg);
4862   fixImplicitOperands(MI);
4863 }
4864 
4865 // Legalize VOP3 operands. All operand types are supported for any operand
4866 // but only one literal constant and only starting from GFX10.
4867 void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI,
4868                                        MachineInstr &MI) const {
4869   unsigned Opc = MI.getOpcode();
4870 
4871   int VOP3Idx[3] = {
4872     AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
4873     AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1),
4874     AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)
4875   };
4876 
4877   if (Opc == AMDGPU::V_PERMLANE16_B32_e64 ||
4878       Opc == AMDGPU::V_PERMLANEX16_B32_e64) {
4879     // src1 and src2 must be scalar
4880     MachineOperand &Src1 = MI.getOperand(VOP3Idx[1]);
4881     MachineOperand &Src2 = MI.getOperand(VOP3Idx[2]);
4882     const DebugLoc &DL = MI.getDebugLoc();
4883     if (Src1.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) {
4884       Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
4885       BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
4886         .add(Src1);
4887       Src1.ChangeToRegister(Reg, false);
4888     }
4889     if (Src2.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src2.getReg()))) {
4890       Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
4891       BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
4892         .add(Src2);
4893       Src2.ChangeToRegister(Reg, false);
4894     }
4895   }
4896 
4897   // Find the one SGPR operand we are allowed to use.
4898   int ConstantBusLimit = ST.getConstantBusLimit(Opc);
4899   int LiteralLimit = ST.hasVOP3Literal() ? 1 : 0;
4900   SmallDenseSet<unsigned> SGPRsUsed;
4901   Register SGPRReg = findUsedSGPR(MI, VOP3Idx);
4902   if (SGPRReg != AMDGPU::NoRegister) {
4903     SGPRsUsed.insert(SGPRReg);
4904     --ConstantBusLimit;
4905   }
4906 
4907   for (unsigned i = 0; i < 3; ++i) {
4908     int Idx = VOP3Idx[i];
4909     if (Idx == -1)
4910       break;
4911     MachineOperand &MO = MI.getOperand(Idx);
4912 
4913     if (!MO.isReg()) {
4914       if (!isLiteralConstantLike(MO, get(Opc).OpInfo[Idx]))
4915         continue;
4916 
4917       if (LiteralLimit > 0 && ConstantBusLimit > 0) {
4918         --LiteralLimit;
4919         --ConstantBusLimit;
4920         continue;
4921       }
4922 
4923       --LiteralLimit;
4924       --ConstantBusLimit;
4925       legalizeOpWithMove(MI, Idx);
4926       continue;
4927     }
4928 
4929     if (RI.hasAGPRs(MRI.getRegClass(MO.getReg())) &&
4930         !isOperandLegal(MI, Idx, &MO)) {
4931       legalizeOpWithMove(MI, Idx);
4932       continue;
4933     }
4934 
4935     if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg())))
4936       continue; // VGPRs are legal
4937 
4938     // We can use one SGPR in each VOP3 instruction prior to GFX10
4939     // and two starting from GFX10.
4940     if (SGPRsUsed.count(MO.getReg()))
4941       continue;
4942     if (ConstantBusLimit > 0) {
4943       SGPRsUsed.insert(MO.getReg());
4944       --ConstantBusLimit;
4945       continue;
4946     }
4947 
4948     // If we make it this far, then the operand is not legal and we must
4949     // legalize it.
4950     legalizeOpWithMove(MI, Idx);
4951   }
4952 }
4953 
4954 Register SIInstrInfo::readlaneVGPRToSGPR(Register SrcReg, MachineInstr &UseMI,
4955                                          MachineRegisterInfo &MRI) const {
4956   const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg);
4957   const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC);
4958   Register DstReg = MRI.createVirtualRegister(SRC);
4959   unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32;
4960 
4961   if (RI.hasAGPRs(VRC)) {
4962     VRC = RI.getEquivalentVGPRClass(VRC);
4963     Register NewSrcReg = MRI.createVirtualRegister(VRC);
4964     BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
4965             get(TargetOpcode::COPY), NewSrcReg)
4966         .addReg(SrcReg);
4967     SrcReg = NewSrcReg;
4968   }
4969 
4970   if (SubRegs == 1) {
4971     BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
4972             get(AMDGPU::V_READFIRSTLANE_B32), DstReg)
4973         .addReg(SrcReg);
4974     return DstReg;
4975   }
4976 
4977   SmallVector<unsigned, 8> SRegs;
4978   for (unsigned i = 0; i < SubRegs; ++i) {
4979     Register SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
4980     BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
4981             get(AMDGPU::V_READFIRSTLANE_B32), SGPR)
4982         .addReg(SrcReg, 0, RI.getSubRegFromChannel(i));
4983     SRegs.push_back(SGPR);
4984   }
4985 
4986   MachineInstrBuilder MIB =
4987       BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
4988               get(AMDGPU::REG_SEQUENCE), DstReg);
4989   for (unsigned i = 0; i < SubRegs; ++i) {
4990     MIB.addReg(SRegs[i]);
4991     MIB.addImm(RI.getSubRegFromChannel(i));
4992   }
4993   return DstReg;
4994 }
4995 
4996 void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI,
4997                                        MachineInstr &MI) const {
4998 
4999   // If the pointer is store in VGPRs, then we need to move them to
5000   // SGPRs using v_readfirstlane.  This is safe because we only select
5001   // loads with uniform pointers to SMRD instruction so we know the
5002   // pointer value is uniform.
5003   MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase);
5004   if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) {
5005     Register SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI);
5006     SBase->setReg(SGPR);
5007   }
5008   MachineOperand *SOff = getNamedOperand(MI, AMDGPU::OpName::soff);
5009   if (SOff && !RI.isSGPRClass(MRI.getRegClass(SOff->getReg()))) {
5010     Register SGPR = readlaneVGPRToSGPR(SOff->getReg(), MI, MRI);
5011     SOff->setReg(SGPR);
5012   }
5013 }
5014 
5015 // FIXME: Remove this when SelectionDAG is obsoleted.
5016 void SIInstrInfo::legalizeOperandsFLAT(MachineRegisterInfo &MRI,
5017                                        MachineInstr &MI) const {
5018   if (!isSegmentSpecificFLAT(MI))
5019     return;
5020 
5021   // Fixup SGPR operands in VGPRs. We only select these when the DAG divergence
5022   // thinks they are uniform, so a readfirstlane should be valid.
5023   MachineOperand *SAddr = getNamedOperand(MI, AMDGPU::OpName::saddr);
5024   if (!SAddr || RI.isSGPRClass(MRI.getRegClass(SAddr->getReg())))
5025     return;
5026 
5027   Register ToSGPR = readlaneVGPRToSGPR(SAddr->getReg(), MI, MRI);
5028   SAddr->setReg(ToSGPR);
5029 }
5030 
5031 void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB,
5032                                          MachineBasicBlock::iterator I,
5033                                          const TargetRegisterClass *DstRC,
5034                                          MachineOperand &Op,
5035                                          MachineRegisterInfo &MRI,
5036                                          const DebugLoc &DL) const {
5037   Register OpReg = Op.getReg();
5038   unsigned OpSubReg = Op.getSubReg();
5039 
5040   const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg(
5041       RI.getRegClassForReg(MRI, OpReg), OpSubReg);
5042 
5043   // Check if operand is already the correct register class.
5044   if (DstRC == OpRC)
5045     return;
5046 
5047   Register DstReg = MRI.createVirtualRegister(DstRC);
5048   MachineInstr *Copy =
5049       BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg).add(Op);
5050 
5051   Op.setReg(DstReg);
5052   Op.setSubReg(0);
5053 
5054   MachineInstr *Def = MRI.getVRegDef(OpReg);
5055   if (!Def)
5056     return;
5057 
5058   // Try to eliminate the copy if it is copying an immediate value.
5059   if (Def->isMoveImmediate() && DstRC != &AMDGPU::VReg_1RegClass)
5060     FoldImmediate(*Copy, *Def, OpReg, &MRI);
5061 
5062   bool ImpDef = Def->isImplicitDef();
5063   while (!ImpDef && Def && Def->isCopy()) {
5064     if (Def->getOperand(1).getReg().isPhysical())
5065       break;
5066     Def = MRI.getUniqueVRegDef(Def->getOperand(1).getReg());
5067     ImpDef = Def && Def->isImplicitDef();
5068   }
5069   if (!RI.isSGPRClass(DstRC) && !Copy->readsRegister(AMDGPU::EXEC, &RI) &&
5070       !ImpDef)
5071     Copy->addOperand(MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
5072 }
5073 
5074 // Emit the actual waterfall loop, executing the wrapped instruction for each
5075 // unique value of \p Rsrc across all lanes. In the best case we execute 1
5076 // iteration, in the worst case we execute 64 (once per lane).
5077 static void
5078 emitLoadSRsrcFromVGPRLoop(const SIInstrInfo &TII, MachineRegisterInfo &MRI,
5079                           MachineBasicBlock &OrigBB, MachineBasicBlock &LoopBB,
5080                           const DebugLoc &DL, MachineOperand &Rsrc) {
5081   MachineFunction &MF = *OrigBB.getParent();
5082   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
5083   const SIRegisterInfo *TRI = ST.getRegisterInfo();
5084   unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
5085   unsigned SaveExecOpc =
5086       ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32 : AMDGPU::S_AND_SAVEEXEC_B64;
5087   unsigned XorTermOpc =
5088       ST.isWave32() ? AMDGPU::S_XOR_B32_term : AMDGPU::S_XOR_B64_term;
5089   unsigned AndOpc =
5090       ST.isWave32() ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64;
5091   const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
5092 
5093   MachineBasicBlock::iterator I = LoopBB.begin();
5094 
5095   SmallVector<Register, 8> ReadlanePieces;
5096   Register CondReg = AMDGPU::NoRegister;
5097 
5098   Register VRsrc = Rsrc.getReg();
5099   unsigned VRsrcUndef = getUndefRegState(Rsrc.isUndef());
5100 
5101   unsigned RegSize = TRI->getRegSizeInBits(Rsrc.getReg(), MRI);
5102   unsigned NumSubRegs =  RegSize / 32;
5103   assert(NumSubRegs % 2 == 0 && NumSubRegs <= 32 && "Unhandled register size");
5104 
5105   for (unsigned Idx = 0; Idx < NumSubRegs; Idx += 2) {
5106 
5107     Register CurRegLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
5108     Register CurRegHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
5109 
5110     // Read the next variant <- also loop target.
5111     BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), CurRegLo)
5112             .addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx));
5113 
5114     // Read the next variant <- also loop target.
5115     BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), CurRegHi)
5116             .addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx + 1));
5117 
5118     ReadlanePieces.push_back(CurRegLo);
5119     ReadlanePieces.push_back(CurRegHi);
5120 
5121     // Comparison is to be done as 64-bit.
5122     Register CurReg = MRI.createVirtualRegister(&AMDGPU::SGPR_64RegClass);
5123     BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), CurReg)
5124             .addReg(CurRegLo)
5125             .addImm(AMDGPU::sub0)
5126             .addReg(CurRegHi)
5127             .addImm(AMDGPU::sub1);
5128 
5129     Register NewCondReg = MRI.createVirtualRegister(BoolXExecRC);
5130     auto Cmp =
5131         BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), NewCondReg)
5132             .addReg(CurReg);
5133     if (NumSubRegs <= 2)
5134       Cmp.addReg(VRsrc);
5135     else
5136       Cmp.addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx, 2));
5137 
5138     // Combine the comparision results with AND.
5139     if (CondReg == AMDGPU::NoRegister) // First.
5140       CondReg = NewCondReg;
5141     else { // If not the first, we create an AND.
5142       Register AndReg = MRI.createVirtualRegister(BoolXExecRC);
5143       BuildMI(LoopBB, I, DL, TII.get(AndOpc), AndReg)
5144               .addReg(CondReg)
5145               .addReg(NewCondReg);
5146       CondReg = AndReg;
5147     }
5148   } // End for loop.
5149 
5150   auto SRsrcRC = TRI->getEquivalentSGPRClass(MRI.getRegClass(VRsrc));
5151   Register SRsrc = MRI.createVirtualRegister(SRsrcRC);
5152 
5153   // Build scalar Rsrc.
5154   auto Merge = BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), SRsrc);
5155   unsigned Channel = 0;
5156   for (Register Piece : ReadlanePieces) {
5157     Merge.addReg(Piece)
5158          .addImm(TRI->getSubRegFromChannel(Channel++));
5159   }
5160 
5161   // Update Rsrc operand to use the SGPR Rsrc.
5162   Rsrc.setReg(SRsrc);
5163   Rsrc.setIsKill(true);
5164 
5165   Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);
5166   MRI.setSimpleHint(SaveExec, CondReg);
5167 
5168   // Update EXEC to matching lanes, saving original to SaveExec.
5169   BuildMI(LoopBB, I, DL, TII.get(SaveExecOpc), SaveExec)
5170       .addReg(CondReg, RegState::Kill);
5171 
5172   // The original instruction is here; we insert the terminators after it.
5173   I = LoopBB.end();
5174 
5175   // Update EXEC, switch all done bits to 0 and all todo bits to 1.
5176   BuildMI(LoopBB, I, DL, TII.get(XorTermOpc), Exec)
5177       .addReg(Exec)
5178       .addReg(SaveExec);
5179 
5180   BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_CBRANCH_EXECNZ)).addMBB(&LoopBB);
5181 }
5182 
5183 // Build a waterfall loop around \p MI, replacing the VGPR \p Rsrc register
5184 // with SGPRs by iterating over all unique values across all lanes.
5185 // Returns the loop basic block that now contains \p MI.
5186 static MachineBasicBlock *
5187 loadSRsrcFromVGPR(const SIInstrInfo &TII, MachineInstr &MI,
5188                   MachineOperand &Rsrc, MachineDominatorTree *MDT,
5189                   MachineBasicBlock::iterator Begin = nullptr,
5190                   MachineBasicBlock::iterator End = nullptr) {
5191   MachineBasicBlock &MBB = *MI.getParent();
5192   MachineFunction &MF = *MBB.getParent();
5193   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
5194   const SIRegisterInfo *TRI = ST.getRegisterInfo();
5195   MachineRegisterInfo &MRI = MF.getRegInfo();
5196   if (!Begin.isValid())
5197     Begin = &MI;
5198   if (!End.isValid()) {
5199     End = &MI;
5200     ++End;
5201   }
5202   const DebugLoc &DL = MI.getDebugLoc();
5203   unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
5204   unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
5205   const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
5206 
5207   Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);
5208 
5209   // Save the EXEC mask
5210   BuildMI(MBB, Begin, DL, TII.get(MovExecOpc), SaveExec).addReg(Exec);
5211 
5212   // Killed uses in the instruction we are waterfalling around will be
5213   // incorrect due to the added control-flow.
5214   MachineBasicBlock::iterator AfterMI = MI;
5215   ++AfterMI;
5216   for (auto I = Begin; I != AfterMI; I++) {
5217     for (auto &MO : I->uses()) {
5218       if (MO.isReg() && MO.isUse()) {
5219         MRI.clearKillFlags(MO.getReg());
5220       }
5221     }
5222   }
5223 
5224   // To insert the loop we need to split the block. Move everything after this
5225   // point to a new block, and insert a new empty block between the two.
5226   MachineBasicBlock *LoopBB = MF.CreateMachineBasicBlock();
5227   MachineBasicBlock *RemainderBB = MF.CreateMachineBasicBlock();
5228   MachineFunction::iterator MBBI(MBB);
5229   ++MBBI;
5230 
5231   MF.insert(MBBI, LoopBB);
5232   MF.insert(MBBI, RemainderBB);
5233 
5234   LoopBB->addSuccessor(LoopBB);
5235   LoopBB->addSuccessor(RemainderBB);
5236 
5237   // Move Begin to MI to the LoopBB, and the remainder of the block to
5238   // RemainderBB.
5239   RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
5240   RemainderBB->splice(RemainderBB->begin(), &MBB, End, MBB.end());
5241   LoopBB->splice(LoopBB->begin(), &MBB, Begin, MBB.end());
5242 
5243   MBB.addSuccessor(LoopBB);
5244 
5245   // Update dominators. We know that MBB immediately dominates LoopBB, that
5246   // LoopBB immediately dominates RemainderBB, and that RemainderBB immediately
5247   // dominates all of the successors transferred to it from MBB that MBB used
5248   // to properly dominate.
5249   if (MDT) {
5250     MDT->addNewBlock(LoopBB, &MBB);
5251     MDT->addNewBlock(RemainderBB, LoopBB);
5252     for (auto &Succ : RemainderBB->successors()) {
5253       if (MDT->properlyDominates(&MBB, Succ)) {
5254         MDT->changeImmediateDominator(Succ, RemainderBB);
5255       }
5256     }
5257   }
5258 
5259   emitLoadSRsrcFromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, Rsrc);
5260 
5261   // Restore the EXEC mask
5262   MachineBasicBlock::iterator First = RemainderBB->begin();
5263   BuildMI(*RemainderBB, First, DL, TII.get(MovExecOpc), Exec).addReg(SaveExec);
5264   return LoopBB;
5265 }
5266 
5267 // Extract pointer from Rsrc and return a zero-value Rsrc replacement.
5268 static std::tuple<unsigned, unsigned>
5269 extractRsrcPtr(const SIInstrInfo &TII, MachineInstr &MI, MachineOperand &Rsrc) {
5270   MachineBasicBlock &MBB = *MI.getParent();
5271   MachineFunction &MF = *MBB.getParent();
5272   MachineRegisterInfo &MRI = MF.getRegInfo();
5273 
5274   // Extract the ptr from the resource descriptor.
5275   unsigned RsrcPtr =
5276       TII.buildExtractSubReg(MI, MRI, Rsrc, &AMDGPU::VReg_128RegClass,
5277                              AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);
5278 
5279   // Create an empty resource descriptor
5280   Register Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
5281   Register SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
5282   Register SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
5283   Register NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
5284   uint64_t RsrcDataFormat = TII.getDefaultRsrcDataFormat();
5285 
5286   // Zero64 = 0
5287   BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B64), Zero64)
5288       .addImm(0);
5289 
5290   // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
5291   BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatLo)
5292       .addImm(RsrcDataFormat & 0xFFFFFFFF);
5293 
5294   // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
5295   BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatHi)
5296       .addImm(RsrcDataFormat >> 32);
5297 
5298   // NewSRsrc = {Zero64, SRsrcFormat}
5299   BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::REG_SEQUENCE), NewSRsrc)
5300       .addReg(Zero64)
5301       .addImm(AMDGPU::sub0_sub1)
5302       .addReg(SRsrcFormatLo)
5303       .addImm(AMDGPU::sub2)
5304       .addReg(SRsrcFormatHi)
5305       .addImm(AMDGPU::sub3);
5306 
5307   return std::make_tuple(RsrcPtr, NewSRsrc);
5308 }
5309 
5310 MachineBasicBlock *
5311 SIInstrInfo::legalizeOperands(MachineInstr &MI,
5312                               MachineDominatorTree *MDT) const {
5313   MachineFunction &MF = *MI.getParent()->getParent();
5314   MachineRegisterInfo &MRI = MF.getRegInfo();
5315   MachineBasicBlock *CreatedBB = nullptr;
5316 
5317   // Legalize VOP2
5318   if (isVOP2(MI) || isVOPC(MI)) {
5319     legalizeOperandsVOP2(MRI, MI);
5320     return CreatedBB;
5321   }
5322 
5323   // Legalize VOP3
5324   if (isVOP3(MI)) {
5325     legalizeOperandsVOP3(MRI, MI);
5326     return CreatedBB;
5327   }
5328 
5329   // Legalize SMRD
5330   if (isSMRD(MI)) {
5331     legalizeOperandsSMRD(MRI, MI);
5332     return CreatedBB;
5333   }
5334 
5335   // Legalize FLAT
5336   if (isFLAT(MI)) {
5337     legalizeOperandsFLAT(MRI, MI);
5338     return CreatedBB;
5339   }
5340 
5341   // Legalize REG_SEQUENCE and PHI
5342   // The register class of the operands much be the same type as the register
5343   // class of the output.
5344   if (MI.getOpcode() == AMDGPU::PHI) {
5345     const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
5346     for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
5347       if (!MI.getOperand(i).isReg() || !MI.getOperand(i).getReg().isVirtual())
5348         continue;
5349       const TargetRegisterClass *OpRC =
5350           MRI.getRegClass(MI.getOperand(i).getReg());
5351       if (RI.hasVectorRegisters(OpRC)) {
5352         VRC = OpRC;
5353       } else {
5354         SRC = OpRC;
5355       }
5356     }
5357 
5358     // If any of the operands are VGPR registers, then they all most be
5359     // otherwise we will create illegal VGPR->SGPR copies when legalizing
5360     // them.
5361     if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) {
5362       if (!VRC) {
5363         assert(SRC);
5364         if (getOpRegClass(MI, 0) == &AMDGPU::VReg_1RegClass) {
5365           VRC = &AMDGPU::VReg_1RegClass;
5366         } else
5367           VRC = RI.hasAGPRs(getOpRegClass(MI, 0))
5368                     ? RI.getEquivalentAGPRClass(SRC)
5369                     : RI.getEquivalentVGPRClass(SRC);
5370       } else {
5371           VRC = RI.hasAGPRs(getOpRegClass(MI, 0))
5372                     ? RI.getEquivalentAGPRClass(VRC)
5373                     : RI.getEquivalentVGPRClass(VRC);
5374       }
5375       RC = VRC;
5376     } else {
5377       RC = SRC;
5378     }
5379 
5380     // Update all the operands so they have the same type.
5381     for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
5382       MachineOperand &Op = MI.getOperand(I);
5383       if (!Op.isReg() || !Op.getReg().isVirtual())
5384         continue;
5385 
5386       // MI is a PHI instruction.
5387       MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB();
5388       MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator();
5389 
5390       // Avoid creating no-op copies with the same src and dst reg class.  These
5391       // confuse some of the machine passes.
5392       legalizeGenericOperand(*InsertBB, Insert, RC, Op, MRI, MI.getDebugLoc());
5393     }
5394   }
5395 
5396   // REG_SEQUENCE doesn't really require operand legalization, but if one has a
5397   // VGPR dest type and SGPR sources, insert copies so all operands are
5398   // VGPRs. This seems to help operand folding / the register coalescer.
5399   if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) {
5400     MachineBasicBlock *MBB = MI.getParent();
5401     const TargetRegisterClass *DstRC = getOpRegClass(MI, 0);
5402     if (RI.hasVGPRs(DstRC)) {
5403       // Update all the operands so they are VGPR register classes. These may
5404       // not be the same register class because REG_SEQUENCE supports mixing
5405       // subregister index types e.g. sub0_sub1 + sub2 + sub3
5406       for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
5407         MachineOperand &Op = MI.getOperand(I);
5408         if (!Op.isReg() || !Op.getReg().isVirtual())
5409           continue;
5410 
5411         const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg());
5412         const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC);
5413         if (VRC == OpRC)
5414           continue;
5415 
5416         legalizeGenericOperand(*MBB, MI, VRC, Op, MRI, MI.getDebugLoc());
5417         Op.setIsKill();
5418       }
5419     }
5420 
5421     return CreatedBB;
5422   }
5423 
5424   // Legalize INSERT_SUBREG
5425   // src0 must have the same register class as dst
5426   if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) {
5427     Register Dst = MI.getOperand(0).getReg();
5428     Register Src0 = MI.getOperand(1).getReg();
5429     const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
5430     const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0);
5431     if (DstRC != Src0RC) {
5432       MachineBasicBlock *MBB = MI.getParent();
5433       MachineOperand &Op = MI.getOperand(1);
5434       legalizeGenericOperand(*MBB, MI, DstRC, Op, MRI, MI.getDebugLoc());
5435     }
5436     return CreatedBB;
5437   }
5438 
5439   // Legalize SI_INIT_M0
5440   if (MI.getOpcode() == AMDGPU::SI_INIT_M0) {
5441     MachineOperand &Src = MI.getOperand(0);
5442     if (Src.isReg() && RI.hasVectorRegisters(MRI.getRegClass(Src.getReg())))
5443       Src.setReg(readlaneVGPRToSGPR(Src.getReg(), MI, MRI));
5444     return CreatedBB;
5445   }
5446 
5447   // Legalize MIMG and MUBUF/MTBUF for shaders.
5448   //
5449   // Shaders only generate MUBUF/MTBUF instructions via intrinsics or via
5450   // scratch memory access. In both cases, the legalization never involves
5451   // conversion to the addr64 form.
5452   if (isMIMG(MI) || (AMDGPU::isGraphics(MF.getFunction().getCallingConv()) &&
5453                      (isMUBUF(MI) || isMTBUF(MI)))) {
5454     MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc);
5455     if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg())))
5456       CreatedBB = loadSRsrcFromVGPR(*this, MI, *SRsrc, MDT);
5457 
5458     MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp);
5459     if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg())))
5460       CreatedBB = loadSRsrcFromVGPR(*this, MI, *SSamp, MDT);
5461 
5462     return CreatedBB;
5463   }
5464 
5465   // Legalize SI_CALL
5466   if (MI.getOpcode() == AMDGPU::SI_CALL_ISEL) {
5467     MachineOperand *Dest = &MI.getOperand(0);
5468     if (!RI.isSGPRClass(MRI.getRegClass(Dest->getReg()))) {
5469       // Move everything between ADJCALLSTACKUP and ADJCALLSTACKDOWN and
5470       // following copies, we also need to move copies from and to physical
5471       // registers into the loop block.
5472       unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
5473       unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();
5474 
5475       // Also move the copies to physical registers into the loop block
5476       MachineBasicBlock &MBB = *MI.getParent();
5477       MachineBasicBlock::iterator Start(&MI);
5478       while (Start->getOpcode() != FrameSetupOpcode)
5479         --Start;
5480       MachineBasicBlock::iterator End(&MI);
5481       while (End->getOpcode() != FrameDestroyOpcode)
5482         ++End;
5483       // Also include following copies of the return value
5484       ++End;
5485       while (End != MBB.end() && End->isCopy() && End->getOperand(1).isReg() &&
5486              MI.definesRegister(End->getOperand(1).getReg()))
5487         ++End;
5488       CreatedBB = loadSRsrcFromVGPR(*this, MI, *Dest, MDT, Start, End);
5489     }
5490   }
5491 
5492   // Legalize MUBUF* instructions.
5493   int RsrcIdx =
5494       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
5495   if (RsrcIdx != -1) {
5496     // We have an MUBUF instruction
5497     MachineOperand *Rsrc = &MI.getOperand(RsrcIdx);
5498     unsigned RsrcRC = get(MI.getOpcode()).OpInfo[RsrcIdx].RegClass;
5499     if (RI.getCommonSubClass(MRI.getRegClass(Rsrc->getReg()),
5500                              RI.getRegClass(RsrcRC))) {
5501       // The operands are legal.
5502       // FIXME: We may need to legalize operands besided srsrc.
5503       return CreatedBB;
5504     }
5505 
5506     // Legalize a VGPR Rsrc.
5507     //
5508     // If the instruction is _ADDR64, we can avoid a waterfall by extracting
5509     // the base pointer from the VGPR Rsrc, adding it to the VAddr, then using
5510     // a zero-value SRsrc.
5511     //
5512     // If the instruction is _OFFSET (both idxen and offen disabled), and we
5513     // support ADDR64 instructions, we can convert to ADDR64 and do the same as
5514     // above.
5515     //
5516     // Otherwise we are on non-ADDR64 hardware, and/or we have
5517     // idxen/offen/bothen and we fall back to a waterfall loop.
5518 
5519     MachineBasicBlock &MBB = *MI.getParent();
5520 
5521     MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
5522     if (VAddr && AMDGPU::getIfAddr64Inst(MI.getOpcode()) != -1) {
5523       // This is already an ADDR64 instruction so we need to add the pointer
5524       // extracted from the resource descriptor to the current value of VAddr.
5525       Register NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5526       Register NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5527       Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
5528 
5529       const auto *BoolXExecRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
5530       Register CondReg0 = MRI.createVirtualRegister(BoolXExecRC);
5531       Register CondReg1 = MRI.createVirtualRegister(BoolXExecRC);
5532 
5533       unsigned RsrcPtr, NewSRsrc;
5534       std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
5535 
5536       // NewVaddrLo = RsrcPtr:sub0 + VAddr:sub0
5537       const DebugLoc &DL = MI.getDebugLoc();
5538       BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_CO_U32_e64), NewVAddrLo)
5539         .addDef(CondReg0)
5540         .addReg(RsrcPtr, 0, AMDGPU::sub0)
5541         .addReg(VAddr->getReg(), 0, AMDGPU::sub0)
5542         .addImm(0);
5543 
5544       // NewVaddrHi = RsrcPtr:sub1 + VAddr:sub1
5545       BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e64), NewVAddrHi)
5546         .addDef(CondReg1, RegState::Dead)
5547         .addReg(RsrcPtr, 0, AMDGPU::sub1)
5548         .addReg(VAddr->getReg(), 0, AMDGPU::sub1)
5549         .addReg(CondReg0, RegState::Kill)
5550         .addImm(0);
5551 
5552       // NewVaddr = {NewVaddrHi, NewVaddrLo}
5553       BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
5554           .addReg(NewVAddrLo)
5555           .addImm(AMDGPU::sub0)
5556           .addReg(NewVAddrHi)
5557           .addImm(AMDGPU::sub1);
5558 
5559       VAddr->setReg(NewVAddr);
5560       Rsrc->setReg(NewSRsrc);
5561     } else if (!VAddr && ST.hasAddr64()) {
5562       // This instructions is the _OFFSET variant, so we need to convert it to
5563       // ADDR64.
5564       assert(ST.getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS &&
5565              "FIXME: Need to emit flat atomics here");
5566 
5567       unsigned RsrcPtr, NewSRsrc;
5568       std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
5569 
5570       Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
5571       MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata);
5572       MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
5573       MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset);
5574       unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode());
5575 
5576       // Atomics rith return have have an additional tied operand and are
5577       // missing some of the special bits.
5578       MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in);
5579       MachineInstr *Addr64;
5580 
5581       if (!VDataIn) {
5582         // Regular buffer load / store.
5583         MachineInstrBuilder MIB =
5584             BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
5585                 .add(*VData)
5586                 .addReg(NewVAddr)
5587                 .addReg(NewSRsrc)
5588                 .add(*SOffset)
5589                 .add(*Offset);
5590 
5591         // Atomics do not have this operand.
5592         if (const MachineOperand *GLC =
5593                 getNamedOperand(MI, AMDGPU::OpName::glc)) {
5594           MIB.addImm(GLC->getImm());
5595         }
5596         if (const MachineOperand *DLC =
5597                 getNamedOperand(MI, AMDGPU::OpName::dlc)) {
5598           MIB.addImm(DLC->getImm());
5599         }
5600         if (const MachineOperand *SCCB =
5601                 getNamedOperand(MI, AMDGPU::OpName::sccb)) {
5602           MIB.addImm(SCCB->getImm());
5603         }
5604 
5605         MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc));
5606 
5607         if (const MachineOperand *TFE =
5608                 getNamedOperand(MI, AMDGPU::OpName::tfe)) {
5609           MIB.addImm(TFE->getImm());
5610         }
5611 
5612         MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::swz));
5613 
5614         MIB.cloneMemRefs(MI);
5615         Addr64 = MIB;
5616       } else {
5617         // Atomics with return.
5618         Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
5619                      .add(*VData)
5620                      .add(*VDataIn)
5621                      .addReg(NewVAddr)
5622                      .addReg(NewSRsrc)
5623                      .add(*SOffset)
5624                      .add(*Offset)
5625                      .addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc))
5626                      .cloneMemRefs(MI);
5627       }
5628 
5629       MI.removeFromParent();
5630 
5631       // NewVaddr = {NewVaddrHi, NewVaddrLo}
5632       BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
5633               NewVAddr)
5634           .addReg(RsrcPtr, 0, AMDGPU::sub0)
5635           .addImm(AMDGPU::sub0)
5636           .addReg(RsrcPtr, 0, AMDGPU::sub1)
5637           .addImm(AMDGPU::sub1);
5638     } else {
5639       // This is another variant; legalize Rsrc with waterfall loop from VGPRs
5640       // to SGPRs.
5641       CreatedBB = loadSRsrcFromVGPR(*this, MI, *Rsrc, MDT);
5642       return CreatedBB;
5643     }
5644   }
5645   return CreatedBB;
5646 }
5647 
5648 MachineBasicBlock *SIInstrInfo::moveToVALU(MachineInstr &TopInst,
5649                                            MachineDominatorTree *MDT) const {
5650   SetVectorType Worklist;
5651   Worklist.insert(&TopInst);
5652   MachineBasicBlock *CreatedBB = nullptr;
5653   MachineBasicBlock *CreatedBBTmp = nullptr;
5654 
5655   while (!Worklist.empty()) {
5656     MachineInstr &Inst = *Worklist.pop_back_val();
5657     MachineBasicBlock *MBB = Inst.getParent();
5658     MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
5659 
5660     unsigned Opcode = Inst.getOpcode();
5661     unsigned NewOpcode = getVALUOp(Inst);
5662 
5663     // Handle some special cases
5664     switch (Opcode) {
5665     default:
5666       break;
5667     case AMDGPU::S_ADD_U64_PSEUDO:
5668     case AMDGPU::S_SUB_U64_PSEUDO:
5669       splitScalar64BitAddSub(Worklist, Inst, MDT);
5670       Inst.eraseFromParent();
5671       continue;
5672     case AMDGPU::S_ADD_I32:
5673     case AMDGPU::S_SUB_I32: {
5674       // FIXME: The u32 versions currently selected use the carry.
5675       bool Changed;
5676       std::tie(Changed, CreatedBBTmp) = moveScalarAddSub(Worklist, Inst, MDT);
5677       if (CreatedBBTmp && TopInst.getParent() == CreatedBBTmp)
5678         CreatedBB = CreatedBBTmp;
5679       if (Changed)
5680         continue;
5681 
5682       // Default handling
5683       break;
5684     }
5685     case AMDGPU::S_AND_B64:
5686       splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_AND_B32, MDT);
5687       Inst.eraseFromParent();
5688       continue;
5689 
5690     case AMDGPU::S_OR_B64:
5691       splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_OR_B32, MDT);
5692       Inst.eraseFromParent();
5693       continue;
5694 
5695     case AMDGPU::S_XOR_B64:
5696       splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XOR_B32, MDT);
5697       Inst.eraseFromParent();
5698       continue;
5699 
5700     case AMDGPU::S_NAND_B64:
5701       splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NAND_B32, MDT);
5702       Inst.eraseFromParent();
5703       continue;
5704 
5705     case AMDGPU::S_NOR_B64:
5706       splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NOR_B32, MDT);
5707       Inst.eraseFromParent();
5708       continue;
5709 
5710     case AMDGPU::S_XNOR_B64:
5711       if (ST.hasDLInsts())
5712         splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT);
5713       else
5714         splitScalar64BitXnor(Worklist, Inst, MDT);
5715       Inst.eraseFromParent();
5716       continue;
5717 
5718     case AMDGPU::S_ANDN2_B64:
5719       splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ANDN2_B32, MDT);
5720       Inst.eraseFromParent();
5721       continue;
5722 
5723     case AMDGPU::S_ORN2_B64:
5724       splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ORN2_B32, MDT);
5725       Inst.eraseFromParent();
5726       continue;
5727 
5728     case AMDGPU::S_BREV_B64:
5729       splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_BREV_B32, true);
5730       Inst.eraseFromParent();
5731       continue;
5732 
5733     case AMDGPU::S_NOT_B64:
5734       splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_NOT_B32);
5735       Inst.eraseFromParent();
5736       continue;
5737 
5738     case AMDGPU::S_BCNT1_I32_B64:
5739       splitScalar64BitBCNT(Worklist, Inst);
5740       Inst.eraseFromParent();
5741       continue;
5742 
5743     case AMDGPU::S_BFE_I64:
5744       splitScalar64BitBFE(Worklist, Inst);
5745       Inst.eraseFromParent();
5746       continue;
5747 
5748     case AMDGPU::S_LSHL_B32:
5749       if (ST.hasOnlyRevVALUShifts()) {
5750         NewOpcode = AMDGPU::V_LSHLREV_B32_e64;
5751         swapOperands(Inst);
5752       }
5753       break;
5754     case AMDGPU::S_ASHR_I32:
5755       if (ST.hasOnlyRevVALUShifts()) {
5756         NewOpcode = AMDGPU::V_ASHRREV_I32_e64;
5757         swapOperands(Inst);
5758       }
5759       break;
5760     case AMDGPU::S_LSHR_B32:
5761       if (ST.hasOnlyRevVALUShifts()) {
5762         NewOpcode = AMDGPU::V_LSHRREV_B32_e64;
5763         swapOperands(Inst);
5764       }
5765       break;
5766     case AMDGPU::S_LSHL_B64:
5767       if (ST.hasOnlyRevVALUShifts()) {
5768         NewOpcode = AMDGPU::V_LSHLREV_B64_e64;
5769         swapOperands(Inst);
5770       }
5771       break;
5772     case AMDGPU::S_ASHR_I64:
5773       if (ST.hasOnlyRevVALUShifts()) {
5774         NewOpcode = AMDGPU::V_ASHRREV_I64_e64;
5775         swapOperands(Inst);
5776       }
5777       break;
5778     case AMDGPU::S_LSHR_B64:
5779       if (ST.hasOnlyRevVALUShifts()) {
5780         NewOpcode = AMDGPU::V_LSHRREV_B64_e64;
5781         swapOperands(Inst);
5782       }
5783       break;
5784 
5785     case AMDGPU::S_ABS_I32:
5786       lowerScalarAbs(Worklist, Inst);
5787       Inst.eraseFromParent();
5788       continue;
5789 
5790     case AMDGPU::S_CBRANCH_SCC0:
5791     case AMDGPU::S_CBRANCH_SCC1:
5792       // Clear unused bits of vcc
5793       if (ST.isWave32())
5794         BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B32),
5795                 AMDGPU::VCC_LO)
5796             .addReg(AMDGPU::EXEC_LO)
5797             .addReg(AMDGPU::VCC_LO);
5798       else
5799         BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B64),
5800                 AMDGPU::VCC)
5801             .addReg(AMDGPU::EXEC)
5802             .addReg(AMDGPU::VCC);
5803       break;
5804 
5805     case AMDGPU::S_BFE_U64:
5806     case AMDGPU::S_BFM_B64:
5807       llvm_unreachable("Moving this op to VALU not implemented");
5808 
5809     case AMDGPU::S_PACK_LL_B32_B16:
5810     case AMDGPU::S_PACK_LH_B32_B16:
5811     case AMDGPU::S_PACK_HH_B32_B16:
5812       movePackToVALU(Worklist, MRI, Inst);
5813       Inst.eraseFromParent();
5814       continue;
5815 
5816     case AMDGPU::S_XNOR_B32:
5817       lowerScalarXnor(Worklist, Inst);
5818       Inst.eraseFromParent();
5819       continue;
5820 
5821     case AMDGPU::S_NAND_B32:
5822       splitScalarNotBinop(Worklist, Inst, AMDGPU::S_AND_B32);
5823       Inst.eraseFromParent();
5824       continue;
5825 
5826     case AMDGPU::S_NOR_B32:
5827       splitScalarNotBinop(Worklist, Inst, AMDGPU::S_OR_B32);
5828       Inst.eraseFromParent();
5829       continue;
5830 
5831     case AMDGPU::S_ANDN2_B32:
5832       splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_AND_B32);
5833       Inst.eraseFromParent();
5834       continue;
5835 
5836     case AMDGPU::S_ORN2_B32:
5837       splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_OR_B32);
5838       Inst.eraseFromParent();
5839       continue;
5840 
5841     // TODO: remove as soon as everything is ready
5842     // to replace VGPR to SGPR copy with V_READFIRSTLANEs.
5843     // S_ADD/SUB_CO_PSEUDO as well as S_UADDO/USUBO_PSEUDO
5844     // can only be selected from the uniform SDNode.
5845     case AMDGPU::S_ADD_CO_PSEUDO:
5846     case AMDGPU::S_SUB_CO_PSEUDO: {
5847       unsigned Opc = (Inst.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO)
5848                          ? AMDGPU::V_ADDC_U32_e64
5849                          : AMDGPU::V_SUBB_U32_e64;
5850       const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
5851 
5852       Register CarryInReg = Inst.getOperand(4).getReg();
5853       if (!MRI.constrainRegClass(CarryInReg, CarryRC)) {
5854         Register NewCarryReg = MRI.createVirtualRegister(CarryRC);
5855         BuildMI(*MBB, &Inst, Inst.getDebugLoc(), get(AMDGPU::COPY), NewCarryReg)
5856             .addReg(CarryInReg);
5857       }
5858 
5859       Register CarryOutReg = Inst.getOperand(1).getReg();
5860 
5861       Register DestReg = MRI.createVirtualRegister(RI.getEquivalentVGPRClass(
5862           MRI.getRegClass(Inst.getOperand(0).getReg())));
5863       MachineInstr *CarryOp =
5864           BuildMI(*MBB, &Inst, Inst.getDebugLoc(), get(Opc), DestReg)
5865               .addReg(CarryOutReg, RegState::Define)
5866               .add(Inst.getOperand(2))
5867               .add(Inst.getOperand(3))
5868               .addReg(CarryInReg)
5869               .addImm(0);
5870       CreatedBBTmp = legalizeOperands(*CarryOp);
5871       if (CreatedBBTmp && TopInst.getParent() == CreatedBBTmp)
5872         CreatedBB = CreatedBBTmp;
5873       MRI.replaceRegWith(Inst.getOperand(0).getReg(), DestReg);
5874       addUsersToMoveToVALUWorklist(DestReg, MRI, Worklist);
5875       Inst.eraseFromParent();
5876     }
5877       continue;
5878     case AMDGPU::S_UADDO_PSEUDO:
5879     case AMDGPU::S_USUBO_PSEUDO: {
5880       const DebugLoc &DL = Inst.getDebugLoc();
5881       MachineOperand &Dest0 = Inst.getOperand(0);
5882       MachineOperand &Dest1 = Inst.getOperand(1);
5883       MachineOperand &Src0 = Inst.getOperand(2);
5884       MachineOperand &Src1 = Inst.getOperand(3);
5885 
5886       unsigned Opc = (Inst.getOpcode() == AMDGPU::S_UADDO_PSEUDO)
5887                          ? AMDGPU::V_ADD_CO_U32_e64
5888                          : AMDGPU::V_SUB_CO_U32_e64;
5889       const TargetRegisterClass *NewRC =
5890           RI.getEquivalentVGPRClass(MRI.getRegClass(Dest0.getReg()));
5891       Register DestReg = MRI.createVirtualRegister(NewRC);
5892       MachineInstr *NewInstr = BuildMI(*MBB, &Inst, DL, get(Opc), DestReg)
5893                                    .addReg(Dest1.getReg(), RegState::Define)
5894                                    .add(Src0)
5895                                    .add(Src1)
5896                                    .addImm(0); // clamp bit
5897 
5898       CreatedBBTmp = legalizeOperands(*NewInstr, MDT);
5899       if (CreatedBBTmp && TopInst.getParent() == CreatedBBTmp)
5900         CreatedBB = CreatedBBTmp;
5901 
5902       MRI.replaceRegWith(Dest0.getReg(), DestReg);
5903       addUsersToMoveToVALUWorklist(NewInstr->getOperand(0).getReg(), MRI,
5904                                    Worklist);
5905       Inst.eraseFromParent();
5906     }
5907       continue;
5908 
5909     case AMDGPU::S_CSELECT_B32:
5910     case AMDGPU::S_CSELECT_B64:
5911       lowerSelect(Worklist, Inst, MDT);
5912       Inst.eraseFromParent();
5913       continue;
5914     }
5915 
5916     if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
5917       // We cannot move this instruction to the VALU, so we should try to
5918       // legalize its operands instead.
5919       CreatedBBTmp = legalizeOperands(Inst, MDT);
5920       if (CreatedBBTmp && TopInst.getParent() == CreatedBBTmp)
5921         CreatedBB = CreatedBBTmp;
5922       continue;
5923     }
5924 
5925     // Use the new VALU Opcode.
5926     const MCInstrDesc &NewDesc = get(NewOpcode);
5927     Inst.setDesc(NewDesc);
5928 
5929     // Remove any references to SCC. Vector instructions can't read from it, and
5930     // We're just about to add the implicit use / defs of VCC, and we don't want
5931     // both.
5932     for (unsigned i = Inst.getNumOperands() - 1; i > 0; --i) {
5933       MachineOperand &Op = Inst.getOperand(i);
5934       if (Op.isReg() && Op.getReg() == AMDGPU::SCC) {
5935         // Only propagate through live-def of SCC.
5936         if (Op.isDef() && !Op.isDead())
5937           addSCCDefUsersToVALUWorklist(Op, Inst, Worklist);
5938         Inst.RemoveOperand(i);
5939       }
5940     }
5941 
5942     if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
5943       // We are converting these to a BFE, so we need to add the missing
5944       // operands for the size and offset.
5945       unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
5946       Inst.addOperand(MachineOperand::CreateImm(0));
5947       Inst.addOperand(MachineOperand::CreateImm(Size));
5948 
5949     } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
5950       // The VALU version adds the second operand to the result, so insert an
5951       // extra 0 operand.
5952       Inst.addOperand(MachineOperand::CreateImm(0));
5953     }
5954 
5955     Inst.addImplicitDefUseOperands(*Inst.getParent()->getParent());
5956     fixImplicitOperands(Inst);
5957 
5958     if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
5959       const MachineOperand &OffsetWidthOp = Inst.getOperand(2);
5960       // If we need to move this to VGPRs, we need to unpack the second operand
5961       // back into the 2 separate ones for bit offset and width.
5962       assert(OffsetWidthOp.isImm() &&
5963              "Scalar BFE is only implemented for constant width and offset");
5964       uint32_t Imm = OffsetWidthOp.getImm();
5965 
5966       uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
5967       uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
5968       Inst.RemoveOperand(2);                     // Remove old immediate.
5969       Inst.addOperand(MachineOperand::CreateImm(Offset));
5970       Inst.addOperand(MachineOperand::CreateImm(BitWidth));
5971     }
5972 
5973     bool HasDst = Inst.getOperand(0).isReg() && Inst.getOperand(0).isDef();
5974     unsigned NewDstReg = AMDGPU::NoRegister;
5975     if (HasDst) {
5976       Register DstReg = Inst.getOperand(0).getReg();
5977       if (DstReg.isPhysical())
5978         continue;
5979 
5980       // Update the destination register class.
5981       const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(Inst);
5982       if (!NewDstRC)
5983         continue;
5984 
5985       if (Inst.isCopy() && Inst.getOperand(1).getReg().isVirtual() &&
5986           NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) {
5987         // Instead of creating a copy where src and dst are the same register
5988         // class, we just replace all uses of dst with src.  These kinds of
5989         // copies interfere with the heuristics MachineSink uses to decide
5990         // whether or not to split a critical edge.  Since the pass assumes
5991         // that copies will end up as machine instructions and not be
5992         // eliminated.
5993         addUsersToMoveToVALUWorklist(DstReg, MRI, Worklist);
5994         MRI.replaceRegWith(DstReg, Inst.getOperand(1).getReg());
5995         MRI.clearKillFlags(Inst.getOperand(1).getReg());
5996         Inst.getOperand(0).setReg(DstReg);
5997 
5998         // Make sure we don't leave around a dead VGPR->SGPR copy. Normally
5999         // these are deleted later, but at -O0 it would leave a suspicious
6000         // looking illegal copy of an undef register.
6001         for (unsigned I = Inst.getNumOperands() - 1; I != 0; --I)
6002           Inst.RemoveOperand(I);
6003         Inst.setDesc(get(AMDGPU::IMPLICIT_DEF));
6004         continue;
6005       }
6006 
6007       NewDstReg = MRI.createVirtualRegister(NewDstRC);
6008       MRI.replaceRegWith(DstReg, NewDstReg);
6009     }
6010 
6011     // Legalize the operands
6012     CreatedBBTmp = legalizeOperands(Inst, MDT);
6013     if (CreatedBBTmp && TopInst.getParent() == CreatedBBTmp)
6014       CreatedBB = CreatedBBTmp;
6015 
6016     if (HasDst)
6017      addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
6018   }
6019   return CreatedBB;
6020 }
6021 
6022 // Add/sub require special handling to deal with carry outs.
6023 std::pair<bool, MachineBasicBlock *>
6024 SIInstrInfo::moveScalarAddSub(SetVectorType &Worklist, MachineInstr &Inst,
6025                               MachineDominatorTree *MDT) const {
6026   if (ST.hasAddNoCarry()) {
6027     // Assume there is no user of scc since we don't select this in that case.
6028     // Since scc isn't used, it doesn't really matter if the i32 or u32 variant
6029     // is used.
6030 
6031     MachineBasicBlock &MBB = *Inst.getParent();
6032     MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
6033 
6034     Register OldDstReg = Inst.getOperand(0).getReg();
6035     Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6036 
6037     unsigned Opc = Inst.getOpcode();
6038     assert(Opc == AMDGPU::S_ADD_I32 || Opc == AMDGPU::S_SUB_I32);
6039 
6040     unsigned NewOpc = Opc == AMDGPU::S_ADD_I32 ?
6041       AMDGPU::V_ADD_U32_e64 : AMDGPU::V_SUB_U32_e64;
6042 
6043     assert(Inst.getOperand(3).getReg() == AMDGPU::SCC);
6044     Inst.RemoveOperand(3);
6045 
6046     Inst.setDesc(get(NewOpc));
6047     Inst.addOperand(MachineOperand::CreateImm(0)); // clamp bit
6048     Inst.addImplicitDefUseOperands(*MBB.getParent());
6049     MRI.replaceRegWith(OldDstReg, ResultReg);
6050     MachineBasicBlock *NewBB = legalizeOperands(Inst, MDT);
6051 
6052     addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
6053     return std::make_pair(true, NewBB);
6054   }
6055 
6056   return std::make_pair(false, nullptr);
6057 }
6058 
6059 void SIInstrInfo::lowerSelect(SetVectorType &Worklist, MachineInstr &Inst,
6060                               MachineDominatorTree *MDT) const {
6061 
6062   MachineBasicBlock &MBB = *Inst.getParent();
6063   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
6064   MachineBasicBlock::iterator MII = Inst;
6065   DebugLoc DL = Inst.getDebugLoc();
6066 
6067   MachineOperand &Dest = Inst.getOperand(0);
6068   MachineOperand &Src0 = Inst.getOperand(1);
6069   MachineOperand &Src1 = Inst.getOperand(2);
6070   MachineOperand &Cond = Inst.getOperand(3);
6071 
6072   Register SCCSource = Cond.getReg();
6073   // Find SCC def, and if that is a copy (SCC = COPY reg) then use reg instead.
6074   if (!Cond.isUndef()) {
6075     for (MachineInstr &CandI :
6076          make_range(std::next(MachineBasicBlock::reverse_iterator(Inst)),
6077                     Inst.getParent()->rend())) {
6078       if (CandI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) !=
6079           -1) {
6080         if (CandI.isCopy() && CandI.getOperand(0).getReg() == AMDGPU::SCC) {
6081           SCCSource = CandI.getOperand(1).getReg();
6082         }
6083         break;
6084       }
6085     }
6086   }
6087 
6088   // If this is a trivial select where the condition is effectively not SCC
6089   // (SCCSource is a source of copy to SCC), then the select is semantically
6090   // equivalent to copying SCCSource. Hence, there is no need to create
6091   // V_CNDMASK, we can just use that and bail out.
6092   if ((SCCSource != AMDGPU::SCC) && Src0.isImm() && (Src0.getImm() == -1) &&
6093       Src1.isImm() && (Src1.getImm() == 0)) {
6094     MRI.replaceRegWith(Dest.getReg(), SCCSource);
6095     return;
6096   }
6097 
6098   const TargetRegisterClass *TC = ST.getWavefrontSize() == 64
6099                                       ? &AMDGPU::SReg_64_XEXECRegClass
6100                                       : &AMDGPU::SReg_32_XM0_XEXECRegClass;
6101   Register CopySCC = MRI.createVirtualRegister(TC);
6102 
6103   if (SCCSource == AMDGPU::SCC) {
6104     // Insert a trivial select instead of creating a copy, because a copy from
6105     // SCC would semantically mean just copying a single bit, but we may need
6106     // the result to be a vector condition mask that needs preserving.
6107     unsigned Opcode = (ST.getWavefrontSize() == 64) ? AMDGPU::S_CSELECT_B64
6108                                                     : AMDGPU::S_CSELECT_B32;
6109     auto NewSelect =
6110         BuildMI(MBB, MII, DL, get(Opcode), CopySCC).addImm(-1).addImm(0);
6111     NewSelect->getOperand(3).setIsUndef(Cond.isUndef());
6112   } else {
6113     BuildMI(MBB, MII, DL, get(AMDGPU::COPY), CopySCC).addReg(SCCSource);
6114   }
6115 
6116   Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6117 
6118   auto UpdatedInst =
6119       BuildMI(MBB, MII, DL, get(AMDGPU::V_CNDMASK_B32_e64), ResultReg)
6120           .addImm(0)
6121           .add(Src1) // False
6122           .addImm(0)
6123           .add(Src0) // True
6124           .addReg(CopySCC);
6125 
6126   MRI.replaceRegWith(Dest.getReg(), ResultReg);
6127   legalizeOperands(*UpdatedInst, MDT);
6128   addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
6129 }
6130 
6131 void SIInstrInfo::lowerScalarAbs(SetVectorType &Worklist,
6132                                  MachineInstr &Inst) const {
6133   MachineBasicBlock &MBB = *Inst.getParent();
6134   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
6135   MachineBasicBlock::iterator MII = Inst;
6136   DebugLoc DL = Inst.getDebugLoc();
6137 
6138   MachineOperand &Dest = Inst.getOperand(0);
6139   MachineOperand &Src = Inst.getOperand(1);
6140   Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6141   Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6142 
6143   unsigned SubOp = ST.hasAddNoCarry() ?
6144     AMDGPU::V_SUB_U32_e32 : AMDGPU::V_SUB_CO_U32_e32;
6145 
6146   BuildMI(MBB, MII, DL, get(SubOp), TmpReg)
6147     .addImm(0)
6148     .addReg(Src.getReg());
6149 
6150   BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg)
6151     .addReg(Src.getReg())
6152     .addReg(TmpReg);
6153 
6154   MRI.replaceRegWith(Dest.getReg(), ResultReg);
6155   addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
6156 }
6157 
6158 void SIInstrInfo::lowerScalarXnor(SetVectorType &Worklist,
6159                                   MachineInstr &Inst) const {
6160   MachineBasicBlock &MBB = *Inst.getParent();
6161   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
6162   MachineBasicBlock::iterator MII = Inst;
6163   const DebugLoc &DL = Inst.getDebugLoc();
6164 
6165   MachineOperand &Dest = Inst.getOperand(0);
6166   MachineOperand &Src0 = Inst.getOperand(1);
6167   MachineOperand &Src1 = Inst.getOperand(2);
6168 
6169   if (ST.hasDLInsts()) {
6170     Register NewDest = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6171     legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src0, MRI, DL);
6172     legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src1, MRI, DL);
6173 
6174     BuildMI(MBB, MII, DL, get(AMDGPU::V_XNOR_B32_e64), NewDest)
6175       .add(Src0)
6176       .add(Src1);
6177 
6178     MRI.replaceRegWith(Dest.getReg(), NewDest);
6179     addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
6180   } else {
6181     // Using the identity !(x ^ y) == (!x ^ y) == (x ^ !y), we can
6182     // invert either source and then perform the XOR. If either source is a
6183     // scalar register, then we can leave the inversion on the scalar unit to
6184     // acheive a better distrubution of scalar and vector instructions.
6185     bool Src0IsSGPR = Src0.isReg() &&
6186                       RI.isSGPRClass(MRI.getRegClass(Src0.getReg()));
6187     bool Src1IsSGPR = Src1.isReg() &&
6188                       RI.isSGPRClass(MRI.getRegClass(Src1.getReg()));
6189     MachineInstr *Xor;
6190     Register Temp = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
6191     Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
6192 
6193     // Build a pair of scalar instructions and add them to the work list.
6194     // The next iteration over the work list will lower these to the vector
6195     // unit as necessary.
6196     if (Src0IsSGPR) {
6197       BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src0);
6198       Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest)
6199       .addReg(Temp)
6200       .add(Src1);
6201     } else if (Src1IsSGPR) {
6202       BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src1);
6203       Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest)
6204       .add(Src0)
6205       .addReg(Temp);
6206     } else {
6207       Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), Temp)
6208         .add(Src0)
6209         .add(Src1);
6210       MachineInstr *Not =
6211           BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest).addReg(Temp);
6212       Worklist.insert(Not);
6213     }
6214 
6215     MRI.replaceRegWith(Dest.getReg(), NewDest);
6216 
6217     Worklist.insert(Xor);
6218 
6219     addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
6220   }
6221 }
6222 
6223 void SIInstrInfo::splitScalarNotBinop(SetVectorType &Worklist,
6224                                       MachineInstr &Inst,
6225                                       unsigned Opcode) const {
6226   MachineBasicBlock &MBB = *Inst.getParent();
6227   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
6228   MachineBasicBlock::iterator MII = Inst;
6229   const DebugLoc &DL = Inst.getDebugLoc();
6230 
6231   MachineOperand &Dest = Inst.getOperand(0);
6232   MachineOperand &Src0 = Inst.getOperand(1);
6233   MachineOperand &Src1 = Inst.getOperand(2);
6234 
6235   Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
6236   Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
6237 
6238   MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), Interm)
6239     .add(Src0)
6240     .add(Src1);
6241 
6242   MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest)
6243     .addReg(Interm);
6244 
6245   Worklist.insert(&Op);
6246   Worklist.insert(&Not);
6247 
6248   MRI.replaceRegWith(Dest.getReg(), NewDest);
6249   addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
6250 }
6251 
6252 void SIInstrInfo::splitScalarBinOpN2(SetVectorType& Worklist,
6253                                      MachineInstr &Inst,
6254                                      unsigned Opcode) const {
6255   MachineBasicBlock &MBB = *Inst.getParent();
6256   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
6257   MachineBasicBlock::iterator MII = Inst;
6258   const DebugLoc &DL = Inst.getDebugLoc();
6259 
6260   MachineOperand &Dest = Inst.getOperand(0);
6261   MachineOperand &Src0 = Inst.getOperand(1);
6262   MachineOperand &Src1 = Inst.getOperand(2);
6263 
6264   Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
6265   Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
6266 
6267   MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Interm)
6268     .add(Src1);
6269 
6270   MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), NewDest)
6271     .add(Src0)
6272     .addReg(Interm);
6273 
6274   Worklist.insert(&Not);
6275   Worklist.insert(&Op);
6276 
6277   MRI.replaceRegWith(Dest.getReg(), NewDest);
6278   addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
6279 }
6280 
6281 void SIInstrInfo::splitScalar64BitUnaryOp(
6282     SetVectorType &Worklist, MachineInstr &Inst,
6283     unsigned Opcode, bool Swap) const {
6284   MachineBasicBlock &MBB = *Inst.getParent();
6285   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
6286 
6287   MachineOperand &Dest = Inst.getOperand(0);
6288   MachineOperand &Src0 = Inst.getOperand(1);
6289   DebugLoc DL = Inst.getDebugLoc();
6290 
6291   MachineBasicBlock::iterator MII = Inst;
6292 
6293   const MCInstrDesc &InstDesc = get(Opcode);
6294   const TargetRegisterClass *Src0RC = Src0.isReg() ?
6295     MRI.getRegClass(Src0.getReg()) :
6296     &AMDGPU::SGPR_32RegClass;
6297 
6298   const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
6299 
6300   MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
6301                                                        AMDGPU::sub0, Src0SubRC);
6302 
6303   const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
6304   const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
6305   const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
6306 
6307   Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
6308   MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0).add(SrcReg0Sub0);
6309 
6310   MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
6311                                                        AMDGPU::sub1, Src0SubRC);
6312 
6313   Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
6314   MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1).add(SrcReg0Sub1);
6315 
6316   if (Swap)
6317     std::swap(DestSub0, DestSub1);
6318 
6319   Register FullDestReg = MRI.createVirtualRegister(NewDestRC);
6320   BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
6321     .addReg(DestSub0)
6322     .addImm(AMDGPU::sub0)
6323     .addReg(DestSub1)
6324     .addImm(AMDGPU::sub1);
6325 
6326   MRI.replaceRegWith(Dest.getReg(), FullDestReg);
6327 
6328   Worklist.insert(&LoHalf);
6329   Worklist.insert(&HiHalf);
6330 
6331   // We don't need to legalizeOperands here because for a single operand, src0
6332   // will support any kind of input.
6333 
6334   // Move all users of this moved value.
6335   addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
6336 }
6337 
6338 void SIInstrInfo::splitScalar64BitAddSub(SetVectorType &Worklist,
6339                                          MachineInstr &Inst,
6340                                          MachineDominatorTree *MDT) const {
6341   bool IsAdd = (Inst.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);
6342 
6343   MachineBasicBlock &MBB = *Inst.getParent();
6344   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
6345   const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
6346 
6347   Register FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
6348   Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6349   Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6350 
6351   Register CarryReg = MRI.createVirtualRegister(CarryRC);
6352   Register DeadCarryReg = MRI.createVirtualRegister(CarryRC);
6353 
6354   MachineOperand &Dest = Inst.getOperand(0);
6355   MachineOperand &Src0 = Inst.getOperand(1);
6356   MachineOperand &Src1 = Inst.getOperand(2);
6357   const DebugLoc &DL = Inst.getDebugLoc();
6358   MachineBasicBlock::iterator MII = Inst;
6359 
6360   const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg());
6361   const TargetRegisterClass *Src1RC = MRI.getRegClass(Src1.getReg());
6362   const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
6363   const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);
6364 
6365   MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
6366                                                        AMDGPU::sub0, Src0SubRC);
6367   MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
6368                                                        AMDGPU::sub0, Src1SubRC);
6369 
6370 
6371   MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
6372                                                        AMDGPU::sub1, Src0SubRC);
6373   MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
6374                                                        AMDGPU::sub1, Src1SubRC);
6375 
6376   unsigned LoOpc = IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
6377   MachineInstr *LoHalf =
6378     BuildMI(MBB, MII, DL, get(LoOpc), DestSub0)
6379     .addReg(CarryReg, RegState::Define)
6380     .add(SrcReg0Sub0)
6381     .add(SrcReg1Sub0)
6382     .addImm(0); // clamp bit
6383 
6384   unsigned HiOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
6385   MachineInstr *HiHalf =
6386     BuildMI(MBB, MII, DL, get(HiOpc), DestSub1)
6387     .addReg(DeadCarryReg, RegState::Define | RegState::Dead)
6388     .add(SrcReg0Sub1)
6389     .add(SrcReg1Sub1)
6390     .addReg(CarryReg, RegState::Kill)
6391     .addImm(0); // clamp bit
6392 
6393   BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
6394     .addReg(DestSub0)
6395     .addImm(AMDGPU::sub0)
6396     .addReg(DestSub1)
6397     .addImm(AMDGPU::sub1);
6398 
6399   MRI.replaceRegWith(Dest.getReg(), FullDestReg);
6400 
6401   // Try to legalize the operands in case we need to swap the order to keep it
6402   // valid.
6403   legalizeOperands(*LoHalf, MDT);
6404   legalizeOperands(*HiHalf, MDT);
6405 
6406   // Move all users of this moved vlaue.
6407   addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
6408 }
6409 
6410 void SIInstrInfo::splitScalar64BitBinaryOp(SetVectorType &Worklist,
6411                                            MachineInstr &Inst, unsigned Opcode,
6412                                            MachineDominatorTree *MDT) const {
6413   MachineBasicBlock &MBB = *Inst.getParent();
6414   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
6415 
6416   MachineOperand &Dest = Inst.getOperand(0);
6417   MachineOperand &Src0 = Inst.getOperand(1);
6418   MachineOperand &Src1 = Inst.getOperand(2);
6419   DebugLoc DL = Inst.getDebugLoc();
6420 
6421   MachineBasicBlock::iterator MII = Inst;
6422 
6423   const MCInstrDesc &InstDesc = get(Opcode);
6424   const TargetRegisterClass *Src0RC = Src0.isReg() ?
6425     MRI.getRegClass(Src0.getReg()) :
6426     &AMDGPU::SGPR_32RegClass;
6427 
6428   const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
6429   const TargetRegisterClass *Src1RC = Src1.isReg() ?
6430     MRI.getRegClass(Src1.getReg()) :
6431     &AMDGPU::SGPR_32RegClass;
6432 
6433   const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);
6434 
6435   MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
6436                                                        AMDGPU::sub0, Src0SubRC);
6437   MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
6438                                                        AMDGPU::sub0, Src1SubRC);
6439   MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
6440                                                        AMDGPU::sub1, Src0SubRC);
6441   MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
6442                                                        AMDGPU::sub1, Src1SubRC);
6443 
6444   const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
6445   const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
6446   const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
6447 
6448   Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
6449   MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0)
6450                               .add(SrcReg0Sub0)
6451                               .add(SrcReg1Sub0);
6452 
6453   Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
6454   MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1)
6455                               .add(SrcReg0Sub1)
6456                               .add(SrcReg1Sub1);
6457 
6458   Register FullDestReg = MRI.createVirtualRegister(NewDestRC);
6459   BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
6460     .addReg(DestSub0)
6461     .addImm(AMDGPU::sub0)
6462     .addReg(DestSub1)
6463     .addImm(AMDGPU::sub1);
6464 
6465   MRI.replaceRegWith(Dest.getReg(), FullDestReg);
6466 
6467   Worklist.insert(&LoHalf);
6468   Worklist.insert(&HiHalf);
6469 
6470   // Move all users of this moved vlaue.
6471   addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
6472 }
6473 
6474 void SIInstrInfo::splitScalar64BitXnor(SetVectorType &Worklist,
6475                                        MachineInstr &Inst,
6476                                        MachineDominatorTree *MDT) const {
6477   MachineBasicBlock &MBB = *Inst.getParent();
6478   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
6479 
6480   MachineOperand &Dest = Inst.getOperand(0);
6481   MachineOperand &Src0 = Inst.getOperand(1);
6482   MachineOperand &Src1 = Inst.getOperand(2);
6483   const DebugLoc &DL = Inst.getDebugLoc();
6484 
6485   MachineBasicBlock::iterator MII = Inst;
6486 
6487   const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
6488 
6489   Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
6490 
6491   MachineOperand* Op0;
6492   MachineOperand* Op1;
6493 
6494   if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) {
6495     Op0 = &Src0;
6496     Op1 = &Src1;
6497   } else {
6498     Op0 = &Src1;
6499     Op1 = &Src0;
6500   }
6501 
6502   BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B64), Interm)
6503     .add(*Op0);
6504 
6505   Register NewDest = MRI.createVirtualRegister(DestRC);
6506 
6507   MachineInstr &Xor = *BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B64), NewDest)
6508     .addReg(Interm)
6509     .add(*Op1);
6510 
6511   MRI.replaceRegWith(Dest.getReg(), NewDest);
6512 
6513   Worklist.insert(&Xor);
6514 }
6515 
6516 void SIInstrInfo::splitScalar64BitBCNT(
6517     SetVectorType &Worklist, MachineInstr &Inst) const {
6518   MachineBasicBlock &MBB = *Inst.getParent();
6519   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
6520 
6521   MachineBasicBlock::iterator MII = Inst;
6522   const DebugLoc &DL = Inst.getDebugLoc();
6523 
6524   MachineOperand &Dest = Inst.getOperand(0);
6525   MachineOperand &Src = Inst.getOperand(1);
6526 
6527   const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64);
6528   const TargetRegisterClass *SrcRC = Src.isReg() ?
6529     MRI.getRegClass(Src.getReg()) :
6530     &AMDGPU::SGPR_32RegClass;
6531 
6532   Register MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6533   Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6534 
6535   const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0);
6536 
6537   MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
6538                                                       AMDGPU::sub0, SrcSubRC);
6539   MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
6540                                                       AMDGPU::sub1, SrcSubRC);
6541 
6542   BuildMI(MBB, MII, DL, InstDesc, MidReg).add(SrcRegSub0).addImm(0);
6543 
6544   BuildMI(MBB, MII, DL, InstDesc, ResultReg).add(SrcRegSub1).addReg(MidReg);
6545 
6546   MRI.replaceRegWith(Dest.getReg(), ResultReg);
6547 
6548   // We don't need to legalize operands here. src0 for etiher instruction can be
6549   // an SGPR, and the second input is unused or determined here.
6550   addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
6551 }
6552 
6553 void SIInstrInfo::splitScalar64BitBFE(SetVectorType &Worklist,
6554                                       MachineInstr &Inst) const {
6555   MachineBasicBlock &MBB = *Inst.getParent();
6556   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
6557   MachineBasicBlock::iterator MII = Inst;
6558   const DebugLoc &DL = Inst.getDebugLoc();
6559 
6560   MachineOperand &Dest = Inst.getOperand(0);
6561   uint32_t Imm = Inst.getOperand(2).getImm();
6562   uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
6563   uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
6564 
6565   (void) Offset;
6566 
6567   // Only sext_inreg cases handled.
6568   assert(Inst.getOpcode() == AMDGPU::S_BFE_I64 && BitWidth <= 32 &&
6569          Offset == 0 && "Not implemented");
6570 
6571   if (BitWidth < 32) {
6572     Register MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6573     Register MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6574     Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
6575 
6576     BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32_e64), MidRegLo)
6577         .addReg(Inst.getOperand(1).getReg(), 0, AMDGPU::sub0)
6578         .addImm(0)
6579         .addImm(BitWidth);
6580 
6581     BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi)
6582       .addImm(31)
6583       .addReg(MidRegLo);
6584 
6585     BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
6586       .addReg(MidRegLo)
6587       .addImm(AMDGPU::sub0)
6588       .addReg(MidRegHi)
6589       .addImm(AMDGPU::sub1);
6590 
6591     MRI.replaceRegWith(Dest.getReg(), ResultReg);
6592     addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
6593     return;
6594   }
6595 
6596   MachineOperand &Src = Inst.getOperand(1);
6597   Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6598   Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
6599 
6600   BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg)
6601     .addImm(31)
6602     .addReg(Src.getReg(), 0, AMDGPU::sub0);
6603 
6604   BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
6605     .addReg(Src.getReg(), 0, AMDGPU::sub0)
6606     .addImm(AMDGPU::sub0)
6607     .addReg(TmpReg)
6608     .addImm(AMDGPU::sub1);
6609 
6610   MRI.replaceRegWith(Dest.getReg(), ResultReg);
6611   addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
6612 }
6613 
6614 void SIInstrInfo::addUsersToMoveToVALUWorklist(
6615   Register DstReg,
6616   MachineRegisterInfo &MRI,
6617   SetVectorType &Worklist) const {
6618   for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg),
6619          E = MRI.use_end(); I != E;) {
6620     MachineInstr &UseMI = *I->getParent();
6621 
6622     unsigned OpNo = 0;
6623 
6624     switch (UseMI.getOpcode()) {
6625     case AMDGPU::COPY:
6626     case AMDGPU::WQM:
6627     case AMDGPU::SOFT_WQM:
6628     case AMDGPU::STRICT_WWM:
6629     case AMDGPU::STRICT_WQM:
6630     case AMDGPU::REG_SEQUENCE:
6631     case AMDGPU::PHI:
6632     case AMDGPU::INSERT_SUBREG:
6633       break;
6634     default:
6635       OpNo = I.getOperandNo();
6636       break;
6637     }
6638 
6639     if (!RI.hasVectorRegisters(getOpRegClass(UseMI, OpNo))) {
6640       Worklist.insert(&UseMI);
6641 
6642       do {
6643         ++I;
6644       } while (I != E && I->getParent() == &UseMI);
6645     } else {
6646       ++I;
6647     }
6648   }
6649 }
6650 
6651 void SIInstrInfo::movePackToVALU(SetVectorType &Worklist,
6652                                  MachineRegisterInfo &MRI,
6653                                  MachineInstr &Inst) const {
6654   Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6655   MachineBasicBlock *MBB = Inst.getParent();
6656   MachineOperand &Src0 = Inst.getOperand(1);
6657   MachineOperand &Src1 = Inst.getOperand(2);
6658   const DebugLoc &DL = Inst.getDebugLoc();
6659 
6660   switch (Inst.getOpcode()) {
6661   case AMDGPU::S_PACK_LL_B32_B16: {
6662     Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6663     Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6664 
6665     // FIXME: Can do a lot better if we know the high bits of src0 or src1 are
6666     // 0.
6667     BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg)
6668       .addImm(0xffff);
6669 
6670     BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_B32_e64), TmpReg)
6671       .addReg(ImmReg, RegState::Kill)
6672       .add(Src0);
6673 
6674     BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHL_OR_B32_e64), ResultReg)
6675       .add(Src1)
6676       .addImm(16)
6677       .addReg(TmpReg, RegState::Kill);
6678     break;
6679   }
6680   case AMDGPU::S_PACK_LH_B32_B16: {
6681     Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6682     BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg)
6683       .addImm(0xffff);
6684     BuildMI(*MBB, Inst, DL, get(AMDGPU::V_BFI_B32_e64), ResultReg)
6685       .addReg(ImmReg, RegState::Kill)
6686       .add(Src0)
6687       .add(Src1);
6688     break;
6689   }
6690   case AMDGPU::S_PACK_HH_B32_B16: {
6691     Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6692     Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6693     BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHRREV_B32_e64), TmpReg)
6694       .addImm(16)
6695       .add(Src0);
6696     BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg)
6697       .addImm(0xffff0000);
6698     BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_OR_B32_e64), ResultReg)
6699       .add(Src1)
6700       .addReg(ImmReg, RegState::Kill)
6701       .addReg(TmpReg, RegState::Kill);
6702     break;
6703   }
6704   default:
6705     llvm_unreachable("unhandled s_pack_* instruction");
6706   }
6707 
6708   MachineOperand &Dest = Inst.getOperand(0);
6709   MRI.replaceRegWith(Dest.getReg(), ResultReg);
6710   addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
6711 }
6712 
6713 void SIInstrInfo::addSCCDefUsersToVALUWorklist(MachineOperand &Op,
6714                                                MachineInstr &SCCDefInst,
6715                                                SetVectorType &Worklist) const {
6716   bool SCCUsedImplicitly = false;
6717 
6718   // Ensure that def inst defines SCC, which is still live.
6719   assert(Op.isReg() && Op.getReg() == AMDGPU::SCC && Op.isDef() &&
6720          !Op.isDead() && Op.getParent() == &SCCDefInst);
6721   SmallVector<MachineInstr *, 4> CopyToDelete;
6722   // This assumes that all the users of SCC are in the same block
6723   // as the SCC def.
6724   for (MachineInstr &MI : // Skip the def inst itself.
6725        make_range(std::next(MachineBasicBlock::iterator(SCCDefInst)),
6726                   SCCDefInst.getParent()->end())) {
6727     // Check if SCC is used first.
6728     if (MI.findRegisterUseOperandIdx(AMDGPU::SCC, false, &RI) != -1) {
6729       if (MI.isCopy()) {
6730         MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
6731         Register DestReg = MI.getOperand(0).getReg();
6732 
6733         for (auto &User : MRI.use_nodbg_instructions(DestReg)) {
6734           if ((User.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO) ||
6735               (User.getOpcode() == AMDGPU::S_SUB_CO_PSEUDO)) {
6736             User.getOperand(4).setReg(RI.getVCC());
6737             Worklist.insert(&User);
6738           } else if (User.getOpcode() == AMDGPU::V_CNDMASK_B32_e64) {
6739             User.getOperand(5).setReg(RI.getVCC());
6740             // No need to add to Worklist.
6741           }
6742         }
6743         CopyToDelete.push_back(&MI);
6744       } else {
6745         if (MI.getOpcode() == AMDGPU::S_CSELECT_B32 ||
6746             MI.getOpcode() == AMDGPU::S_CSELECT_B64) {
6747           // This is an implicit use of SCC and it is really expected by
6748           // the SCC users to handle.
6749           // We cannot preserve the edge to the user so add the explicit
6750           // copy: SCC = COPY VCC.
6751           // The copy will be cleaned up during the processing of the user
6752           // in lowerSelect.
6753           SCCUsedImplicitly = true;
6754         }
6755 
6756         Worklist.insert(&MI);
6757       }
6758     }
6759     // Exit if we find another SCC def.
6760     if (MI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != -1)
6761       break;
6762   }
6763   for (auto &Copy : CopyToDelete)
6764     Copy->eraseFromParent();
6765 
6766   if (SCCUsedImplicitly) {
6767     BuildMI(*SCCDefInst.getParent(), std::next(SCCDefInst.getIterator()),
6768             SCCDefInst.getDebugLoc(), get(AMDGPU::COPY), AMDGPU::SCC)
6769         .addReg(RI.getVCC());
6770   }
6771 }
6772 
6773 const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass(
6774   const MachineInstr &Inst) const {
6775   const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0);
6776 
6777   switch (Inst.getOpcode()) {
6778   // For target instructions, getOpRegClass just returns the virtual register
6779   // class associated with the operand, so we need to find an equivalent VGPR
6780   // register class in order to move the instruction to the VALU.
6781   case AMDGPU::COPY:
6782   case AMDGPU::PHI:
6783   case AMDGPU::REG_SEQUENCE:
6784   case AMDGPU::INSERT_SUBREG:
6785   case AMDGPU::WQM:
6786   case AMDGPU::SOFT_WQM:
6787   case AMDGPU::STRICT_WWM:
6788   case AMDGPU::STRICT_WQM: {
6789     const TargetRegisterClass *SrcRC = getOpRegClass(Inst, 1);
6790     if (RI.hasAGPRs(SrcRC)) {
6791       if (RI.hasAGPRs(NewDstRC))
6792         return nullptr;
6793 
6794       switch (Inst.getOpcode()) {
6795       case AMDGPU::PHI:
6796       case AMDGPU::REG_SEQUENCE:
6797       case AMDGPU::INSERT_SUBREG:
6798         NewDstRC = RI.getEquivalentAGPRClass(NewDstRC);
6799         break;
6800       default:
6801         NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
6802       }
6803 
6804       if (!NewDstRC)
6805         return nullptr;
6806     } else {
6807       if (RI.hasVGPRs(NewDstRC) || NewDstRC == &AMDGPU::VReg_1RegClass)
6808         return nullptr;
6809 
6810       NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
6811       if (!NewDstRC)
6812         return nullptr;
6813     }
6814 
6815     return NewDstRC;
6816   }
6817   default:
6818     return NewDstRC;
6819   }
6820 }
6821 
6822 // Find the one SGPR operand we are allowed to use.
6823 Register SIInstrInfo::findUsedSGPR(const MachineInstr &MI,
6824                                    int OpIndices[3]) const {
6825   const MCInstrDesc &Desc = MI.getDesc();
6826 
6827   // Find the one SGPR operand we are allowed to use.
6828   //
6829   // First we need to consider the instruction's operand requirements before
6830   // legalizing. Some operands are required to be SGPRs, such as implicit uses
6831   // of VCC, but we are still bound by the constant bus requirement to only use
6832   // one.
6833   //
6834   // If the operand's class is an SGPR, we can never move it.
6835 
6836   Register SGPRReg = findImplicitSGPRRead(MI);
6837   if (SGPRReg != AMDGPU::NoRegister)
6838     return SGPRReg;
6839 
6840   Register UsedSGPRs[3] = { AMDGPU::NoRegister };
6841   const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
6842 
6843   for (unsigned i = 0; i < 3; ++i) {
6844     int Idx = OpIndices[i];
6845     if (Idx == -1)
6846       break;
6847 
6848     const MachineOperand &MO = MI.getOperand(Idx);
6849     if (!MO.isReg())
6850       continue;
6851 
6852     // Is this operand statically required to be an SGPR based on the operand
6853     // constraints?
6854     const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass);
6855     bool IsRequiredSGPR = RI.isSGPRClass(OpRC);
6856     if (IsRequiredSGPR)
6857       return MO.getReg();
6858 
6859     // If this could be a VGPR or an SGPR, Check the dynamic register class.
6860     Register Reg = MO.getReg();
6861     const TargetRegisterClass *RegRC = MRI.getRegClass(Reg);
6862     if (RI.isSGPRClass(RegRC))
6863       UsedSGPRs[i] = Reg;
6864   }
6865 
6866   // We don't have a required SGPR operand, so we have a bit more freedom in
6867   // selecting operands to move.
6868 
6869   // Try to select the most used SGPR. If an SGPR is equal to one of the
6870   // others, we choose that.
6871   //
6872   // e.g.
6873   // V_FMA_F32 v0, s0, s0, s0 -> No moves
6874   // V_FMA_F32 v0, s0, s1, s0 -> Move s1
6875 
6876   // TODO: If some of the operands are 64-bit SGPRs and some 32, we should
6877   // prefer those.
6878 
6879   if (UsedSGPRs[0] != AMDGPU::NoRegister) {
6880     if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2])
6881       SGPRReg = UsedSGPRs[0];
6882   }
6883 
6884   if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) {
6885     if (UsedSGPRs[1] == UsedSGPRs[2])
6886       SGPRReg = UsedSGPRs[1];
6887   }
6888 
6889   return SGPRReg;
6890 }
6891 
6892 MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI,
6893                                              unsigned OperandName) const {
6894   int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName);
6895   if (Idx == -1)
6896     return nullptr;
6897 
6898   return &MI.getOperand(Idx);
6899 }
6900 
6901 uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const {
6902   if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
6903     return (AMDGPU::MTBUFFormat::UFMT_32_FLOAT << 44) |
6904            (1ULL << 56) | // RESOURCE_LEVEL = 1
6905            (3ULL << 60); // OOB_SELECT = 3
6906   }
6907 
6908   uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT;
6909   if (ST.isAmdHsaOS()) {
6910     // Set ATC = 1. GFX9 doesn't have this bit.
6911     if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS)
6912       RsrcDataFormat |= (1ULL << 56);
6913 
6914     // Set MTYPE = 2 (MTYPE_UC = uncached). GFX9 doesn't have this.
6915     // BTW, it disables TC L2 and therefore decreases performance.
6916     if (ST.getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS)
6917       RsrcDataFormat |= (2ULL << 59);
6918   }
6919 
6920   return RsrcDataFormat;
6921 }
6922 
6923 uint64_t SIInstrInfo::getScratchRsrcWords23() const {
6924   uint64_t Rsrc23 = getDefaultRsrcDataFormat() |
6925                     AMDGPU::RSRC_TID_ENABLE |
6926                     0xffffffff; // Size;
6927 
6928   // GFX9 doesn't have ELEMENT_SIZE.
6929   if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
6930     uint64_t EltSizeValue = Log2_32(ST.getMaxPrivateElementSize(true)) - 1;
6931     Rsrc23 |= EltSizeValue << AMDGPU::RSRC_ELEMENT_SIZE_SHIFT;
6932   }
6933 
6934   // IndexStride = 64 / 32.
6935   uint64_t IndexStride = ST.getWavefrontSize() == 64 ? 3 : 2;
6936   Rsrc23 |= IndexStride << AMDGPU::RSRC_INDEX_STRIDE_SHIFT;
6937 
6938   // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17].
6939   // Clear them unless we want a huge stride.
6940   if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS &&
6941       ST.getGeneration() <= AMDGPUSubtarget::GFX9)
6942     Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT;
6943 
6944   return Rsrc23;
6945 }
6946 
6947 bool SIInstrInfo::isLowLatencyInstruction(const MachineInstr &MI) const {
6948   unsigned Opc = MI.getOpcode();
6949 
6950   return isSMRD(Opc);
6951 }
6952 
6953 bool SIInstrInfo::isHighLatencyDef(int Opc) const {
6954   return get(Opc).mayLoad() &&
6955          (isMUBUF(Opc) || isMTBUF(Opc) || isMIMG(Opc) || isFLAT(Opc));
6956 }
6957 
6958 unsigned SIInstrInfo::isStackAccess(const MachineInstr &MI,
6959                                     int &FrameIndex) const {
6960   const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
6961   if (!Addr || !Addr->isFI())
6962     return AMDGPU::NoRegister;
6963 
6964   assert(!MI.memoperands_empty() &&
6965          (*MI.memoperands_begin())->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS);
6966 
6967   FrameIndex = Addr->getIndex();
6968   return getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg();
6969 }
6970 
6971 unsigned SIInstrInfo::isSGPRStackAccess(const MachineInstr &MI,
6972                                         int &FrameIndex) const {
6973   const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::addr);
6974   assert(Addr && Addr->isFI());
6975   FrameIndex = Addr->getIndex();
6976   return getNamedOperand(MI, AMDGPU::OpName::data)->getReg();
6977 }
6978 
6979 unsigned SIInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
6980                                           int &FrameIndex) const {
6981   if (!MI.mayLoad())
6982     return AMDGPU::NoRegister;
6983 
6984   if (isMUBUF(MI) || isVGPRSpill(MI))
6985     return isStackAccess(MI, FrameIndex);
6986 
6987   if (isSGPRSpill(MI))
6988     return isSGPRStackAccess(MI, FrameIndex);
6989 
6990   return AMDGPU::NoRegister;
6991 }
6992 
6993 unsigned SIInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
6994                                          int &FrameIndex) const {
6995   if (!MI.mayStore())
6996     return AMDGPU::NoRegister;
6997 
6998   if (isMUBUF(MI) || isVGPRSpill(MI))
6999     return isStackAccess(MI, FrameIndex);
7000 
7001   if (isSGPRSpill(MI))
7002     return isSGPRStackAccess(MI, FrameIndex);
7003 
7004   return AMDGPU::NoRegister;
7005 }
7006 
7007 unsigned SIInstrInfo::getInstBundleSize(const MachineInstr &MI) const {
7008   unsigned Size = 0;
7009   MachineBasicBlock::const_instr_iterator I = MI.getIterator();
7010   MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
7011   while (++I != E && I->isInsideBundle()) {
7012     assert(!I->isBundle() && "No nested bundle!");
7013     Size += getInstSizeInBytes(*I);
7014   }
7015 
7016   return Size;
7017 }
7018 
7019 unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
7020   unsigned Opc = MI.getOpcode();
7021   const MCInstrDesc &Desc = getMCOpcodeFromPseudo(Opc);
7022   unsigned DescSize = Desc.getSize();
7023 
7024   // If we have a definitive size, we can use it. Otherwise we need to inspect
7025   // the operands to know the size.
7026   if (isFixedSize(MI)) {
7027     unsigned Size = DescSize;
7028 
7029     // If we hit the buggy offset, an extra nop will be inserted in MC so
7030     // estimate the worst case.
7031     if (MI.isBranch() && ST.hasOffset3fBug())
7032       Size += 4;
7033 
7034     return Size;
7035   }
7036 
7037   // 4-byte instructions may have a 32-bit literal encoded after them. Check
7038   // operands that coud ever be literals.
7039   if (isVALU(MI) || isSALU(MI)) {
7040     int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
7041     if (Src0Idx == -1)
7042       return DescSize; // No operands.
7043 
7044     if (isLiteralConstantLike(MI.getOperand(Src0Idx), Desc.OpInfo[Src0Idx]))
7045       return isVOP3(MI) ? 12 : (DescSize + 4);
7046 
7047     int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
7048     if (Src1Idx == -1)
7049       return DescSize;
7050 
7051     if (isLiteralConstantLike(MI.getOperand(Src1Idx), Desc.OpInfo[Src1Idx]))
7052       return isVOP3(MI) ? 12 : (DescSize + 4);
7053 
7054     int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
7055     if (Src2Idx == -1)
7056       return DescSize;
7057 
7058     if (isLiteralConstantLike(MI.getOperand(Src2Idx), Desc.OpInfo[Src2Idx]))
7059       return isVOP3(MI) ? 12 : (DescSize + 4);
7060 
7061     return DescSize;
7062   }
7063 
7064   // Check whether we have extra NSA words.
7065   if (isMIMG(MI)) {
7066     int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0);
7067     if (VAddr0Idx < 0)
7068       return 8;
7069 
7070     int RSrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc);
7071     return 8 + 4 * ((RSrcIdx - VAddr0Idx + 2) / 4);
7072   }
7073 
7074   switch (Opc) {
7075   case TargetOpcode::IMPLICIT_DEF:
7076   case TargetOpcode::KILL:
7077   case TargetOpcode::DBG_VALUE:
7078   case TargetOpcode::EH_LABEL:
7079     return 0;
7080   case TargetOpcode::BUNDLE:
7081     return getInstBundleSize(MI);
7082   case TargetOpcode::INLINEASM:
7083   case TargetOpcode::INLINEASM_BR: {
7084     const MachineFunction *MF = MI.getParent()->getParent();
7085     const char *AsmStr = MI.getOperand(0).getSymbolName();
7086     return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo(), &ST);
7087   }
7088   default:
7089     return DescSize;
7090   }
7091 }
7092 
7093 bool SIInstrInfo::mayAccessFlatAddressSpace(const MachineInstr &MI) const {
7094   if (!isFLAT(MI))
7095     return false;
7096 
7097   if (MI.memoperands_empty())
7098     return true;
7099 
7100   for (const MachineMemOperand *MMO : MI.memoperands()) {
7101     if (MMO->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS)
7102       return true;
7103   }
7104   return false;
7105 }
7106 
7107 bool SIInstrInfo::isNonUniformBranchInstr(MachineInstr &Branch) const {
7108   return Branch.getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO;
7109 }
7110 
7111 void SIInstrInfo::convertNonUniformIfRegion(MachineBasicBlock *IfEntry,
7112                                             MachineBasicBlock *IfEnd) const {
7113   MachineBasicBlock::iterator TI = IfEntry->getFirstTerminator();
7114   assert(TI != IfEntry->end());
7115 
7116   MachineInstr *Branch = &(*TI);
7117   MachineFunction *MF = IfEntry->getParent();
7118   MachineRegisterInfo &MRI = IfEntry->getParent()->getRegInfo();
7119 
7120   if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
7121     Register DstReg = MRI.createVirtualRegister(RI.getBoolRC());
7122     MachineInstr *SIIF =
7123         BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_IF), DstReg)
7124             .add(Branch->getOperand(0))
7125             .add(Branch->getOperand(1));
7126     MachineInstr *SIEND =
7127         BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_END_CF))
7128             .addReg(DstReg);
7129 
7130     IfEntry->erase(TI);
7131     IfEntry->insert(IfEntry->end(), SIIF);
7132     IfEnd->insert(IfEnd->getFirstNonPHI(), SIEND);
7133   }
7134 }
7135 
7136 void SIInstrInfo::convertNonUniformLoopRegion(
7137     MachineBasicBlock *LoopEntry, MachineBasicBlock *LoopEnd) const {
7138   MachineBasicBlock::iterator TI = LoopEnd->getFirstTerminator();
7139   // We expect 2 terminators, one conditional and one unconditional.
7140   assert(TI != LoopEnd->end());
7141 
7142   MachineInstr *Branch = &(*TI);
7143   MachineFunction *MF = LoopEnd->getParent();
7144   MachineRegisterInfo &MRI = LoopEnd->getParent()->getRegInfo();
7145 
7146   if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
7147 
7148     Register DstReg = MRI.createVirtualRegister(RI.getBoolRC());
7149     Register BackEdgeReg = MRI.createVirtualRegister(RI.getBoolRC());
7150     MachineInstrBuilder HeaderPHIBuilder =
7151         BuildMI(*(MF), Branch->getDebugLoc(), get(TargetOpcode::PHI), DstReg);
7152     for (MachineBasicBlock::pred_iterator PI = LoopEntry->pred_begin(),
7153                                           E = LoopEntry->pred_end();
7154          PI != E; ++PI) {
7155       if (*PI == LoopEnd) {
7156         HeaderPHIBuilder.addReg(BackEdgeReg);
7157       } else {
7158         MachineBasicBlock *PMBB = *PI;
7159         Register ZeroReg = MRI.createVirtualRegister(RI.getBoolRC());
7160         materializeImmediate(*PMBB, PMBB->getFirstTerminator(), DebugLoc(),
7161                              ZeroReg, 0);
7162         HeaderPHIBuilder.addReg(ZeroReg);
7163       }
7164       HeaderPHIBuilder.addMBB(*PI);
7165     }
7166     MachineInstr *HeaderPhi = HeaderPHIBuilder;
7167     MachineInstr *SIIFBREAK = BuildMI(*(MF), Branch->getDebugLoc(),
7168                                       get(AMDGPU::SI_IF_BREAK), BackEdgeReg)
7169                                   .addReg(DstReg)
7170                                   .add(Branch->getOperand(0));
7171     MachineInstr *SILOOP =
7172         BuildMI(*(MF), Branch->getDebugLoc(), get(AMDGPU::SI_LOOP))
7173             .addReg(BackEdgeReg)
7174             .addMBB(LoopEntry);
7175 
7176     LoopEntry->insert(LoopEntry->begin(), HeaderPhi);
7177     LoopEnd->erase(TI);
7178     LoopEnd->insert(LoopEnd->end(), SIIFBREAK);
7179     LoopEnd->insert(LoopEnd->end(), SILOOP);
7180   }
7181 }
7182 
7183 ArrayRef<std::pair<int, const char *>>
7184 SIInstrInfo::getSerializableTargetIndices() const {
7185   static const std::pair<int, const char *> TargetIndices[] = {
7186       {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"},
7187       {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"},
7188       {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"},
7189       {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"},
7190       {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}};
7191   return makeArrayRef(TargetIndices);
7192 }
7193 
7194 /// This is used by the post-RA scheduler (SchedulePostRAList.cpp).  The
7195 /// post-RA version of misched uses CreateTargetMIHazardRecognizer.
7196 ScheduleHazardRecognizer *
7197 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
7198                                             const ScheduleDAG *DAG) const {
7199   return new GCNHazardRecognizer(DAG->MF);
7200 }
7201 
7202 /// This is the hazard recognizer used at -O0 by the PostRAHazardRecognizer
7203 /// pass.
7204 ScheduleHazardRecognizer *
7205 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const {
7206   return new GCNHazardRecognizer(MF);
7207 }
7208 
7209 std::pair<unsigned, unsigned>
7210 SIInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
7211   return std::make_pair(TF & MO_MASK, TF & ~MO_MASK);
7212 }
7213 
7214 ArrayRef<std::pair<unsigned, const char *>>
7215 SIInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
7216   static const std::pair<unsigned, const char *> TargetFlags[] = {
7217     { MO_GOTPCREL, "amdgpu-gotprel" },
7218     { MO_GOTPCREL32_LO, "amdgpu-gotprel32-lo" },
7219     { MO_GOTPCREL32_HI, "amdgpu-gotprel32-hi" },
7220     { MO_REL32_LO, "amdgpu-rel32-lo" },
7221     { MO_REL32_HI, "amdgpu-rel32-hi" },
7222     { MO_ABS32_LO, "amdgpu-abs32-lo" },
7223     { MO_ABS32_HI, "amdgpu-abs32-hi" },
7224   };
7225 
7226   return makeArrayRef(TargetFlags);
7227 }
7228 
7229 bool SIInstrInfo::isBasicBlockPrologue(const MachineInstr &MI) const {
7230   return !MI.isTerminator() && MI.getOpcode() != AMDGPU::COPY &&
7231          MI.modifiesRegister(AMDGPU::EXEC, &RI);
7232 }
7233 
7234 MachineInstrBuilder
7235 SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB,
7236                            MachineBasicBlock::iterator I,
7237                            const DebugLoc &DL,
7238                            Register DestReg) const {
7239   if (ST.hasAddNoCarry())
7240     return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e64), DestReg);
7241 
7242   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
7243   Register UnusedCarry = MRI.createVirtualRegister(RI.getBoolRC());
7244   MRI.setRegAllocationHint(UnusedCarry, 0, RI.getVCC());
7245 
7246   return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_CO_U32_e64), DestReg)
7247            .addReg(UnusedCarry, RegState::Define | RegState::Dead);
7248 }
7249 
7250 MachineInstrBuilder SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB,
7251                                                MachineBasicBlock::iterator I,
7252                                                const DebugLoc &DL,
7253                                                Register DestReg,
7254                                                RegScavenger &RS) const {
7255   if (ST.hasAddNoCarry())
7256     return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e32), DestReg);
7257 
7258   // If available, prefer to use vcc.
7259   Register UnusedCarry = !RS.isRegUsed(AMDGPU::VCC)
7260                              ? Register(RI.getVCC())
7261                              : RS.scavengeRegister(RI.getBoolRC(), I, 0, false);
7262 
7263   // TODO: Users need to deal with this.
7264   if (!UnusedCarry.isValid())
7265     return MachineInstrBuilder();
7266 
7267   return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_CO_U32_e64), DestReg)
7268            .addReg(UnusedCarry, RegState::Define | RegState::Dead);
7269 }
7270 
7271 bool SIInstrInfo::isKillTerminator(unsigned Opcode) {
7272   switch (Opcode) {
7273   case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
7274   case AMDGPU::SI_KILL_I1_TERMINATOR:
7275     return true;
7276   default:
7277     return false;
7278   }
7279 }
7280 
7281 const MCInstrDesc &SIInstrInfo::getKillTerminatorFromPseudo(unsigned Opcode) const {
7282   switch (Opcode) {
7283   case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
7284     return get(AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR);
7285   case AMDGPU::SI_KILL_I1_PSEUDO:
7286     return get(AMDGPU::SI_KILL_I1_TERMINATOR);
7287   default:
7288     llvm_unreachable("invalid opcode, expected SI_KILL_*_PSEUDO");
7289   }
7290 }
7291 
7292 void SIInstrInfo::fixImplicitOperands(MachineInstr &MI) const {
7293   if (!ST.isWave32())
7294     return;
7295 
7296   for (auto &Op : MI.implicit_operands()) {
7297     if (Op.isReg() && Op.getReg() == AMDGPU::VCC)
7298       Op.setReg(AMDGPU::VCC_LO);
7299   }
7300 }
7301 
7302 bool SIInstrInfo::isBufferSMRD(const MachineInstr &MI) const {
7303   if (!isSMRD(MI))
7304     return false;
7305 
7306   // Check that it is using a buffer resource.
7307   int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sbase);
7308   if (Idx == -1) // e.g. s_memtime
7309     return false;
7310 
7311   const auto RCID = MI.getDesc().OpInfo[Idx].RegClass;
7312   return RI.getRegClass(RCID)->hasSubClassEq(&AMDGPU::SGPR_128RegClass);
7313 }
7314 
7315 bool SIInstrInfo::isLegalFLATOffset(int64_t Offset, unsigned AddrSpace,
7316                                     bool Signed) const {
7317   // TODO: Should 0 be special cased?
7318   if (!ST.hasFlatInstOffsets())
7319     return false;
7320 
7321   if (ST.hasFlatSegmentOffsetBug() && AddrSpace == AMDGPUAS::FLAT_ADDRESS)
7322     return false;
7323 
7324   unsigned N = AMDGPU::getNumFlatOffsetBits(ST, Signed);
7325   return Signed ? isIntN(N, Offset) : isUIntN(N, Offset);
7326 }
7327 
7328 std::pair<int64_t, int64_t> SIInstrInfo::splitFlatOffset(int64_t COffsetVal,
7329                                                          unsigned AddrSpace,
7330                                                          bool IsSigned) const {
7331   int64_t RemainderOffset = COffsetVal;
7332   int64_t ImmField = 0;
7333   const unsigned NumBits = AMDGPU::getNumFlatOffsetBits(ST, IsSigned);
7334   if (IsSigned) {
7335     // Use signed division by a power of two to truncate towards 0.
7336     int64_t D = 1LL << (NumBits - 1);
7337     RemainderOffset = (COffsetVal / D) * D;
7338     ImmField = COffsetVal - RemainderOffset;
7339   } else if (COffsetVal >= 0) {
7340     ImmField = COffsetVal & maskTrailingOnes<uint64_t>(NumBits);
7341     RemainderOffset = COffsetVal - ImmField;
7342   }
7343 
7344   assert(isLegalFLATOffset(ImmField, AddrSpace, IsSigned));
7345   assert(RemainderOffset + ImmField == COffsetVal);
7346   return {ImmField, RemainderOffset};
7347 }
7348 
7349 // This must be kept in sync with the SIEncodingFamily class in SIInstrInfo.td
7350 enum SIEncodingFamily {
7351   SI = 0,
7352   VI = 1,
7353   SDWA = 2,
7354   SDWA9 = 3,
7355   GFX80 = 4,
7356   GFX9 = 5,
7357   GFX10 = 6,
7358   SDWA10 = 7,
7359   GFX90A = 8
7360 };
7361 
7362 static SIEncodingFamily subtargetEncodingFamily(const GCNSubtarget &ST) {
7363   switch (ST.getGeneration()) {
7364   default:
7365     break;
7366   case AMDGPUSubtarget::SOUTHERN_ISLANDS:
7367   case AMDGPUSubtarget::SEA_ISLANDS:
7368     return SIEncodingFamily::SI;
7369   case AMDGPUSubtarget::VOLCANIC_ISLANDS:
7370   case AMDGPUSubtarget::GFX9:
7371     return SIEncodingFamily::VI;
7372   case AMDGPUSubtarget::GFX10:
7373     return SIEncodingFamily::GFX10;
7374   }
7375   llvm_unreachable("Unknown subtarget generation!");
7376 }
7377 
7378 bool SIInstrInfo::isAsmOnlyOpcode(int MCOp) const {
7379   switch(MCOp) {
7380   // These opcodes use indirect register addressing so
7381   // they need special handling by codegen (currently missing).
7382   // Therefore it is too risky to allow these opcodes
7383   // to be selected by dpp combiner or sdwa peepholer.
7384   case AMDGPU::V_MOVRELS_B32_dpp_gfx10:
7385   case AMDGPU::V_MOVRELS_B32_sdwa_gfx10:
7386   case AMDGPU::V_MOVRELD_B32_dpp_gfx10:
7387   case AMDGPU::V_MOVRELD_B32_sdwa_gfx10:
7388   case AMDGPU::V_MOVRELSD_B32_dpp_gfx10:
7389   case AMDGPU::V_MOVRELSD_B32_sdwa_gfx10:
7390   case AMDGPU::V_MOVRELSD_2_B32_dpp_gfx10:
7391   case AMDGPU::V_MOVRELSD_2_B32_sdwa_gfx10:
7392     return true;
7393   default:
7394     return false;
7395   }
7396 }
7397 
7398 int SIInstrInfo::pseudoToMCOpcode(int Opcode) const {
7399   SIEncodingFamily Gen = subtargetEncodingFamily(ST);
7400 
7401   if ((get(Opcode).TSFlags & SIInstrFlags::renamedInGFX9) != 0 &&
7402     ST.getGeneration() == AMDGPUSubtarget::GFX9)
7403     Gen = SIEncodingFamily::GFX9;
7404 
7405   // Adjust the encoding family to GFX80 for D16 buffer instructions when the
7406   // subtarget has UnpackedD16VMem feature.
7407   // TODO: remove this when we discard GFX80 encoding.
7408   if (ST.hasUnpackedD16VMem() && (get(Opcode).TSFlags & SIInstrFlags::D16Buf))
7409     Gen = SIEncodingFamily::GFX80;
7410 
7411   if (get(Opcode).TSFlags & SIInstrFlags::SDWA) {
7412     switch (ST.getGeneration()) {
7413     default:
7414       Gen = SIEncodingFamily::SDWA;
7415       break;
7416     case AMDGPUSubtarget::GFX9:
7417       Gen = SIEncodingFamily::SDWA9;
7418       break;
7419     case AMDGPUSubtarget::GFX10:
7420       Gen = SIEncodingFamily::SDWA10;
7421       break;
7422     }
7423   }
7424 
7425   int MCOp = AMDGPU::getMCOpcode(Opcode, Gen);
7426 
7427   // -1 means that Opcode is already a native instruction.
7428   if (MCOp == -1)
7429     return Opcode;
7430 
7431   if (ST.hasGFX90AInsts()) {
7432     uint16_t NMCOp = (uint16_t)-1;
7433       NMCOp = AMDGPU::getMCOpcode(Opcode, SIEncodingFamily::GFX90A);
7434     if (NMCOp == (uint16_t)-1)
7435       NMCOp = AMDGPU::getMCOpcode(Opcode, SIEncodingFamily::GFX9);
7436     if (NMCOp != (uint16_t)-1)
7437       MCOp = NMCOp;
7438   }
7439 
7440   // (uint16_t)-1 means that Opcode is a pseudo instruction that has
7441   // no encoding in the given subtarget generation.
7442   if (MCOp == (uint16_t)-1)
7443     return -1;
7444 
7445   if (isAsmOnlyOpcode(MCOp))
7446     return -1;
7447 
7448   return MCOp;
7449 }
7450 
7451 static
7452 TargetInstrInfo::RegSubRegPair getRegOrUndef(const MachineOperand &RegOpnd) {
7453   assert(RegOpnd.isReg());
7454   return RegOpnd.isUndef() ? TargetInstrInfo::RegSubRegPair() :
7455                              getRegSubRegPair(RegOpnd);
7456 }
7457 
7458 TargetInstrInfo::RegSubRegPair
7459 llvm::getRegSequenceSubReg(MachineInstr &MI, unsigned SubReg) {
7460   assert(MI.isRegSequence());
7461   for (unsigned I = 0, E = (MI.getNumOperands() - 1)/ 2; I < E; ++I)
7462     if (MI.getOperand(1 + 2 * I + 1).getImm() == SubReg) {
7463       auto &RegOp = MI.getOperand(1 + 2 * I);
7464       return getRegOrUndef(RegOp);
7465     }
7466   return TargetInstrInfo::RegSubRegPair();
7467 }
7468 
7469 // Try to find the definition of reg:subreg in subreg-manipulation pseudos
7470 // Following a subreg of reg:subreg isn't supported
7471 static bool followSubRegDef(MachineInstr &MI,
7472                             TargetInstrInfo::RegSubRegPair &RSR) {
7473   if (!RSR.SubReg)
7474     return false;
7475   switch (MI.getOpcode()) {
7476   default: break;
7477   case AMDGPU::REG_SEQUENCE:
7478     RSR = getRegSequenceSubReg(MI, RSR.SubReg);
7479     return true;
7480   // EXTRACT_SUBREG ins't supported as this would follow a subreg of subreg
7481   case AMDGPU::INSERT_SUBREG:
7482     if (RSR.SubReg == (unsigned)MI.getOperand(3).getImm())
7483       // inserted the subreg we're looking for
7484       RSR = getRegOrUndef(MI.getOperand(2));
7485     else { // the subreg in the rest of the reg
7486       auto R1 = getRegOrUndef(MI.getOperand(1));
7487       if (R1.SubReg) // subreg of subreg isn't supported
7488         return false;
7489       RSR.Reg = R1.Reg;
7490     }
7491     return true;
7492   }
7493   return false;
7494 }
7495 
7496 MachineInstr *llvm::getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P,
7497                                      MachineRegisterInfo &MRI) {
7498   assert(MRI.isSSA());
7499   if (!P.Reg.isVirtual())
7500     return nullptr;
7501 
7502   auto RSR = P;
7503   auto *DefInst = MRI.getVRegDef(RSR.Reg);
7504   while (auto *MI = DefInst) {
7505     DefInst = nullptr;
7506     switch (MI->getOpcode()) {
7507     case AMDGPU::COPY:
7508     case AMDGPU::V_MOV_B32_e32: {
7509       auto &Op1 = MI->getOperand(1);
7510       if (Op1.isReg() && Op1.getReg().isVirtual()) {
7511         if (Op1.isUndef())
7512           return nullptr;
7513         RSR = getRegSubRegPair(Op1);
7514         DefInst = MRI.getVRegDef(RSR.Reg);
7515       }
7516       break;
7517     }
7518     default:
7519       if (followSubRegDef(*MI, RSR)) {
7520         if (!RSR.Reg)
7521           return nullptr;
7522         DefInst = MRI.getVRegDef(RSR.Reg);
7523       }
7524     }
7525     if (!DefInst)
7526       return MI;
7527   }
7528   return nullptr;
7529 }
7530 
7531 bool llvm::execMayBeModifiedBeforeUse(const MachineRegisterInfo &MRI,
7532                                       Register VReg,
7533                                       const MachineInstr &DefMI,
7534                                       const MachineInstr &UseMI) {
7535   assert(MRI.isSSA() && "Must be run on SSA");
7536 
7537   auto *TRI = MRI.getTargetRegisterInfo();
7538   auto *DefBB = DefMI.getParent();
7539 
7540   // Don't bother searching between blocks, although it is possible this block
7541   // doesn't modify exec.
7542   if (UseMI.getParent() != DefBB)
7543     return true;
7544 
7545   const int MaxInstScan = 20;
7546   int NumInst = 0;
7547 
7548   // Stop scan at the use.
7549   auto E = UseMI.getIterator();
7550   for (auto I = std::next(DefMI.getIterator()); I != E; ++I) {
7551     if (I->isDebugInstr())
7552       continue;
7553 
7554     if (++NumInst > MaxInstScan)
7555       return true;
7556 
7557     if (I->modifiesRegister(AMDGPU::EXEC, TRI))
7558       return true;
7559   }
7560 
7561   return false;
7562 }
7563 
7564 bool llvm::execMayBeModifiedBeforeAnyUse(const MachineRegisterInfo &MRI,
7565                                          Register VReg,
7566                                          const MachineInstr &DefMI) {
7567   assert(MRI.isSSA() && "Must be run on SSA");
7568 
7569   auto *TRI = MRI.getTargetRegisterInfo();
7570   auto *DefBB = DefMI.getParent();
7571 
7572   const int MaxUseScan = 10;
7573   int NumUse = 0;
7574 
7575   for (auto &Use : MRI.use_nodbg_operands(VReg)) {
7576     auto &UseInst = *Use.getParent();
7577     // Don't bother searching between blocks, although it is possible this block
7578     // doesn't modify exec.
7579     if (UseInst.getParent() != DefBB)
7580       return true;
7581 
7582     if (++NumUse > MaxUseScan)
7583       return true;
7584   }
7585 
7586   if (NumUse == 0)
7587     return false;
7588 
7589   const int MaxInstScan = 20;
7590   int NumInst = 0;
7591 
7592   // Stop scan when we have seen all the uses.
7593   for (auto I = std::next(DefMI.getIterator()); ; ++I) {
7594     assert(I != DefBB->end());
7595 
7596     if (I->isDebugInstr())
7597       continue;
7598 
7599     if (++NumInst > MaxInstScan)
7600       return true;
7601 
7602     for (const MachineOperand &Op : I->operands()) {
7603       // We don't check reg masks here as they're used only on calls:
7604       // 1. EXEC is only considered const within one BB
7605       // 2. Call should be a terminator instruction if present in a BB
7606 
7607       if (!Op.isReg())
7608         continue;
7609 
7610       Register Reg = Op.getReg();
7611       if (Op.isUse()) {
7612         if (Reg == VReg && --NumUse == 0)
7613           return false;
7614       } else if (TRI->regsOverlap(Reg, AMDGPU::EXEC))
7615         return true;
7616     }
7617   }
7618 }
7619 
7620 MachineInstr *SIInstrInfo::createPHIDestinationCopy(
7621     MachineBasicBlock &MBB, MachineBasicBlock::iterator LastPHIIt,
7622     const DebugLoc &DL, Register Src, Register Dst) const {
7623   auto Cur = MBB.begin();
7624   if (Cur != MBB.end())
7625     do {
7626       if (!Cur->isPHI() && Cur->readsRegister(Dst))
7627         return BuildMI(MBB, Cur, DL, get(TargetOpcode::COPY), Dst).addReg(Src);
7628       ++Cur;
7629     } while (Cur != MBB.end() && Cur != LastPHIIt);
7630 
7631   return TargetInstrInfo::createPHIDestinationCopy(MBB, LastPHIIt, DL, Src,
7632                                                    Dst);
7633 }
7634 
7635 MachineInstr *SIInstrInfo::createPHISourceCopy(
7636     MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt,
7637     const DebugLoc &DL, Register Src, unsigned SrcSubReg, Register Dst) const {
7638   if (InsPt != MBB.end() &&
7639       (InsPt->getOpcode() == AMDGPU::SI_IF ||
7640        InsPt->getOpcode() == AMDGPU::SI_ELSE ||
7641        InsPt->getOpcode() == AMDGPU::SI_IF_BREAK) &&
7642       InsPt->definesRegister(Src)) {
7643     InsPt++;
7644     return BuildMI(MBB, InsPt, DL,
7645                    get(ST.isWave32() ? AMDGPU::S_MOV_B32_term
7646                                      : AMDGPU::S_MOV_B64_term),
7647                    Dst)
7648         .addReg(Src, 0, SrcSubReg)
7649         .addReg(AMDGPU::EXEC, RegState::Implicit);
7650   }
7651   return TargetInstrInfo::createPHISourceCopy(MBB, InsPt, DL, Src, SrcSubReg,
7652                                               Dst);
7653 }
7654 
7655 bool llvm::SIInstrInfo::isWave32() const { return ST.isWave32(); }
7656 
7657 MachineInstr *SIInstrInfo::foldMemoryOperandImpl(
7658     MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
7659     MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS,
7660     VirtRegMap *VRM) const {
7661   // This is a bit of a hack (copied from AArch64). Consider this instruction:
7662   //
7663   //   %0:sreg_32 = COPY $m0
7664   //
7665   // We explicitly chose SReg_32 for the virtual register so such a copy might
7666   // be eliminated by RegisterCoalescer. However, that may not be possible, and
7667   // %0 may even spill. We can't spill $m0 normally (it would require copying to
7668   // a numbered SGPR anyway), and since it is in the SReg_32 register class,
7669   // TargetInstrInfo::foldMemoryOperand() is going to try.
7670   // A similar issue also exists with spilling and reloading $exec registers.
7671   //
7672   // To prevent that, constrain the %0 register class here.
7673   if (MI.isFullCopy()) {
7674     Register DstReg = MI.getOperand(0).getReg();
7675     Register SrcReg = MI.getOperand(1).getReg();
7676     if ((DstReg.isVirtual() || SrcReg.isVirtual()) &&
7677         (DstReg.isVirtual() != SrcReg.isVirtual())) {
7678       MachineRegisterInfo &MRI = MF.getRegInfo();
7679       Register VirtReg = DstReg.isVirtual() ? DstReg : SrcReg;
7680       const TargetRegisterClass *RC = MRI.getRegClass(VirtReg);
7681       if (RC->hasSuperClassEq(&AMDGPU::SReg_32RegClass)) {
7682         MRI.constrainRegClass(VirtReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
7683         return nullptr;
7684       } else if (RC->hasSuperClassEq(&AMDGPU::SReg_64RegClass)) {
7685         MRI.constrainRegClass(VirtReg, &AMDGPU::SReg_64_XEXECRegClass);
7686         return nullptr;
7687       }
7688     }
7689   }
7690 
7691   return nullptr;
7692 }
7693 
7694 unsigned SIInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
7695                                       const MachineInstr &MI,
7696                                       unsigned *PredCost) const {
7697   if (MI.isBundle()) {
7698     MachineBasicBlock::const_instr_iterator I(MI.getIterator());
7699     MachineBasicBlock::const_instr_iterator E(MI.getParent()->instr_end());
7700     unsigned Lat = 0, Count = 0;
7701     for (++I; I != E && I->isBundledWithPred(); ++I) {
7702       ++Count;
7703       Lat = std::max(Lat, SchedModel.computeInstrLatency(&*I));
7704     }
7705     return Lat + Count - 1;
7706   }
7707 
7708   return SchedModel.computeInstrLatency(&MI);
7709 }
7710 
7711 unsigned SIInstrInfo::getDSShaderTypeValue(const MachineFunction &MF) {
7712   switch (MF.getFunction().getCallingConv()) {
7713   case CallingConv::AMDGPU_PS:
7714     return 1;
7715   case CallingConv::AMDGPU_VS:
7716     return 2;
7717   case CallingConv::AMDGPU_GS:
7718     return 3;
7719   case CallingConv::AMDGPU_HS:
7720   case CallingConv::AMDGPU_LS:
7721   case CallingConv::AMDGPU_ES:
7722     report_fatal_error("ds_ordered_count unsupported for this calling conv");
7723   case CallingConv::AMDGPU_CS:
7724   case CallingConv::AMDGPU_KERNEL:
7725   case CallingConv::C:
7726   case CallingConv::Fast:
7727   default:
7728     // Assume other calling conventions are various compute callable functions
7729     return 0;
7730   }
7731 }
7732