1 //===-- SIInstrInfo.cpp - SI Instruction Information  ---------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 /// \file
11 /// \brief SI Implementation of TargetInstrInfo.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "SIInstrInfo.h"
16 #include "AMDGPUTargetMachine.h"
17 #include "GCNHazardRecognizer.h"
18 #include "SIDefines.h"
19 #include "SIMachineFunctionInfo.h"
20 #include "llvm/CodeGen/MachineFrameInfo.h"
21 #include "llvm/CodeGen/MachineInstrBuilder.h"
22 #include "llvm/CodeGen/MachineRegisterInfo.h"
23 #include "llvm/CodeGen/ScheduleDAG.h"
24 #include "llvm/IR/Function.h"
25 #include "llvm/CodeGen/RegisterScavenging.h"
26 #include "llvm/MC/MCInstrDesc.h"
27 #include "llvm/Support/Debug.h"
28 
29 using namespace llvm;
30 
31 // Must be at least 4 to be able to branch over the minimum unconditional
32 // branch code. This exists only to make it possible to write reasonably small
33 // tests for long branches.
34 static cl::opt<unsigned>
35 BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16),
36                  cl::desc("Restrict range of branch instructions (DEBUG)"));
37 
38 SIInstrInfo::SIInstrInfo(const SISubtarget &ST)
39   : AMDGPUInstrInfo(ST), RI(ST), ST(ST) {}
40 
41 //===----------------------------------------------------------------------===//
42 // TargetInstrInfo callbacks
43 //===----------------------------------------------------------------------===//
44 
45 static unsigned getNumOperandsNoGlue(SDNode *Node) {
46   unsigned N = Node->getNumOperands();
47   while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
48     --N;
49   return N;
50 }
51 
52 static SDValue findChainOperand(SDNode *Load) {
53   SDValue LastOp = Load->getOperand(getNumOperandsNoGlue(Load) - 1);
54   assert(LastOp.getValueType() == MVT::Other && "Chain missing from load node");
55   return LastOp;
56 }
57 
58 /// \brief Returns true if both nodes have the same value for the given
59 ///        operand \p OpName, or if neither node has this operand.
60 static bool nodesHaveSameOperandValue(SDNode *N0, SDNode *N1, unsigned OpName) {
61   unsigned Opc0 = N0->getMachineOpcode();
62   unsigned Opc1 = N1->getMachineOpcode();
63 
64   int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
65   int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);
66 
67   if (Op0Idx == -1 && Op1Idx == -1)
68     return true;
69 
70 
71   if ((Op0Idx == -1 && Op1Idx != -1) ||
72       (Op1Idx == -1 && Op0Idx != -1))
73     return false;
74 
75   // getNamedOperandIdx returns the index for the MachineInstr's operands,
76   // which includes the result as the first operand. We are indexing into the
77   // MachineSDNode's operands, so we need to skip the result operand to get
78   // the real index.
79   --Op0Idx;
80   --Op1Idx;
81 
82   return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
83 }
84 
85 bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
86                                                     AliasAnalysis *AA) const {
87   // TODO: The generic check fails for VALU instructions that should be
88   // rematerializable due to implicit reads of exec. We really want all of the
89   // generic logic here, minus the check for implicit exec reads.
90   switch (MI.getOpcode()) {
91   case AMDGPU::V_MOV_B32_e32:
92   case AMDGPU::V_MOV_B32_e64:
93   case AMDGPU::V_MOV_B64_PSEUDO:
94     return true;
95   default:
96     return false;
97   }
98 }
99 
100 bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
101                                           int64_t &Offset0,
102                                           int64_t &Offset1) const {
103   if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
104     return false;
105 
106   unsigned Opc0 = Load0->getMachineOpcode();
107   unsigned Opc1 = Load1->getMachineOpcode();
108 
109   // Make sure both are actually loads.
110   if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
111     return false;
112 
113   if (isDS(Opc0) && isDS(Opc1)) {
114 
115     // FIXME: Handle this case:
116     if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1))
117       return false;
118 
119     // Check base reg.
120     if (Load0->getOperand(1) != Load1->getOperand(1))
121       return false;
122 
123     // Check chain.
124     if (findChainOperand(Load0) != findChainOperand(Load1))
125       return false;
126 
127     // Skip read2 / write2 variants for simplicity.
128     // TODO: We should report true if the used offsets are adjacent (excluding
129     // the st64 versions).
130     if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::data1) != -1 ||
131         AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::data1) != -1)
132       return false;
133 
134     Offset0 = cast<ConstantSDNode>(Load0->getOperand(2))->getZExtValue();
135     Offset1 = cast<ConstantSDNode>(Load1->getOperand(2))->getZExtValue();
136     return true;
137   }
138 
139   if (isSMRD(Opc0) && isSMRD(Opc1)) {
140     assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1));
141 
142     // Check base reg.
143     if (Load0->getOperand(0) != Load1->getOperand(0))
144       return false;
145 
146     const ConstantSDNode *Load0Offset =
147         dyn_cast<ConstantSDNode>(Load0->getOperand(1));
148     const ConstantSDNode *Load1Offset =
149         dyn_cast<ConstantSDNode>(Load1->getOperand(1));
150 
151     if (!Load0Offset || !Load1Offset)
152       return false;
153 
154     // Check chain.
155     if (findChainOperand(Load0) != findChainOperand(Load1))
156       return false;
157 
158     Offset0 = Load0Offset->getZExtValue();
159     Offset1 = Load1Offset->getZExtValue();
160     return true;
161   }
162 
163   // MUBUF and MTBUF can access the same addresses.
164   if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) {
165 
166     // MUBUF and MTBUF have vaddr at different indices.
167     if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||
168         findChainOperand(Load0) != findChainOperand(Load1) ||
169         !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||
170         !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))
171       return false;
172 
173     int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
174     int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);
175 
176     if (OffIdx0 == -1 || OffIdx1 == -1)
177       return false;
178 
179     // getNamedOperandIdx returns the index for MachineInstrs. Since they
180     // include the output in the operand list, but SDNodes don't, we need to
181     // decrement the index by one.
182     --OffIdx0;
183     --OffIdx1;
184 
185     SDValue Off0 = Load0->getOperand(OffIdx0);
186     SDValue Off1 = Load1->getOperand(OffIdx1);
187 
188     // The offset might be a FrameIndexSDNode.
189     if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
190       return false;
191 
192     Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
193     Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
194     return true;
195   }
196 
197   return false;
198 }
199 
200 static bool isStride64(unsigned Opc) {
201   switch (Opc) {
202   case AMDGPU::DS_READ2ST64_B32:
203   case AMDGPU::DS_READ2ST64_B64:
204   case AMDGPU::DS_WRITE2ST64_B32:
205   case AMDGPU::DS_WRITE2ST64_B64:
206     return true;
207   default:
208     return false;
209   }
210 }
211 
212 bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
213                                         int64_t &Offset,
214                                         const TargetRegisterInfo *TRI) const {
215   unsigned Opc = LdSt.getOpcode();
216 
217   if (isDS(LdSt)) {
218     const MachineOperand *OffsetImm =
219         getNamedOperand(LdSt, AMDGPU::OpName::offset);
220     if (OffsetImm) {
221       // Normal, single offset LDS instruction.
222       const MachineOperand *AddrReg =
223           getNamedOperand(LdSt, AMDGPU::OpName::addr);
224 
225       BaseReg = AddrReg->getReg();
226       Offset = OffsetImm->getImm();
227       return true;
228     }
229 
230     // The 2 offset instructions use offset0 and offset1 instead. We can treat
231     // these as a load with a single offset if the 2 offsets are consecutive. We
232     // will use this for some partially aligned loads.
233     const MachineOperand *Offset0Imm =
234         getNamedOperand(LdSt, AMDGPU::OpName::offset0);
235     const MachineOperand *Offset1Imm =
236         getNamedOperand(LdSt, AMDGPU::OpName::offset1);
237 
238     uint8_t Offset0 = Offset0Imm->getImm();
239     uint8_t Offset1 = Offset1Imm->getImm();
240 
241     if (Offset1 > Offset0 && Offset1 - Offset0 == 1) {
242       // Each of these offsets is in element-sized units, so we need to convert
243       // to bytes for the individual reads.
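      // For example, a ds_read2_b32 with offset0 = 2 and offset1 = 3 reads two
      // dwords at base + 8 and base + 12, and is reported here as a single
      // access with Offset = 8.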
244 
245       unsigned EltSize;
246       if (LdSt.mayLoad())
247         EltSize = getOpRegClass(LdSt, 0)->getSize() / 2;
248       else {
249         assert(LdSt.mayStore());
250         int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
251         EltSize = getOpRegClass(LdSt, Data0Idx)->getSize();
252       }
253 
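      // The st64 variants scale each offset by 64 elements, e.g. a
      // ds_read2st64_b32 with offset0 = 1 addresses base + 1 * 64 * 4 bytes.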
254       if (isStride64(Opc))
255         EltSize *= 64;
256 
257       const MachineOperand *AddrReg =
258           getNamedOperand(LdSt, AMDGPU::OpName::addr);
259       BaseReg = AddrReg->getReg();
260       Offset = EltSize * Offset0;
261       return true;
262     }
263 
264     return false;
265   }
266 
267   if (isMUBUF(LdSt) || isMTBUF(LdSt)) {
268     const MachineOperand *SOffset = getNamedOperand(LdSt, AMDGPU::OpName::soffset);
269     if (SOffset && SOffset->isReg())
270       return false;
271 
272     const MachineOperand *AddrReg =
273         getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
274     if (!AddrReg)
275       return false;
276 
277     const MachineOperand *OffsetImm =
278         getNamedOperand(LdSt, AMDGPU::OpName::offset);
279     BaseReg = AddrReg->getReg();
280     Offset = OffsetImm->getImm();
281 
282     if (SOffset) // soffset can be an inline immediate.
283       Offset += SOffset->getImm();
284 
285     return true;
286   }
287 
288   if (isSMRD(LdSt)) {
289     const MachineOperand *OffsetImm =
290         getNamedOperand(LdSt, AMDGPU::OpName::offset);
291     if (!OffsetImm)
292       return false;
293 
294     const MachineOperand *SBaseReg =
295         getNamedOperand(LdSt, AMDGPU::OpName::sbase);
296     BaseReg = SBaseReg->getReg();
297     Offset = OffsetImm->getImm();
298     return true;
299   }
300 
301   if (isFLAT(LdSt)) {
302     const MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
303     BaseReg = AddrReg->getReg();
304     Offset = 0;
305     return true;
306   }
307 
308   return false;
309 }
310 
311 bool SIInstrInfo::shouldClusterMemOps(MachineInstr &FirstLdSt,
312                                       MachineInstr &SecondLdSt,
313                                       unsigned NumLoads) const {
314   const MachineOperand *FirstDst = nullptr;
315   const MachineOperand *SecondDst = nullptr;
316 
317   if ((isMUBUF(FirstLdSt) && isMUBUF(SecondLdSt)) ||
318       (isMTBUF(FirstLdSt) && isMTBUF(SecondLdSt)) ||
319       (isFLAT(FirstLdSt) && isFLAT(SecondLdSt))) {
320     FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdata);
321     SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdata);
322   } else if (isSMRD(FirstLdSt) && isSMRD(SecondLdSt)) {
323     FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::sdst);
324     SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::sdst);
325   } else if (isDS(FirstLdSt) && isDS(SecondLdSt)) {
326     FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdst);
327     SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdst);
328   }
329 
330   if (!FirstDst || !SecondDst)
331     return false;
332 
333   // Try to limit clustering based on the total number of bytes loaded
334   // rather than the number of instructions.  This is done to help reduce
335   // register pressure.  The method used is somewhat inexact, though,
336   // because it assumes that all loads in the cluster will load the
337   // same number of bytes as FirstLdSt.
338 
339   // The unit of this value is bytes.
340   // FIXME: This needs finer tuning.
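  // For example, with a 16 byte threshold, up to four dword loads or two
  // 64-bit loads will be clustered together.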
341   unsigned LoadClusterThreshold = 16;
342 
343   const MachineRegisterInfo &MRI =
344       FirstLdSt.getParent()->getParent()->getRegInfo();
345   const TargetRegisterClass *DstRC = MRI.getRegClass(FirstDst->getReg());
346 
347   return (NumLoads * DstRC->getSize()) <= LoadClusterThreshold;
348 }
349 
350 void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
351                               MachineBasicBlock::iterator MI,
352                               const DebugLoc &DL, unsigned DestReg,
353                               unsigned SrcReg, bool KillSrc) const {
354   const TargetRegisterClass *RC = RI.getPhysRegClass(DestReg);
355 
356   if (RC == &AMDGPU::VGPR_32RegClass) {
357     assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
358            AMDGPU::SReg_32RegClass.contains(SrcReg));
359     BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
360       .addReg(SrcReg, getKillRegState(KillSrc));
361     return;
362   }
363 
364   if (RC == &AMDGPU::SReg_32_XM0RegClass ||
365       RC == &AMDGPU::SReg_32RegClass) {
366     if (SrcReg == AMDGPU::SCC) {
367       BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B32), DestReg)
368           .addImm(-1)
369           .addImm(0);
370       return;
371     }
372 
373     assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
374     BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
375             .addReg(SrcReg, getKillRegState(KillSrc));
376     return;
377   }
378 
379   if (RC == &AMDGPU::SReg_64RegClass) {
380     if (DestReg == AMDGPU::VCC) {
381       if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
382         BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
383           .addReg(SrcReg, getKillRegState(KillSrc));
384       } else {
385         // FIXME: Hack until VReg_1 removed.
386         assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
387         BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
388           .addImm(0)
389           .addReg(SrcReg, getKillRegState(KillSrc));
390       }
391 
392       return;
393     }
394 
395     assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
396     BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
397             .addReg(SrcReg, getKillRegState(KillSrc));
398     return;
399   }
400 
401   if (DestReg == AMDGPU::SCC) {
402     assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
403     BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32))
404       .addReg(SrcReg, getKillRegState(KillSrc))
405       .addImm(0);
406     return;
407   }
408 
409   unsigned EltSize = 4;
410   unsigned Opcode = AMDGPU::V_MOV_B32_e32;
411   if (RI.isSGPRClass(RC)) {
412     if (RC->getSize() > 4) {
413       Opcode =  AMDGPU::S_MOV_B64;
414       EltSize = 8;
415     } else {
416       Opcode = AMDGPU::S_MOV_B32;
417       EltSize = 4;
418     }
419   }
420 
421   ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize);
422   bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg);
423 
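  // The first sub-register move implicitly defines the full DestReg and the
  // last one carries the kill of SrcReg; every move also implicitly reads
  // SrcReg so the sequence is treated as a copy of the whole register.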
424   for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
425     unsigned SubIdx;
426     if (Forward)
427       SubIdx = SubIndices[Idx];
428     else
429       SubIdx = SubIndices[SubIndices.size() - Idx - 1];
430 
431     MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
432       get(Opcode), RI.getSubReg(DestReg, SubIdx));
433 
434     Builder.addReg(RI.getSubReg(SrcReg, SubIdx));
435 
436     if (Idx == SubIndices.size() - 1)
437       Builder.addReg(SrcReg, getKillRegState(KillSrc) | RegState::Implicit);
438 
439     if (Idx == 0)
440       Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
441 
442     Builder.addReg(SrcReg, RegState::Implicit);
443   }
444 }
445 
446 int SIInstrInfo::commuteOpcode(unsigned Opcode) const {
447   int NewOpc;
448 
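  // A "REV" opcode (e.g. V_SUBREV_F32 for V_SUB_F32) is the same operation
  // with its source operands swapped, so commuting maps between the two forms.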
449   // Try to map original to commuted opcode
450   NewOpc = AMDGPU::getCommuteRev(Opcode);
451   if (NewOpc != -1)
452     // Check if the commuted (REV) opcode exists on the target.
453     return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;
454 
455   // Try to map commuted to original opcode
456   NewOpc = AMDGPU::getCommuteOrig(Opcode);
457   if (NewOpc != -1)
458     // Check if the original (non-REV) opcode exists on the target.
459     return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;
460 
461   return Opcode;
462 }
463 
464 unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {
465 
466   if (DstRC->getSize() == 4) {
467     return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
468   } else if (DstRC->getSize() == 8 && RI.isSGPRClass(DstRC)) {
469     return AMDGPU::S_MOV_B64;
470   } else if (DstRC->getSize() == 8 && !RI.isSGPRClass(DstRC)) {
471     return  AMDGPU::V_MOV_B64_PSEUDO;
472   }
473   return AMDGPU::COPY;
474 }
475 
476 static unsigned getSGPRSpillSaveOpcode(unsigned Size) {
477   switch (Size) {
478   case 4:
479     return AMDGPU::SI_SPILL_S32_SAVE;
480   case 8:
481     return AMDGPU::SI_SPILL_S64_SAVE;
482   case 16:
483     return AMDGPU::SI_SPILL_S128_SAVE;
484   case 32:
485     return AMDGPU::SI_SPILL_S256_SAVE;
486   case 64:
487     return AMDGPU::SI_SPILL_S512_SAVE;
488   default:
489     llvm_unreachable("unknown register size");
490   }
491 }
492 
493 static unsigned getVGPRSpillSaveOpcode(unsigned Size) {
494   switch (Size) {
495   case 4:
496     return AMDGPU::SI_SPILL_V32_SAVE;
497   case 8:
498     return AMDGPU::SI_SPILL_V64_SAVE;
499   case 12:
500     return AMDGPU::SI_SPILL_V96_SAVE;
501   case 16:
502     return AMDGPU::SI_SPILL_V128_SAVE;
503   case 32:
504     return AMDGPU::SI_SPILL_V256_SAVE;
505   case 64:
506     return AMDGPU::SI_SPILL_V512_SAVE;
507   default:
508     llvm_unreachable("unknown register size");
509   }
510 }
511 
512 void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
513                                       MachineBasicBlock::iterator MI,
514                                       unsigned SrcReg, bool isKill,
515                                       int FrameIndex,
516                                       const TargetRegisterClass *RC,
517                                       const TargetRegisterInfo *TRI) const {
518   MachineFunction *MF = MBB.getParent();
519   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
520   MachineFrameInfo &FrameInfo = MF->getFrameInfo();
521   DebugLoc DL = MBB.findDebugLoc(MI);
522 
523   unsigned Size = FrameInfo.getObjectSize(FrameIndex);
524   unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
525   MachinePointerInfo PtrInfo
526     = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
527   MachineMemOperand *MMO
528     = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
529                                Size, Align);
530 
531   if (RI.isSGPRClass(RC)) {
532     MFI->setHasSpilledSGPRs();
533 
534     // We are only allowed to create one new instruction when spilling
535     // registers, so we need to use a pseudo instruction for spilling SGPRs.
536     const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(RC->getSize()));
537 
538     // The SGPR spill/restore instructions only work on numbered SGPRs, so we
539     // need to make sure we are using the correct register class.
540     if (TargetRegisterInfo::isVirtualRegister(SrcReg) && RC->getSize() == 4) {
541       MachineRegisterInfo &MRI = MF->getRegInfo();
542       MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0RegClass);
543     }
544 
545     MachineInstrBuilder Spill = BuildMI(MBB, MI, DL, OpDesc)
546       .addReg(SrcReg, getKillRegState(isKill)) // data
547       .addFrameIndex(FrameIndex)               // addr
548       .addMemOperand(MMO)
549       .addReg(MFI->getScratchRSrcReg(), RegState::Implicit)
550       .addReg(MFI->getScratchWaveOffsetReg(), RegState::Implicit);
551     // Add the scratch resource registers as implicit uses because we may end up
552     // needing them, and need to ensure that the reserved registers are
553     // correctly handled.
554 
555     if (ST.hasScalarStores()) {
556       // m0 is used as the offset for scalar stores when spilling.
557       Spill.addReg(AMDGPU::M0, RegState::ImplicitDefine);
558     }
559 
560     return;
561   }
562 
563   if (!ST.isVGPRSpillingEnabled(*MF->getFunction())) {
564     LLVMContext &Ctx = MF->getFunction()->getContext();
565     Ctx.emitError("SIInstrInfo::storeRegToStackSlot - Do not know how to"
566                   " spill register");
567     BuildMI(MBB, MI, DL, get(AMDGPU::KILL))
568       .addReg(SrcReg);
569 
570     return;
571   }
572 
573   assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected");
574 
575   unsigned Opcode = getVGPRSpillSaveOpcode(RC->getSize());
576   MFI->setHasSpilledVGPRs();
577   BuildMI(MBB, MI, DL, get(Opcode))
578     .addReg(SrcReg, getKillRegState(isKill)) // data
579     .addFrameIndex(FrameIndex)               // addr
580     .addReg(MFI->getScratchRSrcReg())        // scratch_rsrc
581     .addReg(MFI->getScratchWaveOffsetReg())  // scratch_offset
582     .addImm(0)                               // offset
583     .addMemOperand(MMO);
584 }
585 
586 static unsigned getSGPRSpillRestoreOpcode(unsigned Size) {
587   switch (Size) {
588   case 4:
589     return AMDGPU::SI_SPILL_S32_RESTORE;
590   case 8:
591     return AMDGPU::SI_SPILL_S64_RESTORE;
592   case 16:
593     return AMDGPU::SI_SPILL_S128_RESTORE;
594   case 32:
595     return AMDGPU::SI_SPILL_S256_RESTORE;
596   case 64:
597     return AMDGPU::SI_SPILL_S512_RESTORE;
598   default:
599     llvm_unreachable("unknown register size");
600   }
601 }
602 
603 static unsigned getVGPRSpillRestoreOpcode(unsigned Size) {
604   switch (Size) {
605   case 4:
606     return AMDGPU::SI_SPILL_V32_RESTORE;
607   case 8:
608     return AMDGPU::SI_SPILL_V64_RESTORE;
609   case 12:
610     return AMDGPU::SI_SPILL_V96_RESTORE;
611   case 16:
612     return AMDGPU::SI_SPILL_V128_RESTORE;
613   case 32:
614     return AMDGPU::SI_SPILL_V256_RESTORE;
615   case 64:
616     return AMDGPU::SI_SPILL_V512_RESTORE;
617   default:
618     llvm_unreachable("unknown register size");
619   }
620 }
621 
622 void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
623                                        MachineBasicBlock::iterator MI,
624                                        unsigned DestReg, int FrameIndex,
625                                        const TargetRegisterClass *RC,
626                                        const TargetRegisterInfo *TRI) const {
627   MachineFunction *MF = MBB.getParent();
628   const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
629   MachineFrameInfo &FrameInfo = MF->getFrameInfo();
630   DebugLoc DL = MBB.findDebugLoc(MI);
631   unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
632   unsigned Size = FrameInfo.getObjectSize(FrameIndex);
633 
634   MachinePointerInfo PtrInfo
635     = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
636 
637   MachineMemOperand *MMO = MF->getMachineMemOperand(
638     PtrInfo, MachineMemOperand::MOLoad, Size, Align);
639 
640   if (RI.isSGPRClass(RC)) {
641     // FIXME: Maybe this should not include a memoperand because it will be
642     // lowered to non-memory instructions.
643     const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(RC->getSize()));
644     if (TargetRegisterInfo::isVirtualRegister(DestReg) && RC->getSize() == 4) {
645       MachineRegisterInfo &MRI = MF->getRegInfo();
646       MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0RegClass);
647     }
648 
649     MachineInstrBuilder Spill = BuildMI(MBB, MI, DL, OpDesc, DestReg)
650       .addFrameIndex(FrameIndex) // addr
651       .addMemOperand(MMO)
652       .addReg(MFI->getScratchRSrcReg(), RegState::Implicit)
653       .addReg(MFI->getScratchWaveOffsetReg(), RegState::Implicit);
654 
655     if (ST.hasScalarStores()) {
656       // m0 is used as the offset for scalar stores when spilling.
657       Spill.addReg(AMDGPU::M0, RegState::ImplicitDefine);
658     }
659 
660     return;
661   }
662 
663   if (!ST.isVGPRSpillingEnabled(*MF->getFunction())) {
664     LLVMContext &Ctx = MF->getFunction()->getContext();
665     Ctx.emitError("SIInstrInfo::loadRegFromStackSlot - Do not know how to"
666                   " restore register");
667     BuildMI(MBB, MI, DL, get(AMDGPU::IMPLICIT_DEF), DestReg);
668 
669     return;
670   }
671 
672   assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected");
673 
674   unsigned Opcode = getVGPRSpillRestoreOpcode(RC->getSize());
675   BuildMI(MBB, MI, DL, get(Opcode), DestReg)
676     .addFrameIndex(FrameIndex)              // vaddr
677     .addReg(MFI->getScratchRSrcReg())       // scratch_rsrc
678     .addReg(MFI->getScratchWaveOffsetReg()) // scratch_offset
679     .addImm(0)                              // offset
680     .addMemOperand(MMO);
681 }
682 
683 /// \param FrameOffset Offset in bytes of the FrameIndex being spilled
684 unsigned SIInstrInfo::calculateLDSSpillAddress(
685     MachineBasicBlock &MBB, MachineInstr &MI, RegScavenger *RS, unsigned TmpReg,
686     unsigned FrameOffset, unsigned Size) const {
687   MachineFunction *MF = MBB.getParent();
688   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
689   const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
690   const SIRegisterInfo *TRI = ST.getRegisterInfo();
691   DebugLoc DL = MBB.findDebugLoc(MI);
692   unsigned WorkGroupSize = MFI->getMaxFlatWorkGroupSize();
693   unsigned WavefrontSize = ST.getWavefrontSize();
694 
695   unsigned TIDReg = MFI->getTIDReg();
696   if (!MFI->hasCalculatedTID()) {
697     MachineBasicBlock &Entry = MBB.getParent()->front();
698     MachineBasicBlock::iterator Insert = Entry.front();
699     DebugLoc DL = Insert->getDebugLoc();
700 
701     TIDReg = RI.findUnusedRegister(MF->getRegInfo(), &AMDGPU::VGPR_32RegClass,
702                                    *MF);
703     if (TIDReg == AMDGPU::NoRegister)
704       return TIDReg;
705 
706     if (!AMDGPU::isShader(MF->getFunction()->getCallingConv()) &&
707         WorkGroupSize > WavefrontSize) {
708 
709       unsigned TIDIGXReg
710         = TRI->getPreloadedValue(*MF, SIRegisterInfo::WORKGROUP_ID_X);
711       unsigned TIDIGYReg
712         = TRI->getPreloadedValue(*MF, SIRegisterInfo::WORKGROUP_ID_Y);
713       unsigned TIDIGZReg
714         = TRI->getPreloadedValue(*MF, SIRegisterInfo::WORKGROUP_ID_Z);
715       unsigned InputPtrReg =
716           TRI->getPreloadedValue(*MF, SIRegisterInfo::KERNARG_SEGMENT_PTR);
717       for (unsigned Reg : {TIDIGXReg, TIDIGYReg, TIDIGZReg}) {
718         if (!Entry.isLiveIn(Reg))
719           Entry.addLiveIn(Reg);
720       }
721 
722       RS->enterBasicBlock(Entry);
723       // FIXME: Can we scavenge an SReg_64 and access the subregs?
724       unsigned STmp0 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
725       unsigned STmp1 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
726       BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp0)
727               .addReg(InputPtrReg)
728               .addImm(SI::KernelInputOffsets::NGROUPS_Z);
729       BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp1)
730               .addReg(InputPtrReg)
731               .addImm(SI::KernelInputOffsets::NGROUPS_Y);
732 
733       // NGROUPS.X * NGROUPS.Y
734       BuildMI(Entry, Insert, DL, get(AMDGPU::S_MUL_I32), STmp1)
735               .addReg(STmp1)
736               .addReg(STmp0);
737       // (NGROUPS.X * NGROUPS.Y) * TIDIG.X
738       BuildMI(Entry, Insert, DL, get(AMDGPU::V_MUL_U32_U24_e32), TIDReg)
739               .addReg(STmp1)
740               .addReg(TIDIGXReg);
741       // NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)
742       BuildMI(Entry, Insert, DL, get(AMDGPU::V_MAD_U32_U24), TIDReg)
743               .addReg(STmp0)
744               .addReg(TIDIGYReg)
745               .addReg(TIDReg);
746       // (NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)) + TIDIG.Z
747       BuildMI(Entry, Insert, DL, get(AMDGPU::V_ADD_I32_e32), TIDReg)
748               .addReg(TIDReg)
749               .addReg(TIDIGZReg);
750     } else {
751       // Get the wave id
752       BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_LO_U32_B32_e64),
753               TIDReg)
754               .addImm(-1)
755               .addImm(0);
756 
757       BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_HI_U32_B32_e64),
758               TIDReg)
759               .addImm(-1)
760               .addReg(TIDReg);
761     }
762 
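    // Scale the computed thread index by 4 so TIDReg holds a byte offset
    // (one dword per lane).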
763     BuildMI(Entry, Insert, DL, get(AMDGPU::V_LSHLREV_B32_e32),
764             TIDReg)
765             .addImm(2)
766             .addReg(TIDReg);
767     MFI->setTIDReg(TIDReg);
768   }
769 
770   // Add FrameIndex to LDS offset
771   unsigned LDSOffset = MFI->getLDSSize() + (FrameOffset * WorkGroupSize);
772   BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), TmpReg)
773           .addImm(LDSOffset)
774           .addReg(TIDReg);
775 
776   return TmpReg;
777 }
778 
779 void SIInstrInfo::insertWaitStates(MachineBasicBlock &MBB,
780                                    MachineBasicBlock::iterator MI,
781                                    int Count) const {
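  // Each s_nop N provides N + 1 wait states, so e.g. Count = 10 expands to
  // s_nop 7 followed by s_nop 1.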
782   DebugLoc DL = MBB.findDebugLoc(MI);
783   while (Count > 0) {
784     int Arg;
785     if (Count >= 8)
786       Arg = 7;
787     else
788       Arg = Count - 1;
789     Count -= 8;
790     BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP))
791             .addImm(Arg);
792   }
793 }
794 
795 void SIInstrInfo::insertNoop(MachineBasicBlock &MBB,
796                              MachineBasicBlock::iterator MI) const {
797   insertWaitStates(MBB, MI, 1);
798 }
799 
800 unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) const {
801   switch (MI.getOpcode()) {
802   default: return 1; // FIXME: Do wait states equal cycles?
803 
804   case AMDGPU::S_NOP:
805     return MI.getOperand(0).getImm() + 1;
806   }
807 }
808 
809 bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
810   MachineBasicBlock &MBB = *MI.getParent();
811   DebugLoc DL = MBB.findDebugLoc(MI);
812   switch (MI.getOpcode()) {
813   default: return AMDGPUInstrInfo::expandPostRAPseudo(MI);
814   case AMDGPU::S_MOV_B64_term: {
815     // This is only a terminator to get the correct spill code placement during
816     // register allocation.
817     MI.setDesc(get(AMDGPU::S_MOV_B64));
818     break;
819   }
820   case AMDGPU::S_XOR_B64_term: {
821     // This is only a terminator to get the correct spill code placement during
822     // register allocation.
823     MI.setDesc(get(AMDGPU::S_XOR_B64));
824     break;
825   }
826   case AMDGPU::S_ANDN2_B64_term: {
827     // This is only a terminator to get the correct spill code placement during
828     // register allocation.
829     MI.setDesc(get(AMDGPU::S_ANDN2_B64));
830     break;
831   }
832   case AMDGPU::V_MOV_B64_PSEUDO: {
833     unsigned Dst = MI.getOperand(0).getReg();
834     unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
835     unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
836 
837     const MachineOperand &SrcOp = MI.getOperand(1);
838     // FIXME: Will this work for 64-bit floating point immediates?
839     assert(!SrcOp.isFPImm());
840     if (SrcOp.isImm()) {
841       APInt Imm(64, SrcOp.getImm());
842       BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
843         .addImm(Imm.getLoBits(32).getZExtValue())
844         .addReg(Dst, RegState::Implicit | RegState::Define);
845       BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
846         .addImm(Imm.getHiBits(32).getZExtValue())
847         .addReg(Dst, RegState::Implicit | RegState::Define);
848     } else {
849       assert(SrcOp.isReg());
850       BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
851         .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
852         .addReg(Dst, RegState::Implicit | RegState::Define);
853       BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
854         .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
855         .addReg(Dst, RegState::Implicit | RegState::Define);
856     }
857     MI.eraseFromParent();
858     break;
859   }
860   case AMDGPU::V_MOVRELD_B32_V1:
861   case AMDGPU::V_MOVRELD_B32_V2:
862   case AMDGPU::V_MOVRELD_B32_V4:
863   case AMDGPU::V_MOVRELD_B32_V8:
864   case AMDGPU::V_MOVRELD_B32_V16: {
865     const MCInstrDesc &MovRelDesc = get(AMDGPU::V_MOVRELD_B32_e32);
866     unsigned VecReg = MI.getOperand(0).getReg();
867     bool IsUndef = MI.getOperand(1).isUndef();
868     unsigned SubReg = AMDGPU::sub0 + MI.getOperand(3).getImm();
869     assert(VecReg == MI.getOperand(1).getReg());
870 
871     MachineInstr *MovRel =
872         BuildMI(MBB, MI, DL, MovRelDesc)
873             .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
874             .add(MI.getOperand(2))
875             .addReg(VecReg, RegState::ImplicitDefine)
876             .addReg(VecReg,
877                     RegState::Implicit | (IsUndef ? RegState::Undef : 0));
878 
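    // Tie the implicit def of VecReg to its implicit use so the indexed write
    // is modeled as a read-modify-write of the whole vector register.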
879     const int ImpDefIdx =
880         MovRelDesc.getNumOperands() + MovRelDesc.getNumImplicitUses();
881     const int ImpUseIdx = ImpDefIdx + 1;
882     MovRel->tieOperands(ImpDefIdx, ImpUseIdx);
883 
884     MI.eraseFromParent();
885     break;
886   }
887   case AMDGPU::SI_PC_ADD_REL_OFFSET: {
888     MachineFunction &MF = *MBB.getParent();
889     unsigned Reg = MI.getOperand(0).getReg();
890     unsigned RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
891     unsigned RegHi = RI.getSubReg(Reg, AMDGPU::sub1);
892 
893     // Create a bundle so these instructions won't be re-ordered by the
894     // post-RA scheduler.
895     MIBundleBuilder Bundler(MBB, MI);
896     Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg));
897 
898     // Add 32-bit offset from this instruction to the start of the
899     // constant data.
900     Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo)
901                        .addReg(RegLo)
902                        .add(MI.getOperand(1)));
903 
904     MachineInstrBuilder MIB = BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi)
905                                   .addReg(RegHi);
906     if (MI.getOperand(2).getTargetFlags() == SIInstrInfo::MO_NONE)
907       MIB.addImm(0);
908     else
909       MIB.add(MI.getOperand(2));
910 
911     Bundler.append(MIB);
912     llvm::finalizeBundle(MBB, Bundler.begin());
913 
914     MI.eraseFromParent();
915     break;
916   }
917   }
918   return true;
919 }
920 
921 bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI,
922                                       MachineOperand &Src0,
923                                       unsigned Src0OpName,
924                                       MachineOperand &Src1,
925                                       unsigned Src1OpName) const {
926   MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName);
927   if (!Src0Mods)
928     return false;
929 
930   MachineOperand *Src1Mods = getNamedOperand(MI, Src1OpName);
931   assert(Src1Mods &&
932          "All commutable instructions have both src0 and src1 modifiers");
933 
934   int Src0ModsVal = Src0Mods->getImm();
935   int Src1ModsVal = Src1Mods->getImm();
936 
937   Src1Mods->setImm(Src0ModsVal);
938   Src0Mods->setImm(Src1ModsVal);
939   return true;
940 }
941 
942 static MachineInstr *swapRegAndNonRegOperand(MachineInstr &MI,
943                                              MachineOperand &RegOp,
944                                              MachineOperand &NonRegOp) {
945   unsigned Reg = RegOp.getReg();
946   unsigned SubReg = RegOp.getSubReg();
947   bool IsKill = RegOp.isKill();
948   bool IsDead = RegOp.isDead();
949   bool IsUndef = RegOp.isUndef();
950   bool IsDebug = RegOp.isDebug();
951 
952   if (NonRegOp.isImm())
953     RegOp.ChangeToImmediate(NonRegOp.getImm());
954   else if (NonRegOp.isFI())
955     RegOp.ChangeToFrameIndex(NonRegOp.getIndex());
956   else
957     return nullptr;
958 
959   NonRegOp.ChangeToRegister(Reg, false, false, IsKill, IsDead, IsUndef, IsDebug);
960   NonRegOp.setSubReg(SubReg);
961 
962   return &MI;
963 }
964 
965 MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
966                                                   unsigned Src0Idx,
967                                                   unsigned Src1Idx) const {
968   assert(!NewMI && "this should never be used");
969 
970   unsigned Opc = MI.getOpcode();
971   int CommutedOpcode = commuteOpcode(Opc);
972   if (CommutedOpcode == -1)
973     return nullptr;
974 
975   assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) ==
976            static_cast<int>(Src0Idx) &&
977          AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) ==
978            static_cast<int>(Src1Idx) &&
979          "inconsistency with findCommutedOpIndices");
980 
981   MachineOperand &Src0 = MI.getOperand(Src0Idx);
982   MachineOperand &Src1 = MI.getOperand(Src1Idx);
983 
984   MachineInstr *CommutedMI = nullptr;
985   if (Src0.isReg() && Src1.isReg()) {
986     if (isOperandLegal(MI, Src1Idx, &Src0)) {
987       // Be sure to copy the source modifiers to the right place.
988       CommutedMI
989         = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, Src0Idx, Src1Idx);
990     }
991 
992   } else if (Src0.isReg() && !Src1.isReg()) {
993     // src0 should always be able to support any operand type, so no need to
994     // check operand legality.
995     CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1);
996   } else if (!Src0.isReg() && Src1.isReg()) {
997     if (isOperandLegal(MI, Src1Idx, &Src0))
998       CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0);
999   } else {
1000     // FIXME: Found two non-register operands to commute. This does happen.
1001     return nullptr;
1002   }
1003 
1004 
1005   if (CommutedMI) {
1006     swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers,
1007                         Src1, AMDGPU::OpName::src1_modifiers);
1008 
1009     CommutedMI->setDesc(get(CommutedOpcode));
1010   }
1011 
1012   return CommutedMI;
1013 }
1014 
1015 // This needs to be implemented because the source modifier operands may be
1016 // inserted between the true commutable operands, and the base
1017 // TargetInstrInfo::commuteInstruction uses this routine to find them.
1018 bool SIInstrInfo::findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx0,
1019                                         unsigned &SrcOpIdx1) const {
1020   if (!MI.isCommutable())
1021     return false;
1022 
1023   unsigned Opc = MI.getOpcode();
1024   int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
1025   if (Src0Idx == -1)
1026     return false;
1027 
1028   int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
1029   if (Src1Idx == -1)
1030     return false;
1031 
1032   return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx);
1033 }
1034 
1035 bool SIInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
1036                                         int64_t BrOffset) const {
1037   // BranchRelaxation should never have to check s_setpc_b64 because its dest
1038   // block is unanalyzable.
1039   assert(BranchOp != AMDGPU::S_SETPC_B64);
1040 
1041   // Convert to dwords.
1042   BrOffset /= 4;
1043 
1044   // The branch instructions do PC += signext(SIMM16 * 4) + 4, so the offset is
1045   // from the next instruction.
1046   BrOffset -= 1;
1047 
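  // With the default of 16 offset bits this allows a range of roughly
  // +/-128 KiB around the branch.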
1048   return isIntN(BranchOffsetBits, BrOffset);
1049 }
1050 
1051 MachineBasicBlock *SIInstrInfo::getBranchDestBlock(
1052   const MachineInstr &MI) const {
1053   if (MI.getOpcode() == AMDGPU::S_SETPC_B64) {
1054     // This would be a difficult analysis to perform, but such a branch is
1055     // always legal, so there's no need to analyze it.
1056     return nullptr;
1057   }
1058 
1059   return MI.getOperand(0).getMBB();
1060 }
1061 
1062 unsigned SIInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
1063                                            MachineBasicBlock &DestBB,
1064                                            const DebugLoc &DL,
1065                                            int64_t BrOffset,
1066                                            RegScavenger *RS) const {
1067   assert(RS && "RegScavenger required for long branching");
1068   assert(MBB.empty() &&
1069          "new block should be inserted for expanding unconditional branch");
1070   assert(MBB.pred_size() == 1);
1071 
1072   MachineFunction *MF = MBB.getParent();
1073   MachineRegisterInfo &MRI = MF->getRegInfo();
1074 
1075   // FIXME: Virtual register workaround for RegScavenger not working with empty
1076   // blocks.
1077   unsigned PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
1078 
1079   auto I = MBB.end();
1080 
1081   // We need to compute the offset relative to the instruction immediately after
1082   // s_getpc_b64. Insert pc arithmetic code before last terminator.
1083   MachineInstr *GetPC = BuildMI(MBB, I, DL, get(AMDGPU::S_GETPC_B64), PCReg);
1084 
1085   // TODO: Handle > 32-bit block address.
1086   if (BrOffset >= 0) {
1087     BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_U32))
1088       .addReg(PCReg, RegState::Define, AMDGPU::sub0)
1089       .addReg(PCReg, 0, AMDGPU::sub0)
1090       .addMBB(&DestBB, AMDGPU::TF_LONG_BRANCH_FORWARD);
1091     BuildMI(MBB, I, DL, get(AMDGPU::S_ADDC_U32))
1092       .addReg(PCReg, RegState::Define, AMDGPU::sub1)
1093       .addReg(PCReg, 0, AMDGPU::sub1)
1094       .addImm(0);
1095   } else {
1096     // Backwards branch.
1097     BuildMI(MBB, I, DL, get(AMDGPU::S_SUB_U32))
1098       .addReg(PCReg, RegState::Define, AMDGPU::sub0)
1099       .addReg(PCReg, 0, AMDGPU::sub0)
1100       .addMBB(&DestBB, AMDGPU::TF_LONG_BRANCH_BACKWARD);
1101     BuildMI(MBB, I, DL, get(AMDGPU::S_SUBB_U32))
1102       .addReg(PCReg, RegState::Define, AMDGPU::sub1)
1103       .addReg(PCReg, 0, AMDGPU::sub1)
1104       .addImm(0);
1105   }
1106 
1107   // Insert the indirect branch after the other terminator.
1108   BuildMI(&MBB, DL, get(AMDGPU::S_SETPC_B64))
1109     .addReg(PCReg);
1110 
1111   // FIXME: If spilling is necessary, this will fail because this scavenger has
1112   // no emergency stack slots. It is non-trivial to spill in this situation,
1113   // because the restore code needs to be specially placed after the
1114   // jump. BranchRelaxation then needs to be made aware of the newly inserted
1115   // block.
1116   //
1117   // If a spill is needed for the pc register pair, we need to insert a spill
1118   // restore block right before the destination block, and insert a short branch
1119   // into the old destination block's fallthrough predecessor.
1120   // e.g.:
1121   //
1122   // s_cbranch_scc0 skip_long_branch:
1123   //
1124   // long_branch_bb:
1125   //   spill s[8:9]
1126   //   s_getpc_b64 s[8:9]
1127   //   s_add_u32 s8, s8, restore_bb
1128   //   s_addc_u32 s9, s9, 0
1129   //   s_setpc_b64 s[8:9]
1130   //
1131   // skip_long_branch:
1132   //   foo;
1133   //
1134   // .....
1135   //
1136   // dest_bb_fallthrough_predecessor:
1137   //   bar;
1138   //   s_branch dest_bb
1139   //
1140   // restore_bb:
1141   //   restore s[8:9]
1142   //   fallthrough dest_bb
1143   //
1144   // dest_bb:
1145   //   buzz;
1146 
1147   RS->enterBasicBlockEnd(MBB);
1148   unsigned Scav = RS->scavengeRegister(&AMDGPU::SReg_64RegClass,
1149                                        MachineBasicBlock::iterator(GetPC), 0);
1150   MRI.replaceRegWith(PCReg, Scav);
1151   MRI.clearVirtRegs();
1152   RS->setRegUsed(Scav);
1153 
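  // The constants below correspond to s_getpc_b64 (4 bytes), s_add_u32 with a
  // 32-bit literal (8 bytes), s_addc_u32 (4 bytes) and s_setpc_b64 (4 bytes).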
1154   return 4 + 8 + 4 + 4;
1155 }
1156 
1157 unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) {
1158   switch (Cond) {
1159   case SIInstrInfo::SCC_TRUE:
1160     return AMDGPU::S_CBRANCH_SCC1;
1161   case SIInstrInfo::SCC_FALSE:
1162     return AMDGPU::S_CBRANCH_SCC0;
1163   case SIInstrInfo::VCCNZ:
1164     return AMDGPU::S_CBRANCH_VCCNZ;
1165   case SIInstrInfo::VCCZ:
1166     return AMDGPU::S_CBRANCH_VCCZ;
1167   case SIInstrInfo::EXECNZ:
1168     return AMDGPU::S_CBRANCH_EXECNZ;
1169   case SIInstrInfo::EXECZ:
1170     return AMDGPU::S_CBRANCH_EXECZ;
1171   default:
1172     llvm_unreachable("invalid branch predicate");
1173   }
1174 }
1175 
1176 SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) {
1177   switch (Opcode) {
1178   case AMDGPU::S_CBRANCH_SCC0:
1179     return SCC_FALSE;
1180   case AMDGPU::S_CBRANCH_SCC1:
1181     return SCC_TRUE;
1182   case AMDGPU::S_CBRANCH_VCCNZ:
1183     return VCCNZ;
1184   case AMDGPU::S_CBRANCH_VCCZ:
1185     return VCCZ;
1186   case AMDGPU::S_CBRANCH_EXECNZ:
1187     return EXECNZ;
1188   case AMDGPU::S_CBRANCH_EXECZ:
1189     return EXECZ;
1190   default:
1191     return INVALID_BR;
1192   }
1193 }
1194 
1195 bool SIInstrInfo::analyzeBranchImpl(MachineBasicBlock &MBB,
1196                                     MachineBasicBlock::iterator I,
1197                                     MachineBasicBlock *&TBB,
1198                                     MachineBasicBlock *&FBB,
1199                                     SmallVectorImpl<MachineOperand> &Cond,
1200                                     bool AllowModify) const {
1201   if (I->getOpcode() == AMDGPU::S_BRANCH) {
1202     // Unconditional Branch
1203     TBB = I->getOperand(0).getMBB();
1204     return false;
1205   }
1206 
1207   BranchPredicate Pred = getBranchPredicate(I->getOpcode());
1208   if (Pred == INVALID_BR)
1209     return true;
1210 
1211   MachineBasicBlock *CondBB = I->getOperand(0).getMBB();
1212   Cond.push_back(MachineOperand::CreateImm(Pred));
1213   Cond.push_back(I->getOperand(1)); // Save the branch register.
1214 
1215   ++I;
1216 
1217   if (I == MBB.end()) {
1218     // Conditional branch followed by fall-through.
1219     TBB = CondBB;
1220     return false;
1221   }
1222 
1223   if (I->getOpcode() == AMDGPU::S_BRANCH) {
1224     TBB = CondBB;
1225     FBB = I->getOperand(0).getMBB();
1226     return false;
1227   }
1228 
1229   return true;
1230 }
1231 
1232 bool SIInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
1233                                 MachineBasicBlock *&FBB,
1234                                 SmallVectorImpl<MachineOperand> &Cond,
1235                                 bool AllowModify) const {
1236   MachineBasicBlock::iterator I = MBB.getFirstTerminator();
1237   if (I == MBB.end())
1238     return false;
1239 
1240   if (I->getOpcode() != AMDGPU::SI_MASK_BRANCH)
1241     return analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify);
1242 
1243   ++I;
1244 
1245   // TODO: Should be able to treat as fallthrough?
1246   if (I == MBB.end())
1247     return true;
1248 
1249   if (analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify))
1250     return true;
1251 
1252   MachineBasicBlock *MaskBrDest = I->getOperand(0).getMBB();
1253 
1254   // Specifically handle the case where the conditional branch is to the same
1255   // destination as the mask branch. e.g.
1256   //
1257   // si_mask_branch BB8
1258   // s_cbranch_execz BB8
1259   // s_cbranch BB9
1260   //
1261   // This is required to understand divergent loops which may need the branches
1262   // to be relaxed.
1263   if (TBB != MaskBrDest || Cond.empty())
1264     return true;
1265 
1266   auto Pred = Cond[0].getImm();
1267   return (Pred != EXECZ && Pred != EXECNZ);
1268 }
1269 
1270 unsigned SIInstrInfo::removeBranch(MachineBasicBlock &MBB,
1271                                    int *BytesRemoved) const {
1272   MachineBasicBlock::iterator I = MBB.getFirstTerminator();
1273 
1274   unsigned Count = 0;
1275   unsigned RemovedSize = 0;
1276   while (I != MBB.end()) {
1277     MachineBasicBlock::iterator Next = std::next(I);
1278     if (I->getOpcode() == AMDGPU::SI_MASK_BRANCH) {
1279       I = Next;
1280       continue;
1281     }
1282 
1283     RemovedSize += getInstSizeInBytes(*I);
1284     I->eraseFromParent();
1285     ++Count;
1286     I = Next;
1287   }
1288 
1289   if (BytesRemoved)
1290     *BytesRemoved = RemovedSize;
1291 
1292   return Count;
1293 }
1294 
1295 // Copy the flags onto the implicit condition register operand.
1296 static void preserveCondRegFlags(MachineOperand &CondReg,
1297                                  const MachineOperand &OrigCond) {
1298   CondReg.setIsUndef(OrigCond.isUndef());
1299   CondReg.setIsKill(OrigCond.isKill());
1300 }
1301 
1302 unsigned SIInstrInfo::insertBranch(MachineBasicBlock &MBB,
1303                                    MachineBasicBlock *TBB,
1304                                    MachineBasicBlock *FBB,
1305                                    ArrayRef<MachineOperand> Cond,
1306                                    const DebugLoc &DL,
1307                                    int *BytesAdded) const {
1308 
1309   if (!FBB && Cond.empty()) {
1310     BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
1311       .addMBB(TBB);
1312     if (BytesAdded)
1313       *BytesAdded = 4;
1314     return 1;
1315   }
1316 
1317   assert(TBB && Cond[0].isImm());
1318 
1319   unsigned Opcode
1320     = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm()));
1321 
1322   if (!FBB) {
1324     MachineInstr *CondBr =
1325       BuildMI(&MBB, DL, get(Opcode))
1326       .addMBB(TBB);
1327 
1328     // Copy the flags onto the implicit condition register operand.
1329     preserveCondRegFlags(CondBr->getOperand(1), Cond[1]);
1330 
1331     if (BytesAdded)
1332       *BytesAdded = 4;
1333     return 1;
1334   }
1335 
1336   assert(TBB && FBB);
1337 
1338   MachineInstr *CondBr =
1339     BuildMI(&MBB, DL, get(Opcode))
1340     .addMBB(TBB);
1341   BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
1342     .addMBB(FBB);
1343 
1344   preserveCondRegFlags(CondBr->getOperand(1), Cond[1]);
1347 
1348   if (BytesAdded)
1349       *BytesAdded = 8;
1350 
1351   return 2;
1352 }
1353 
1354 bool SIInstrInfo::reverseBranchCondition(
1355   SmallVectorImpl<MachineOperand> &Cond) const {
1356   assert(Cond.size() == 2);
1357   Cond[0].setImm(-Cond[0].getImm());
1358   return false;
1359 }
1360 
1361 bool SIInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
1362                                   ArrayRef<MachineOperand> Cond,
1363                                   unsigned TrueReg, unsigned FalseReg,
1364                                   int &CondCycles,
1365                                   int &TrueCycles, int &FalseCycles) const {
1366   switch (Cond[0].getImm()) {
1367   case VCCNZ:
1368   case VCCZ: {
1369     const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
1370     const TargetRegisterClass *RC = MRI.getRegClass(TrueReg);
1371     assert(MRI.getRegClass(FalseReg) == RC);
1372 
1373     int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32;
1374     CondCycles = TrueCycles = FalseCycles = NumInsts; // ???
1375 
1376     // Limit to equal cost for branch vs. N v_cndmask_b32s.
1377     return !RI.isSGPRClass(RC) && NumInsts <= 6;
1378   }
1379   case SCC_TRUE:
1380   case SCC_FALSE: {
1381     // FIXME: We could insert for VGPRs if we could replace the original compare
1382     // with a vector one.
1383     const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
1384     const TargetRegisterClass *RC = MRI.getRegClass(TrueReg);
1385     assert(MRI.getRegClass(FalseReg) == RC);
1386 
1387     int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32;
1388 
1389     // Multiples of 8 bytes can use s_cselect_b64, halving the count.
1390     if (NumInsts % 2 == 0)
1391       NumInsts /= 2;
1392 
1393     CondCycles = TrueCycles = FalseCycles = NumInsts; // ???
1394     return RI.isSGPRClass(RC);
1395   }
1396   default:
1397     return false;
1398   }
1399 }
1400 
1401 void SIInstrInfo::insertSelect(MachineBasicBlock &MBB,
1402                                MachineBasicBlock::iterator I, const DebugLoc &DL,
1403                                unsigned DstReg, ArrayRef<MachineOperand> Cond,
1404                                unsigned TrueReg, unsigned FalseReg) const {
1405   BranchPredicate Pred = static_cast<BranchPredicate>(Cond[0].getImm());
1406   if (Pred == VCCZ || Pred == SCC_FALSE) {
1407     Pred = static_cast<BranchPredicate>(-Pred);
1408     std::swap(TrueReg, FalseReg);
1409   }
1410 
1411   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
1412   const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg);
1413   unsigned DstSize = DstRC->getSize();
1414 
1415   if (DstSize == 4) {
1416     unsigned SelOp = Pred == SCC_TRUE ?
1417       AMDGPU::S_CSELECT_B32 : AMDGPU::V_CNDMASK_B32_e32;
1418 
1419     // Instruction's operands are backwards from what is expected.
1420     MachineInstr *Select =
1421       BuildMI(MBB, I, DL, get(SelOp), DstReg)
1422       .addReg(FalseReg)
1423       .addReg(TrueReg);
1424 
1425     preserveCondRegFlags(Select->getOperand(3), Cond[1]);
1426     return;
1427   }
1428 
1429   if (DstSize == 8 && Pred == SCC_TRUE) {
1430     MachineInstr *Select =
1431       BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), DstReg)
1432       .addReg(FalseReg)
1433       .addReg(TrueReg);
1434 
1435     preserveCondRegFlags(Select->getOperand(3), Cond[1]);
1436     return;
1437   }
1438 
1439   static const int16_t Sub0_15[] = {
1440     AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
1441     AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
1442     AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
1443     AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15,
1444   };
1445 
1446   static const int16_t Sub0_15_64[] = {
1447     AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
1448     AMDGPU::sub4_sub5, AMDGPU::sub6_sub7,
1449     AMDGPU::sub8_sub9, AMDGPU::sub10_sub11,
1450     AMDGPU::sub12_sub13, AMDGPU::sub14_sub15,
1451   };
1452 
1453   unsigned SelOp = AMDGPU::V_CNDMASK_B32_e32;
1454   const TargetRegisterClass *EltRC = &AMDGPU::VGPR_32RegClass;
1455   const int16_t *SubIndices = Sub0_15;
1456   int NElts = DstSize / 4;
1457 
1458   // 64-bit select is only available for SALU.
1459   if (Pred == SCC_TRUE) {
1460     SelOp = AMDGPU::S_CSELECT_B64;
1461     EltRC = &AMDGPU::SGPR_64RegClass;
1462     SubIndices = Sub0_15_64;
1463 
1464     assert(NElts % 2 == 0);
1465     NElts /= 2;
1466   }
1467 
1468   MachineInstrBuilder MIB = BuildMI(
1469     MBB, I, DL, get(AMDGPU::REG_SEQUENCE), DstReg);
1470 
1471   I = MIB->getIterator();
1472 
1473   SmallVector<unsigned, 8> Regs;
1474   for (int Idx = 0; Idx != NElts; ++Idx) {
1475     unsigned DstElt = MRI.createVirtualRegister(EltRC);
1476     Regs.push_back(DstElt);
1477 
1478     unsigned SubIdx = SubIndices[Idx];
1479 
1480     MachineInstr *Select =
1481       BuildMI(MBB, I, DL, get(SelOp), DstElt)
1482       .addReg(FalseReg, 0, SubIdx)
1483       .addReg(TrueReg, 0, SubIdx);
1484     preserveCondRegFlags(Select->getOperand(3), Cond[1]);
1485 
1486     MIB.addReg(DstElt)
1487        .addImm(SubIdx);
1488   }
1489 }
1490 
1491 static void removeModOperands(MachineInstr &MI) {
1492   unsigned Opc = MI.getOpcode();
1493   int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc,
1494                                               AMDGPU::OpName::src0_modifiers);
1495   int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc,
1496                                               AMDGPU::OpName::src1_modifiers);
1497   int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc,
1498                                               AMDGPU::OpName::src2_modifiers);
1499 
1500   MI.RemoveOperand(Src2ModIdx);
1501   MI.RemoveOperand(Src1ModIdx);
1502   MI.RemoveOperand(Src0ModIdx);
1503 }
1504 
1505 bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
1506                                 unsigned Reg, MachineRegisterInfo *MRI) const {
1507   if (!MRI->hasOneNonDBGUse(Reg))
1508     return false;
1509 
1510   unsigned Opc = UseMI.getOpcode();
1511   if (Opc == AMDGPU::COPY) {
1512     bool isVGPRCopy = RI.isVGPR(*MRI, UseMI.getOperand(0).getReg());
1513     switch (DefMI.getOpcode()) {
1514     default:
1515       return false;
1516     case AMDGPU::S_MOV_B64:
1517       // TODO: We could fold 64-bit immediates, but this gets complicated
1518       // when there are sub-registers.
1519       return false;
1520 
1521     case AMDGPU::V_MOV_B32_e32:
1522     case AMDGPU::S_MOV_B32:
1523       break;
1524     }
1525     unsigned NewOpc = isVGPRCopy ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1526     const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0);
1527     assert(ImmOp);
1528     // FIXME: We could handle FrameIndex values here.
1529     if (!ImmOp->isImm()) {
1530       return false;
1531     }
1532     UseMI.setDesc(get(NewOpc));
1533     UseMI.getOperand(1).ChangeToImmediate(ImmOp->getImm());
1534     UseMI.addImplicitDefUseOperands(*UseMI.getParent()->getParent());
1535     return true;
1536   }
1537 
1538   if (Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64 ||
1539       Opc == AMDGPU::V_MAD_F16 || Opc == AMDGPU::V_MAC_F16_e64) {
1540     // Don't fold if we are using source or output modifiers. The new VOP2
1541     // instructions don't have them.
1542     if (hasAnyModifiersSet(UseMI))
1543       return false;
1544 
1545     const MachineOperand &ImmOp = DefMI.getOperand(1);
1546 
1547     // If this is a free constant, there's no reason to do this.
1548     // TODO: We could fold this here instead of letting SIFoldOperands do it
1549     // later.
1550     MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0);
1551 
1552     // Any src operand can be used for the legality check.
1553     if (isInlineConstant(UseMI, *Src0, ImmOp))
1554       return false;
1555 
1556     bool IsF32 = Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64;
1557     MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1);
1558     MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2);
1559 
1560     // Multiplied part is the constant: Use v_madmk_{f16, f32}.
1561     // We should only expect these to be on src0 due to canonicalizations.
1562     if (Src0->isReg() && Src0->getReg() == Reg) {
1563       if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))
1564         return false;
1565 
1566       if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg())))
1567         return false;
1568 
1569       // We need to swap operands 0 and 1 since madmk constant is at operand 1.
1570 
1571       const int64_t Imm = DefMI.getOperand(1).getImm();
1572 
1573       // FIXME: This would be a lot easier if we could return a new instruction
1574       // instead of having to modify in place.
1575 
1576       // Remove these first since they are at the end.
1577       UseMI.RemoveOperand(
1578           AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod));
1579       UseMI.RemoveOperand(
1580           AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp));
1581 
1582       unsigned Src1Reg = Src1->getReg();
1583       unsigned Src1SubReg = Src1->getSubReg();
1584       Src0->setReg(Src1Reg);
1585       Src0->setSubReg(Src1SubReg);
1586       Src0->setIsKill(Src1->isKill());
1587 
1588       if (Opc == AMDGPU::V_MAC_F32_e64 ||
1589           Opc == AMDGPU::V_MAC_F16_e64)
1590         UseMI.untieRegOperand(
1591             AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
1592 
1593       Src1->ChangeToImmediate(Imm);
1594 
1595       removeModOperands(UseMI);
1596       UseMI.setDesc(get(IsF32 ? AMDGPU::V_MADMK_F32 : AMDGPU::V_MADMK_F16));
1597 
1598       bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
1599       if (DeleteDef)
1600         DefMI.eraseFromParent();
1601 
1602       return true;
1603     }
1604 
1605     // Added part is the constant: Use v_madak_{f16, f32}.
1606     if (Src2->isReg() && Src2->getReg() == Reg) {
1607       // Not allowed to use constant bus for another operand.
1608       // We can however allow an inline immediate as src0.
1609       if (!Src0->isImm() &&
1610           (Src0->isReg() && RI.isSGPRClass(MRI->getRegClass(Src0->getReg()))))
1611         return false;
1612 
1613       if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))
1614         return false;
1615 
1616       const int64_t Imm = DefMI.getOperand(1).getImm();
1617 
1618       // FIXME: This would be a lot easier if we could return a new instruction
1619       // instead of having to modify in place.
1620 
1621       // Remove these first since they are at the end.
1622       UseMI.RemoveOperand(
1623           AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod));
1624       UseMI.RemoveOperand(
1625           AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp));
1626 
1627       if (Opc == AMDGPU::V_MAC_F32_e64 ||
1628           Opc == AMDGPU::V_MAC_F16_e64)
1629         UseMI.untieRegOperand(
1630             AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
1631 
      // ChangeToImmediate adds Src2 back to the instruction.
1633       Src2->ChangeToImmediate(Imm);
1634 
1635       // These come before src2.
1636       removeModOperands(UseMI);
1637       UseMI.setDesc(get(IsF32 ? AMDGPU::V_MADAK_F32 : AMDGPU::V_MADAK_F16));
1638 
1639       bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
1640       if (DeleteDef)
1641         DefMI.eraseFromParent();
1642 
1643       return true;
1644     }
1645   }
1646 
1647   return false;
1648 }
1649 
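// Return true if the byte ranges [OffsetA, OffsetA + WidthA) and
// [OffsetB, OffsetB + WidthB) are known not to overlap.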
1650 static bool offsetsDoNotOverlap(int WidthA, int OffsetA,
1651                                 int WidthB, int OffsetB) {
1652   int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
1653   int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
1654   int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
1655   return LowOffset + LowWidth <= HighOffset;
1656 }
1657 
1658 bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr &MIa,
1659                                                MachineInstr &MIb) const {
1660   unsigned BaseReg0, BaseReg1;
1661   int64_t Offset0, Offset1;
1662 
1663   if (getMemOpBaseRegImmOfs(MIa, BaseReg0, Offset0, &RI) &&
1664       getMemOpBaseRegImmOfs(MIb, BaseReg1, Offset1, &RI)) {
1665 
1666     if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) {
1667       // FIXME: Handle ds_read2 / ds_write2.
1668       return false;
1669     }
1670     unsigned Width0 = (*MIa.memoperands_begin())->getSize();
1671     unsigned Width1 = (*MIb.memoperands_begin())->getSize();
1672     if (BaseReg0 == BaseReg1 &&
1673         offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1)) {
1674       return true;
1675     }
1676   }
1677 
1678   return false;
1679 }
1680 
1681 bool SIInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr &MIa,
1682                                                   MachineInstr &MIb,
1683                                                   AliasAnalysis *AA) const {
1684   assert((MIa.mayLoad() || MIa.mayStore()) &&
1685          "MIa must load from or modify a memory location");
1686   assert((MIb.mayLoad() || MIb.mayStore()) &&
1687          "MIb must load from or modify a memory location");
1688 
1689   if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects())
1690     return false;
1691 
1692   // XXX - Can we relax this between address spaces?
1693   if (MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
1694     return false;
1695 
1696   if (AA && MIa.hasOneMemOperand() && MIb.hasOneMemOperand()) {
1697     const MachineMemOperand *MMOa = *MIa.memoperands_begin();
1698     const MachineMemOperand *MMOb = *MIb.memoperands_begin();
1699     if (MMOa->getValue() && MMOb->getValue()) {
1700       MemoryLocation LocA(MMOa->getValue(), MMOa->getSize(), MMOa->getAAInfo());
1701       MemoryLocation LocB(MMOb->getValue(), MMOb->getSize(), MMOb->getAAInfo());
1702       if (!AA->alias(LocA, LocB))
1703         return true;
1704     }
1705   }
1706 
1707   // TODO: Should we check the address space from the MachineMemOperand? That
1708   // would allow us to distinguish objects we know don't alias based on the
1709   // underlying address space, even if it was lowered to a different one,
1710   // e.g. private accesses lowered to use MUBUF instructions on a scratch
1711   // buffer.
1712   if (isDS(MIa)) {
1713     if (isDS(MIb))
1714       return checkInstOffsetsDoNotOverlap(MIa, MIb);
1715 
1716     return !isFLAT(MIb);
1717   }
1718 
1719   if (isMUBUF(MIa) || isMTBUF(MIa)) {
1720     if (isMUBUF(MIb) || isMTBUF(MIb))
1721       return checkInstOffsetsDoNotOverlap(MIa, MIb);
1722 
1723     return !isFLAT(MIb) && !isSMRD(MIb);
1724   }
1725 
1726   if (isSMRD(MIa)) {
1727     if (isSMRD(MIb))
1728       return checkInstOffsetsDoNotOverlap(MIa, MIb);
1729 
    return !isFLAT(MIb) && !isMUBUF(MIb) && !isMTBUF(MIb);
1731   }
1732 
1733   if (isFLAT(MIa)) {
1734     if (isFLAT(MIb))
1735       return checkInstOffsetsDoNotOverlap(MIa, MIb);
1736 
1737     return false;
1738   }
1739 
1740   return false;
1741 }
1742 
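// Convert a two-address V_MAC_{F16,F32} into the three-address V_MAD_{F16,F32}
// form, so the accumulator no longer needs to be tied to the destination.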
1743 MachineInstr *SIInstrInfo::convertToThreeAddress(MachineFunction::iterator &MBB,
1744                                                  MachineInstr &MI,
1745                                                  LiveVariables *LV) const {
1746   bool IsF16 = false;
1747 
1748   switch (MI.getOpcode()) {
1749   default:
1750     return nullptr;
1751   case AMDGPU::V_MAC_F16_e64:
    IsF16 = true;
    LLVM_FALLTHROUGH;
1753   case AMDGPU::V_MAC_F32_e64:
1754     break;
1755   case AMDGPU::V_MAC_F16_e32:
    IsF16 = true;
    LLVM_FALLTHROUGH;
1757   case AMDGPU::V_MAC_F32_e32: {
1758     int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
1759                                              AMDGPU::OpName::src0);
1760     const MachineOperand *Src0 = &MI.getOperand(Src0Idx);
1761     if (Src0->isImm() && !isInlineConstant(MI, Src0Idx, *Src0))
1762       return nullptr;
1763     break;
1764   }
1765   }
1766 
1767   const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
1768   const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0);
1769   const MachineOperand *Src0Mods =
1770     getNamedOperand(MI, AMDGPU::OpName::src0_modifiers);
1771   const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
1772   const MachineOperand *Src1Mods =
1773     getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);
1774   const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
1775   const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp);
1776   const MachineOperand *Omod = getNamedOperand(MI, AMDGPU::OpName::omod);
1777 
1778   return BuildMI(*MBB, MI, MI.getDebugLoc(),
1779                  get(IsF16 ? AMDGPU::V_MAD_F16 : AMDGPU::V_MAD_F32))
1780       .add(*Dst)
1781       .addImm(Src0Mods ? Src0Mods->getImm() : 0)
1782       .add(*Src0)
1783       .addImm(Src1Mods ? Src1Mods->getImm() : 0)
1784       .add(*Src1)
1785       .addImm(0) // Src mods
1786       .add(*Src2)
1787       .addImm(Clamp ? Clamp->getImm() : 0)
1788       .addImm(Omod ? Omod->getImm() : 0);
1789 }
1790 
// It's not generally safe to move VALU instructions across these, since the
// VALU will start using the register as a base index rather than accessing it
// directly.
1793 // XXX - Why isn't hasSideEffects sufficient for these?
1794 static bool changesVGPRIndexingMode(const MachineInstr &MI) {
1795   switch (MI.getOpcode()) {
1796   case AMDGPU::S_SET_GPR_IDX_ON:
1797   case AMDGPU::S_SET_GPR_IDX_MODE:
1798   case AMDGPU::S_SET_GPR_IDX_OFF:
1799     return true;
1800   default:
1801     return false;
1802   }
1803 }
1804 
1805 bool SIInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
1806                                        const MachineBasicBlock *MBB,
1807                                        const MachineFunction &MF) const {
1808   // XXX - Do we want the SP check in the base implementation?
1809 
1810   // Target-independent instructions do not have an implicit-use of EXEC, even
1811   // when they operate on VGPRs. Treating EXEC modifications as scheduling
1812   // boundaries prevents incorrect movements of such instructions.
1813   return TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF) ||
1814          MI.modifiesRegister(AMDGPU::EXEC, &RI) ||
1815          MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 ||
1816          MI.getOpcode() == AMDGPU::S_SETREG_B32 ||
1817          changesVGPRIndexingMode(MI);
1818 }
1819 
1820 bool SIInstrInfo::isInlineConstant(const APInt &Imm) const {
1821   switch (Imm.getBitWidth()) {
1822   case 32:
1823     return AMDGPU::isInlinableLiteral32(Imm.getSExtValue(),
1824                                         ST.hasInv2PiInlineImm());
1825   case 64:
1826     return AMDGPU::isInlinableLiteral64(Imm.getSExtValue(),
1827                                         ST.hasInv2PiInlineImm());
1828   case 16:
1829     return ST.has16BitInsts() &&
1830            AMDGPU::isInlinableLiteral16(Imm.getSExtValue(),
1831                                         ST.hasInv2PiInlineImm());
1832   default:
1833     llvm_unreachable("invalid bitwidth");
1834   }
1835 }
1836 
1837 bool SIInstrInfo::isInlineConstant(const MachineOperand &MO,
1838                                    uint8_t OperandType) const {
1839   if (!MO.isImm() || OperandType < MCOI::OPERAND_FIRST_TARGET)
1840     return false;
1841 
1842   // MachineOperand provides no way to tell the true operand size, since it only
1843   // records a 64-bit value. We need to know the size to determine if a 32-bit
1844   // floating point immediate bit pattern is legal for an integer immediate. It
1845   // would be for any 32-bit integer operand, but would not be for a 64-bit one.
1846 
1847   int64_t Imm = MO.getImm();
1848   switch (OperandType) {
1849   case AMDGPU::OPERAND_REG_IMM_INT32:
1850   case AMDGPU::OPERAND_REG_IMM_FP32:
1851   case AMDGPU::OPERAND_REG_INLINE_C_INT32:
1852   case AMDGPU::OPERAND_REG_INLINE_C_FP32: {
1853     int32_t Trunc = static_cast<int32_t>(Imm);
1854     return Trunc == Imm &&
1855            AMDGPU::isInlinableLiteral32(Trunc, ST.hasInv2PiInlineImm());
1856   }
1857   case AMDGPU::OPERAND_REG_IMM_INT64:
1858   case AMDGPU::OPERAND_REG_IMM_FP64:
1859   case AMDGPU::OPERAND_REG_INLINE_C_INT64:
1860   case AMDGPU::OPERAND_REG_INLINE_C_FP64: {
1861     return AMDGPU::isInlinableLiteral64(MO.getImm(),
1862                                         ST.hasInv2PiInlineImm());
1863   }
1864   case AMDGPU::OPERAND_REG_IMM_INT16:
1865   case AMDGPU::OPERAND_REG_IMM_FP16:
1866   case AMDGPU::OPERAND_REG_INLINE_C_INT16:
1867   case AMDGPU::OPERAND_REG_INLINE_C_FP16: {
1868     if (isInt<16>(Imm) || isUInt<16>(Imm)) {
1869       // A few special case instructions have 16-bit operands on subtargets
1870       // where 16-bit instructions are not legal.
      // TODO: Do the 32-bit immediates work? We shouldn't really need to handle
      // constants in these cases.
1873       int16_t Trunc = static_cast<int16_t>(Imm);
1874       return ST.has16BitInsts() &&
1875              AMDGPU::isInlinableLiteral16(Trunc, ST.hasInv2PiInlineImm());
1876     }
1877 
1878     return false;
1879   }
1880   case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
1881   case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: {
1882     uint32_t Trunc = static_cast<uint32_t>(Imm);
    return AMDGPU::isInlinableLiteralV216(Trunc, ST.hasInv2PiInlineImm());
1884   }
1885   default:
1886     llvm_unreachable("invalid bitwidth");
1887   }
1888 }
1889 
1890 bool SIInstrInfo::isLiteralConstantLike(const MachineOperand &MO,
1891                                         const MCOperandInfo &OpInfo) const {
1892   switch (MO.getType()) {
1893   case MachineOperand::MO_Register:
1894     return false;
1895   case MachineOperand::MO_Immediate:
1896     return !isInlineConstant(MO, OpInfo);
1897   case MachineOperand::MO_FrameIndex:
1898   case MachineOperand::MO_MachineBasicBlock:
1899   case MachineOperand::MO_ExternalSymbol:
1900   case MachineOperand::MO_GlobalAddress:
1901   case MachineOperand::MO_MCSymbol:
1902     return true;
1903   default:
1904     llvm_unreachable("unexpected operand type");
1905   }
1906 }
1907 
1908 static bool compareMachineOp(const MachineOperand &Op0,
1909                              const MachineOperand &Op1) {
1910   if (Op0.getType() != Op1.getType())
1911     return false;
1912 
1913   switch (Op0.getType()) {
1914   case MachineOperand::MO_Register:
1915     return Op0.getReg() == Op1.getReg();
1916   case MachineOperand::MO_Immediate:
1917     return Op0.getImm() == Op1.getImm();
1918   default:
1919     llvm_unreachable("Didn't expect to be comparing these operand types");
1920   }
1921 }
1922 
1923 bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo,
1924                                     const MachineOperand &MO) const {
1925   const MCOperandInfo &OpInfo = get(MI.getOpcode()).OpInfo[OpNo];
1926 
1927   assert(MO.isImm() || MO.isTargetIndex() || MO.isFI());
1928 
1929   if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE)
1930     return true;
1931 
1932   if (OpInfo.RegClass < 0)
1933     return false;
1934 
1935   if (MO.isImm() && isInlineConstant(MO, OpInfo))
1936     return RI.opCanUseInlineConstant(OpInfo.OperandType);
1937 
1938   return RI.opCanUseLiteralConstant(OpInfo.OperandType);
1939 }
1940 
1941 bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const {
1942   int Op32 = AMDGPU::getVOPe32(Opcode);
1943   if (Op32 == -1)
1944     return false;
1945 
1946   return pseudoToMCOpcode(Op32) != -1;
1947 }
1948 
1949 bool SIInstrInfo::hasModifiers(unsigned Opcode) const {
  // The src0_modifiers operand is present on all instructions
  // that have modifiers.
1952 
1953   return AMDGPU::getNamedOperandIdx(Opcode,
1954                                     AMDGPU::OpName::src0_modifiers) != -1;
1955 }
1956 
1957 bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI,
1958                                   unsigned OpName) const {
1959   const MachineOperand *Mods = getNamedOperand(MI, OpName);
1960   return Mods && Mods->getImm();
1961 }
1962 
1963 bool SIInstrInfo::hasAnyModifiersSet(const MachineInstr &MI) const {
1964   return hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
1965          hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
1966          hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers) ||
1967          hasModifiersSet(MI, AMDGPU::OpName::clamp) ||
1968          hasModifiersSet(MI, AMDGPU::OpName::omod);
1969 }
1970 
1971 bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI,
1972                                   const MachineOperand &MO,
1973                                   const MCOperandInfo &OpInfo) const {
1974   // Literal constants use the constant bus.
1975   //if (isLiteralConstantLike(MO, OpInfo))
1976   // return true;
1977   if (MO.isImm())
1978     return !isInlineConstant(MO, OpInfo);
1979 
1980   if (!MO.isReg())
1981     return true; // Misc other operands like FrameIndex
1982 
1983   if (!MO.isUse())
1984     return false;
1985 
1986   if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
1987     return RI.isSGPRClass(MRI.getRegClass(MO.getReg()));
1988 
1989   // FLAT_SCR is just an SGPR pair.
1990   if (!MO.isImplicit() && (MO.getReg() == AMDGPU::FLAT_SCR))
1991     return true;
1992 
1993   // EXEC register uses the constant bus.
1994   if (!MO.isImplicit() && MO.getReg() == AMDGPU::EXEC)
1995     return true;
1996 
1997   // SGPRs use the constant bus
1998   return (MO.getReg() == AMDGPU::VCC || MO.getReg() == AMDGPU::M0 ||
1999           (!MO.isImplicit() &&
2000            (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) ||
2001             AMDGPU::SGPR_64RegClass.contains(MO.getReg()))));
2002 }
2003 
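// Return the SGPR (VCC, M0 or FLAT_SCR) implicitly read by MI, or
// AMDGPU::NoRegister if there is none.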
2004 static unsigned findImplicitSGPRRead(const MachineInstr &MI) {
2005   for (const MachineOperand &MO : MI.implicit_operands()) {
2006     // We only care about reads.
2007     if (MO.isDef())
2008       continue;
2009 
2010     switch (MO.getReg()) {
2011     case AMDGPU::VCC:
2012     case AMDGPU::M0:
2013     case AMDGPU::FLAT_SCR:
2014       return MO.getReg();
2015 
2016     default:
2017       break;
2018     }
2019   }
2020 
2021   return AMDGPU::NoRegister;
2022 }
2023 
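// Return true if MI is expected to carry an implicit use of the exec mask:
// every VALU instruction except the lane-access ones (v_readlane /
// v_writelane), and anything that is not a generic opcode, SALU or SMRD
// instruction.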
2024 static bool shouldReadExec(const MachineInstr &MI) {
2025   if (SIInstrInfo::isVALU(MI)) {
2026     switch (MI.getOpcode()) {
2027     case AMDGPU::V_READLANE_B32:
2028     case AMDGPU::V_READLANE_B32_si:
2029     case AMDGPU::V_READLANE_B32_vi:
2030     case AMDGPU::V_WRITELANE_B32:
2031     case AMDGPU::V_WRITELANE_B32_si:
2032     case AMDGPU::V_WRITELANE_B32_vi:
2033       return false;
2034     }
2035 
2036     return true;
2037   }
2038 
2039   if (SIInstrInfo::isGenericOpcode(MI.getOpcode()) ||
2040       SIInstrInfo::isSALU(MI) ||
2041       SIInstrInfo::isSMRD(MI))
2042     return false;
2043 
2044   return true;
2045 }
2046 
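// Return true if SubReg accesses a sub-register of the vector register in
// SuperVec, either as a physical sub-register or, for virtual registers, as
// the same register read through a sub-register index.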
2047 static bool isSubRegOf(const SIRegisterInfo &TRI,
2048                        const MachineOperand &SuperVec,
2049                        const MachineOperand &SubReg) {
2050   if (TargetRegisterInfo::isPhysicalRegister(SubReg.getReg()))
2051     return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg());
2052 
2053   return SubReg.getSubReg() != AMDGPU::NoSubRegister &&
2054          SubReg.getReg() == SuperVec.getReg();
2055 }
2056 
2057 bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
2058                                     StringRef &ErrInfo) const {
2059   uint16_t Opcode = MI.getOpcode();
2060   const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
2061   int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
2062   int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
2063   int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
2064 
2065   // Make sure the number of operands is correct.
2066   const MCInstrDesc &Desc = get(Opcode);
2067   if (!Desc.isVariadic() &&
2068       Desc.getNumOperands() != MI.getNumExplicitOperands()) {
2069     ErrInfo = "Instruction has wrong number of operands.";
2070     return false;
2071   }
2072 
2073   if (MI.isInlineAsm()) {
2074     // Verify register classes for inlineasm constraints.
2075     for (unsigned I = InlineAsm::MIOp_FirstOperand, E = MI.getNumOperands();
2076          I != E; ++I) {
2077       const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI);
2078       if (!RC)
2079         continue;
2080 
2081       const MachineOperand &Op = MI.getOperand(I);
2082       if (!Op.isReg())
2083         continue;
2084 
2085       unsigned Reg = Op.getReg();
2086       if (!TargetRegisterInfo::isVirtualRegister(Reg) && !RC->contains(Reg)) {
2087         ErrInfo = "inlineasm operand has incorrect register class.";
2088         return false;
2089       }
2090     }
2091 
2092     return true;
2093   }
2094 
2095   // Make sure the register classes are correct.
2096   for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) {
2097     if (MI.getOperand(i).isFPImm()) {
2098       ErrInfo = "FPImm Machine Operands are not supported. ISel should bitcast "
2099                 "all fp values to integers.";
2100       return false;
2101     }
2102 
2103     int RegClass = Desc.OpInfo[i].RegClass;
2104 
2105     switch (Desc.OpInfo[i].OperandType) {
2106     case MCOI::OPERAND_REGISTER:
2107       if (MI.getOperand(i).isImm()) {
2108         ErrInfo = "Illegal immediate value for operand.";
2109         return false;
2110       }
2111       break;
2112     case AMDGPU::OPERAND_REG_IMM_INT32:
2113     case AMDGPU::OPERAND_REG_IMM_FP32:
2114       break;
2115     case AMDGPU::OPERAND_REG_INLINE_C_INT32:
2116     case AMDGPU::OPERAND_REG_INLINE_C_FP32:
2117     case AMDGPU::OPERAND_REG_INLINE_C_INT64:
2118     case AMDGPU::OPERAND_REG_INLINE_C_FP64:
2119     case AMDGPU::OPERAND_REG_INLINE_C_INT16:
2120     case AMDGPU::OPERAND_REG_INLINE_C_FP16: {
2121       const MachineOperand &MO = MI.getOperand(i);
2122       if (!MO.isReg() && (!MO.isImm() || !isInlineConstant(MI, i))) {
2123         ErrInfo = "Illegal immediate value for operand.";
2124         return false;
2125       }
2126       break;
2127     }
2128     case MCOI::OPERAND_IMMEDIATE:
2129     case AMDGPU::OPERAND_KIMM32:
2130       // Check if this operand is an immediate.
2131       // FrameIndex operands will be replaced by immediates, so they are
2132       // allowed.
2133       if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) {
2134         ErrInfo = "Expected immediate, but got non-immediate";
2135         return false;
2136       }
2137       LLVM_FALLTHROUGH;
2138     default:
2139       continue;
2140     }
2141 
2142     if (!MI.getOperand(i).isReg())
2143       continue;
2144 
2145     if (RegClass != -1) {
2146       unsigned Reg = MI.getOperand(i).getReg();
2147       if (Reg == AMDGPU::NoRegister ||
2148           TargetRegisterInfo::isVirtualRegister(Reg))
2149         continue;
2150 
2151       const TargetRegisterClass *RC = RI.getRegClass(RegClass);
2152       if (!RC->contains(Reg)) {
2153         ErrInfo = "Operand has incorrect register class.";
2154         return false;
2155       }
2156     }
2157   }
2158 
2159   // Verify VOP*
2160   if (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI)) {
2161     // Only look at the true operands. Only a real operand can use the constant
2162     // bus, and we don't want to check pseudo-operands like the source modifier
2163     // flags.
2164     const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };
2165 
2166     unsigned ConstantBusCount = 0;
2167 
2168     if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1)
2169       ++ConstantBusCount;
2170 
2171     unsigned SGPRUsed = findImplicitSGPRRead(MI);
2172     if (SGPRUsed != AMDGPU::NoRegister)
2173       ++ConstantBusCount;
2174 
2175     for (int OpIdx : OpIndices) {
2176       if (OpIdx == -1)
2177         break;
2178       const MachineOperand &MO = MI.getOperand(OpIdx);
2179       if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) {
2180         if (MO.isReg()) {
2181           if (MO.getReg() != SGPRUsed)
2182             ++ConstantBusCount;
2183           SGPRUsed = MO.getReg();
2184         } else {
2185           ++ConstantBusCount;
2186         }
2187       }
2188     }
2189     if (ConstantBusCount > 1) {
2190       ErrInfo = "VOP* instruction uses the constant bus more than once";
2191       return false;
2192     }
2193   }
2194 
2195   // Verify misc. restrictions on specific instructions.
2196   if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 ||
2197       Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) {
2198     const MachineOperand &Src0 = MI.getOperand(Src0Idx);
2199     const MachineOperand &Src1 = MI.getOperand(Src1Idx);
2200     const MachineOperand &Src2 = MI.getOperand(Src2Idx);
2201     if (Src0.isReg() && Src1.isReg() && Src2.isReg()) {
2202       if (!compareMachineOp(Src0, Src1) &&
2203           !compareMachineOp(Src0, Src2)) {
2204         ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2";
2205         return false;
2206       }
2207     }
2208   }
2209 
2210   if (isSOPK(MI)) {
2211     int64_t Imm = getNamedOperand(MI, AMDGPU::OpName::simm16)->getImm();
2212     if (sopkIsZext(MI)) {
2213       if (!isUInt<16>(Imm)) {
2214         ErrInfo = "invalid immediate for SOPK instruction";
2215         return false;
2216       }
2217     } else {
2218       if (!isInt<16>(Imm)) {
2219         ErrInfo = "invalid immediate for SOPK instruction";
2220         return false;
2221       }
2222     }
2223   }
2224 
2225   if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 ||
2226       Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 ||
2227       Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
2228       Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) {
2229     const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
2230                        Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64;
2231 
2232     const unsigned StaticNumOps = Desc.getNumOperands() +
2233       Desc.getNumImplicitUses();
2234     const unsigned NumImplicitOps = IsDst ? 2 : 1;
2235 
2236     // Allow additional implicit operands. This allows a fixup done by the post
2237     // RA scheduler where the main implicit operand is killed and implicit-defs
2238     // are added for sub-registers that remain live after this instruction.
2239     if (MI.getNumOperands() < StaticNumOps + NumImplicitOps) {
2240       ErrInfo = "missing implicit register operands";
2241       return false;
2242     }
2243 
2244     const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
2245     if (IsDst) {
2246       if (!Dst->isUse()) {
2247         ErrInfo = "v_movreld_b32 vdst should be a use operand";
2248         return false;
2249       }
2250 
2251       unsigned UseOpIdx;
2252       if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) ||
2253           UseOpIdx != StaticNumOps + 1) {
2254         ErrInfo = "movrel implicit operands should be tied";
2255         return false;
2256       }
2257     }
2258 
2259     const MachineOperand &Src0 = MI.getOperand(Src0Idx);
2260     const MachineOperand &ImpUse
2261       = MI.getOperand(StaticNumOps + NumImplicitOps - 1);
2262     if (!ImpUse.isReg() || !ImpUse.isUse() ||
2263         !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) {
2264       ErrInfo = "src0 should be subreg of implicit vector use";
2265       return false;
2266     }
2267   }
2268 
2269   // Make sure we aren't losing exec uses in the td files. This mostly requires
2270   // being careful when using let Uses to try to add other use registers.
2271   if (shouldReadExec(MI)) {
2272     if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) {
2273       ErrInfo = "VALU instruction does not implicitly read exec mask";
2274       return false;
2275     }
2276   }
2277 
2278   if (isSMRD(MI)) {
2279     if (MI.mayStore()) {
2280       // The register offset form of scalar stores may only use m0 as the
2281       // soffset register.
2282       const MachineOperand *Soff = getNamedOperand(MI, AMDGPU::OpName::soff);
2283       if (Soff && Soff->getReg() != AMDGPU::M0) {
2284         ErrInfo = "scalar stores must use m0 as offset register";
2285         return false;
2286       }
2287     }
2288   }
2289 
2290   return true;
2291 }
2292 
2293 unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) {
2294   switch (MI.getOpcode()) {
2295   default: return AMDGPU::INSTRUCTION_LIST_END;
2296   case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
2297   case AMDGPU::COPY: return AMDGPU::COPY;
2298   case AMDGPU::PHI: return AMDGPU::PHI;
2299   case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG;
2300   case AMDGPU::S_MOV_B32:
2301     return MI.getOperand(1).isReg() ?
2302            AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
2303   case AMDGPU::S_ADD_I32:
2304   case AMDGPU::S_ADD_U32: return AMDGPU::V_ADD_I32_e32;
2305   case AMDGPU::S_ADDC_U32: return AMDGPU::V_ADDC_U32_e32;
2306   case AMDGPU::S_SUB_I32:
2307   case AMDGPU::S_SUB_U32: return AMDGPU::V_SUB_I32_e32;
2308   case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
2309   case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_I32;
2310   case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64;
2311   case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64;
2312   case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64;
2313   case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64;
2314   case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64;
2315   case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64;
2316   case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64;
2317   case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
2318   case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64;
2319   case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
2320   case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64;
2321   case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
2322   case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64;
2323   case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32;
2324   case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32;
2325   case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32;
2326   case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32;
2327   case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64;
2328   case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32;
2329   case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32;
2330   case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32;
2331   case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32;
2332   case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32;
2333   case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32;
2334   case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32;
2335   case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32;
2336   case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32;
2337   case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e32;
2338   case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e32;
2339   case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e32;
2340   case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e32;
2341   case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e32;
2342   case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e32;
2343   case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e32;
2344   case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e32;
2345   case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64;
2346   case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32;
2347   case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32;
2348   case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64;
2349   case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ;
2350   case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ;
2351   }
2352 }
2353 
2354 bool SIInstrInfo::isSALUOpSupportedOnVALU(const MachineInstr &MI) const {
2355   return getVALUOp(MI) != AMDGPU::INSTRUCTION_LIST_END;
2356 }
2357 
2358 const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
2359                                                       unsigned OpNo) const {
2360   const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
2361   const MCInstrDesc &Desc = get(MI.getOpcode());
2362   if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
2363       Desc.OpInfo[OpNo].RegClass == -1) {
2364     unsigned Reg = MI.getOperand(OpNo).getReg();
2365 
2366     if (TargetRegisterInfo::isVirtualRegister(Reg))
2367       return MRI.getRegClass(Reg);
2368     return RI.getPhysRegClass(Reg);
2369   }
2370 
2371   unsigned RCID = Desc.OpInfo[OpNo].RegClass;
2372   return RI.getRegClass(RCID);
2373 }
2374 
2375 bool SIInstrInfo::canReadVGPR(const MachineInstr &MI, unsigned OpNo) const {
2376   switch (MI.getOpcode()) {
2377   case AMDGPU::COPY:
2378   case AMDGPU::REG_SEQUENCE:
2379   case AMDGPU::PHI:
2380   case AMDGPU::INSERT_SUBREG:
2381     return RI.hasVGPRs(getOpRegClass(MI, 0));
2382   default:
2383     return RI.hasVGPRs(getOpRegClass(MI, OpNo));
2384   }
2385 }
2386 
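// Legalize the operand at OpIdx by materializing it in a fresh virtual
// register (a COPY for register operands, a move for immediates) and
// rewriting the operand to refer to that register.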
2387 void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const {
2388   MachineBasicBlock::iterator I = MI;
2389   MachineBasicBlock *MBB = MI.getParent();
2390   MachineOperand &MO = MI.getOperand(OpIdx);
2391   MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
2392   unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass;
2393   const TargetRegisterClass *RC = RI.getRegClass(RCID);
2394   unsigned Opcode = AMDGPU::V_MOV_B32_e32;
2395   if (MO.isReg())
2396     Opcode = AMDGPU::COPY;
2397   else if (RI.isSGPRClass(RC))
2398     Opcode = AMDGPU::S_MOV_B32;
2399 
2400   const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
2401   if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC))
2402     VRC = &AMDGPU::VReg_64RegClass;
2403   else
2404     VRC = &AMDGPU::VGPR_32RegClass;
2405 
2406   unsigned Reg = MRI.createVirtualRegister(VRC);
2407   DebugLoc DL = MBB->findDebugLoc(I);
2408   BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).add(MO);
2409   MO.ChangeToRegister(Reg, false);
2410 }
2411 
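// Emit a COPY that extracts sub-register SubIdx of SuperReg into a new virtual
// register of class SubRC and return that register. If SuperReg itself carries
// a sub-register index, it is first copied into a full register of class
// SuperRC so the two sub-register indices do not need to be composed.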
2412 unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI,
2413                                          MachineRegisterInfo &MRI,
2414                                          MachineOperand &SuperReg,
2415                                          const TargetRegisterClass *SuperRC,
2416                                          unsigned SubIdx,
2417                                          const TargetRegisterClass *SubRC)
2418                                          const {
2419   MachineBasicBlock *MBB = MI->getParent();
2420   DebugLoc DL = MI->getDebugLoc();
2421   unsigned SubReg = MRI.createVirtualRegister(SubRC);
2422 
2423   if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) {
2424     BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
2425       .addReg(SuperReg.getReg(), 0, SubIdx);
2426     return SubReg;
2427   }
2428 
2429   // Just in case the super register is itself a sub-register, copy it to a new
2430   // value so we don't need to worry about merging its subreg index with the
2431   // SubIdx passed to this function. The register coalescer should be able to
2432   // eliminate this extra copy.
2433   unsigned NewSuperReg = MRI.createVirtualRegister(SuperRC);
2434 
2435   BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg)
2436     .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg());
2437 
2438   BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
2439     .addReg(NewSuperReg, 0, SubIdx);
2440 
2441   return SubReg;
2442 }
2443 
2444 MachineOperand SIInstrInfo::buildExtractSubRegOrImm(
2445   MachineBasicBlock::iterator MII,
2446   MachineRegisterInfo &MRI,
2447   MachineOperand &Op,
2448   const TargetRegisterClass *SuperRC,
2449   unsigned SubIdx,
2450   const TargetRegisterClass *SubRC) const {
2451   if (Op.isImm()) {
2452     if (SubIdx == AMDGPU::sub0)
2453       return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm()));
2454     if (SubIdx == AMDGPU::sub1)
2455       return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm() >> 32));
2456 
2457     llvm_unreachable("Unhandled register index for immediate");
2458   }
2459 
2460   unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC,
2461                                        SubIdx, SubRC);
2462   return MachineOperand::CreateReg(SubReg, false);
2463 }
2464 
2465 // Change the order of operands from (0, 1, 2) to (0, 2, 1)
2466 void SIInstrInfo::swapOperands(MachineInstr &Inst) const {
2467   assert(Inst.getNumExplicitOperands() == 3);
2468   MachineOperand Op1 = Inst.getOperand(1);
2469   Inst.RemoveOperand(1);
2470   Inst.addOperand(Op1);
2471 }
2472 
2473 bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI,
2474                                     const MCOperandInfo &OpInfo,
2475                                     const MachineOperand &MO) const {
2476   if (!MO.isReg())
2477     return false;
2478 
2479   unsigned Reg = MO.getReg();
2480   const TargetRegisterClass *RC =
2481     TargetRegisterInfo::isVirtualRegister(Reg) ?
2482     MRI.getRegClass(Reg) :
2483     RI.getPhysRegClass(Reg);
2484 
2485   const SIRegisterInfo *TRI =
2486       static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo());
2487   RC = TRI->getSubRegClass(RC, MO.getSubReg());
2488 
2489   // In order to be legal, the common sub-class must be equal to the
2490   // class of the current operand.  For example:
2491   //
2492   // v_mov_b32 s0 ; Operand defined as vsrc_b32
2493   //              ; RI.getCommonSubClass(s0,vsrc_b32) = sgpr ; LEGAL
2494   //
2495   // s_sendmsg 0, s0 ; Operand defined as m0reg
2496   //                 ; RI.getCommonSubClass(s0,m0reg) = m0reg ; NOT LEGAL
2497 
2498   return RI.getCommonSubClass(RC, RI.getRegClass(OpInfo.RegClass)) == RC;
2499 }
2500 
2501 bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI,
2502                                      const MCOperandInfo &OpInfo,
2503                                      const MachineOperand &MO) const {
2504   if (MO.isReg())
2505     return isLegalRegOperand(MRI, OpInfo, MO);
2506 
2507   // Handle non-register types that are treated like immediates.
2508   assert(MO.isImm() || MO.isTargetIndex() || MO.isFI());
2509   return true;
2510 }
2511 
2512 bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx,
2513                                  const MachineOperand *MO) const {
2514   const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
2515   const MCInstrDesc &InstDesc = MI.getDesc();
2516   const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx];
2517   const TargetRegisterClass *DefinedRC =
2518       OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr;
2519   if (!MO)
2520     MO = &MI.getOperand(OpIdx);
2521 
2522   if (isVALU(MI) && usesConstantBus(MRI, *MO, OpInfo)) {
2524     RegSubRegPair SGPRUsed;
2525     if (MO->isReg())
2526       SGPRUsed = RegSubRegPair(MO->getReg(), MO->getSubReg());
2527 
2528     for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
2529       if (i == OpIdx)
2530         continue;
2531       const MachineOperand &Op = MI.getOperand(i);
2532       if (Op.isReg()) {
2533         if ((Op.getReg() != SGPRUsed.Reg || Op.getSubReg() != SGPRUsed.SubReg) &&
2534             usesConstantBus(MRI, Op, InstDesc.OpInfo[i])) {
2535           return false;
2536         }
2537       } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) {
2538         return false;
2539       }
2540     }
2541   }
2542 
2543   if (MO->isReg()) {
2544     assert(DefinedRC);
2545     return isLegalRegOperand(MRI, OpInfo, *MO);
2546   }
2547 
2548   // Handle non-register types that are treated like immediates.
2549   assert(MO->isImm() || MO->isTargetIndex() || MO->isFI());
2550 
2551   if (!DefinedRC) {
2552     // This operand expects an immediate.
2553     return true;
2554   }
2555 
2556   return isImmOperandLegal(MI, OpIdx, *MO);
2557 }
2558 
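// Legalize the source operands of a VOP2/VOPC instruction. Only src0 may read
// an SGPR, and only when the instruction has no implicit SGPR use such as VCC,
// so an illegal src1 is either commuted into src0 or copied into a VGPR.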
2559 void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI,
2560                                        MachineInstr &MI) const {
2561   unsigned Opc = MI.getOpcode();
2562   const MCInstrDesc &InstrDesc = get(Opc);
2563 
2564   int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
2565   MachineOperand &Src1 = MI.getOperand(Src1Idx);
2566 
2567   // If there is an implicit SGPR use such as VCC use for v_addc_u32/v_subb_u32
2568   // we need to only have one constant bus use.
2569   //
  // Note we do not need to worry about literal constants here. They are
  // disabled for this operand type because they would always violate the
  // one constant bus use rule.
2573   bool HasImplicitSGPR = findImplicitSGPRRead(MI) != AMDGPU::NoRegister;
2574   if (HasImplicitSGPR) {
2575     int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
2576     MachineOperand &Src0 = MI.getOperand(Src0Idx);
2577 
2578     if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg()))
2579       legalizeOpWithMove(MI, Src0Idx);
2580   }
2581 
  // VOP2 instructions accept all operand types in src0, so we don't need to
  // check its legality. If src1 is already legal, we don't need to do anything.
2584   if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1))
2585     return;
2586 
2587   // We do not use commuteInstruction here because it is too aggressive and will
2588   // commute if it is possible. We only want to commute here if it improves
2589   // legality. This can be called a fairly large number of times so don't waste
2590   // compile time pointlessly swapping and checking legality again.
2591   if (HasImplicitSGPR || !MI.isCommutable()) {
2592     legalizeOpWithMove(MI, Src1Idx);
2593     return;
2594   }
2595 
2596   int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
2597   MachineOperand &Src0 = MI.getOperand(Src0Idx);
2598 
2599   // If src0 can be used as src1, commuting will make the operands legal.
2600   // Otherwise we have to give up and insert a move.
2601   //
2602   // TODO: Other immediate-like operand kinds could be commuted if there was a
2603   // MachineOperand::ChangeTo* for them.
2604   if ((!Src1.isImm() && !Src1.isReg()) ||
2605       !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) {
2606     legalizeOpWithMove(MI, Src1Idx);
2607     return;
2608   }
2609 
2610   int CommutedOpc = commuteOpcode(MI);
2611   if (CommutedOpc == -1) {
2612     legalizeOpWithMove(MI, Src1Idx);
2613     return;
2614   }
2615 
2616   MI.setDesc(get(CommutedOpc));
2617 
2618   unsigned Src0Reg = Src0.getReg();
2619   unsigned Src0SubReg = Src0.getSubReg();
2620   bool Src0Kill = Src0.isKill();
2621 
2622   if (Src1.isImm())
2623     Src0.ChangeToImmediate(Src1.getImm());
2624   else if (Src1.isReg()) {
2625     Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill());
2626     Src0.setSubReg(Src1.getSubReg());
2627   } else
2628     llvm_unreachable("Should only have register or immediate operands");
2629 
2630   Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill);
2631   Src1.setSubReg(Src0SubReg);
2632 }
2633 
2634 // Legalize VOP3 operands. Because all operand types are supported for any
2635 // operand, and since literal constants are not allowed and should never be
2636 // seen, we only need to worry about inserting copies if we use multiple SGPR
2637 // operands.
2638 void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI,
2639                                        MachineInstr &MI) const {
2640   unsigned Opc = MI.getOpcode();
2641 
2642   int VOP3Idx[3] = {
2643     AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
2644     AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1),
2645     AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)
2646   };
2647 
2648   // Find the one SGPR operand we are allowed to use.
2649   unsigned SGPRReg = findUsedSGPR(MI, VOP3Idx);
2650 
2651   for (unsigned i = 0; i < 3; ++i) {
2652     int Idx = VOP3Idx[i];
2653     if (Idx == -1)
2654       break;
2655     MachineOperand &MO = MI.getOperand(Idx);
2656 
2657     // We should never see a VOP3 instruction with an illegal immediate operand.
2658     if (!MO.isReg())
2659       continue;
2660 
2661     if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg())))
2662       continue; // VGPRs are legal
2663 
2664     if (SGPRReg == AMDGPU::NoRegister || SGPRReg == MO.getReg()) {
2665       SGPRReg = MO.getReg();
2666       // We can use one SGPR in each VOP3 instruction.
2667       continue;
2668     }
2669 
2670     // If we make it this far, then the operand is not legal and we must
2671     // legalize it.
2672     legalizeOpWithMove(MI, Idx);
2673   }
2674 }
2675 
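// Copy the value of a VGPR (or VGPR tuple) into a new SGPR of the equivalent
// class by reading each 32-bit sub-register with v_readfirstlane_b32 and
// reassembling the results with a REG_SEQUENCE. Returns the new SGPR.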
2676 unsigned SIInstrInfo::readlaneVGPRToSGPR(unsigned SrcReg, MachineInstr &UseMI,
2677                                          MachineRegisterInfo &MRI) const {
2678   const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg);
2679   const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC);
2680   unsigned DstReg = MRI.createVirtualRegister(SRC);
2681   unsigned SubRegs = VRC->getSize() / 4;
2682 
2683   SmallVector<unsigned, 8> SRegs;
2684   for (unsigned i = 0; i < SubRegs; ++i) {
2685     unsigned SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2686     BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
2687             get(AMDGPU::V_READFIRSTLANE_B32), SGPR)
2688         .addReg(SrcReg, 0, RI.getSubRegFromChannel(i));
2689     SRegs.push_back(SGPR);
2690   }
2691 
2692   MachineInstrBuilder MIB =
2693       BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
2694               get(AMDGPU::REG_SEQUENCE), DstReg);
2695   for (unsigned i = 0; i < SubRegs; ++i) {
2696     MIB.addReg(SRegs[i]);
2697     MIB.addImm(RI.getSubRegFromChannel(i));
2698   }
2699   return DstReg;
2700 }
2701 
2702 void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI,
2703                                        MachineInstr &MI) const {
2704 
  // If the pointer is stored in VGPRs, then we need to move it to
  // SGPRs using v_readfirstlane. This is safe because we only select
  // loads with uniform pointers to SMRD instructions, so we know the
  // pointer value is uniform.
2709   MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase);
2710   if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) {
    unsigned SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI);
    SBase->setReg(SGPR);
2713   }
2714 }
2715 
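// Rewrite the register operand Op to have register class DstRC by inserting a
// COPY at the given point; if the copied value comes from a move-immediate,
// try to fold the immediate into the copy afterwards.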
2716 void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB,
2717                                          MachineBasicBlock::iterator I,
2718                                          const TargetRegisterClass *DstRC,
2719                                          MachineOperand &Op,
2720                                          MachineRegisterInfo &MRI,
2721                                          const DebugLoc &DL) const {
2722 
2723   unsigned OpReg = Op.getReg();
2724   unsigned OpSubReg = Op.getSubReg();
2725 
2726   const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg(
2727       RI.getRegClassForReg(MRI, OpReg), OpSubReg);
2728 
2729   // Check if operand is already the correct register class.
2730   if (DstRC == OpRC)
2731     return;
2732 
2733   unsigned DstReg = MRI.createVirtualRegister(DstRC);
2734   MachineInstr *Copy =
2735       BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg).add(Op);
2736 
2737   Op.setReg(DstReg);
2738   Op.setSubReg(0);
2739 
2740   MachineInstr *Def = MRI.getVRegDef(OpReg);
2741   if (!Def)
2742     return;
2743 
2744   // Try to eliminate the copy if it is copying an immediate value.
2745   if (Def->isMoveImmediate())
2746     FoldImmediate(*Copy, *Def, OpReg, &MRI);
2747 }
2748 
2749 void SIInstrInfo::legalizeOperands(MachineInstr &MI) const {
2750   MachineFunction &MF = *MI.getParent()->getParent();
2751   MachineRegisterInfo &MRI = MF.getRegInfo();
2752 
2753   // Legalize VOP2
2754   if (isVOP2(MI) || isVOPC(MI)) {
2755     legalizeOperandsVOP2(MRI, MI);
2756     return;
2757   }
2758 
2759   // Legalize VOP3
2760   if (isVOP3(MI)) {
2761     legalizeOperandsVOP3(MRI, MI);
2762     return;
2763   }
2764 
2765   // Legalize SMRD
2766   if (isSMRD(MI)) {
2767     legalizeOperandsSMRD(MRI, MI);
2768     return;
2769   }
2770 
2771   // Legalize REG_SEQUENCE and PHI
  // The register class of the operands must be the same type as the register
  // class of the output.
2774   if (MI.getOpcode() == AMDGPU::PHI) {
2775     const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
2776     for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
2777       if (!MI.getOperand(i).isReg() ||
2778           !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg()))
2779         continue;
2780       const TargetRegisterClass *OpRC =
2781           MRI.getRegClass(MI.getOperand(i).getReg());
2782       if (RI.hasVGPRs(OpRC)) {
2783         VRC = OpRC;
2784       } else {
2785         SRC = OpRC;
2786       }
2787     }
2788 
    // If any of the operands are VGPR registers, then they all must be,
    // otherwise we will create illegal VGPR->SGPR copies when legalizing
    // them.
2792     if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) {
2793       if (!VRC) {
2794         assert(SRC);
2795         VRC = RI.getEquivalentVGPRClass(SRC);
2796       }
2797       RC = VRC;
2798     } else {
2799       RC = SRC;
2800     }
2801 
2802     // Update all the operands so they have the same type.
2803     for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
2804       MachineOperand &Op = MI.getOperand(I);
2805       if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
2806         continue;
2807 
2808       // MI is a PHI instruction.
2809       MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB();
2810       MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator();
2811 
2812       // Avoid creating no-op copies with the same src and dst reg class.  These
2813       // confuse some of the machine passes.
2814       legalizeGenericOperand(*InsertBB, Insert, RC, Op, MRI, MI.getDebugLoc());
2815     }
2816   }
2817 
2818   // REG_SEQUENCE doesn't really require operand legalization, but if one has a
2819   // VGPR dest type and SGPR sources, insert copies so all operands are
2820   // VGPRs. This seems to help operand folding / the register coalescer.
2821   if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) {
2822     MachineBasicBlock *MBB = MI.getParent();
2823     const TargetRegisterClass *DstRC = getOpRegClass(MI, 0);
2824     if (RI.hasVGPRs(DstRC)) {
2825       // Update all the operands so they are VGPR register classes. These may
2826       // not be the same register class because REG_SEQUENCE supports mixing
2827       // subregister index types e.g. sub0_sub1 + sub2 + sub3
2828       for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
2829         MachineOperand &Op = MI.getOperand(I);
2830         if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
2831           continue;
2832 
2833         const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg());
2834         const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC);
2835         if (VRC == OpRC)
2836           continue;
2837 
2838         legalizeGenericOperand(*MBB, MI, VRC, Op, MRI, MI.getDebugLoc());
2839         Op.setIsKill();
2840       }
2841     }
2842 
2843     return;
2844   }
2845 
2846   // Legalize INSERT_SUBREG
2847   // src0 must have the same register class as dst
2848   if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) {
2849     unsigned Dst = MI.getOperand(0).getReg();
2850     unsigned Src0 = MI.getOperand(1).getReg();
2851     const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
2852     const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0);
2853     if (DstRC != Src0RC) {
2854       MachineBasicBlock *MBB = MI.getParent();
2855       MachineOperand &Op = MI.getOperand(1);
2856       legalizeGenericOperand(*MBB, MI, DstRC, Op, MRI, MI.getDebugLoc());
2857     }
2858     return;
2859   }
2860 
2861   // Legalize MIMG and MUBUF/MTBUF for shaders.
2862   //
2863   // Shaders only generate MUBUF/MTBUF instructions via intrinsics or via
2864   // scratch memory access. In both cases, the legalization never involves
2865   // conversion to the addr64 form.
2866   if (isMIMG(MI) ||
2867       (AMDGPU::isShader(MF.getFunction()->getCallingConv()) &&
2868        (isMUBUF(MI) || isMTBUF(MI)))) {
2869     MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc);
2870     if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) {
2871       unsigned SGPR = readlaneVGPRToSGPR(SRsrc->getReg(), MI, MRI);
2872       SRsrc->setReg(SGPR);
2873     }
2874 
2875     MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp);
2876     if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg()))) {
2877       unsigned SGPR = readlaneVGPRToSGPR(SSamp->getReg(), MI, MRI);
2878       SSamp->setReg(SGPR);
2879     }
2880     return;
2881   }
2882 
2883   // Legalize MUBUF* instructions by converting to addr64 form.
2884   // FIXME: If we start using the non-addr64 instructions for compute, we
2885   // may need to legalize them as above. This especially applies to the
2886   // buffer_load_format_* variants and variants with idxen (or bothen).
2887   int SRsrcIdx =
2888       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
2889   if (SRsrcIdx != -1) {
2890     // We have an MUBUF instruction
2891     MachineOperand *SRsrc = &MI.getOperand(SRsrcIdx);
2892     unsigned SRsrcRC = get(MI.getOpcode()).OpInfo[SRsrcIdx].RegClass;
2893     if (RI.getCommonSubClass(MRI.getRegClass(SRsrc->getReg()),
2894                                              RI.getRegClass(SRsrcRC))) {
2895       // The operands are legal.
      // FIXME: We may need to legalize operands besides srsrc.
2897       return;
2898     }
2899 
2900     MachineBasicBlock &MBB = *MI.getParent();
2901 
2902     // Extract the ptr from the resource descriptor.
2903     unsigned SRsrcPtr = buildExtractSubReg(MI, MRI, *SRsrc,
2904       &AMDGPU::VReg_128RegClass, AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);
2905 
2906     // Create an empty resource descriptor
2907     unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2908     unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2909     unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2910     unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
2911     uint64_t RsrcDataFormat = getDefaultRsrcDataFormat();
2912 
2913     // Zero64 = 0
2914     BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B64), Zero64)
2915         .addImm(0);
2916 
2917     // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
2918     BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B32), SRsrcFormatLo)
2919         .addImm(RsrcDataFormat & 0xFFFFFFFF);
2920 
2921     // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
2922     BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B32), SRsrcFormatHi)
2923         .addImm(RsrcDataFormat >> 32);
2924 
2925     // NewSRsrc = {Zero64, SRsrcFormat}
2926     BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewSRsrc)
2927         .addReg(Zero64)
2928         .addImm(AMDGPU::sub0_sub1)
2929         .addReg(SRsrcFormatLo)
2930         .addImm(AMDGPU::sub2)
2931         .addReg(SRsrcFormatHi)
2932         .addImm(AMDGPU::sub3);
2933 
2934     MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
2935     unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
2936     if (VAddr) {
2937       // This is already an ADDR64 instruction so we need to add the pointer
2938       // extracted from the resource descriptor to the current value of VAddr.
2939       unsigned NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2940       unsigned NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2941 
2942       // NewVaddrLo = SRsrcPtr:sub0 + VAddr:sub0
2943       DebugLoc DL = MI.getDebugLoc();
2944       BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), NewVAddrLo)
2945         .addReg(SRsrcPtr, 0, AMDGPU::sub0)
2946         .addReg(VAddr->getReg(), 0, AMDGPU::sub0);
2947 
2948       // NewVaddrHi = SRsrcPtr:sub1 + VAddr:sub1
2949       BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e32), NewVAddrHi)
2950         .addReg(SRsrcPtr, 0, AMDGPU::sub1)
2951         .addReg(VAddr->getReg(), 0, AMDGPU::sub1);
2952 
2953       // NewVaddr = {NewVaddrHi, NewVaddrLo}
2954       BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
2955           .addReg(NewVAddrLo)
2956           .addImm(AMDGPU::sub0)
2957           .addReg(NewVAddrHi)
2958           .addImm(AMDGPU::sub1);
2959     } else {
2960       // This is the _OFFSET variant of the instruction, so we need to
2961       // convert it to ADDR64.
2962       assert(MBB.getParent()->getSubtarget<SISubtarget>().getGeneration()
2963              < SISubtarget::VOLCANIC_ISLANDS &&
2964              "FIXME: Need to emit flat atomics here");
2965 
2966       MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata);
2967       MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
2968       MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset);
2969       unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode());
2970 
2971       // Atomics with return have an additional tied operand and are
2972       // missing some of the special bits.
2973       MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in);
2974       MachineInstr *Addr64;
2975 
2976       if (!VDataIn) {
2977         // Regular buffer load / store.
2978         MachineInstrBuilder MIB =
2979             BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
2980                 .add(*VData)
2981                 .addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
2982                 // This will be replaced later
2983                 // with the new value of vaddr.
2984                 .add(*SRsrc)
2985                 .add(*SOffset)
2986                 .add(*Offset);
2987 
2988         // Atomics do not have this operand.
2989         if (const MachineOperand *GLC =
2990                 getNamedOperand(MI, AMDGPU::OpName::glc)) {
2991           MIB.addImm(GLC->getImm());
2992         }
2993 
2994         MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc));
2995 
2996         if (const MachineOperand *TFE =
2997                 getNamedOperand(MI, AMDGPU::OpName::tfe)) {
2998           MIB.addImm(TFE->getImm());
2999         }
3000 
3001         MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
3002         Addr64 = MIB;
3003       } else {
3004         // Atomics with return.
3005         Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
3006                      .add(*VData)
3007                      .add(*VDataIn)
3008                      .addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
3009                      // This will be replaced later
3010                      // with the new value of vaddr.
3011                      .add(*SRsrc)
3012                      .add(*SOffset)
3013                      .add(*Offset)
3014                      .addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc))
3015                      .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
3016       }
3017 
3018       MI.removeFromParent();
3019 
3020       // NewVaddr = {SRsrcPtr:sub1, SRsrcPtr:sub0}
3021       BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
3022               NewVAddr)
3023           .addReg(SRsrcPtr, 0, AMDGPU::sub0)
3024           .addImm(AMDGPU::sub0)
3025           .addReg(SRsrcPtr, 0, AMDGPU::sub1)
3026           .addImm(AMDGPU::sub1);
3027 
3028       VAddr = getNamedOperand(*Addr64, AMDGPU::OpName::vaddr);
3029       SRsrc = getNamedOperand(*Addr64, AMDGPU::OpName::srsrc);
3030     }
3031 
3032     // Update the instruction to use NewVaddr
3033     VAddr->setReg(NewVAddr);
3034     // Update the instruction to use NewSRsrc
3035     SRsrc->setReg(NewSRsrc);
3036   }
3037 }
3038 
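/// \brief Replace \p TopInst with an equivalent VALU instruction (or sequence
/// of instructions), then iteratively process any users that become illegal
/// as a result via a worklist.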
3039 void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
3040   SmallVector<MachineInstr *, 128> Worklist;
3041   Worklist.push_back(&TopInst);
3042 
3043   while (!Worklist.empty()) {
3044     MachineInstr &Inst = *Worklist.pop_back_val();
3045     MachineBasicBlock *MBB = Inst.getParent();
3046     MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
3047 
3048     unsigned Opcode = Inst.getOpcode();
3049     unsigned NewOpcode = getVALUOp(Inst);
3050 
3051     // Handle some special cases
3052     switch (Opcode) {
3053     default:
3054       break;
3055     case AMDGPU::S_AND_B64:
3056       splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_AND_B32_e64);
3057       Inst.eraseFromParent();
3058       continue;
3059 
3060     case AMDGPU::S_OR_B64:
3061       splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_OR_B32_e64);
3062       Inst.eraseFromParent();
3063       continue;
3064 
3065     case AMDGPU::S_XOR_B64:
3066       splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_XOR_B32_e64);
3067       Inst.eraseFromParent();
3068       continue;
3069 
3070     case AMDGPU::S_NOT_B64:
3071       splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::V_NOT_B32_e32);
3072       Inst.eraseFromParent();
3073       continue;
3074 
3075     case AMDGPU::S_BCNT1_I32_B64:
3076       splitScalar64BitBCNT(Worklist, Inst);
3077       Inst.eraseFromParent();
3078       continue;
3079 
3080     case AMDGPU::S_BFE_I64: {
3081       splitScalar64BitBFE(Worklist, Inst);
3082       Inst.eraseFromParent();
3083       continue;
3084     }
3085 
3086     case AMDGPU::S_LSHL_B32:
3087       if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
3088         NewOpcode = AMDGPU::V_LSHLREV_B32_e64;
3089         swapOperands(Inst);
3090       }
3091       break;
3092     case AMDGPU::S_ASHR_I32:
3093       if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
3094         NewOpcode = AMDGPU::V_ASHRREV_I32_e64;
3095         swapOperands(Inst);
3096       }
3097       break;
3098     case AMDGPU::S_LSHR_B32:
3099       if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
3100         NewOpcode = AMDGPU::V_LSHRREV_B32_e64;
3101         swapOperands(Inst);
3102       }
3103       break;
3104     case AMDGPU::S_LSHL_B64:
3105       if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
3106         NewOpcode = AMDGPU::V_LSHLREV_B64;
3107         swapOperands(Inst);
3108       }
3109       break;
3110     case AMDGPU::S_ASHR_I64:
3111       if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
3112         NewOpcode = AMDGPU::V_ASHRREV_I64;
3113         swapOperands(Inst);
3114       }
3115       break;
3116     case AMDGPU::S_LSHR_B64:
3117       if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
3118         NewOpcode = AMDGPU::V_LSHRREV_B64;
3119         swapOperands(Inst);
3120       }
3121       break;
3122 
3123     case AMDGPU::S_ABS_I32:
3124       lowerScalarAbs(Worklist, Inst);
3125       Inst.eraseFromParent();
3126       continue;
3127 
3128     case AMDGPU::S_CBRANCH_SCC0:
3129     case AMDGPU::S_CBRANCH_SCC1:
3130       // Clear unused bits of vcc
3131       BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B64),
3132               AMDGPU::VCC)
3133           .addReg(AMDGPU::EXEC)
3134           .addReg(AMDGPU::VCC);
3135       break;
3136 
3137     case AMDGPU::S_BFE_U64:
3138     case AMDGPU::S_BFM_B64:
3139       llvm_unreachable("Moving this op to VALU not implemented");
3140 
3141     case AMDGPU::S_PACK_LL_B32_B16:
3142     case AMDGPU::S_PACK_LH_B32_B16:
3143     case AMDGPU::S_PACK_HH_B32_B16: {
3144       movePackToVALU(Worklist, MRI, Inst);
3145       Inst.eraseFromParent();
3146       continue;
3147     }
3148     }
3149 
3150     if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
3151       // We cannot move this instruction to the VALU, so we should try to
3152       // legalize its operands instead.
3153       legalizeOperands(Inst);
3154       continue;
3155     }
3156 
3157     // Use the new VALU Opcode.
3158     const MCInstrDesc &NewDesc = get(NewOpcode);
3159     Inst.setDesc(NewDesc);
3160 
3161     // Remove any references to SCC. Vector instructions can't read from it, and
3162     // we're just about to add the implicit use / defs of VCC, so we don't want
3163     // both.
3164     for (unsigned i = Inst.getNumOperands() - 1; i > 0; --i) {
3165       MachineOperand &Op = Inst.getOperand(i);
3166       if (Op.isReg() && Op.getReg() == AMDGPU::SCC) {
3167         Inst.RemoveOperand(i);
3168         addSCCDefUsersToVALUWorklist(Inst, Worklist);
3169       }
3170     }
3171 
3172     if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
3173       // We are converting these to a BFE, so we need to add the missing
3174       // operands for the size and offset.
3175       unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
3176       Inst.addOperand(MachineOperand::CreateImm(0));
3177       Inst.addOperand(MachineOperand::CreateImm(Size));
3178 
3179     } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
3180       // The VALU version adds the second operand to the result, so insert an
3181       // extra 0 operand.
3182       Inst.addOperand(MachineOperand::CreateImm(0));
3183     }
3184 
3185     Inst.addImplicitDefUseOperands(*Inst.getParent()->getParent());
3186 
3187     if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
3188       const MachineOperand &OffsetWidthOp = Inst.getOperand(2);
3189       // If we need to move this to VGPRs, we need to unpack the second operand
3190       // back into the 2 separate ones for bit offset and width.
3191       assert(OffsetWidthOp.isImm() &&
3192              "Scalar BFE is only implemented for constant width and offset");
3193       uint32_t Imm = OffsetWidthOp.getImm();
3194 
3195       uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
3196       uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
3197       Inst.RemoveOperand(2);                     // Remove old immediate.
3198       Inst.addOperand(MachineOperand::CreateImm(Offset));
3199       Inst.addOperand(MachineOperand::CreateImm(BitWidth));
3200     }
3201 
3202     bool HasDst = Inst.getOperand(0).isReg() && Inst.getOperand(0).isDef();
3203     unsigned NewDstReg = AMDGPU::NoRegister;
3204     if (HasDst) {
3205       // Update the destination register class.
3206       const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(Inst);
3207       if (!NewDstRC)
3208         continue;
3209 
3210       unsigned DstReg = Inst.getOperand(0).getReg();
3211       if (Inst.isCopy() &&
3212           TargetRegisterInfo::isVirtualRegister(Inst.getOperand(1).getReg()) &&
3213           NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) {
3214         // Instead of creating a copy where src and dst are the same register
3215         // class, we just replace all uses of dst with src.  These kinds of
3216         // copies interfere with the heuristics MachineSink uses to decide
3217         // whether or not to split a critical edge, since the pass assumes
3218         // that copies will end up as machine instructions and not be
3219         // eliminated.
3220         addUsersToMoveToVALUWorklist(DstReg, MRI, Worklist);
3221         MRI.replaceRegWith(DstReg, Inst.getOperand(1).getReg());
3222         MRI.clearKillFlags(Inst.getOperand(1).getReg());
3223         Inst.getOperand(0).setReg(DstReg);
3224         continue;
3225       }
3226 
3227       NewDstReg = MRI.createVirtualRegister(NewDstRC);
3228       MRI.replaceRegWith(DstReg, NewDstReg);
3229     }
3230 
3231     // Legalize the operands
3232     legalizeOperands(Inst);
3233 
3234     if (HasDst)
3235      addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
3236   }
3237 }
3238 
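/// \brief Lower S_ABS_I32 to a VALU sequence: subtract the source from zero
/// and take the signed maximum of the source and its negation.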
3239 void SIInstrInfo::lowerScalarAbs(SmallVectorImpl<MachineInstr *> &Worklist,
3240                                  MachineInstr &Inst) const {
3241   MachineBasicBlock &MBB = *Inst.getParent();
3242   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3243   MachineBasicBlock::iterator MII = Inst;
3244   DebugLoc DL = Inst.getDebugLoc();
3245 
3246   MachineOperand &Dest = Inst.getOperand(0);
3247   MachineOperand &Src = Inst.getOperand(1);
3248   unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3249   unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3250 
3251   BuildMI(MBB, MII, DL, get(AMDGPU::V_SUB_I32_e32), TmpReg)
3252     .addImm(0)
3253     .addReg(Src.getReg());
3254 
3255   BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg)
3256     .addReg(Src.getReg())
3257     .addReg(TmpReg);
3258 
3259   MRI.replaceRegWith(Dest.getReg(), ResultReg);
3260   addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
3261 }
3262 
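/// \brief Split a 64-bit scalar unary operation into 32-bit VALU operations
/// on the sub0 and sub1 halves and recombine the results with a REG_SEQUENCE.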
3263 void SIInstrInfo::splitScalar64BitUnaryOp(
3264     SmallVectorImpl<MachineInstr *> &Worklist, MachineInstr &Inst,
3265     unsigned Opcode) const {
3266   MachineBasicBlock &MBB = *Inst.getParent();
3267   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3268 
3269   MachineOperand &Dest = Inst.getOperand(0);
3270   MachineOperand &Src0 = Inst.getOperand(1);
3271   DebugLoc DL = Inst.getDebugLoc();
3272 
3273   MachineBasicBlock::iterator MII = Inst;
3274 
3275   const MCInstrDesc &InstDesc = get(Opcode);
3276   const TargetRegisterClass *Src0RC = Src0.isReg() ?
3277     MRI.getRegClass(Src0.getReg()) :
3278     &AMDGPU::SGPR_32RegClass;
3279 
3280   const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
3281 
3282   MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
3283                                                        AMDGPU::sub0, Src0SubRC);
3284 
3285   const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
3286   const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
3287   const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
3288 
3289   unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
3290   BuildMI(MBB, MII, DL, InstDesc, DestSub0).add(SrcReg0Sub0);
3291 
3292   MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
3293                                                        AMDGPU::sub1, Src0SubRC);
3294 
3295   unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
3296   BuildMI(MBB, MII, DL, InstDesc, DestSub1).add(SrcReg0Sub1);
3297 
3298   unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC);
3299   BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
3300     .addReg(DestSub0)
3301     .addImm(AMDGPU::sub0)
3302     .addReg(DestSub1)
3303     .addImm(AMDGPU::sub1);
3304 
3305   MRI.replaceRegWith(Dest.getReg(), FullDestReg);
3306 
3307   // We don't need to legalizeOperands here because for a single operand, src0
3308   // will support any kind of input.
3309 
3310   // Move all users of this moved value.
3311   addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
3312 }
3313 
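/// \brief Split a 64-bit scalar binary operation into 32-bit VALU operations
/// on the sub0 and sub1 halves and recombine the results with a REG_SEQUENCE.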
3314 void SIInstrInfo::splitScalar64BitBinaryOp(
3315     SmallVectorImpl<MachineInstr *> &Worklist, MachineInstr &Inst,
3316     unsigned Opcode) const {
3317   MachineBasicBlock &MBB = *Inst.getParent();
3318   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3319 
3320   MachineOperand &Dest = Inst.getOperand(0);
3321   MachineOperand &Src0 = Inst.getOperand(1);
3322   MachineOperand &Src1 = Inst.getOperand(2);
3323   DebugLoc DL = Inst.getDebugLoc();
3324 
3325   MachineBasicBlock::iterator MII = Inst;
3326 
3327   const MCInstrDesc &InstDesc = get(Opcode);
3328   const TargetRegisterClass *Src0RC = Src0.isReg() ?
3329     MRI.getRegClass(Src0.getReg()) :
3330     &AMDGPU::SGPR_32RegClass;
3331 
3332   const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
3333   const TargetRegisterClass *Src1RC = Src1.isReg() ?
3334     MRI.getRegClass(Src1.getReg()) :
3335     &AMDGPU::SGPR_32RegClass;
3336 
3337   const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);
3338 
3339   MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
3340                                                        AMDGPU::sub0, Src0SubRC);
3341   MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
3342                                                        AMDGPU::sub0, Src1SubRC);
3343 
3344   const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
3345   const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
3346   const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
3347 
3348   unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
3349   MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0)
3350                               .add(SrcReg0Sub0)
3351                               .add(SrcReg1Sub0);
3352 
3353   MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
3354                                                        AMDGPU::sub1, Src0SubRC);
3355   MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
3356                                                        AMDGPU::sub1, Src1SubRC);
3357 
3358   unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
3359   MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1)
3360                               .add(SrcReg0Sub1)
3361                               .add(SrcReg1Sub1);
3362 
3363   unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC);
3364   BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
3365     .addReg(DestSub0)
3366     .addImm(AMDGPU::sub0)
3367     .addReg(DestSub1)
3368     .addImm(AMDGPU::sub1);
3369 
3370   MRI.replaceRegWith(Dest.getReg(), FullDestReg);
3371 
3372   // Try to legalize the operands in case we need to swap the order to keep it
3373   // valid.
3374   legalizeOperands(LoHalf);
3375   legalizeOperands(HiHalf);
3376 
3377   // Move all users of this moved value.
3378   addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
3379 }
3380 
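/// \brief Expand S_BCNT1_I32_B64 into two V_BCNT_U32_B32 instructions, feeding
/// the count of the low half in as the accumulator for the high half.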
3381 void SIInstrInfo::splitScalar64BitBCNT(
3382     SmallVectorImpl<MachineInstr *> &Worklist, MachineInstr &Inst) const {
3383   MachineBasicBlock &MBB = *Inst.getParent();
3384   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3385 
3386   MachineBasicBlock::iterator MII = Inst;
3387   DebugLoc DL = Inst.getDebugLoc();
3388 
3389   MachineOperand &Dest = Inst.getOperand(0);
3390   MachineOperand &Src = Inst.getOperand(1);
3391 
3392   const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64);
3393   const TargetRegisterClass *SrcRC = Src.isReg() ?
3394     MRI.getRegClass(Src.getReg()) :
3395     &AMDGPU::SGPR_32RegClass;
3396 
3397   unsigned MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3398   unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3399 
3400   const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0);
3401 
3402   MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
3403                                                       AMDGPU::sub0, SrcSubRC);
3404   MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
3405                                                       AMDGPU::sub1, SrcSubRC);
3406 
3407   BuildMI(MBB, MII, DL, InstDesc, MidReg).add(SrcRegSub0).addImm(0);
3408 
3409   BuildMI(MBB, MII, DL, InstDesc, ResultReg).add(SrcRegSub1).addReg(MidReg);
3410 
3411   MRI.replaceRegWith(Dest.getReg(), ResultReg);
3412 
3413   // We don't need to legalize operands here. src0 for either instruction can be
3414   // an SGPR, and the second input is unused or determined here.
3415   addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
3416 }
3417 
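/// \brief Expand a 64-bit scalar sign_extend_inreg (S_BFE_I64 with offset 0)
/// into a sign extension of the low 32 bits (via V_BFE_I32 when the width is
/// less than 32) plus an arithmetic shift that fills the high half with the
/// sign bit.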
3418 void SIInstrInfo::splitScalar64BitBFE(SmallVectorImpl<MachineInstr *> &Worklist,
3419                                       MachineInstr &Inst) const {
3420   MachineBasicBlock &MBB = *Inst.getParent();
3421   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3422   MachineBasicBlock::iterator MII = Inst;
3423   DebugLoc DL = Inst.getDebugLoc();
3424 
3425   MachineOperand &Dest = Inst.getOperand(0);
3426   uint32_t Imm = Inst.getOperand(2).getImm();
3427   uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
3428   uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
3429 
3430   (void) Offset;
3431 
3432   // Only sext_inreg cases handled.
3433   assert(Inst.getOpcode() == AMDGPU::S_BFE_I64 && BitWidth <= 32 &&
3434          Offset == 0 && "Not implemented");
3435 
3436   if (BitWidth < 32) {
3437     unsigned MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3438     unsigned MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3439     unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
3440 
3441     BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32), MidRegLo)
3442         .addReg(Inst.getOperand(1).getReg(), 0, AMDGPU::sub0)
3443         .addImm(0)
3444         .addImm(BitWidth);
3445 
3446     BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi)
3447       .addImm(31)
3448       .addReg(MidRegLo);
3449 
3450     BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
3451       .addReg(MidRegLo)
3452       .addImm(AMDGPU::sub0)
3453       .addReg(MidRegHi)
3454       .addImm(AMDGPU::sub1);
3455 
3456     MRI.replaceRegWith(Dest.getReg(), ResultReg);
3457     addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
3458     return;
3459   }
3460 
3461   MachineOperand &Src = Inst.getOperand(1);
3462   unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3463   unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
3464 
3465   BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg)
3466     .addImm(31)
3467     .addReg(Src.getReg(), 0, AMDGPU::sub0);
3468 
3469   BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
3470     .addReg(Src.getReg(), 0, AMDGPU::sub0)
3471     .addImm(AMDGPU::sub0)
3472     .addReg(TmpReg)
3473     .addImm(AMDGPU::sub1);
3474 
3475   MRI.replaceRegWith(Dest.getReg(), ResultReg);
3476   addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
3477 }
3478 
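/// \brief Add users of \p DstReg that cannot read a VGPR in the using operand
/// to the worklist, visiting each user instruction at most once.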
3479 void SIInstrInfo::addUsersToMoveToVALUWorklist(
3480   unsigned DstReg,
3481   MachineRegisterInfo &MRI,
3482   SmallVectorImpl<MachineInstr *> &Worklist) const {
3483   for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg),
3484          E = MRI.use_end(); I != E;) {
3485     MachineInstr &UseMI = *I->getParent();
3486     if (!canReadVGPR(UseMI, I.getOperandNo())) {
3487       Worklist.push_back(&UseMI);
3488 
3489       do {
3490         ++I;
3491       } while (I != E && I->getParent() == &UseMI);
3492     } else {
3493       ++I;
3494     }
3495   }
3496 }
3497 
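/// \brief Lower an S_PACK_* instruction to a VALU sequence that packs the
/// selected 16-bit halves of the two source operands into a 32-bit result.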
3498 void SIInstrInfo::movePackToVALU(SmallVectorImpl<MachineInstr *> &Worklist,
3499                                  MachineRegisterInfo &MRI,
3500                                  MachineInstr &Inst) const {
3501   unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3502   MachineBasicBlock *MBB = Inst.getParent();
3503   MachineOperand &Src0 = Inst.getOperand(1);
3504   MachineOperand &Src1 = Inst.getOperand(2);
3505   const DebugLoc &DL = Inst.getDebugLoc();
3506 
3507   switch (Inst.getOpcode()) {
3508   case AMDGPU::S_PACK_LL_B32_B16: {
3509     // v_pack_b32_f16 flushes denormals if they are not enabled, so only use
3510     // it when the default is to leave them untouched.
3511     // XXX: Does this do anything to NaNs?
3512     if (ST.hasFP16Denormals()) {
3513       BuildMI(*MBB, Inst, DL, get(AMDGPU::V_PACK_B32_F16), ResultReg)
3514         .addImm(0)  // src0_modifiers
3515         .add(Src0)  // src0
3516         .addImm(0)  // src1_modifiers
3517         .add(Src1)  // src1
3518         .addImm(0)  // clamp
3519         .addImm(0); // omod
3520     } else {
3521       unsigned ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3522       unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3523 
3524       // FIXME: Can do a lot better if we know the high bits of src0 or src1 are
3525       // 0.
3526       BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg)
3527         .addImm(0xffff);
3528 
3529       BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_B32_e64), TmpReg)
3530         .addReg(ImmReg, RegState::Kill)
3531         .add(Src0);
3532 
3533       BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHL_OR_B32), ResultReg)
3534         .add(Src1)
3535         .addImm(16)
3536         .addReg(TmpReg, RegState::Kill);
3537     }
3538 
3539     break;
3540   }
3541   case AMDGPU::S_PACK_LH_B32_B16: {
3542     unsigned ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3543     BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg)
3544       .addImm(0xffff);
3545     BuildMI(*MBB, Inst, DL, get(AMDGPU::V_BFI_B32), ResultReg)
3546       .addReg(ImmReg, RegState::Kill)
3547       .add(Src0)
3548       .add(Src1);
3549     break;
3550   }
3551   case AMDGPU::S_PACK_HH_B32_B16: {
3552     unsigned ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3553     unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3554     BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHRREV_B32_e64), TmpReg)
3555       .addImm(16)
3556       .add(Src0);
3557     BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg)
3558       .addImm(0xffff);
3559     BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_OR_B32), ResultReg)
3560       .add(Src1)
3561       .addReg(ImmReg, RegState::Kill)
3562       .addReg(TmpReg, RegState::Kill);
3563     break;
3564   }
3565   default:
3566     llvm_unreachable("unhandled s_pack_* instruction");
3567   }
3568 
3569   MachineOperand &Dest = Inst.getOperand(0);
3570   MRI.replaceRegWith(Dest.getReg(), ResultReg);
3571   addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
3572 }
3573 
3574 void SIInstrInfo::addSCCDefUsersToVALUWorklist(
3575     MachineInstr &SCCDefInst, SmallVectorImpl<MachineInstr *> &Worklist) const {
3576   // This assumes that all the users of SCC are in the same block
3577   // as the SCC def.
3578   for (MachineInstr &MI :
3579        llvm::make_range(MachineBasicBlock::iterator(SCCDefInst),
3580                         SCCDefInst.getParent()->end())) {
3581     // Exit if we find another SCC def.
3582     if (MI.findRegisterDefOperandIdx(AMDGPU::SCC) != -1)
3583       return;
3584 
3585     if (MI.findRegisterUseOperandIdx(AMDGPU::SCC) != -1)
3586       Worklist.push_back(&MI);
3587   }
3588 }
3589 
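/// \brief Return the register class the destination of \p Inst should have
/// once the instruction is moved to the VALU. For generic opcodes this is the
/// VGPR-equivalent of the current class, or null if the destination already
/// uses VGPRs.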
3590 const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass(
3591   const MachineInstr &Inst) const {
3592   const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0);
3593 
3594   switch (Inst.getOpcode()) {
3595   // For target instructions, getOpRegClass just returns the virtual register
3596   // class associated with the operand, so we need to find an equivalent VGPR
3597   // register class in order to move the instruction to the VALU.
3598   case AMDGPU::COPY:
3599   case AMDGPU::PHI:
3600   case AMDGPU::REG_SEQUENCE:
3601   case AMDGPU::INSERT_SUBREG:
3602     if (RI.hasVGPRs(NewDstRC))
3603       return nullptr;
3604 
3605     NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
3606     if (!NewDstRC)
3607       return nullptr;
3608     return NewDstRC;
3609   default:
3610     return NewDstRC;
3611   }
3612 }
3613 
3614 // Find the one SGPR operand we are allowed to use.
3615 unsigned SIInstrInfo::findUsedSGPR(const MachineInstr &MI,
3616                                    int OpIndices[3]) const {
3617   const MCInstrDesc &Desc = MI.getDesc();
3618 
3619   // Find the one SGPR operand we are allowed to use.
3620   //
3621   // First we need to consider the instruction's operand requirements before
3622   // legalizing. Some operands are required to be SGPRs, such as implicit uses
3623   // of VCC, but we are still bound by the constant bus requirement to only use
3624   // one.
3625   //
3626   // If the operand's class is an SGPR, we can never move it.
3627 
3628   unsigned SGPRReg = findImplicitSGPRRead(MI);
3629   if (SGPRReg != AMDGPU::NoRegister)
3630     return SGPRReg;
3631 
3632   unsigned UsedSGPRs[3] = { AMDGPU::NoRegister };
3633   const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
3634 
3635   for (unsigned i = 0; i < 3; ++i) {
3636     int Idx = OpIndices[i];
3637     if (Idx == -1)
3638       break;
3639 
3640     const MachineOperand &MO = MI.getOperand(Idx);
3641     if (!MO.isReg())
3642       continue;
3643 
3644     // Is this operand statically required to be an SGPR based on the operand
3645     // constraints?
3646     const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass);
3647     bool IsRequiredSGPR = RI.isSGPRClass(OpRC);
3648     if (IsRequiredSGPR)
3649       return MO.getReg();
3650 
3651     // If this could be a VGPR or an SGPR, check the dynamic register class.
3652     unsigned Reg = MO.getReg();
3653     const TargetRegisterClass *RegRC = MRI.getRegClass(Reg);
3654     if (RI.isSGPRClass(RegRC))
3655       UsedSGPRs[i] = Reg;
3656   }
3657 
3658   // We don't have a required SGPR operand, so we have a bit more freedom in
3659   // selecting operands to move.
3660 
3661   // Try to select the most used SGPR. If an SGPR is equal to one of the
3662   // others, we choose that.
3663   //
3664   // e.g.
3665   // V_FMA_F32 v0, s0, s0, s0 -> No moves
3666   // V_FMA_F32 v0, s0, s1, s0 -> Move s1
3667 
3668   // TODO: If some of the operands are 64-bit SGPRs and some 32, we should
3669   // prefer those.
3670 
3671   if (UsedSGPRs[0] != AMDGPU::NoRegister) {
3672     if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2])
3673       SGPRReg = UsedSGPRs[0];
3674   }
3675 
3676   if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) {
3677     if (UsedSGPRs[1] == UsedSGPRs[2])
3678       SGPRReg = UsedSGPRs[1];
3679   }
3680 
3681   return SGPRReg;
3682 }
3683 
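/// \brief Return the operand of \p MI with the given operand name, or nullptr
/// if \p MI has no such operand.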
3684 MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI,
3685                                              unsigned OperandName) const {
3686   int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName);
3687   if (Idx == -1)
3688     return nullptr;
3689 
3690   return &MI.getOperand(Idx);
3691 }
3692 
3693 uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const {
3694   uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT;
3695   if (ST.isAmdHsaOS()) {
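    // Set ATC = 1.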
3696     RsrcDataFormat |= (1ULL << 56);
3697 
3698     if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
3699       // Set MTYPE = 2
3700       RsrcDataFormat |= (2ULL << 59);
3701   }
3702 
3703   return RsrcDataFormat;
3704 }
3705 
3706 uint64_t SIInstrInfo::getScratchRsrcWords23() const {
3707   uint64_t Rsrc23 = getDefaultRsrcDataFormat() |
3708                     AMDGPU::RSRC_TID_ENABLE |
3709                     0xffffffff; // Size
3710 
3711   uint64_t EltSizeValue = Log2_32(ST.getMaxPrivateElementSize()) - 1;
3712 
3713   Rsrc23 |= (EltSizeValue << AMDGPU::RSRC_ELEMENT_SIZE_SHIFT) |
3714             // IndexStride = 64
3715             (UINT64_C(3) << AMDGPU::RSRC_INDEX_STRIDE_SHIFT);
3716 
3717   // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17].
3718   // Clear them unless we want a huge stride.
3719   if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
3720     Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT;
3721 
3722   return Rsrc23;
3723 }
3724 
3725 bool SIInstrInfo::isLowLatencyInstruction(const MachineInstr &MI) const {
3726   unsigned Opc = MI.getOpcode();
3727 
3728   return isSMRD(Opc);
3729 }
3730 
3731 bool SIInstrInfo::isHighLatencyInstruction(const MachineInstr &MI) const {
3732   unsigned Opc = MI.getOpcode();
3733 
3734   return isMUBUF(Opc) || isMTBUF(Opc) || isMIMG(Opc);
3735 }
3736 
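/// \brief If \p MI accesses a frame index through its vaddr operand, set
/// \p FrameIndex and return the register in its vdata operand; otherwise
/// return AMDGPU::NoRegister.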
3737 unsigned SIInstrInfo::isStackAccess(const MachineInstr &MI,
3738                                     int &FrameIndex) const {
3739   const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
3740   if (!Addr || !Addr->isFI())
3741     return AMDGPU::NoRegister;
3742 
3743   assert(!MI.memoperands_empty() &&
3744          (*MI.memoperands_begin())->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS);
3745 
3746   FrameIndex = Addr->getIndex();
3747   return getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg();
3748 }
3749 
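/// \brief Set \p FrameIndex from the frame index in the addr operand and
/// return the register in the data operand. Used for SGPR spill pseudos.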
3750 unsigned SIInstrInfo::isSGPRStackAccess(const MachineInstr &MI,
3751                                         int &FrameIndex) const {
3752   const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::addr);
3753   assert(Addr && Addr->isFI());
3754   FrameIndex = Addr->getIndex();
3755   return getNamedOperand(MI, AMDGPU::OpName::data)->getReg();
3756 }
3757 
3758 unsigned SIInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
3759                                           int &FrameIndex) const {
3760 
3761   if (!MI.mayLoad())
3762     return AMDGPU::NoRegister;
3763 
3764   if (isMUBUF(MI) || isVGPRSpill(MI))
3765     return isStackAccess(MI, FrameIndex);
3766 
3767   if (isSGPRSpill(MI))
3768     return isSGPRStackAccess(MI, FrameIndex);
3769 
3770   return AMDGPU::NoRegister;
3771 }
3772 
3773 unsigned SIInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
3774                                          int &FrameIndex) const {
3775   if (!MI.mayStore())
3776     return AMDGPU::NoRegister;
3777 
3778   if (isMUBUF(MI) || isVGPRSpill(MI))
3779     return isStackAccess(MI, FrameIndex);
3780 
3781   if (isSGPRSpill(MI))
3782     return isSGPRStackAccess(MI, FrameIndex);
3783 
3784   return AMDGPU::NoRegister;
3785 }
3786 
3787 unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
3788   unsigned Opc = MI.getOpcode();
3789   const MCInstrDesc &Desc = getMCOpcodeFromPseudo(Opc);
3790   unsigned DescSize = Desc.getSize();
3791 
3792   // If we have a definitive size, we can use it. Otherwise we need to inspect
3793   // the operands to know the size.
3794   //
3795   // FIXME: Instructions that have a base 32-bit encoding report their size as
3796   // 4, even though they are really 8 bytes if they have a literal operand.
3797   if (DescSize != 0 && DescSize != 4)
3798     return DescSize;
3799 
3800   if (Opc == AMDGPU::WAVE_BARRIER)
3801     return 0;
3802 
3803   // 4-byte instructions may have a 32-bit literal encoded after them. Check
3804   // operands that could ever be literals.
3805   if (isVALU(MI) || isSALU(MI)) {
3806     if (isFixedSize(MI)) {
3807       assert(DescSize == 4);
3808       return DescSize;
3809     }
3810 
3811     int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
3812     if (Src0Idx == -1)
3813       return 4; // No operands.
3814 
3815     if (isLiteralConstantLike(MI.getOperand(Src0Idx), Desc.OpInfo[Src0Idx]))
3816       return 8;
3817 
3818     int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
3819     if (Src1Idx == -1)
3820       return 4;
3821 
3822     if (isLiteralConstantLike(MI.getOperand(Src1Idx), Desc.OpInfo[Src1Idx]))
3823       return 8;
3824 
3825     return 4;
3826   }
3827 
3828   if (DescSize == 4)
3829     return 4;
3830 
3831   switch (Opc) {
3832   case AMDGPU::SI_MASK_BRANCH:
3833   case TargetOpcode::IMPLICIT_DEF:
3834   case TargetOpcode::KILL:
3835   case TargetOpcode::DBG_VALUE:
3836   case TargetOpcode::BUNDLE:
3837   case TargetOpcode::EH_LABEL:
3838     return 0;
3839   case TargetOpcode::INLINEASM: {
3840     const MachineFunction *MF = MI.getParent()->getParent();
3841     const char *AsmStr = MI.getOperand(0).getSymbolName();
3842     return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
3843   }
3844   default:
3845     llvm_unreachable("unable to find instruction size");
3846   }
3847 }
3848 
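/// \brief Return true if \p MI is a FLAT instruction that may access the flat
/// address space. A FLAT instruction with no memory operands is conservatively
/// assumed to.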
3849 bool SIInstrInfo::mayAccessFlatAddressSpace(const MachineInstr &MI) const {
3850   if (!isFLAT(MI))
3851     return false;
3852 
3853   if (MI.memoperands_empty())
3854     return true;
3855 
3856   for (const MachineMemOperand *MMO : MI.memoperands()) {
3857     if (MMO->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS)
3858       return true;
3859   }
3860   return false;
3861 }
3862 
3863 ArrayRef<std::pair<int, const char *>>
3864 SIInstrInfo::getSerializableTargetIndices() const {
3865   static const std::pair<int, const char *> TargetIndices[] = {
3866       {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"},
3867       {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"},
3868       {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"},
3869       {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"},
3870       {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}};
3871   return makeArrayRef(TargetIndices);
3872 }
3873 
3874 /// This is used by the post-RA scheduler (PostRASchedulerList.cpp).  The
3875 /// post-RA version of misched uses CreateTargetMIHazardRecognizer.
3876 ScheduleHazardRecognizer *
3877 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
3878                                             const ScheduleDAG *DAG) const {
3879   return new GCNHazardRecognizer(DAG->MF);
3880 }
3881 
3882 /// This is the hazard recognizer used at -O0 by the PostRAHazardRecognizer
3883 /// pass.
3884 ScheduleHazardRecognizer *
3885 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const {
3886   return new GCNHazardRecognizer(MF);
3887 }
3888 
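/// \brief A non-terminator instruction other than a COPY that modifies EXEC
/// is considered part of the basic block prologue.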
3889 bool SIInstrInfo::isBasicBlockPrologue(const MachineInstr &MI) const {
3890   return !MI.isTerminator() && MI.getOpcode() != AMDGPU::COPY &&
3891          MI.modifiesRegister(AMDGPU::EXEC, &RI);
3892 }
3893