1 //===-- SIInstrInfo.cpp - SI Instruction Information  ---------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 /// \file
11 /// \brief SI Implementation of TargetInstrInfo.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "SIInstrInfo.h"
16 #include "AMDGPUTargetMachine.h"
17 #include "GCNHazardRecognizer.h"
18 #include "SIDefines.h"
19 #include "SIMachineFunctionInfo.h"
20 #include "llvm/CodeGen/MachineFrameInfo.h"
21 #include "llvm/CodeGen/MachineInstrBuilder.h"
22 #include "llvm/CodeGen/MachineRegisterInfo.h"
23 #include "llvm/CodeGen/ScheduleDAG.h"
24 #include "llvm/IR/Function.h"
25 #include "llvm/CodeGen/RegisterScavenging.h"
26 #include "llvm/MC/MCInstrDesc.h"
27 #include "llvm/Support/Debug.h"
28 
29 using namespace llvm;
30 
31 // Must be at least 4 to be able to branch over minimum unconditional branch
32 // code. This is only for making it possible to write reasonably small tests for
33 // long branches.
34 static cl::opt<unsigned>
35 BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16),
36                  cl::desc("Restrict range of branch instructions (DEBUG)"));
37 
38 SIInstrInfo::SIInstrInfo(const SISubtarget &ST)
39   : AMDGPUInstrInfo(ST), RI(), ST(ST) {}
40 
41 //===----------------------------------------------------------------------===//
42 // TargetInstrInfo callbacks
43 //===----------------------------------------------------------------------===//
44 
45 static unsigned getNumOperandsNoGlue(SDNode *Node) {
46   unsigned N = Node->getNumOperands();
47   while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
48     --N;
49   return N;
50 }
51 
52 static SDValue findChainOperand(SDNode *Load) {
53   SDValue LastOp = Load->getOperand(getNumOperandsNoGlue(Load) - 1);
54   assert(LastOp.getValueType() == MVT::Other && "Chain missing from load node");
55   return LastOp;
56 }
57 
58 /// \brief Returns true if both nodes have the same value for the given
59 ///        operand \p OpName, or if both nodes do not have this operand.
60 static bool nodesHaveSameOperandValue(SDNode *N0, SDNode* N1, unsigned OpName) {
61   unsigned Opc0 = N0->getMachineOpcode();
62   unsigned Opc1 = N1->getMachineOpcode();
63 
64   int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
65   int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);
66 
67   if (Op0Idx == -1 && Op1Idx == -1)
68     return true;
69 
71   if ((Op0Idx == -1 && Op1Idx != -1) ||
72       (Op1Idx == -1 && Op0Idx != -1))
73     return false;
74 
75   // getNamedOperandIdx returns the index for the MachineInstr's operands,
76   // which includes the result as the first operand. We are indexing into the
77   // MachineSDNode's operands, so we need to skip the result operand to get
78   // the real index.
79   --Op0Idx;
80   --Op1Idx;
81 
82   return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
83 }
84 
85 bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
86                                                     AliasAnalysis *AA) const {
87   // TODO: The generic check fails for VALU instructions that should be
88   // rematerializable due to their implicit reads of exec. We really want all
89   // of the generic logic for this except for the exec read check.
90   switch (MI.getOpcode()) {
91   case AMDGPU::V_MOV_B32_e32:
92   case AMDGPU::V_MOV_B32_e64:
93   case AMDGPU::V_MOV_B64_PSEUDO:
94     return true;
95   default:
96     return false;
97   }
98 }
99 
100 bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
101                                           int64_t &Offset0,
102                                           int64_t &Offset1) const {
103   if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
104     return false;
105 
106   unsigned Opc0 = Load0->getMachineOpcode();
107   unsigned Opc1 = Load1->getMachineOpcode();
108 
109   // Make sure both are actually loads.
110   if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
111     return false;
112 
113   if (isDS(Opc0) && isDS(Opc1)) {
114 
115     // FIXME: Handle this case:
116     if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1))
117       return false;
118 
119     // Check base reg.
120     if (Load0->getOperand(1) != Load1->getOperand(1))
121       return false;
122 
123     // Check chain.
124     if (findChainOperand(Load0) != findChainOperand(Load1))
125       return false;
126 
127     // Skip read2 / write2 variants for simplicity.
128     // TODO: We should report true if the used offsets are adjacent (excluding
129     // the st64 versions).
130     if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::data1) != -1 ||
131         AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::data1) != -1)
132       return false;
133 
134     Offset0 = cast<ConstantSDNode>(Load0->getOperand(2))->getZExtValue();
135     Offset1 = cast<ConstantSDNode>(Load1->getOperand(2))->getZExtValue();
136     return true;
137   }
138 
139   if (isSMRD(Opc0) && isSMRD(Opc1)) {
140     assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1));
141 
142     // Check base reg.
143     if (Load0->getOperand(0) != Load1->getOperand(0))
144       return false;
145 
146     const ConstantSDNode *Load0Offset =
147         dyn_cast<ConstantSDNode>(Load0->getOperand(1));
148     const ConstantSDNode *Load1Offset =
149         dyn_cast<ConstantSDNode>(Load1->getOperand(1));
150 
151     if (!Load0Offset || !Load1Offset)
152       return false;
153 
154     // Check chain.
155     if (findChainOperand(Load0) != findChainOperand(Load1))
156       return false;
157 
158     Offset0 = Load0Offset->getZExtValue();
159     Offset1 = Load1Offset->getZExtValue();
160     return true;
161   }
162 
163   // MUBUF and MTBUF can access the same addresses.
164   if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) {
165 
166     // MUBUF and MTBUF have vaddr at different indices.
167     if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||
168         findChainOperand(Load0) != findChainOperand(Load1) ||
169         !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||
170         !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))
171       return false;
172 
173     int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
174     int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);
175 
176     if (OffIdx0 == -1 || OffIdx1 == -1)
177       return false;
178 
179     // getNamedOperandIdx returns the index for MachineInstrs. Since they
180     // include the output in the operand list, but SDNodes don't, we need to
181     // decrement the index by one.
182     --OffIdx0;
183     --OffIdx1;
184 
185     SDValue Off0 = Load0->getOperand(OffIdx0);
186     SDValue Off1 = Load1->getOperand(OffIdx1);
187 
188     // The offset might be a FrameIndexSDNode.
189     if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
190       return false;
191 
192     Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
193     Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
194     return true;
195   }
196 
197   return false;
198 }
199 
200 static bool isStride64(unsigned Opc) {
201   switch (Opc) {
202   case AMDGPU::DS_READ2ST64_B32:
203   case AMDGPU::DS_READ2ST64_B64:
204   case AMDGPU::DS_WRITE2ST64_B32:
205   case AMDGPU::DS_WRITE2ST64_B64:
206     return true;
207   default:
208     return false;
209   }
210 }
211 
212 bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
213                                         int64_t &Offset,
214                                         const TargetRegisterInfo *TRI) const {
215   unsigned Opc = LdSt.getOpcode();
216 
217   if (isDS(LdSt)) {
218     const MachineOperand *OffsetImm =
219         getNamedOperand(LdSt, AMDGPU::OpName::offset);
220     if (OffsetImm) {
221       // Normal, single offset LDS instruction.
222       const MachineOperand *AddrReg =
223           getNamedOperand(LdSt, AMDGPU::OpName::addr);
224 
225       BaseReg = AddrReg->getReg();
226       Offset = OffsetImm->getImm();
227       return true;
228     }
229 
230     // The 2 offset instructions use offset0 and offset1 instead. We can treat
231     // these as a load with a single offset if the 2 offsets are consecutive. We
232     // will use this for some partially aligned loads.
233     const MachineOperand *Offset0Imm =
234         getNamedOperand(LdSt, AMDGPU::OpName::offset0);
235     const MachineOperand *Offset1Imm =
236         getNamedOperand(LdSt, AMDGPU::OpName::offset1);
237 
238     uint8_t Offset0 = Offset0Imm->getImm();
239     uint8_t Offset1 = Offset1Imm->getImm();
240 
241     if (Offset1 > Offset0 && Offset1 - Offset0 == 1) {
242       // Each of these offsets is in element-sized units, so we need to convert
243       // to bytes for the individual reads.
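      // For a load, the data operand is the two-element result register, so each
      // element is half its size (e.g. the VReg_64 result of a ds_read2_b32 gives
      // a 4 byte element). For a store, the data0 operand is already a single
      // element.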
244 
245       unsigned EltSize;
246       if (LdSt.mayLoad())
247         EltSize = getOpRegClass(LdSt, 0)->getSize() / 2;
248       else {
249         assert(LdSt.mayStore());
250         int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
251         EltSize = getOpRegClass(LdSt, Data0Idx)->getSize();
252       }
253 
254       if (isStride64(Opc))
255         EltSize *= 64;
256 
257       const MachineOperand *AddrReg =
258           getNamedOperand(LdSt, AMDGPU::OpName::addr);
259       BaseReg = AddrReg->getReg();
260       Offset = EltSize * Offset0;
261       return true;
262     }
263 
264     return false;
265   }
266 
267   if (isMUBUF(LdSt) || isMTBUF(LdSt)) {
268     const MachineOperand *SOffset = getNamedOperand(LdSt, AMDGPU::OpName::soffset);
269     if (SOffset && SOffset->isReg())
270       return false;
271 
272     const MachineOperand *AddrReg =
273         getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
274     if (!AddrReg)
275       return false;
276 
277     const MachineOperand *OffsetImm =
278         getNamedOperand(LdSt, AMDGPU::OpName::offset);
279     BaseReg = AddrReg->getReg();
280     Offset = OffsetImm->getImm();
281 
282     if (SOffset) // soffset can be an inline immediate.
283       Offset += SOffset->getImm();
284 
285     return true;
286   }
287 
288   if (isSMRD(LdSt)) {
289     const MachineOperand *OffsetImm =
290         getNamedOperand(LdSt, AMDGPU::OpName::offset);
291     if (!OffsetImm)
292       return false;
293 
294     const MachineOperand *SBaseReg =
295         getNamedOperand(LdSt, AMDGPU::OpName::sbase);
296     BaseReg = SBaseReg->getReg();
297     Offset = OffsetImm->getImm();
298     return true;
299   }
300 
301   if (isFLAT(LdSt)) {
302     const MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::addr);
303     BaseReg = AddrReg->getReg();
304     Offset = 0;
305     return true;
306   }
307 
308   return false;
309 }
310 
311 bool SIInstrInfo::shouldClusterMemOps(MachineInstr &FirstLdSt,
312                                       MachineInstr &SecondLdSt,
313                                       unsigned NumLoads) const {
314   const MachineOperand *FirstDst = nullptr;
315   const MachineOperand *SecondDst = nullptr;
316 
317   if (isDS(FirstLdSt) && isDS(SecondLdSt)) {
318     FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdst);
319     SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdst);
320   }
321 
322   if (isSMRD(FirstLdSt) && isSMRD(SecondLdSt)) {
323     FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::sdst);
324     SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::sdst);
325   }
326 
327   if ((isMUBUF(FirstLdSt) && isMUBUF(SecondLdSt)) ||
328       (isMTBUF(FirstLdSt) && isMTBUF(SecondLdSt))) {
329     FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdata);
330     SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdata);
331   }
332 
333   if (!FirstDst || !SecondDst)
334     return false;
335 
336   // Try to limit clustering based on the total number of bytes loaded
337   // rather than the number of instructions.  This is done to help reduce
338   // register pressure.  The method used is somewhat inexact, though,
339   // because it assumes that all loads in the cluster will load the
340   // same number of bytes as FirstLdSt.
341 
342   // The unit of this value is bytes.
343   // FIXME: This needs finer tuning.
344   unsigned LoadClusterThreshold = 16;
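  // With a 16 byte threshold this permits, for example, four 32-bit loads or
  // two 64-bit loads of the same kind to be clustered.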
345 
346   const MachineRegisterInfo &MRI =
347       FirstLdSt.getParent()->getParent()->getRegInfo();
348   const TargetRegisterClass *DstRC = MRI.getRegClass(FirstDst->getReg());
349 
350   return (NumLoads * DstRC->getSize()) <= LoadClusterThreshold;
351 }
352 
353 void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
354                               MachineBasicBlock::iterator MI,
355                               const DebugLoc &DL, unsigned DestReg,
356                               unsigned SrcReg, bool KillSrc) const {
357   const TargetRegisterClass *RC = RI.getPhysRegClass(DestReg);
358 
359   if (RC == &AMDGPU::VGPR_32RegClass) {
360     assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
361            AMDGPU::SReg_32RegClass.contains(SrcReg));
362     BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
363       .addReg(SrcReg, getKillRegState(KillSrc));
364     return;
365   }
366 
367   if (RC == &AMDGPU::SReg_32RegClass) {
368     if (SrcReg == AMDGPU::SCC) {
369       BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B32), DestReg)
370           .addImm(-1)
371           .addImm(0);
372       return;
373     }
374 
375     assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
376     BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
377             .addReg(SrcReg, getKillRegState(KillSrc));
378     return;
379   }
380 
381   if (RC == &AMDGPU::SReg_64RegClass) {
382     if (DestReg == AMDGPU::VCC) {
383       if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
384         BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
385           .addReg(SrcReg, getKillRegState(KillSrc));
386       } else {
387         // FIXME: Hack until VReg_1 removed.
388         assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
389         BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
390           .addImm(0)
391           .addReg(SrcReg, getKillRegState(KillSrc));
392       }
393 
394       return;
395     }
396 
397     assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
398     BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
399             .addReg(SrcReg, getKillRegState(KillSrc));
400     return;
401   }
402 
403   if (DestReg == AMDGPU::SCC) {
404     assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
405     BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32))
406       .addReg(SrcReg, getKillRegState(KillSrc))
407       .addImm(0);
408     return;
409   }
410 
411   unsigned EltSize = 4;
412   unsigned Opcode = AMDGPU::V_MOV_B32_e32;
413   if (RI.isSGPRClass(RC)) {
414     if (RC->getSize() > 4) {
415       Opcode = AMDGPU::S_MOV_B64;
416       EltSize = 8;
417     } else {
418       Opcode = AMDGPU::S_MOV_B32;
419       EltSize = 4;
420     }
421   }
422 
423   ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize);
424   bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg);
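  // For example, an SReg_128 copy is split into two S_MOV_B64s over 64-bit
  // sub-registers, while a VReg_128 copy becomes four V_MOV_B32s. When DestReg
  // and SrcReg overlap, Forward chooses a copy order that does not clobber
  // source sub-registers before they have been read.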
425 
426   for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
427     unsigned SubIdx;
428     if (Forward)
429       SubIdx = SubIndices[Idx];
430     else
431       SubIdx = SubIndices[SubIndices.size() - Idx - 1];
432 
433     MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
434       get(Opcode), RI.getSubReg(DestReg, SubIdx));
435 
436     Builder.addReg(RI.getSubReg(SrcReg, SubIdx));
437 
438     if (Idx == SubIndices.size() - 1)
439       Builder.addReg(SrcReg, getKillRegState(KillSrc) | RegState::Implicit);
440 
441     if (Idx == 0)
442       Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
443 
444     Builder.addReg(SrcReg, RegState::Implicit);
445   }
446 }
447 
448 int SIInstrInfo::commuteOpcode(unsigned Opcode) const {
449   int NewOpc;
450 
451   // Try to map original to commuted opcode
452   NewOpc = AMDGPU::getCommuteRev(Opcode);
453   if (NewOpc != -1)
454     // Check if the commuted (REV) opcode exists on the target.
455     return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;
456 
457   // Try to map commuted to original opcode
458   NewOpc = AMDGPU::getCommuteOrig(Opcode);
459   if (NewOpc != -1)
460     // Check if the original (non-REV) opcode exists on the target.
461     return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;
462 
463   return Opcode;
464 }
465 
466 unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {
467 
468   if (DstRC->getSize() == 4) {
469     return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
470   } else if (DstRC->getSize() == 8 && RI.isSGPRClass(DstRC)) {
471     return AMDGPU::S_MOV_B64;
472   } else if (DstRC->getSize() == 8 && !RI.isSGPRClass(DstRC)) {
473     return AMDGPU::V_MOV_B64_PSEUDO;
474   }
475   return AMDGPU::COPY;
476 }
477 
478 static unsigned getSGPRSpillSaveOpcode(unsigned Size) {
479   switch (Size) {
480   case 4:
481     return AMDGPU::SI_SPILL_S32_SAVE;
482   case 8:
483     return AMDGPU::SI_SPILL_S64_SAVE;
484   case 16:
485     return AMDGPU::SI_SPILL_S128_SAVE;
486   case 32:
487     return AMDGPU::SI_SPILL_S256_SAVE;
488   case 64:
489     return AMDGPU::SI_SPILL_S512_SAVE;
490   default:
491     llvm_unreachable("unknown register size");
492   }
493 }
494 
495 static unsigned getVGPRSpillSaveOpcode(unsigned Size) {
496   switch (Size) {
497   case 4:
498     return AMDGPU::SI_SPILL_V32_SAVE;
499   case 8:
500     return AMDGPU::SI_SPILL_V64_SAVE;
501   case 12:
502     return AMDGPU::SI_SPILL_V96_SAVE;
503   case 16:
504     return AMDGPU::SI_SPILL_V128_SAVE;
505   case 32:
506     return AMDGPU::SI_SPILL_V256_SAVE;
507   case 64:
508     return AMDGPU::SI_SPILL_V512_SAVE;
509   default:
510     llvm_unreachable("unknown register size");
511   }
512 }
513 
514 void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
515                                       MachineBasicBlock::iterator MI,
516                                       unsigned SrcReg, bool isKill,
517                                       int FrameIndex,
518                                       const TargetRegisterClass *RC,
519                                       const TargetRegisterInfo *TRI) const {
520   MachineFunction *MF = MBB.getParent();
521   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
522   MachineFrameInfo &FrameInfo = MF->getFrameInfo();
523   DebugLoc DL = MBB.findDebugLoc(MI);
524 
525   unsigned Size = FrameInfo.getObjectSize(FrameIndex);
526   unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
527   MachinePointerInfo PtrInfo
528     = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
529   MachineMemOperand *MMO
530     = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
531                                Size, Align);
532 
533   if (RI.isSGPRClass(RC)) {
534     MFI->setHasSpilledSGPRs();
535 
536     // We are only allowed to create one new instruction when spilling
537     // registers, so we need to use a pseudo instruction for spilling SGPRs.
538     const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(RC->getSize()));
539 
540     // The SGPR spill/restore instructions only work on numbered SGPRs, so we
541     // need to make sure we are using the correct register class.
542     if (TargetRegisterInfo::isVirtualRegister(SrcReg) && RC->getSize() == 4) {
543       MachineRegisterInfo &MRI = MF->getRegInfo();
544       MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0RegClass);
545     }
546 
547     MachineInstrBuilder Spill = BuildMI(MBB, MI, DL, OpDesc)
548       .addReg(SrcReg, getKillRegState(isKill)) // data
549       .addFrameIndex(FrameIndex)               // addr
550       .addMemOperand(MMO)
551       .addReg(MFI->getScratchRSrcReg(), RegState::Implicit)
552       .addReg(MFI->getScratchWaveOffsetReg(), RegState::Implicit);
553     // Add the scratch resource registers as implicit uses because we may end up
554     // needing them, and need to ensure that the reserved registers are
555     // correctly handled.
556 
557     if (ST.hasScalarStores()) {
558       // m0 is used as the offset for scalar stores if they are used to spill.
559       Spill.addReg(AMDGPU::M0, RegState::ImplicitDefine);
560     }
561 
562     return;
563   }
564 
565   if (!ST.isVGPRSpillingEnabled(*MF->getFunction())) {
566     LLVMContext &Ctx = MF->getFunction()->getContext();
567     Ctx.emitError("SIInstrInfo::storeRegToStackSlot - Do not know how to"
568                   " spill register");
569     BuildMI(MBB, MI, DL, get(AMDGPU::KILL))
570       .addReg(SrcReg);
571 
572     return;
573   }
574 
575   assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected");
576 
577   unsigned Opcode = getVGPRSpillSaveOpcode(RC->getSize());
578   MFI->setHasSpilledVGPRs();
579   BuildMI(MBB, MI, DL, get(Opcode))
580     .addReg(SrcReg, getKillRegState(isKill)) // data
581     .addFrameIndex(FrameIndex)               // addr
582     .addReg(MFI->getScratchRSrcReg())        // scratch_rsrc
583     .addReg(MFI->getScratchWaveOffsetReg())  // scratch_offset
584     .addImm(0)                               // offset
585     .addMemOperand(MMO);
586 }
587 
588 static unsigned getSGPRSpillRestoreOpcode(unsigned Size) {
589   switch (Size) {
590   case 4:
591     return AMDGPU::SI_SPILL_S32_RESTORE;
592   case 8:
593     return AMDGPU::SI_SPILL_S64_RESTORE;
594   case 16:
595     return AMDGPU::SI_SPILL_S128_RESTORE;
596   case 32:
597     return AMDGPU::SI_SPILL_S256_RESTORE;
598   case 64:
599     return AMDGPU::SI_SPILL_S512_RESTORE;
600   default:
601     llvm_unreachable("unknown register size");
602   }
603 }
604 
605 static unsigned getVGPRSpillRestoreOpcode(unsigned Size) {
606   switch (Size) {
607   case 4:
608     return AMDGPU::SI_SPILL_V32_RESTORE;
609   case 8:
610     return AMDGPU::SI_SPILL_V64_RESTORE;
611   case 12:
612     return AMDGPU::SI_SPILL_V96_RESTORE;
613   case 16:
614     return AMDGPU::SI_SPILL_V128_RESTORE;
615   case 32:
616     return AMDGPU::SI_SPILL_V256_RESTORE;
617   case 64:
618     return AMDGPU::SI_SPILL_V512_RESTORE;
619   default:
620     llvm_unreachable("unknown register size");
621   }
622 }
623 
624 void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
625                                        MachineBasicBlock::iterator MI,
626                                        unsigned DestReg, int FrameIndex,
627                                        const TargetRegisterClass *RC,
628                                        const TargetRegisterInfo *TRI) const {
629   MachineFunction *MF = MBB.getParent();
630   const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
631   MachineFrameInfo &FrameInfo = MF->getFrameInfo();
632   DebugLoc DL = MBB.findDebugLoc(MI);
633   unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
634   unsigned Size = FrameInfo.getObjectSize(FrameIndex);
635 
636   MachinePointerInfo PtrInfo
637     = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
638 
639   MachineMemOperand *MMO = MF->getMachineMemOperand(
640     PtrInfo, MachineMemOperand::MOLoad, Size, Align);
641 
642   if (RI.isSGPRClass(RC)) {
643     // FIXME: Maybe this should not include a memoperand because it will be
644     // lowered to non-memory instructions.
645     const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(RC->getSize()));
646     if (TargetRegisterInfo::isVirtualRegister(DestReg) && RC->getSize() == 4) {
647       MachineRegisterInfo &MRI = MF->getRegInfo();
648       MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0RegClass);
649     }
650 
651     MachineInstrBuilder Spill = BuildMI(MBB, MI, DL, OpDesc, DestReg)
652       .addFrameIndex(FrameIndex) // addr
653       .addMemOperand(MMO)
654       .addReg(MFI->getScratchRSrcReg(), RegState::Implicit)
655       .addReg(MFI->getScratchWaveOffsetReg(), RegState::Implicit);
656 
657     if (ST.hasScalarStores()) {
658       // m0 is used as the offset for scalar stores if they are used to spill.
659       Spill.addReg(AMDGPU::M0, RegState::ImplicitDefine);
660     }
661 
662     return;
663   }
664 
665   if (!ST.isVGPRSpillingEnabled(*MF->getFunction())) {
666     LLVMContext &Ctx = MF->getFunction()->getContext();
667     Ctx.emitError("SIInstrInfo::loadRegFromStackSlot - Do not know how to"
668                   " restore register");
669     BuildMI(MBB, MI, DL, get(AMDGPU::IMPLICIT_DEF), DestReg);
670 
671     return;
672   }
673 
674   assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected");
675 
676   unsigned Opcode = getVGPRSpillRestoreOpcode(RC->getSize());
677   BuildMI(MBB, MI, DL, get(Opcode), DestReg)
678     .addFrameIndex(FrameIndex)              // vaddr
679     .addReg(MFI->getScratchRSrcReg())       // scratch_rsrc
680     .addReg(MFI->getScratchWaveOffsetReg()) // scratch_offset
681     .addImm(0)                              // offset
682     .addMemOperand(MMO);
683 }
684 
685 /// \param FrameOffset Offset in bytes of the FrameIndex being spilled
686 unsigned SIInstrInfo::calculateLDSSpillAddress(
687     MachineBasicBlock &MBB, MachineInstr &MI, RegScavenger *RS, unsigned TmpReg,
688     unsigned FrameOffset, unsigned Size) const {
689   MachineFunction *MF = MBB.getParent();
690   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
691   const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
692   const SIRegisterInfo *TRI = ST.getRegisterInfo();
693   DebugLoc DL = MBB.findDebugLoc(MI);
694   unsigned WorkGroupSize = MFI->getMaxFlatWorkGroupSize();
695   unsigned WavefrontSize = ST.getWavefrontSize();
696 
697   unsigned TIDReg = MFI->getTIDReg();
698   if (!MFI->hasCalculatedTID()) {
699     MachineBasicBlock &Entry = MBB.getParent()->front();
700     MachineBasicBlock::iterator Insert = Entry.front();
701     DebugLoc DL = Insert->getDebugLoc();
702 
703     TIDReg = RI.findUnusedRegister(MF->getRegInfo(), &AMDGPU::VGPR_32RegClass,
704                                    *MF);
705     if (TIDReg == AMDGPU::NoRegister)
706       return TIDReg;
707 
708     if (!AMDGPU::isShader(MF->getFunction()->getCallingConv()) &&
709         WorkGroupSize > WavefrontSize) {
710 
711       unsigned TIDIGXReg
712         = TRI->getPreloadedValue(*MF, SIRegisterInfo::WORKGROUP_ID_X);
713       unsigned TIDIGYReg
714         = TRI->getPreloadedValue(*MF, SIRegisterInfo::WORKGROUP_ID_Y);
715       unsigned TIDIGZReg
716         = TRI->getPreloadedValue(*MF, SIRegisterInfo::WORKGROUP_ID_Z);
717       unsigned InputPtrReg =
718           TRI->getPreloadedValue(*MF, SIRegisterInfo::KERNARG_SEGMENT_PTR);
719       for (unsigned Reg : {TIDIGXReg, TIDIGYReg, TIDIGZReg}) {
720         if (!Entry.isLiveIn(Reg))
721           Entry.addLiveIn(Reg);
722       }
723 
724       RS->enterBasicBlock(Entry);
725       // FIXME: Can we scavenge an SReg_64 and access the subregs?
726       unsigned STmp0 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
727       unsigned STmp1 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
728       BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp0)
729               .addReg(InputPtrReg)
730               .addImm(SI::KernelInputOffsets::NGROUPS_Z);
731       BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp1)
732               .addReg(InputPtrReg)
733               .addImm(SI::KernelInputOffsets::NGROUPS_Y);
734 
735       // NGROUPS.X * NGROUPS.Y
736       BuildMI(Entry, Insert, DL, get(AMDGPU::S_MUL_I32), STmp1)
737               .addReg(STmp1)
738               .addReg(STmp0);
739       // (NGROUPS.X * NGROUPS.Y) * TIDIG.X
740       BuildMI(Entry, Insert, DL, get(AMDGPU::V_MUL_U32_U24_e32), TIDReg)
741               .addReg(STmp1)
742               .addReg(TIDIGXReg);
743       // NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)
744       BuildMI(Entry, Insert, DL, get(AMDGPU::V_MAD_U32_U24), TIDReg)
745               .addReg(STmp0)
746               .addReg(TIDIGYReg)
747               .addReg(TIDReg);
748       // (NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)) + TIDIG.Z
749       BuildMI(Entry, Insert, DL, get(AMDGPU::V_ADD_I32_e32), TIDReg)
750               .addReg(TIDReg)
751               .addReg(TIDIGZReg);
752     } else {
753       // Get the thread ID within the wavefront.
754       BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_LO_U32_B32_e64),
755               TIDReg)
756               .addImm(-1)
757               .addImm(0);
758 
759       BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_HI_U32_B32_e64),
760               TIDReg)
761               .addImm(-1)
762               .addReg(TIDReg);
763     }
764 
765     BuildMI(Entry, Insert, DL, get(AMDGPU::V_LSHLREV_B32_e32),
766             TIDReg)
767             .addImm(2)
768             .addReg(TIDReg);
769     MFI->setTIDReg(TIDReg);
770   }
771 
772   // Add FrameIndex to LDS offset
773   unsigned LDSOffset = MFI->getLDSSize() + (FrameOffset * WorkGroupSize);
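  // TIDReg already holds this thread's index scaled by 4 (shifted left by 2
  // above), so adding it below gives each lane a distinct address for this
  // spill slot.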
774   BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), TmpReg)
775           .addImm(LDSOffset)
776           .addReg(TIDReg);
777 
778   return TmpReg;
779 }
780 
781 void SIInstrInfo::insertWaitStates(MachineBasicBlock &MBB,
782                                    MachineBasicBlock::iterator MI,
783                                    int Count) const {
784   DebugLoc DL = MBB.findDebugLoc(MI);
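  // s_nop N inserts N + 1 wait states and N is limited to 7, so larger counts
  // are split into multiple nops; e.g. Count == 10 becomes s_nop 7 followed by
  // s_nop 1.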
785   while (Count > 0) {
786     int Arg;
787     if (Count >= 8)
788       Arg = 7;
789     else
790       Arg = Count - 1;
791     Count -= 8;
792     BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP))
793             .addImm(Arg);
794   }
795 }
796 
797 void SIInstrInfo::insertNoop(MachineBasicBlock &MBB,
798                              MachineBasicBlock::iterator MI) const {
799   insertWaitStates(MBB, MI, 1);
800 }
801 
802 unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) const {
803   switch (MI.getOpcode()) {
804   default: return 1; // FIXME: Do wait states equal cycles?
805 
806   case AMDGPU::S_NOP:
807     return MI.getOperand(0).getImm() + 1;
808   }
809 }
810 
811 bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
812   MachineBasicBlock &MBB = *MI.getParent();
813   DebugLoc DL = MBB.findDebugLoc(MI);
814   switch (MI.getOpcode()) {
815   default: return AMDGPUInstrInfo::expandPostRAPseudo(MI);
816   case AMDGPU::S_MOV_B64_term: {
817     // This is only a terminator to get the correct spill code placement during
818     // register allocation.
819     MI.setDesc(get(AMDGPU::S_MOV_B64));
820     break;
821   }
822   case AMDGPU::S_XOR_B64_term: {
823     // This is only a terminator to get the correct spill code placement during
824     // register allocation.
825     MI.setDesc(get(AMDGPU::S_XOR_B64));
826     break;
827   }
828   case AMDGPU::S_ANDN2_B64_term: {
829     // This is only a terminator to get the correct spill code placement during
830     // register allocation.
831     MI.setDesc(get(AMDGPU::S_ANDN2_B64));
832     break;
833   }
834   case AMDGPU::V_MOV_B64_PSEUDO: {
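    // There is no 64-bit VALU move, so expand the pseudo into two 32-bit moves
    // of the low and high halves. The implicit defs of the full Dst register
    // keep its liveness correct.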
835     unsigned Dst = MI.getOperand(0).getReg();
836     unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
837     unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
838 
839     const MachineOperand &SrcOp = MI.getOperand(1);
840     // FIXME: Will this work for 64-bit floating point immediates?
841     assert(!SrcOp.isFPImm());
842     if (SrcOp.isImm()) {
843       APInt Imm(64, SrcOp.getImm());
844       BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
845         .addImm(Imm.getLoBits(32).getZExtValue())
846         .addReg(Dst, RegState::Implicit | RegState::Define);
847       BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
848         .addImm(Imm.getHiBits(32).getZExtValue())
849         .addReg(Dst, RegState::Implicit | RegState::Define);
850     } else {
851       assert(SrcOp.isReg());
852       BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
853         .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
854         .addReg(Dst, RegState::Implicit | RegState::Define);
855       BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
856         .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
857         .addReg(Dst, RegState::Implicit | RegState::Define);
858     }
859     MI.eraseFromParent();
860     break;
861   }
862   case AMDGPU::V_MOVRELD_B32_V1:
863   case AMDGPU::V_MOVRELD_B32_V2:
864   case AMDGPU::V_MOVRELD_B32_V4:
865   case AMDGPU::V_MOVRELD_B32_V8:
866   case AMDGPU::V_MOVRELD_B32_V16: {
867     const MCInstrDesc &MovRelDesc = get(AMDGPU::V_MOVRELD_B32_e32);
868     unsigned VecReg = MI.getOperand(0).getReg();
869     bool IsUndef = MI.getOperand(1).isUndef();
870     unsigned SubReg = AMDGPU::sub0 + MI.getOperand(3).getImm();
871     assert(VecReg == MI.getOperand(1).getReg());
872 
873     MachineInstr *MovRel =
874         BuildMI(MBB, MI, DL, MovRelDesc)
875             .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
876             .addOperand(MI.getOperand(2))
877             .addReg(VecReg, RegState::ImplicitDefine)
878             .addReg(VecReg, RegState::Implicit | (IsUndef ? RegState::Undef : 0));
879 
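    // V_MOVRELD_B32 only writes the lane selected by M0, so tie the trailing
    // implicit def and implicit use of VecReg to model a read-modify-write of
    // the whole vector register.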
880     const int ImpDefIdx =
881         MovRelDesc.getNumOperands() + MovRelDesc.getNumImplicitUses();
882     const int ImpUseIdx = ImpDefIdx + 1;
883     MovRel->tieOperands(ImpDefIdx, ImpUseIdx);
884 
885     MI.eraseFromParent();
886     break;
887   }
888   case AMDGPU::SI_PC_ADD_REL_OFFSET: {
889     MachineFunction &MF = *MBB.getParent();
890     unsigned Reg = MI.getOperand(0).getReg();
891     unsigned RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
892     unsigned RegHi = RI.getSubReg(Reg, AMDGPU::sub1);
893 
894     // Create a bundle so these instructions won't be re-ordered by the
895     // post-RA scheduler.
896     MIBundleBuilder Bundler(MBB, MI);
897     Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg));
898 
899     // Add 32-bit offset from this instruction to the start of the
900     // constant data.
901     Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo)
902                        .addReg(RegLo)
903                        .addOperand(MI.getOperand(1)));
904 
905     MachineInstrBuilder MIB = BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi)
906                                   .addReg(RegHi);
907     if (MI.getOperand(2).getTargetFlags() == SIInstrInfo::MO_NONE)
908       MIB.addImm(0);
909     else
910       MIB.addOperand(MI.getOperand(2));
911 
912     Bundler.append(MIB);
913     llvm::finalizeBundle(MBB, Bundler.begin());
914 
915     MI.eraseFromParent();
916     break;
917   }
918   }
919   return true;
920 }
921 
922 bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI,
923                                       MachineOperand &Src0,
924                                       unsigned Src0OpName,
925                                       MachineOperand &Src1,
926                                       unsigned Src1OpName) const {
927   MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName);
928   if (!Src0Mods)
929     return false;
930 
931   MachineOperand *Src1Mods = getNamedOperand(MI, Src1OpName);
932   assert(Src1Mods &&
933          "All commutable instructions have both src0 and src1 modifiers");
934 
935   int Src0ModsVal = Src0Mods->getImm();
936   int Src1ModsVal = Src1Mods->getImm();
937 
938   Src1Mods->setImm(Src0ModsVal);
939   Src0Mods->setImm(Src1ModsVal);
940   return true;
941 }
942 
943 static MachineInstr *swapRegAndNonRegOperand(MachineInstr &MI,
944                                              MachineOperand &RegOp,
945                                              MachineOperand &NonRegOp) {
946   unsigned Reg = RegOp.getReg();
947   unsigned SubReg = RegOp.getSubReg();
948   bool IsKill = RegOp.isKill();
949   bool IsDead = RegOp.isDead();
950   bool IsUndef = RegOp.isUndef();
951   bool IsDebug = RegOp.isDebug();
952 
953   if (NonRegOp.isImm())
954     RegOp.ChangeToImmediate(NonRegOp.getImm());
955   else if (NonRegOp.isFI())
956     RegOp.ChangeToFrameIndex(NonRegOp.getIndex());
957   else
958     return nullptr;
959 
960   NonRegOp.ChangeToRegister(Reg, false, false, IsKill, IsDead, IsUndef, IsDebug);
961   NonRegOp.setSubReg(SubReg);
962 
963   return &MI;
964 }
965 
966 MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
967                                                   unsigned Src0Idx,
968                                                   unsigned Src1Idx) const {
969   assert(!NewMI && "this should never be used");
970 
971   unsigned Opc = MI.getOpcode();
972   int CommutedOpcode = commuteOpcode(Opc);
973   if (CommutedOpcode == -1)
974     return nullptr;
975 
976   assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) ==
977            static_cast<int>(Src0Idx) &&
978          AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) ==
979            static_cast<int>(Src1Idx) &&
980          "inconsistency with findCommutedOpIndices");
981 
982   MachineOperand &Src0 = MI.getOperand(Src0Idx);
983   MachineOperand &Src1 = MI.getOperand(Src1Idx);
984 
985   MachineInstr *CommutedMI = nullptr;
986   if (Src0.isReg() && Src1.isReg()) {
987     if (isOperandLegal(MI, Src1Idx, &Src0)) {
988       // Be sure to copy the source modifiers to the right place.
989       CommutedMI
990         = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, Src0Idx, Src1Idx);
991     }
992 
993   } else if (Src0.isReg() && !Src1.isReg()) {
994     // src0 should always be able to support any operand type, so no need to
995     // check operand legality.
996     CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1);
997   } else if (!Src0.isReg() && Src1.isReg()) {
998     if (isOperandLegal(MI, Src1Idx, &Src0))
999       CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0);
1000   } else {
1001     // FIXME: Found two non-register operands to commute. This does happen.
1002     return nullptr;
1003   }
1004 
1006   if (CommutedMI) {
1007     swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers,
1008                         Src1, AMDGPU::OpName::src1_modifiers);
1009 
1010     CommutedMI->setDesc(get(CommutedOpcode));
1011   }
1012 
1013   return CommutedMI;
1014 }
1015 
1016 // This needs to be implemented because the source modifiers may be inserted
1017 // between the true commutable operands, and the base
1018 // TargetInstrInfo::commuteInstruction uses it.
1019 bool SIInstrInfo::findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx0,
1020                                         unsigned &SrcOpIdx1) const {
1021   if (!MI.isCommutable())
1022     return false;
1023 
1024   unsigned Opc = MI.getOpcode();
1025   int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
1026   if (Src0Idx == -1)
1027     return false;
1028 
1029   int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
1030   if (Src1Idx == -1)
1031     return false;
1032 
1033   return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx);
1034 }
1035 
1036 bool SIInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
1037                                         int64_t BrOffset) const {
1038   // BranchRelaxation should never have to check s_setpc_b64 because its dest
1039   // block is unanalyzable.
1040   assert(BranchOp != AMDGPU::S_SETPC_B64);
1041 
1042   // Convert to dwords.
1043   BrOffset /= 4;
1044 
1045   // The branch instructions do PC += signext(SIMM16 * 4) + 4, so the offset is
1046   // from the next instruction.
1047   BrOffset -= 1;
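  // With the default of 16 offset bits this accepts offsets of roughly
  // +/-2^15 dwords, i.e. about +/-128 KiB around the branch.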
1048 
1049   return isIntN(BranchOffsetBits, BrOffset);
1050 }
1051 
1052 MachineBasicBlock *SIInstrInfo::getBranchDestBlock(
1053   const MachineInstr &MI) const {
1054   if (MI.getOpcode() == AMDGPU::S_SETPC_B64) {
1055     // This would be a difficult analysis to perform, but can always be legal so
1056     // there's no need to analyze it.
1057     return nullptr;
1058   }
1059 
1060   return MI.getOperand(0).getMBB();
1061 }
1062 
1063 unsigned SIInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
1064                                            MachineBasicBlock &DestBB,
1065                                            const DebugLoc &DL,
1066                                            int64_t BrOffset,
1067                                            RegScavenger *RS) const {
1068   assert(RS && "RegScavenger required for long branching");
1069   assert(MBB.empty() &&
1070          "new block should be inserted for expanding unconditional branch");
1071   assert(MBB.pred_size() == 1);
1072 
1073   MachineFunction *MF = MBB.getParent();
1074   MachineRegisterInfo &MRI = MF->getRegInfo();
1075 
1076   // FIXME: Virtual register workaround for RegScavenger not working with empty
1077   // blocks.
1078   unsigned PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
1079 
1080   auto I = MBB.end();
1081 
1082   // We need to compute the offset relative to the instruction immediately after
1083   // s_getpc_b64. Insert pc arithmetic code before last terminator.
1084   MachineInstr *GetPC = BuildMI(MBB, I, DL, get(AMDGPU::S_GETPC_B64), PCReg);
1085 
1086   // TODO: Handle > 32-bit block address.
1087   if (BrOffset >= 0) {
1088     BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_U32))
1089       .addReg(PCReg, RegState::Define, AMDGPU::sub0)
1090       .addReg(PCReg, 0, AMDGPU::sub0)
1091       .addMBB(&DestBB, AMDGPU::TF_LONG_BRANCH_FORWARD);
1092     BuildMI(MBB, I, DL, get(AMDGPU::S_ADDC_U32))
1093       .addReg(PCReg, RegState::Define, AMDGPU::sub1)
1094       .addReg(PCReg, 0, AMDGPU::sub1)
1095       .addImm(0);
1096   } else {
1097     // Backwards branch.
1098     BuildMI(MBB, I, DL, get(AMDGPU::S_SUB_U32))
1099       .addReg(PCReg, RegState::Define, AMDGPU::sub0)
1100       .addReg(PCReg, 0, AMDGPU::sub0)
1101       .addMBB(&DestBB, AMDGPU::TF_LONG_BRANCH_BACKWARD);
1102     BuildMI(MBB, I, DL, get(AMDGPU::S_SUBB_U32))
1103       .addReg(PCReg, RegState::Define, AMDGPU::sub1)
1104       .addReg(PCReg, 0, AMDGPU::sub1)
1105       .addImm(0);
1106   }
1107 
1108   // Insert the indirect branch after the other terminator.
1109   BuildMI(&MBB, DL, get(AMDGPU::S_SETPC_B64))
1110     .addReg(PCReg);
1111 
1112   // FIXME: If spilling is necessary, this will fail because this scavenger has
1113   // no emergency stack slots. It is non-trivial to spill in this situation,
1114   // because the restore code needs to be specially placed after the
1115   // jump. BranchRelaxation then needs to be made aware of the newly inserted
1116   // block.
1117   //
1118   // If a spill is needed for the pc register pair, we need to insert a spill
1119   // restore block right before the destination block, and insert a short branch
1120   // into the old destination block's fallthrough predecessor.
1121   // e.g.:
1122   //
1123   // s_cbranch_scc0 skip_long_branch:
1124   //
1125   // long_branch_bb:
1126   //   spill s[8:9]
1127   //   s_getpc_b64 s[8:9]
1128   //   s_add_u32 s8, s8, restore_bb
1129   //   s_addc_u32 s9, s9, 0
1130   //   s_setpc_b64 s[8:9]
1131   //
1132   // skip_long_branch:
1133   //   foo;
1134   //
1135   // .....
1136   //
1137   // dest_bb_fallthrough_predecessor:
1138   // bar;
1139   // s_branch dest_bb
1140   //
1141   // restore_bb:
1142   //  restore s[8:9]
1143   //  fallthrough dest_bb
1144   //
1145   // dest_bb:
1146   //   buzz;
1147 
1148   RS->enterBasicBlockEnd(MBB);
1149   unsigned Scav = RS->scavengeRegister(&AMDGPU::SReg_64RegClass,
1150                                        MachineBasicBlock::iterator(GetPC), 0);
1151   MRI.replaceRegWith(PCReg, Scav);
1152   MRI.clearVirtRegs();
1153   RS->setRegUsed(Scav);
1154 
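  // Size of the expansion: s_getpc_b64 (4 bytes), s_add_u32 with a 32-bit
  // literal (8 bytes), s_addc_u32 (4 bytes) and s_setpc_b64 (4 bytes).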
1155   return 4 + 8 + 4 + 4;
1156 }
1157 
1158 unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) {
1159   switch (Cond) {
1160   case SIInstrInfo::SCC_TRUE:
1161     return AMDGPU::S_CBRANCH_SCC1;
1162   case SIInstrInfo::SCC_FALSE:
1163     return AMDGPU::S_CBRANCH_SCC0;
1164   case SIInstrInfo::VCCNZ:
1165     return AMDGPU::S_CBRANCH_VCCNZ;
1166   case SIInstrInfo::VCCZ:
1167     return AMDGPU::S_CBRANCH_VCCZ;
1168   case SIInstrInfo::EXECNZ:
1169     return AMDGPU::S_CBRANCH_EXECNZ;
1170   case SIInstrInfo::EXECZ:
1171     return AMDGPU::S_CBRANCH_EXECZ;
1172   default:
1173     llvm_unreachable("invalid branch predicate");
1174   }
1175 }
1176 
1177 SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) {
1178   switch (Opcode) {
1179   case AMDGPU::S_CBRANCH_SCC0:
1180     return SCC_FALSE;
1181   case AMDGPU::S_CBRANCH_SCC1:
1182     return SCC_TRUE;
1183   case AMDGPU::S_CBRANCH_VCCNZ:
1184     return VCCNZ;
1185   case AMDGPU::S_CBRANCH_VCCZ:
1186     return VCCZ;
1187   case AMDGPU::S_CBRANCH_EXECNZ:
1188     return EXECNZ;
1189   case AMDGPU::S_CBRANCH_EXECZ:
1190     return EXECZ;
1191   default:
1192     return INVALID_BR;
1193   }
1194 }
1195 
1196 bool SIInstrInfo::analyzeBranchImpl(MachineBasicBlock &MBB,
1197                                     MachineBasicBlock::iterator I,
1198                                     MachineBasicBlock *&TBB,
1199                                     MachineBasicBlock *&FBB,
1200                                     SmallVectorImpl<MachineOperand> &Cond,
1201                                     bool AllowModify) const {
1202   if (I->getOpcode() == AMDGPU::S_BRANCH) {
1203     // Unconditional Branch
1204     TBB = I->getOperand(0).getMBB();
1205     return false;
1206   }
1207 
1208   BranchPredicate Pred = getBranchPredicate(I->getOpcode());
1209   if (Pred == INVALID_BR)
1210     return true;
1211 
1212   MachineBasicBlock *CondBB = I->getOperand(0).getMBB();
1213   Cond.push_back(MachineOperand::CreateImm(Pred));
1214   Cond.push_back(I->getOperand(1)); // Save the branch register.
1215 
1216   ++I;
1217 
1218   if (I == MBB.end()) {
1219     // Conditional branch followed by fall-through.
1220     TBB = CondBB;
1221     return false;
1222   }
1223 
1224   if (I->getOpcode() == AMDGPU::S_BRANCH) {
1225     TBB = CondBB;
1226     FBB = I->getOperand(0).getMBB();
1227     return false;
1228   }
1229 
1230   return true;
1231 }
1232 
1233 bool SIInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
1234                                 MachineBasicBlock *&FBB,
1235                                 SmallVectorImpl<MachineOperand> &Cond,
1236                                 bool AllowModify) const {
1237   MachineBasicBlock::iterator I = MBB.getFirstTerminator();
1238   if (I == MBB.end())
1239     return false;
1240 
1241   if (I->getOpcode() != AMDGPU::SI_MASK_BRANCH)
1242     return analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify);
1243 
1244   ++I;
1245 
1246   // TODO: Should be able to treat as fallthrough?
1247   if (I == MBB.end())
1248     return true;
1249 
1250   if (analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify))
1251     return true;
1252 
1253   MachineBasicBlock *MaskBrDest = I->getOperand(0).getMBB();
1254 
1255   // Specifically handle the case where the conditional branch is to the same
1256   // destination as the mask branch. e.g.
1257   //
1258   // si_mask_branch BB8
1259   // s_cbranch_execz BB8
1260   // s_cbranch BB9
1261   //
1262   // This is required to understand divergent loops which may need the branches
1263   // to be relaxed.
1264   if (TBB != MaskBrDest || Cond.empty())
1265     return true;
1266 
1267   auto Pred = Cond[0].getImm();
1268   return (Pred != EXECZ && Pred != EXECNZ);
1269 }
1270 
1271 unsigned SIInstrInfo::removeBranch(MachineBasicBlock &MBB,
1272                                    int *BytesRemoved) const {
1273   MachineBasicBlock::iterator I = MBB.getFirstTerminator();
1274 
1275   unsigned Count = 0;
1276   unsigned RemovedSize = 0;
1277   while (I != MBB.end()) {
1278     MachineBasicBlock::iterator Next = std::next(I);
1279     if (I->getOpcode() == AMDGPU::SI_MASK_BRANCH) {
1280       I = Next;
1281       continue;
1282     }
1283 
1284     RemovedSize += getInstSizeInBytes(*I);
1285     I->eraseFromParent();
1286     ++Count;
1287     I = Next;
1288   }
1289 
1290   if (BytesRemoved)
1291     *BytesRemoved = RemovedSize;
1292 
1293   return Count;
1294 }
1295 
1296 unsigned SIInstrInfo::insertBranch(MachineBasicBlock &MBB,
1297                                    MachineBasicBlock *TBB,
1298                                    MachineBasicBlock *FBB,
1299                                    ArrayRef<MachineOperand> Cond,
1300                                    const DebugLoc &DL,
1301                                    int *BytesAdded) const {
1302 
1303   if (!FBB && Cond.empty()) {
1304     BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
1305       .addMBB(TBB);
1306     if (BytesAdded)
1307       *BytesAdded = 4;
1308     return 1;
1309   }
1310 
1311   assert(TBB && Cond[0].isImm());
1312 
1313   unsigned Opcode
1314     = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm()));
1315 
1316   if (!FBB) {
1318     MachineInstr *CondBr =
1319       BuildMI(&MBB, DL, get(Opcode))
1320       .addMBB(TBB);
1321 
1322     // Copy the flags onto the implicit condition register operand.
1323     MachineOperand &CondReg = CondBr->getOperand(1);
1324     CondReg.setIsUndef(Cond[1].isUndef());
1325     CondReg.setIsKill(Cond[1].isKill());
1326 
1327     if (BytesAdded)
1328       *BytesAdded = 4;
1329     return 1;
1330   }
1331 
1332   assert(TBB && FBB);
1333 
1334   MachineInstr *CondBr =
1335     BuildMI(&MBB, DL, get(Opcode))
1336     .addMBB(TBB);
1337   BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
1338     .addMBB(FBB);
1339 
1340   MachineOperand &CondReg = CondBr->getOperand(1);
1341   CondReg.setIsUndef(Cond[1].isUndef());
1342   CondReg.setIsKill(Cond[1].isKill());
1343 
1344   if (BytesAdded)
1345       *BytesAdded = 8;
1346 
1347   return 2;
1348 }
1349 
1350 bool SIInstrInfo::reverseBranchCondition(
1351   SmallVectorImpl<MachineOperand> &Cond) const {
1352   assert(Cond.size() == 2);
1353   Cond[0].setImm(-Cond[0].getImm());
1354   return false;
1355 }
1356 
1357 static void removeModOperands(MachineInstr &MI) {
1358   unsigned Opc = MI.getOpcode();
1359   int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc,
1360                                               AMDGPU::OpName::src0_modifiers);
1361   int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc,
1362                                               AMDGPU::OpName::src1_modifiers);
1363   int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc,
1364                                               AMDGPU::OpName::src2_modifiers);
1365 
1366   MI.RemoveOperand(Src2ModIdx);
1367   MI.RemoveOperand(Src1ModIdx);
1368   MI.RemoveOperand(Src0ModIdx);
1369 }
1370 
1371 bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
1372                                 unsigned Reg, MachineRegisterInfo *MRI) const {
1373   if (!MRI->hasOneNonDBGUse(Reg))
1374     return false;
1375 
1376   unsigned Opc = UseMI.getOpcode();
1377   if (Opc == AMDGPU::COPY) {
1378     bool isVGPRCopy = RI.isVGPR(*MRI, UseMI.getOperand(0).getReg());
1379     switch (DefMI.getOpcode()) {
1380     default:
1381       return false;
1382     case AMDGPU::S_MOV_B64:
1383       // TODO: We could fold 64-bit immediates, but this gets complicated
1384       // when there are sub-registers.
1385       return false;
1386 
1387     case AMDGPU::V_MOV_B32_e32:
1388     case AMDGPU::S_MOV_B32:
1389       break;
1390     }
1391     unsigned NewOpc = isVGPRCopy ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1392     const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0);
1393     assert(ImmOp);
1394     // FIXME: We could handle FrameIndex values here.
1395     if (!ImmOp->isImm()) {
1396       return false;
1397     }
1398     UseMI.setDesc(get(NewOpc));
1399     UseMI.getOperand(1).ChangeToImmediate(ImmOp->getImm());
1400     UseMI.addImplicitDefUseOperands(*UseMI.getParent()->getParent());
1401     return true;
1402   }
1403 
1404   if (Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64 ||
1405       Opc == AMDGPU::V_MAD_F16 || Opc == AMDGPU::V_MAC_F16_e64) {
1406     bool IsF32 = Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64;
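    // v_madmk and v_madak are VOP2 forms that encode one source as a trailing
    // 32-bit literal, so a mad/mac fed by a foldable immediate can sometimes
    // be rewritten into one of them.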
1407 
1408     // Don't fold if we are using source modifiers. The new VOP2 instructions
1409     // don't have them.
1410     if (hasModifiersSet(UseMI, AMDGPU::OpName::src0_modifiers) ||
1411         hasModifiersSet(UseMI, AMDGPU::OpName::src1_modifiers) ||
1412         hasModifiersSet(UseMI, AMDGPU::OpName::src2_modifiers)) {
1413       return false;
1414     }
1415 
1416     const MachineOperand &ImmOp = DefMI.getOperand(1);
1417 
1418     // If this is a free constant, there's no reason to do this.
1419     // TODO: We could fold this here instead of letting SIFoldOperands do it
1420     // later.
1421     if (isInlineConstant(ImmOp, 4))
1422       return false;
1423 
1424     MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0);
1425     MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1);
1426     MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2);
1427 
1428     // Multiplied part is the constant: Use v_madmk_{f16, f32}.
1429     // We should only expect these to be on src0 due to canonicalizations.
1430     if (Src0->isReg() && Src0->getReg() == Reg) {
1431       if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))
1432         return false;
1433 
1434       if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg())))
1435         return false;
1436 
1437       // We need to swap operands 0 and 1 since the madmk constant is operand 1.
1438 
1439       const int64_t Imm = DefMI.getOperand(1).getImm();
1440 
1441       // FIXME: This would be a lot easier if we could return a new instruction
1442       // instead of having to modify in place.
1443 
1444       // Remove these first since they are at the end.
1445       UseMI.RemoveOperand(
1446           AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod));
1447       UseMI.RemoveOperand(
1448           AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp));
1449 
1450       unsigned Src1Reg = Src1->getReg();
1451       unsigned Src1SubReg = Src1->getSubReg();
1452       Src0->setReg(Src1Reg);
1453       Src0->setSubReg(Src1SubReg);
1454       Src0->setIsKill(Src1->isKill());
1455 
1456       if (Opc == AMDGPU::V_MAC_F32_e64 ||
1457           Opc == AMDGPU::V_MAC_F16_e64)
1458         UseMI.untieRegOperand(
1459             AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
1460 
1461       Src1->ChangeToImmediate(Imm);
1462 
1463       removeModOperands(UseMI);
1464       UseMI.setDesc(get(IsF32 ? AMDGPU::V_MADMK_F32 : AMDGPU::V_MADMK_F16));
1465 
1466       bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
1467       if (DeleteDef)
1468         DefMI.eraseFromParent();
1469 
1470       return true;
1471     }
1472 
1473     // Added part is the constant: Use v_madak_{f16, f32}.
1474     if (Src2->isReg() && Src2->getReg() == Reg) {
1475       // Not allowed to use constant bus for another operand.
1476       // We can however allow an inline immediate as src0.
1477       if (!Src0->isImm() &&
1478           (Src0->isReg() && RI.isSGPRClass(MRI->getRegClass(Src0->getReg()))))
1479         return false;
1480 
1481       if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))
1482         return false;
1483 
1484       const int64_t Imm = DefMI.getOperand(1).getImm();
1485 
1486       // FIXME: This would be a lot easier if we could return a new instruction
1487       // instead of having to modify in place.
1488 
1489       // Remove these first since they are at the end.
1490       UseMI.RemoveOperand(
1491           AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod));
1492       UseMI.RemoveOperand(
1493           AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp));
1494 
1495       if (Opc == AMDGPU::V_MAC_F32_e64 ||
1496           Opc == AMDGPU::V_MAC_F16_e64)
1497         UseMI.untieRegOperand(
1498             AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
1499 
1500       // ChangeToImmediate adds Src2 back to the instruction.
1501       Src2->ChangeToImmediate(Imm);
1502 
1503       // These come before src2.
1504       removeModOperands(UseMI);
1505       UseMI.setDesc(get(IsF32 ? AMDGPU::V_MADAK_F32 : AMDGPU::V_MADAK_F16));
1506 
1507       bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
1508       if (DeleteDef)
1509         DefMI.eraseFromParent();
1510 
1511       return true;
1512     }
1513   }
1514 
1515   return false;
1516 }
1517 
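// Returns true if the byte ranges [OffsetA, OffsetA + WidthA) and
// [OffsetB, OffsetB + WidthB) are disjoint, i.e. the lower range ends at or
// before the start of the higher one.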
1518 static bool offsetsDoNotOverlap(int WidthA, int OffsetA,
1519                                 int WidthB, int OffsetB) {
1520   int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
1521   int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
1522   int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
1523   return LowOffset + LowWidth <= HighOffset;
1524 }
1525 
1526 bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr &MIa,
1527                                                MachineInstr &MIb) const {
1528   unsigned BaseReg0, BaseReg1;
1529   int64_t Offset0, Offset1;
1530 
1531   if (getMemOpBaseRegImmOfs(MIa, BaseReg0, Offset0, &RI) &&
1532       getMemOpBaseRegImmOfs(MIb, BaseReg1, Offset1, &RI)) {
1533 
1534     if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) {
1535       // FIXME: Handle ds_read2 / ds_write2.
1536       return false;
1537     }
1538     unsigned Width0 = (*MIa.memoperands_begin())->getSize();
1539     unsigned Width1 = (*MIb.memoperands_begin())->getSize();
1540     if (BaseReg0 == BaseReg1 &&
1541         offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1)) {
1542       return true;
1543     }
1544   }
1545 
1546   return false;
1547 }
1548 
1549 bool SIInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr &MIa,
1550                                                   MachineInstr &MIb,
1551                                                   AliasAnalysis *AA) const {
1552   assert((MIa.mayLoad() || MIa.mayStore()) &&
1553          "MIa must load from or modify a memory location");
1554   assert((MIb.mayLoad() || MIb.mayStore()) &&
1555          "MIb must load from or modify a memory location");
1556 
1557   if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects())
1558     return false;
1559 
1560   // XXX - Can we relax this between address spaces?
1561   if (MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
1562     return false;
1563 
1564   if (AA && MIa.hasOneMemOperand() && MIb.hasOneMemOperand()) {
1565     const MachineMemOperand *MMOa = *MIa.memoperands_begin();
1566     const MachineMemOperand *MMOb = *MIb.memoperands_begin();
1567     if (MMOa->getValue() && MMOb->getValue()) {
1568       MemoryLocation LocA(MMOa->getValue(), MMOa->getSize(), MMOa->getAAInfo());
1569       MemoryLocation LocB(MMOb->getValue(), MMOb->getSize(), MMOb->getAAInfo());
1570       if (!AA->alias(LocA, LocB))
1571         return true;
1572     }
1573   }
1574 
1575   // TODO: Should we check the address space from the MachineMemOperand? That
1576   // would allow us to distinguish objects we know don't alias based on the
1577   // underlying address space, even if it was lowered to a different one,
1578   // e.g. private accesses lowered to use MUBUF instructions on a scratch
1579   // buffer.
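       //
       // Conservatively classify by instruction kind: DS (LDS/GDS) accesses never
       // alias the other kinds, buffer (MUBUF/MTBUF) and scalar (SMRD) accesses
       // may overlap each other, and FLAT may touch any address space.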
1580   if (isDS(MIa)) {
1581     if (isDS(MIb))
1582       return checkInstOffsetsDoNotOverlap(MIa, MIb);
1583 
1584     return !isFLAT(MIb);
1585   }
1586 
1587   if (isMUBUF(MIa) || isMTBUF(MIa)) {
1588     if (isMUBUF(MIb) || isMTBUF(MIb))
1589       return checkInstOffsetsDoNotOverlap(MIa, MIb);
1590 
1591     return !isFLAT(MIb) && !isSMRD(MIb);
1592   }
1593 
1594   if (isSMRD(MIa)) {
1595     if (isSMRD(MIb))
1596       return checkInstOffsetsDoNotOverlap(MIa, MIb);
1597 
1598     return !isFLAT(MIb) && !isMUBUF(MIb) && !isMTBUF(MIb);
1599   }
1600 
1601   if (isFLAT(MIa)) {
1602     if (isFLAT(MIb))
1603       return checkInstOffsetsDoNotOverlap(MIa, MIb);
1604 
1605     return false;
1606   }
1607 
1608   return false;
1609 }
1610 
1611 MachineInstr *SIInstrInfo::convertToThreeAddress(MachineFunction::iterator &MBB,
1612                                                  MachineInstr &MI,
1613                                                  LiveVariables *LV) const {
1614   bool IsF16 = false;
1615 
1616   switch (MI.getOpcode()) {
1617   default:
1618     return nullptr;
1619   case AMDGPU::V_MAC_F16_e64:
1620     IsF16 = true;
1621   case AMDGPU::V_MAC_F32_e64:
1622     break;
1623   case AMDGPU::V_MAC_F16_e32:
1624     IsF16 = true;
1625   case AMDGPU::V_MAC_F32_e32: {
1626     const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0);
1627     if (Src0->isImm() && !isInlineConstant(*Src0, 4))
1628       return nullptr;
1629     break;
1630   }
1631   }
1632 
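       // v_mac accumulates into its tied destination (d = a * b + d); rewrite it
       // as the three-address v_mad form, which takes the accumulator as an
       // explicit third source and carries the extra VOP3 modifier, clamp and
       // omod operands.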
1633   const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
1634   const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0);
1635   const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
1636   const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
1637 
1638   return BuildMI(*MBB, MI, MI.getDebugLoc(),
1639                  get(IsF16 ? AMDGPU::V_MAD_F16 : AMDGPU::V_MAD_F32))
1640       .addOperand(*Dst)
1641       .addImm(0) // Src0 mods
1642       .addOperand(*Src0)
1643       .addImm(0) // Src1 mods
1644       .addOperand(*Src1)
1645       .addImm(0) // Src2 mods
1646       .addOperand(*Src2)
1647       .addImm(0)  // clamp
1648       .addImm(0); // omod
1649 }
1650 
1651 // It's not generally safe to move VALU instructions across these, since doing so
1652 // changes whether a register operand is accessed directly or used as a base index.
1653 // XXX - Why isn't hasSideEffects sufficient for these?
1654 static bool changesVGPRIndexingMode(const MachineInstr &MI) {
1655   switch (MI.getOpcode()) {
1656   case AMDGPU::S_SET_GPR_IDX_ON:
1657   case AMDGPU::S_SET_GPR_IDX_MODE:
1658   case AMDGPU::S_SET_GPR_IDX_OFF:
1659     return true;
1660   default:
1661     return false;
1662   }
1663 }
1664 
1665 bool SIInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
1666                                        const MachineBasicBlock *MBB,
1667                                        const MachineFunction &MF) const {
1668   // XXX - Do we want the SP check in the base implementation?
1669 
1670   // Target-independent instructions do not have an implicit-use of EXEC, even
1671   // when they operate on VGPRs. Treating EXEC modifications as scheduling
1672   // boundaries prevents incorrect movements of such instructions.
1673   return TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF) ||
1674          MI.modifiesRegister(AMDGPU::EXEC, &RI) ||
1675          changesVGPRIndexingMode(MI);
1676 }
1677 
1678 bool SIInstrInfo::isInlineConstant(const APInt &Imm) const {
1679   int64_t SVal = Imm.getSExtValue();
1680   if (SVal >= -16 && SVal <= 64)
1681     return true;
1682 
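       // 64-bit operands accept the same small set of floating-point values,
       // encoded as f64 bit patterns; 0x3fc45f306dc9c882 is the f64 encoding of
       // 1/(2*pi) (~0.15915494), the extra inline constant on subtargets that
       // support it.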
1683   if (Imm.getBitWidth() == 64) {
1684     uint64_t Val = Imm.getZExtValue();
1685     return (DoubleToBits(0.0) == Val) ||
1686            (DoubleToBits(1.0) == Val) ||
1687            (DoubleToBits(-1.0) == Val) ||
1688            (DoubleToBits(0.5) == Val) ||
1689            (DoubleToBits(-0.5) == Val) ||
1690            (DoubleToBits(2.0) == Val) ||
1691            (DoubleToBits(-2.0) == Val) ||
1692            (DoubleToBits(4.0) == Val) ||
1693            (DoubleToBits(-4.0) == Val) ||
1694            (ST.hasInv2PiInlineImm() && Val == 0x3fc45f306dc9c882);
1695   }
1696 
1697   // The actual type of the operand does not seem to matter as long
1698   // as the bits match one of the inline immediate values.  For example:
1699   //
1700   // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
1701   // so it is a legal inline immediate.
1702   //
1703   // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
1704   // floating-point, so it is a legal inline immediate.
1705   uint32_t Val = Imm.getZExtValue();
1706 
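       // 0x3e22f983 is the f32 encoding of 1/(2*pi) (~0.15915494), again only
       // accepted when the subtarget has the inv-2-pi inline immediate.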
1707   return (FloatToBits(0.0f) == Val) ||
1708          (FloatToBits(1.0f) == Val) ||
1709          (FloatToBits(-1.0f) == Val) ||
1710          (FloatToBits(0.5f) == Val) ||
1711          (FloatToBits(-0.5f) == Val) ||
1712          (FloatToBits(2.0f) == Val) ||
1713          (FloatToBits(-2.0f) == Val) ||
1714          (FloatToBits(4.0f) == Val) ||
1715          (FloatToBits(-4.0f) == Val) ||
1716          (ST.hasInv2PiInlineImm() && Val == 0x3e22f983);
1717 }
1718 
1719 bool SIInstrInfo::isInlineConstant(const MachineOperand &MO,
1720                                    unsigned OpSize) const {
1721   if (MO.isImm()) {
1722     // MachineOperand provides no way to tell the true operand size, since it
1723     // only records a 64-bit value. We need to know the size to determine if a
1724     // 32-bit floating point immediate bit pattern is legal for an integer
1725     // immediate. It would be for any 32-bit integer operand, but would not be
1726     // for a 64-bit one.
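         //
         // E.g. the bit pattern 0x3f800000 (1.0f) is an inline constant for a
         // 32-bit operand, but the same value widened to 64 bits matches none of
         // the 64-bit inline encodings.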
1727 
1728     unsigned BitSize = 8 * OpSize;
1729     return isInlineConstant(APInt(BitSize, MO.getImm(), true));
1730   }
1731 
1732   return false;
1733 }
1734 
1735 bool SIInstrInfo::isLiteralConstant(const MachineOperand &MO,
1736                                     unsigned OpSize) const {
1737   return MO.isImm() && !isInlineConstant(MO, OpSize);
1738 }
1739 
1740 bool SIInstrInfo::isLiteralConstantLike(const MachineOperand &MO,
1741                                         unsigned OpSize) const {
1742   switch (MO.getType()) {
1743   case MachineOperand::MO_Register:
1744     return false;
1745   case MachineOperand::MO_Immediate:
1746     return !isInlineConstant(MO, OpSize);
1747   case MachineOperand::MO_FrameIndex:
1748   case MachineOperand::MO_MachineBasicBlock:
1749   case MachineOperand::MO_ExternalSymbol:
1750   case MachineOperand::MO_GlobalAddress:
1751   case MachineOperand::MO_MCSymbol:
1752     return true;
1753   default:
1754     llvm_unreachable("unexpected operand type");
1755   }
1756 }
1757 
1758 static bool compareMachineOp(const MachineOperand &Op0,
1759                              const MachineOperand &Op1) {
1760   if (Op0.getType() != Op1.getType())
1761     return false;
1762 
1763   switch (Op0.getType()) {
1764   case MachineOperand::MO_Register:
1765     return Op0.getReg() == Op1.getReg();
1766   case MachineOperand::MO_Immediate:
1767     return Op0.getImm() == Op1.getImm();
1768   default:
1769     llvm_unreachable("Didn't expect to be comparing these operand types");
1770   }
1771 }
1772 
1773 bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo,
1774                                     const MachineOperand &MO) const {
1775   const MCOperandInfo &OpInfo = get(MI.getOpcode()).OpInfo[OpNo];
1776 
1777   assert(MO.isImm() || MO.isTargetIndex() || MO.isFI());
1778 
1779   if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE)
1780     return true;
1781 
1782   if (OpInfo.RegClass < 0)
1783     return false;
1784 
1785   unsigned OpSize = RI.getRegClass(OpInfo.RegClass)->getSize();
1786   if (isLiteralConstant(MO, OpSize))
1787     return RI.opCanUseLiteralConstant(OpInfo.OperandType);
1788 
1789   return RI.opCanUseInlineConstant(OpInfo.OperandType);
1790 }
1791 
1792 bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const {
1793   int Op32 = AMDGPU::getVOPe32(Opcode);
1794   if (Op32 == -1)
1795     return false;
1796 
1797   return pseudoToMCOpcode(Op32) != -1;
1798 }
1799 
1800 bool SIInstrInfo::hasModifiers(unsigned Opcode) const {
1801   // The src0_modifier operand is present on all instructions
1802   // that have modifiers.
1803 
1804   return AMDGPU::getNamedOperandIdx(Opcode,
1805                                     AMDGPU::OpName::src0_modifiers) != -1;
1806 }
1807 
1808 bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI,
1809                                   unsigned OpName) const {
1810   const MachineOperand *Mods = getNamedOperand(MI, OpName);
1811   return Mods && Mods->getImm();
1812 }
1813 
1814 bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI,
1815                                   const MachineOperand &MO,
1816                                   unsigned OpSize) const {
1817   // Literal constants use the constant bus.
1818   if (isLiteralConstant(MO, OpSize))
1819     return true;
1820 
1821   if (!MO.isReg() || !MO.isUse())
1822     return false;
1823 
1824   if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
1825     return RI.isSGPRClass(MRI.getRegClass(MO.getReg()));
1826 
1827   // FLAT_SCR is just an SGPR pair.
1828   if (!MO.isImplicit() && (MO.getReg() == AMDGPU::FLAT_SCR))
1829     return true;
1830 
1831   // EXEC register uses the constant bus.
1832   if (!MO.isImplicit() && MO.getReg() == AMDGPU::EXEC)
1833     return true;
1834 
1835   // SGPRs use the constant bus
1836   return (MO.getReg() == AMDGPU::VCC || MO.getReg() == AMDGPU::M0 ||
1837           (!MO.isImplicit() &&
1838            (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) ||
1839             AMDGPU::SGPR_64RegClass.contains(MO.getReg()))));
1840 }
1841 
1842 static unsigned findImplicitSGPRRead(const MachineInstr &MI) {
1843   for (const MachineOperand &MO : MI.implicit_operands()) {
1844     // We only care about reads.
1845     if (MO.isDef())
1846       continue;
1847 
1848     switch (MO.getReg()) {
1849     case AMDGPU::VCC:
1850     case AMDGPU::M0:
1851     case AMDGPU::FLAT_SCR:
1852       return MO.getReg();
1853 
1854     default:
1855       break;
1856     }
1857   }
1858 
1859   return AMDGPU::NoRegister;
1860 }
1861 
1862 static bool shouldReadExec(const MachineInstr &MI) {
1863   if (SIInstrInfo::isVALU(MI)) {
1864     switch (MI.getOpcode()) {
1865     case AMDGPU::V_READLANE_B32:
1866     case AMDGPU::V_READLANE_B32_si:
1867     case AMDGPU::V_READLANE_B32_vi:
1868     case AMDGPU::V_WRITELANE_B32:
1869     case AMDGPU::V_WRITELANE_B32_si:
1870     case AMDGPU::V_WRITELANE_B32_vi:
1871       return false;
1872     }
1873 
1874     return true;
1875   }
1876 
1877   if (SIInstrInfo::isGenericOpcode(MI.getOpcode()) ||
1878       SIInstrInfo::isSALU(MI) ||
1879       SIInstrInfo::isSMRD(MI))
1880     return false;
1881 
1882   return true;
1883 }
1884 
1885 static bool isSubRegOf(const SIRegisterInfo &TRI,
1886                        const MachineOperand &SuperVec,
1887                        const MachineOperand &SubReg) {
1888   if (TargetRegisterInfo::isPhysicalRegister(SubReg.getReg()))
1889     return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg());
1890 
1891   return SubReg.getSubReg() != AMDGPU::NoSubRegister &&
1892          SubReg.getReg() == SuperVec.getReg();
1893 }
1894 
1895 bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
1896                                     StringRef &ErrInfo) const {
1897   uint16_t Opcode = MI.getOpcode();
1898   const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
1899   int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
1900   int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
1901   int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
1902 
1903   // Make sure the number of operands is correct.
1904   const MCInstrDesc &Desc = get(Opcode);
1905   if (!Desc.isVariadic() &&
1906       Desc.getNumOperands() != MI.getNumExplicitOperands()) {
1907     ErrInfo = "Instruction has wrong number of operands.";
1908     return false;
1909   }
1910 
1911   if (MI.isInlineAsm()) {
1912     // Verify register classes for inlineasm constraints.
1913     for (unsigned I = InlineAsm::MIOp_FirstOperand, E = MI.getNumOperands();
1914          I != E; ++I) {
1915       const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI);
1916       if (!RC)
1917         continue;
1918 
1919       const MachineOperand &Op = MI.getOperand(I);
1920       if (!Op.isReg())
1921         continue;
1922 
1923       unsigned Reg = Op.getReg();
1924       if (!TargetRegisterInfo::isVirtualRegister(Reg) && !RC->contains(Reg)) {
1925         ErrInfo = "inlineasm operand has incorrect register class.";
1926         return false;
1927       }
1928     }
1929 
1930     return true;
1931   }
1932 
1933   // Make sure the register classes are correct.
1934   for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) {
1935     if (MI.getOperand(i).isFPImm()) {
1936       ErrInfo = "FPImm Machine Operands are not supported. ISel should bitcast "
1937                 "all fp values to integers.";
1938       return false;
1939     }
1940 
1941     int RegClass = Desc.OpInfo[i].RegClass;
1942 
1943     switch (Desc.OpInfo[i].OperandType) {
1944     case MCOI::OPERAND_REGISTER:
1945       if (MI.getOperand(i).isImm()) {
1946         ErrInfo = "Illegal immediate value for operand.";
1947         return false;
1948       }
1949       break;
1950     case AMDGPU::OPERAND_REG_IMM32_INT:
1951     case AMDGPU::OPERAND_REG_IMM32_FP:
1952       break;
1953     case AMDGPU::OPERAND_REG_INLINE_C_INT:
1954     case AMDGPU::OPERAND_REG_INLINE_C_FP:
1955       if (isLiteralConstant(MI.getOperand(i),
1956                             RI.getRegClass(RegClass)->getSize())) {
1957         ErrInfo = "Illegal immediate value for operand.";
1958         return false;
1959       }
1960       break;
1961     case MCOI::OPERAND_IMMEDIATE:
1962     case AMDGPU::OPERAND_KIMM32:
1963       // Check if this operand is an immediate.
1964       // FrameIndex operands will be replaced by immediates, so they are
1965       // allowed.
1966       if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) {
1967         ErrInfo = "Expected immediate, but got non-immediate";
1968         return false;
1969       }
1970       LLVM_FALLTHROUGH;
1971     default:
1972       continue;
1973     }
1974 
1975     if (!MI.getOperand(i).isReg())
1976       continue;
1977 
1978     if (RegClass != -1) {
1979       unsigned Reg = MI.getOperand(i).getReg();
1980       if (Reg == AMDGPU::NoRegister ||
1981           TargetRegisterInfo::isVirtualRegister(Reg))
1982         continue;
1983 
1984       const TargetRegisterClass *RC = RI.getRegClass(RegClass);
1985       if (!RC->contains(Reg)) {
1986         ErrInfo = "Operand has incorrect register class.";
1987         return false;
1988       }
1989     }
1990   }
1991 
1992   // Verify VOP*
1993   if (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI)) {
1994     // Only look at the true operands. Only a real operand can use the constant
1995     // bus, and we don't want to check pseudo-operands like the source modifier
1996     // flags.
1997     const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };
1998 
1999     unsigned ConstantBusCount = 0;
2000 
2001     if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1)
2002       ++ConstantBusCount;
2003 
2004     unsigned SGPRUsed = findImplicitSGPRRead(MI);
2005     if (SGPRUsed != AMDGPU::NoRegister)
2006       ++ConstantBusCount;
2007 
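         // Each distinct SGPR and any literal counts toward the single allowed
         // constant bus read: an instruction reading s0 twice uses the bus once,
         // while one reading both s0 and s1 would use it twice and be rejected.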
2008     for (int OpIdx : OpIndices) {
2009       if (OpIdx == -1)
2010         break;
2011       const MachineOperand &MO = MI.getOperand(OpIdx);
2012       if (usesConstantBus(MRI, MO, getOpSize(Opcode, OpIdx))) {
2013         if (MO.isReg()) {
2014           if (MO.getReg() != SGPRUsed)
2015             ++ConstantBusCount;
2016           SGPRUsed = MO.getReg();
2017         } else {
2018           ++ConstantBusCount;
2019         }
2020       }
2021     }
2022     if (ConstantBusCount > 1) {
2023       ErrInfo = "VOP* instruction uses the constant bus more than once";
2024       return false;
2025     }
2026   }
2027 
2028   // Verify misc. restrictions on specific instructions.
2029   if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 ||
2030       Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) {
2031     const MachineOperand &Src0 = MI.getOperand(Src0Idx);
2032     const MachineOperand &Src1 = MI.getOperand(Src1Idx);
2033     const MachineOperand &Src2 = MI.getOperand(Src2Idx);
2034     if (Src0.isReg() && Src1.isReg() && Src2.isReg()) {
2035       if (!compareMachineOp(Src0, Src1) &&
2036           !compareMachineOp(Src0, Src2)) {
2037         ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2";
2038         return false;
2039       }
2040     }
2041   }
2042 
2043   if (isSOPK(MI)) {
2044     int64_t Imm = getNamedOperand(MI, AMDGPU::OpName::simm16)->getImm();
2045     if (sopkIsZext(MI)) {
2046       if (!isUInt<16>(Imm)) {
2047         ErrInfo = "invalid immediate for SOPK instruction";
2048         return false;
2049       }
2050     } else {
2051       if (!isInt<16>(Imm)) {
2052         ErrInfo = "invalid immediate for SOPK instruction";
2053         return false;
2054       }
2055     }
2056   }
2057 
2058   if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 ||
2059       Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 ||
2060       Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
2061       Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) {
2062     const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
2063                        Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64;
2064 
2065     const unsigned StaticNumOps = Desc.getNumOperands() +
2066       Desc.getNumImplicitUses();
2067     const unsigned NumImplicitOps = IsDst ? 2 : 1;
2068 
2069     // Allow additional implicit operands. This allows a fixup done by the post
2070     // RA scheduler where the main implicit operand is killed and implicit-defs
2071     // are added for sub-registers that remain live after this instruction.
2072     if (MI.getNumOperands() < StaticNumOps + NumImplicitOps) {
2073       ErrInfo = "missing implicit register operands";
2074       return false;
2075     }
2076 
2077     const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
2078     if (IsDst) {
2079       if (!Dst->isUse()) {
2080         ErrInfo = "v_movreld_b32 vdst should be a use operand";
2081         return false;
2082       }
2083 
2084       unsigned UseOpIdx;
2085       if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) ||
2086           UseOpIdx != StaticNumOps + 1) {
2087         ErrInfo = "movrel implicit operands should be tied";
2088         return false;
2089       }
2090     }
2091 
2092     const MachineOperand &Src0 = MI.getOperand(Src0Idx);
2093     const MachineOperand &ImpUse
2094       = MI.getOperand(StaticNumOps + NumImplicitOps - 1);
2095     if (!ImpUse.isReg() || !ImpUse.isUse() ||
2096         !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) {
2097       ErrInfo = "src0 should be subreg of implicit vector use";
2098       return false;
2099     }
2100   }
2101 
2102   // Make sure we aren't losing exec uses in the td files. This mostly requires
2103   // being careful when using let Uses to try to add other use registers.
2104   if (shouldReadExec(MI)) {
2105     if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) {
2106       ErrInfo = "VALU instruction does not implicitly read exec mask";
2107       return false;
2108     }
2109   }
2110 
2111   if (isSMRD(MI)) {
2112     if (MI.mayStore()) {
2113       // The register offset form of scalar stores may only use m0 as the
2114       // soffset register.
2115       const MachineOperand *Soff = getNamedOperand(MI, AMDGPU::OpName::soff);
2116       if (Soff && Soff->getReg() != AMDGPU::M0) {
2117         ErrInfo = "scalar stores must use m0 as offset register";
2118         return false;
2119       }
2120     }
2121   }
2122 
2123   return true;
2124 }
2125 
2126 unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) {
2127   switch (MI.getOpcode()) {
2128   default: return AMDGPU::INSTRUCTION_LIST_END;
2129   case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
2130   case AMDGPU::COPY: return AMDGPU::COPY;
2131   case AMDGPU::PHI: return AMDGPU::PHI;
2132   case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG;
2133   case AMDGPU::S_MOV_B32:
2134     return MI.getOperand(1).isReg() ?
2135            AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
2136   case AMDGPU::S_ADD_I32:
2137   case AMDGPU::S_ADD_U32: return AMDGPU::V_ADD_I32_e32;
2138   case AMDGPU::S_ADDC_U32: return AMDGPU::V_ADDC_U32_e32;
2139   case AMDGPU::S_SUB_I32:
2140   case AMDGPU::S_SUB_U32: return AMDGPU::V_SUB_I32_e32;
2141   case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
2142   case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_I32;
2143   case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64;
2144   case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64;
2145   case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64;
2146   case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64;
2147   case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64;
2148   case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64;
2149   case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64;
2150   case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
2151   case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64;
2152   case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
2153   case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64;
2154   case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
2155   case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64;
2156   case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32;
2157   case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32;
2158   case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32;
2159   case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32;
2160   case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64;
2161   case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32;
2162   case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32;
2163   case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32;
2164   case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32;
2165   case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32;
2166   case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32;
2167   case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32;
2168   case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32;
2169   case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32;
2170   case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e32;
2171   case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e32;
2172   case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e32;
2173   case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e32;
2174   case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e32;
2175   case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e32;
2176   case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e32;
2177   case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e32;
2178   case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64;
2179   case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32;
2180   case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32;
2181   case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64;
2182   case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ;
2183   case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ;
2184   }
2185 }
2186 
2187 bool SIInstrInfo::isSALUOpSupportedOnVALU(const MachineInstr &MI) const {
2188   return getVALUOp(MI) != AMDGPU::INSTRUCTION_LIST_END;
2189 }
2190 
2191 const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
2192                                                       unsigned OpNo) const {
2193   const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
2194   const MCInstrDesc &Desc = get(MI.getOpcode());
2195   if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
2196       Desc.OpInfo[OpNo].RegClass == -1) {
2197     unsigned Reg = MI.getOperand(OpNo).getReg();
2198 
2199     if (TargetRegisterInfo::isVirtualRegister(Reg))
2200       return MRI.getRegClass(Reg);
2201     return RI.getPhysRegClass(Reg);
2202   }
2203 
2204   unsigned RCID = Desc.OpInfo[OpNo].RegClass;
2205   return RI.getRegClass(RCID);
2206 }
2207 
2208 bool SIInstrInfo::canReadVGPR(const MachineInstr &MI, unsigned OpNo) const {
2209   switch (MI.getOpcode()) {
2210   case AMDGPU::COPY:
2211   case AMDGPU::REG_SEQUENCE:
2212   case AMDGPU::PHI:
2213   case AMDGPU::INSERT_SUBREG:
2214     return RI.hasVGPRs(getOpRegClass(MI, 0));
2215   default:
2216     return RI.hasVGPRs(getOpRegClass(MI, OpNo));
2217   }
2218 }
2219 
2220 void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const {
2221   MachineBasicBlock::iterator I = MI;
2222   MachineBasicBlock *MBB = MI.getParent();
2223   MachineOperand &MO = MI.getOperand(OpIdx);
2224   MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
2225   unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass;
2226   const TargetRegisterClass *RC = RI.getRegClass(RCID);
2227   unsigned Opcode = AMDGPU::V_MOV_B32_e32;
2228   if (MO.isReg())
2229     Opcode = AMDGPU::COPY;
2230   else if (RI.isSGPRClass(RC))
2231     Opcode = AMDGPU::S_MOV_B32;
2232 
2233   const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
2234   if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC))
2235     VRC = &AMDGPU::VReg_64RegClass;
2236   else
2237     VRC = &AMDGPU::VGPR_32RegClass;
2238 
2239   unsigned Reg = MRI.createVirtualRegister(VRC);
2240   DebugLoc DL = MBB->findDebugLoc(I);
2241   BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).addOperand(MO);
2242   MO.ChangeToRegister(Reg, false);
2243 }
2244 
2245 unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI,
2246                                          MachineRegisterInfo &MRI,
2247                                          MachineOperand &SuperReg,
2248                                          const TargetRegisterClass *SuperRC,
2249                                          unsigned SubIdx,
2250                                          const TargetRegisterClass *SubRC)
2251                                          const {
2252   MachineBasicBlock *MBB = MI->getParent();
2253   DebugLoc DL = MI->getDebugLoc();
2254   unsigned SubReg = MRI.createVirtualRegister(SubRC);
2255 
2256   if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) {
2257     BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
2258       .addReg(SuperReg.getReg(), 0, SubIdx);
2259     return SubReg;
2260   }
2261 
2262   // Just in case the super register is itself a sub-register, copy it to a new
2263   // value so we don't need to worry about merging its subreg index with the
2264   // SubIdx passed to this function. The register coalescer should be able to
2265   // eliminate this extra copy.
2266   unsigned NewSuperReg = MRI.createVirtualRegister(SuperRC);
2267 
2268   BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg)
2269     .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg());
2270 
2271   BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
2272     .addReg(NewSuperReg, 0, SubIdx);
2273 
2274   return SubReg;
2275 }
2276 
2277 MachineOperand SIInstrInfo::buildExtractSubRegOrImm(
2278   MachineBasicBlock::iterator MII,
2279   MachineRegisterInfo &MRI,
2280   MachineOperand &Op,
2281   const TargetRegisterClass *SuperRC,
2282   unsigned SubIdx,
2283   const TargetRegisterClass *SubRC) const {
2284   if (Op.isImm()) {
2285     if (SubIdx == AMDGPU::sub0)
2286       return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm()));
2287     if (SubIdx == AMDGPU::sub1)
2288       return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm() >> 32));
2289 
2290     llvm_unreachable("Unhandled register index for immediate");
2291   }
2292 
2293   unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC,
2294                                        SubIdx, SubRC);
2295   return MachineOperand::CreateReg(SubReg, false);
2296 }
2297 
2298 // Change the order of operands from (0, 1, 2) to (0, 2, 1)
2299 void SIInstrInfo::swapOperands(MachineInstr &Inst) const {
2300   assert(Inst.getNumExplicitOperands() == 3);
2301   MachineOperand Op1 = Inst.getOperand(1);
2302   Inst.RemoveOperand(1);
2303   Inst.addOperand(Op1);
2304 }
2305 
2306 bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI,
2307                                     const MCOperandInfo &OpInfo,
2308                                     const MachineOperand &MO) const {
2309   if (!MO.isReg())
2310     return false;
2311 
2312   unsigned Reg = MO.getReg();
2313   const TargetRegisterClass *RC =
2314     TargetRegisterInfo::isVirtualRegister(Reg) ?
2315     MRI.getRegClass(Reg) :
2316     RI.getPhysRegClass(Reg);
2317 
2318   const SIRegisterInfo *TRI =
2319       static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo());
2320   RC = TRI->getSubRegClass(RC, MO.getSubReg());
2321 
2322   // In order to be legal, the common sub-class must be equal to the
2323   // class of the current operand.  For example:
2324   //
2325   // v_mov_b32 s0 ; Operand defined as vsrc_b32
2326   //              ; RI.getCommonSubClass(s0,vsrc_b32) = sgpr ; LEGAL
2327   //
2328   // s_sendmsg 0, s0 ; Operand defined as m0reg
2329   //                 ; RI.getCommonSubClass(s0,m0reg) = m0reg ; NOT LEGAL
2330 
2331   return RI.getCommonSubClass(RC, RI.getRegClass(OpInfo.RegClass)) == RC;
2332 }
2333 
2334 bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI,
2335                                      const MCOperandInfo &OpInfo,
2336                                      const MachineOperand &MO) const {
2337   if (MO.isReg())
2338     return isLegalRegOperand(MRI, OpInfo, MO);
2339 
2340   // Handle non-register types that are treated like immediates.
2341   assert(MO.isImm() || MO.isTargetIndex() || MO.isFI());
2342   return true;
2343 }
2344 
2345 bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx,
2346                                  const MachineOperand *MO) const {
2347   const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
2348   const MCInstrDesc &InstDesc = MI.getDesc();
2349   const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx];
2350   const TargetRegisterClass *DefinedRC =
2351       OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr;
2352   if (!MO)
2353     MO = &MI.getOperand(OpIdx);
2354 
2355   if (isVALU(MI) && usesConstantBus(MRI, *MO, DefinedRC->getSize())) {
2356 
2357     RegSubRegPair SGPRUsed;
2358     if (MO->isReg())
2359       SGPRUsed = RegSubRegPair(MO->getReg(), MO->getSubReg());
2360 
2361     for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
2362       if (i == OpIdx)
2363         continue;
2364       const MachineOperand &Op = MI.getOperand(i);
2365       if (Op.isReg()) {
2366         if ((Op.getReg() != SGPRUsed.Reg || Op.getSubReg() != SGPRUsed.SubReg) &&
2367             usesConstantBus(MRI, Op, getOpSize(MI, i))) {
2368           return false;
2369         }
2370       } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) {
2371         return false;
2372       }
2373     }
2374   }
2375 
2376   if (MO->isReg()) {
2377     assert(DefinedRC);
2378     return isLegalRegOperand(MRI, OpInfo, *MO);
2379   }
2380 
2381   // Handle non-register types that are treated like immediates.
2382   assert(MO->isImm() || MO->isTargetIndex() || MO->isFI());
2383 
2384   if (!DefinedRC) {
2385     // This operand expects an immediate.
2386     return true;
2387   }
2388 
2389   return isImmOperandLegal(MI, OpIdx, *MO);
2390 }
2391 
2392 void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI,
2393                                        MachineInstr &MI) const {
2394   unsigned Opc = MI.getOpcode();
2395   const MCInstrDesc &InstrDesc = get(Opc);
2396 
2397   int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
2398   MachineOperand &Src1 = MI.getOperand(Src1Idx);
2399 
2400   // If there is an implicit SGPR use, such as the VCC read of v_addc_u32/v_subb_u32,
2401   // it already consumes the single allowed constant bus use.
2402   //
2403   // Note we do not need to worry about literal constants here. They are
2404   // disabled for the operand type for instructions because they will always
2405   // violate the one constant bus use rule.
2406   bool HasImplicitSGPR = findImplicitSGPRRead(MI) != AMDGPU::NoRegister;
2407   if (HasImplicitSGPR) {
2408     int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
2409     MachineOperand &Src0 = MI.getOperand(Src0Idx);
2410 
2411     if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg()))
2412       legalizeOpWithMove(MI, Src0Idx);
2413   }
2414 
2415   // VOP2 src0 operands support all operand types, so we don't need to check
2416   // their legality. If src1 is already legal, we don't need to do anything.
2417   if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1))
2418     return;
2419 
2420   // We do not use commuteInstruction here because it is too aggressive and will
2421   // commute if it is possible. We only want to commute here if it improves
2422   // legality. This can be called a fairly large number of times so don't waste
2423   // compile time pointlessly swapping and checking legality again.
2424   if (HasImplicitSGPR || !MI.isCommutable()) {
2425     legalizeOpWithMove(MI, Src1Idx);
2426     return;
2427   }
2428 
2429   int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
2430   MachineOperand &Src0 = MI.getOperand(Src0Idx);
2431 
2432   // If src0 can be used as src1, commuting will make the operands legal.
2433   // Otherwise we have to give up and insert a move.
2434   //
2435   // TODO: Other immediate-like operand kinds could be commuted if there was a
2436   // MachineOperand::ChangeTo* for them.
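       //
       // E.g. v_sub_f32 v0, v1, s0 has an SGPR in src1; commuting it to the
       // reversed opcode gives v_subrev_f32 v0, s0, v1, which is legal.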
2437   if ((!Src1.isImm() && !Src1.isReg()) ||
2438       !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) {
2439     legalizeOpWithMove(MI, Src1Idx);
2440     return;
2441   }
2442 
2443   int CommutedOpc = commuteOpcode(MI);
2444   if (CommutedOpc == -1) {
2445     legalizeOpWithMove(MI, Src1Idx);
2446     return;
2447   }
2448 
2449   MI.setDesc(get(CommutedOpc));
2450 
2451   unsigned Src0Reg = Src0.getReg();
2452   unsigned Src0SubReg = Src0.getSubReg();
2453   bool Src0Kill = Src0.isKill();
2454 
2455   if (Src1.isImm())
2456     Src0.ChangeToImmediate(Src1.getImm());
2457   else if (Src1.isReg()) {
2458     Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill());
2459     Src0.setSubReg(Src1.getSubReg());
2460   } else
2461     llvm_unreachable("Should only have register or immediate operands");
2462 
2463   Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill);
2464   Src1.setSubReg(Src0SubReg);
2465 }
2466 
2467 // Legalize VOP3 operands. Because all operand types are supported for any
2468 // operand, and since literal constants are not allowed and should never be
2469 // seen, we only need to worry about inserting copies if we use multiple SGPR
2470 // operands.
2471 void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI,
2472                                        MachineInstr &MI) const {
2473   unsigned Opc = MI.getOpcode();
2474 
2475   int VOP3Idx[3] = {
2476     AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
2477     AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1),
2478     AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)
2479   };
2480 
2481   // Find the one SGPR operand we are allowed to use.
2482   unsigned SGPRReg = findUsedSGPR(MI, VOP3Idx);
2483 
2484   for (unsigned i = 0; i < 3; ++i) {
2485     int Idx = VOP3Idx[i];
2486     if (Idx == -1)
2487       break;
2488     MachineOperand &MO = MI.getOperand(Idx);
2489 
2490     // We should never see a VOP3 instruction with an illegal immediate operand.
2491     if (!MO.isReg())
2492       continue;
2493 
2494     if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg())))
2495       continue; // VGPRs are legal
2496 
2497     if (SGPRReg == AMDGPU::NoRegister || SGPRReg == MO.getReg()) {
2498       SGPRReg = MO.getReg();
2499       // We can use one SGPR in each VOP3 instruction.
2500       continue;
2501     }
2502 
2503     // If we make it this far, then the operand is not legal and we must
2504     // legalize it.
2505     legalizeOpWithMove(MI, Idx);
2506   }
2507 }
2508 
2509 unsigned SIInstrInfo::readlaneVGPRToSGPR(unsigned SrcReg, MachineInstr &UseMI,
2510                                          MachineRegisterInfo &MRI) const {
2511   const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg);
2512   const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC);
2513   unsigned DstReg = MRI.createVirtualRegister(SRC);
2514   unsigned SubRegs = VRC->getSize() / 4;
2515 
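       // Read each 32-bit lane of the source with v_readfirstlane_b32 and
       // rebuild the value with a REG_SEQUENCE, e.g. a VReg_64 source becomes
       // two SGPR_32 reads combined into an SReg_64 result.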
2516   SmallVector<unsigned, 8> SRegs;
2517   for (unsigned i = 0; i < SubRegs; ++i) {
2518     unsigned SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2519     BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
2520             get(AMDGPU::V_READFIRSTLANE_B32), SGPR)
2521         .addReg(SrcReg, 0, RI.getSubRegFromChannel(i));
2522     SRegs.push_back(SGPR);
2523   }
2524 
2525   MachineInstrBuilder MIB =
2526       BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
2527               get(AMDGPU::REG_SEQUENCE), DstReg);
2528   for (unsigned i = 0; i < SubRegs; ++i) {
2529     MIB.addReg(SRegs[i]);
2530     MIB.addImm(RI.getSubRegFromChannel(i));
2531   }
2532   return DstReg;
2533 }
2534 
2535 void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI,
2536                                        MachineInstr &MI) const {
2537 
2538   // If the pointer is stored in VGPRs, then we need to move it to
2539   // SGPRs using v_readfirstlane. This is safe because we only select
2540   // loads with uniform pointers to SMRD instructions, so we know the
2541   // pointer value is uniform.
2542   MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase);
2543   if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) {
2544     unsigned SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI);
2545     SBase->setReg(SGPR);
2546   }
2547 }
2548 
2549 void SIInstrInfo::legalizeOperands(MachineInstr &MI) const {
2550   MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
2551 
2552   // Legalize VOP2
2553   if (isVOP2(MI) || isVOPC(MI)) {
2554     legalizeOperandsVOP2(MRI, MI);
2555     return;
2556   }
2557 
2558   // Legalize VOP3
2559   if (isVOP3(MI)) {
2560     legalizeOperandsVOP3(MRI, MI);
2561     return;
2562   }
2563 
2564   // Legalize SMRD
2565   if (isSMRD(MI)) {
2566     legalizeOperandsSMRD(MRI, MI);
2567     return;
2568   }
2569 
2570   // Legalize REG_SEQUENCE and PHI
2571   // The register class of the operands must be the same type as the register
2572   // class of the output.
2573   if (MI.getOpcode() == AMDGPU::PHI) {
2574     const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
2575     for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
2576       if (!MI.getOperand(i).isReg() ||
2577           !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg()))
2578         continue;
2579       const TargetRegisterClass *OpRC =
2580           MRI.getRegClass(MI.getOperand(i).getReg());
2581       if (RI.hasVGPRs(OpRC)) {
2582         VRC = OpRC;
2583       } else {
2584         SRC = OpRC;
2585       }
2586     }
2587 
2588     // If any of the operands are VGPR registers, then they all must be VGPRs;
2589     // otherwise we will create illegal VGPR->SGPR copies when legalizing
2590     // them.
2591     if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) {
2592       if (!VRC) {
2593         assert(SRC);
2594         VRC = RI.getEquivalentVGPRClass(SRC);
2595       }
2596       RC = VRC;
2597     } else {
2598       RC = SRC;
2599     }
2600 
2601     // Update all the operands so they have the same type.
2602     for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
2603       MachineOperand &Op = MI.getOperand(I);
2604       if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
2605         continue;
2606       unsigned DstReg = MRI.createVirtualRegister(RC);
2607 
2608       // MI is a PHI instruction.
2609       MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB();
2610       MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator();
2611 
2612       BuildMI(*InsertBB, Insert, MI.getDebugLoc(), get(AMDGPU::COPY), DstReg)
2613           .addOperand(Op);
2614       Op.setReg(DstReg);
2615     }
2616   }
2617 
2618   // REG_SEQUENCE doesn't really require operand legalization, but if one has a
2619   // VGPR dest type and SGPR sources, insert copies so all operands are
2620   // VGPRs. This seems to help operand folding / the register coalescer.
2621   if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) {
2622     MachineBasicBlock *MBB = MI.getParent();
2623     const TargetRegisterClass *DstRC = getOpRegClass(MI, 0);
2624     if (RI.hasVGPRs(DstRC)) {
2625       // Update all the operands so they are VGPR register classes. These may
2626       // not be the same register class because REG_SEQUENCE supports mixing
2627       // subregister index types e.g. sub0_sub1 + sub2 + sub3
2628       for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
2629         MachineOperand &Op = MI.getOperand(I);
2630         if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
2631           continue;
2632 
2633         const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg());
2634         const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC);
2635         if (VRC == OpRC)
2636           continue;
2637 
2638         unsigned DstReg = MRI.createVirtualRegister(VRC);
2639 
2640         BuildMI(*MBB, MI, MI.getDebugLoc(), get(AMDGPU::COPY), DstReg)
2641             .addOperand(Op);
2642 
2643         Op.setReg(DstReg);
2644         Op.setIsKill();
2645       }
2646     }
2647 
2648     return;
2649   }
2650 
2651   // Legalize INSERT_SUBREG
2652   // src0 must have the same register class as dst
2653   if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) {
2654     unsigned Dst = MI.getOperand(0).getReg();
2655     unsigned Src0 = MI.getOperand(1).getReg();
2656     const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
2657     const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0);
2658     if (DstRC != Src0RC) {
2659       MachineBasicBlock &MBB = *MI.getParent();
2660       unsigned NewSrc0 = MRI.createVirtualRegister(DstRC);
2661       BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::COPY), NewSrc0)
2662           .addReg(Src0);
2663       MI.getOperand(1).setReg(NewSrc0);
2664     }
2665     return;
2666   }
2667 
2668   // Legalize MIMG
2669   if (isMIMG(MI)) {
2670     MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc);
2671     if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) {
2672       unsigned SGPR = readlaneVGPRToSGPR(SRsrc->getReg(), MI, MRI);
2673       SRsrc->setReg(SGPR);
2674     }
2675 
2676     MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp);
2677     if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg()))) {
2678       unsigned SGPR = readlaneVGPRToSGPR(SSamp->getReg(), MI, MRI);
2679       SSamp->setReg(SGPR);
2680     }
2681     return;
2682   }
2683 
2684   // Legalize MUBUF* instructions
2685   // FIXME: If we start using the non-addr64 instructions for compute, we
2686   // may need to legalize them here.
2687   int SRsrcIdx =
2688       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
2689   if (SRsrcIdx != -1) {
2690     // We have an MUBUF instruction
2691     MachineOperand *SRsrc = &MI.getOperand(SRsrcIdx);
2692     unsigned SRsrcRC = get(MI.getOpcode()).OpInfo[SRsrcIdx].RegClass;
2693     if (RI.getCommonSubClass(MRI.getRegClass(SRsrc->getReg()),
2694                                              RI.getRegClass(SRsrcRC))) {
2695       // The operands are legal.
2696       // FIXME: We may need to legalize operands besides srsrc.
2697       return;
2698     }
2699 
2700     MachineBasicBlock &MBB = *MI.getParent();
2701 
2702     // Extract the ptr from the resource descriptor.
2703     unsigned SRsrcPtr = buildExtractSubReg(MI, MRI, *SRsrc,
2704       &AMDGPU::VReg_128RegClass, AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);
2705 
2706     // Create an empty resource descriptor
2707     unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2708     unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2709     unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2710     unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
2711     uint64_t RsrcDataFormat = getDefaultRsrcDataFormat();
2712 
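         // Build a replacement descriptor: the 64-bit base address is zeroed
         // (the pointer extracted above is folded into vaddr instead) and the
         // upper two dwords carry the default buffer data-format bits.
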
2713     // Zero64 = 0
2714     BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B64), Zero64)
2715         .addImm(0);
2716 
2717     // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
2718     BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B32), SRsrcFormatLo)
2719         .addImm(RsrcDataFormat & 0xFFFFFFFF);
2720 
2721     // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
2722     BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B32), SRsrcFormatHi)
2723         .addImm(RsrcDataFormat >> 32);
2724 
2725     // NewSRsrc = {Zero64, SRsrcFormat}
2726     BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewSRsrc)
2727         .addReg(Zero64)
2728         .addImm(AMDGPU::sub0_sub1)
2729         .addReg(SRsrcFormatLo)
2730         .addImm(AMDGPU::sub2)
2731         .addReg(SRsrcFormatHi)
2732         .addImm(AMDGPU::sub3);
2733 
2734     MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
2735     unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
2736     if (VAddr) {
2737       // This is already an ADDR64 instruction so we need to add the pointer
2738       // extracted from the resource descriptor to the current value of VAddr.
2739       unsigned NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2740       unsigned NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2741 
2742       // NewVaddrLo = SRsrcPtr:sub0 + VAddr:sub0
2743       DebugLoc DL = MI.getDebugLoc();
2744       BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), NewVAddrLo)
2745         .addReg(SRsrcPtr, 0, AMDGPU::sub0)
2746         .addReg(VAddr->getReg(), 0, AMDGPU::sub0);
2747 
2748       // NewVaddrHi = SRsrcPtr:sub1 + VAddr:sub1
2749       BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e32), NewVAddrHi)
2750         .addReg(SRsrcPtr, 0, AMDGPU::sub1)
2751         .addReg(VAddr->getReg(), 0, AMDGPU::sub1);
2752 
2753       // NewVaddr = {NewVaddrHi, NewVaddrLo}
2754       BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
2755           .addReg(NewVAddrLo)
2756           .addImm(AMDGPU::sub0)
2757           .addReg(NewVAddrHi)
2758           .addImm(AMDGPU::sub1);
2759     } else {
2760       // This instruction is the _OFFSET variant, so we need to convert it to
2761       // ADDR64.
2762       assert(MBB.getParent()->getSubtarget<SISubtarget>().getGeneration()
2763              < SISubtarget::VOLCANIC_ISLANDS &&
2764              "FIXME: Need to emit flat atomics here");
2765 
2766       MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata);
2767       MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
2768       MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset);
2769       unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode());
2770 
2771       // Atomics with return have an additional tied operand and are
2772       // missing some of the special bits.
2773       MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in);
2774       MachineInstr *Addr64;
2775 
2776       if (!VDataIn) {
2777         // Regular buffer load / store.
2778         MachineInstrBuilder MIB =
2779             BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
2780                 .addOperand(*VData)
2781                 .addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
2782                 // This will be replaced later
2783                 // with the new value of vaddr.
2784                 .addOperand(*SRsrc)
2785                 .addOperand(*SOffset)
2786                 .addOperand(*Offset);
2787 
2788         // Atomics do not have this operand.
2789         if (const MachineOperand *GLC =
2790                 getNamedOperand(MI, AMDGPU::OpName::glc)) {
2791           MIB.addImm(GLC->getImm());
2792         }
2793 
2794         MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc));
2795 
2796         if (const MachineOperand *TFE =
2797                 getNamedOperand(MI, AMDGPU::OpName::tfe)) {
2798           MIB.addImm(TFE->getImm());
2799         }
2800 
2801         MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
2802         Addr64 = MIB;
2803       } else {
2804         // Atomics with return.
2805         Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
2806                      .addOperand(*VData)
2807                      .addOperand(*VDataIn)
2808                      .addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
2809                      // This will be replaced later
2810                      // with the new value of vaddr.
2811                      .addOperand(*SRsrc)
2812                      .addOperand(*SOffset)
2813                      .addOperand(*Offset)
2814                      .addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc))
2815                      .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
2816       }
2817 
2818       MI.removeFromParent();
2819 
2820       // NewVaddr = {NewVaddrHi, NewVaddrLo}
2821       BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
2822               NewVAddr)
2823           .addReg(SRsrcPtr, 0, AMDGPU::sub0)
2824           .addImm(AMDGPU::sub0)
2825           .addReg(SRsrcPtr, 0, AMDGPU::sub1)
2826           .addImm(AMDGPU::sub1);
2827 
2828       VAddr = getNamedOperand(*Addr64, AMDGPU::OpName::vaddr);
2829       SRsrc = getNamedOperand(*Addr64, AMDGPU::OpName::srsrc);
2830     }
2831 
2832     // Update the instruction to use NewVaddr
2833     VAddr->setReg(NewVAddr);
2834     // Update the instruction to use NewSRsrc
2835     SRsrc->setReg(NewSRsrc);
2836   }
2837 }
2838 
2839 void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
2840   SmallVector<MachineInstr *, 128> Worklist;
2841   Worklist.push_back(&TopInst);
2842 
2843   while (!Worklist.empty()) {
2844     MachineInstr &Inst = *Worklist.pop_back_val();
2845     MachineBasicBlock *MBB = Inst.getParent();
2846     MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
2847 
2848     unsigned Opcode = Inst.getOpcode();
2849     unsigned NewOpcode = getVALUOp(Inst);
2850 
2851     // Handle some special cases
2852     switch (Opcode) {
2853     default:
2854       break;
2855     case AMDGPU::S_AND_B64:
2856       splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_AND_B32_e64);
2857       Inst.eraseFromParent();
2858       continue;
2859 
2860     case AMDGPU::S_OR_B64:
2861       splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_OR_B32_e64);
2862       Inst.eraseFromParent();
2863       continue;
2864 
2865     case AMDGPU::S_XOR_B64:
2866       splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_XOR_B32_e64);
2867       Inst.eraseFromParent();
2868       continue;
2869 
2870     case AMDGPU::S_NOT_B64:
2871       splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::V_NOT_B32_e32);
2872       Inst.eraseFromParent();
2873       continue;
2874 
2875     case AMDGPU::S_BCNT1_I32_B64:
2876       splitScalar64BitBCNT(Worklist, Inst);
2877       Inst.eraseFromParent();
2878       continue;
2879 
2880     case AMDGPU::S_BFE_I64: {
2881       splitScalar64BitBFE(Worklist, Inst);
2882       Inst.eraseFromParent();
2883       continue;
2884     }
2885 
2886     case AMDGPU::S_LSHL_B32:
2887       if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
2888         NewOpcode = AMDGPU::V_LSHLREV_B32_e64;
2889         swapOperands(Inst);
2890       }
2891       break;
2892     case AMDGPU::S_ASHR_I32:
2893       if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
2894         NewOpcode = AMDGPU::V_ASHRREV_I32_e64;
2895         swapOperands(Inst);
2896       }
2897       break;
2898     case AMDGPU::S_LSHR_B32:
2899       if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
2900         NewOpcode = AMDGPU::V_LSHRREV_B32_e64;
2901         swapOperands(Inst);
2902       }
2903       break;
2904     case AMDGPU::S_LSHL_B64:
2905       if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
2906         NewOpcode = AMDGPU::V_LSHLREV_B64;
2907         swapOperands(Inst);
2908       }
2909       break;
2910     case AMDGPU::S_ASHR_I64:
2911       if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
2912         NewOpcode = AMDGPU::V_ASHRREV_I64;
2913         swapOperands(Inst);
2914       }
2915       break;
2916     case AMDGPU::S_LSHR_B64:
2917       if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
2918         NewOpcode = AMDGPU::V_LSHRREV_B64;
2919         swapOperands(Inst);
2920       }
2921       break;
2922 
2923     case AMDGPU::S_ABS_I32:
2924       lowerScalarAbs(Worklist, Inst);
2925       Inst.eraseFromParent();
2926       continue;
2927 
2928     case AMDGPU::S_CBRANCH_SCC0:
2929     case AMDGPU::S_CBRANCH_SCC1:
2930       // Clear unused bits of vcc
2931       BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B64),
2932               AMDGPU::VCC)
2933           .addReg(AMDGPU::EXEC)
2934           .addReg(AMDGPU::VCC);
2935       break;
2936 
2937     case AMDGPU::S_BFE_U64:
2938     case AMDGPU::S_BFM_B64:
2939       llvm_unreachable("Moving this op to VALU not implemented");
2940     }
2941 
2942     if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
2943       // We cannot move this instruction to the VALU, so we should try to
2944       // legalize its operands instead.
2945       legalizeOperands(Inst);
2946       continue;
2947     }
2948 
2949     // Use the new VALU Opcode.
2950     const MCInstrDesc &NewDesc = get(NewOpcode);
2951     Inst.setDesc(NewDesc);
2952 
2953     // Remove any references to SCC. Vector instructions can't read from it, and
2954     // we're just about to add the implicit use / defs of VCC; we don't want
2955     // both.
2956     for (unsigned i = Inst.getNumOperands() - 1; i > 0; --i) {
2957       MachineOperand &Op = Inst.getOperand(i);
2958       if (Op.isReg() && Op.getReg() == AMDGPU::SCC) {
2959         Inst.RemoveOperand(i);
2960         addSCCDefUsersToVALUWorklist(Inst, Worklist);
2961       }
2962     }
2963 
2964     if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
2965       // We are converting these to a BFE, so we need to add the missing
2966       // operands for the size and offset.
2967       unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
2968       Inst.addOperand(MachineOperand::CreateImm(0));
2969       Inst.addOperand(MachineOperand::CreateImm(Size));
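           // e.g. (a sketch) S_SEXT_I32_I8 %dst, %src ends up as
           //   V_BFE_I32 %dst, %src, 0, 8
           // now that the opcode has been switched to the BFE form above.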
2970 
2971     } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
2972       // The VALU version adds the second operand to the result, so insert an
2973       // extra 0 operand.
2974       Inst.addOperand(MachineOperand::CreateImm(0));
2975     }
2976 
2977     Inst.addImplicitDefUseOperands(*Inst.getParent()->getParent());
2978 
2979     if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
2980       const MachineOperand &OffsetWidthOp = Inst.getOperand(2);
2981       // If we need to move this to VGPRs, we need to unpack the second operand
2982       // back into the 2 separate ones for bit offset and width.
2983       assert(OffsetWidthOp.isImm() &&
2984              "Scalar BFE is only implemented for constant width and offset");
2985       uint32_t Imm = OffsetWidthOp.getImm();
2986 
2987       uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
2988       uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
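           // e.g. an S_BFE immediate of 0x100003 unpacks to Offset = 3 and
           // BitWidth = 16.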
2989       Inst.RemoveOperand(2);                     // Remove old immediate.
2990       Inst.addOperand(MachineOperand::CreateImm(Offset));
2991       Inst.addOperand(MachineOperand::CreateImm(BitWidth));
2992     }
2993 
2994     bool HasDst = Inst.getOperand(0).isReg() && Inst.getOperand(0).isDef();
2995     unsigned NewDstReg = AMDGPU::NoRegister;
2996     if (HasDst) {
2997       // Update the destination register class.
2998       const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(Inst);
2999       if (!NewDstRC)
3000         continue;
3001 
3002       unsigned DstReg = Inst.getOperand(0).getReg();
3003       NewDstReg = MRI.createVirtualRegister(NewDstRC);
3004       MRI.replaceRegWith(DstReg, NewDstReg);
3005     }
3006 
3007     // Legalize the operands
3008     legalizeOperands(Inst);
3009 
3010     if (HasDst)
3011      addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
3012   }
3013 }
3014 
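     // Lower S_ABS_I32 with VALU instructions as abs(x) = max(x, 0 - x).
     // A rough sketch of the expansion (register names are illustrative):
     //   %tmp:vgpr_32    = V_SUB_I32_e32 0, %src
     //   %result:vgpr_32 = V_MAX_I32_e64 %src, %tmp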
3015 void SIInstrInfo::lowerScalarAbs(SmallVectorImpl<MachineInstr *> &Worklist,
3016                                  MachineInstr &Inst) const {
3017   MachineBasicBlock &MBB = *Inst.getParent();
3018   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3019   MachineBasicBlock::iterator MII = Inst;
3020   DebugLoc DL = Inst.getDebugLoc();
3021 
3022   MachineOperand &Dest = Inst.getOperand(0);
3023   MachineOperand &Src = Inst.getOperand(1);
3024   unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3025   unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3026 
3027   BuildMI(MBB, MII, DL, get(AMDGPU::V_SUB_I32_e32), TmpReg)
3028     .addImm(0)
3029     .addReg(Src.getReg());
3030 
3031   BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg)
3032     .addReg(Src.getReg())
3033     .addReg(TmpReg);
3034 
3035   MRI.replaceRegWith(Dest.getReg(), ResultReg);
3036   addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
3037 }
3038 
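     // Split a 64-bit scalar unary operation into two 32-bit VALU halves.
     // Roughly, for S_NOT_B64 (register names are illustrative):
     //   %lo:vgpr_32  = V_NOT_B32_e32 %src0.sub0
     //   %hi:vgpr_32  = V_NOT_B32_e32 %src0.sub1
     //   %dst:vreg_64 = REG_SEQUENCE %lo, sub0, %hi, sub1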
3039 void SIInstrInfo::splitScalar64BitUnaryOp(
3040     SmallVectorImpl<MachineInstr *> &Worklist, MachineInstr &Inst,
3041     unsigned Opcode) const {
3042   MachineBasicBlock &MBB = *Inst.getParent();
3043   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3044 
3045   MachineOperand &Dest = Inst.getOperand(0);
3046   MachineOperand &Src0 = Inst.getOperand(1);
3047   DebugLoc DL = Inst.getDebugLoc();
3048 
3049   MachineBasicBlock::iterator MII = Inst;
3050 
3051   const MCInstrDesc &InstDesc = get(Opcode);
3052   const TargetRegisterClass *Src0RC = Src0.isReg() ?
3053     MRI.getRegClass(Src0.getReg()) :
3054     &AMDGPU::SGPR_32RegClass;
3055 
3056   const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
3057 
3058   MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
3059                                                        AMDGPU::sub0, Src0SubRC);
3060 
3061   const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
3062   const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
3063   const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
3064 
3065   unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
3066   BuildMI(MBB, MII, DL, InstDesc, DestSub0)
3067     .addOperand(SrcReg0Sub0);
3068 
3069   MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
3070                                                        AMDGPU::sub1, Src0SubRC);
3071 
3072   unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
3073   BuildMI(MBB, MII, DL, InstDesc, DestSub1)
3074     .addOperand(SrcReg0Sub1);
3075 
3076   unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC);
3077   BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
3078     .addReg(DestSub0)
3079     .addImm(AMDGPU::sub0)
3080     .addReg(DestSub1)
3081     .addImm(AMDGPU::sub1);
3082 
3083   MRI.replaceRegWith(Dest.getReg(), FullDestReg);
3084 
3085   // We don't need to legalizeOperands here because src0 of a single-operand
3086   // instruction will accept any kind of input.
3087 
3088   // Move all users of this moved value.
3089   addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
3090 }
3091 
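     // Split a 64-bit scalar bitwise binary operation into two 32-bit VALU
     // halves. Roughly, for S_AND_B64 (register names are illustrative):
     //   %lo:vgpr_32  = V_AND_B32_e64 %src0.sub0, %src1.sub0
     //   %hi:vgpr_32  = V_AND_B32_e64 %src0.sub1, %src1.sub1
     //   %dst:vreg_64 = REG_SEQUENCE %lo, sub0, %hi, sub1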
3092 void SIInstrInfo::splitScalar64BitBinaryOp(
3093     SmallVectorImpl<MachineInstr *> &Worklist, MachineInstr &Inst,
3094     unsigned Opcode) const {
3095   MachineBasicBlock &MBB = *Inst.getParent();
3096   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3097 
3098   MachineOperand &Dest = Inst.getOperand(0);
3099   MachineOperand &Src0 = Inst.getOperand(1);
3100   MachineOperand &Src1 = Inst.getOperand(2);
3101   DebugLoc DL = Inst.getDebugLoc();
3102 
3103   MachineBasicBlock::iterator MII = Inst;
3104 
3105   const MCInstrDesc &InstDesc = get(Opcode);
3106   const TargetRegisterClass *Src0RC = Src0.isReg() ?
3107     MRI.getRegClass(Src0.getReg()) :
3108     &AMDGPU::SGPR_32RegClass;
3109 
3110   const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
3111   const TargetRegisterClass *Src1RC = Src1.isReg() ?
3112     MRI.getRegClass(Src1.getReg()) :
3113     &AMDGPU::SGPR_32RegClass;
3114 
3115   const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);
3116 
3117   MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
3118                                                        AMDGPU::sub0, Src0SubRC);
3119   MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
3120                                                        AMDGPU::sub0, Src1SubRC);
3121 
3122   const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
3123   const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
3124   const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
3125 
3126   unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
3127   MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0)
3128                               .addOperand(SrcReg0Sub0)
3129                               .addOperand(SrcReg1Sub0);
3130 
3131   MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
3132                                                        AMDGPU::sub1, Src0SubRC);
3133   MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
3134                                                        AMDGPU::sub1, Src1SubRC);
3135 
3136   unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
3137   MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1)
3138                               .addOperand(SrcReg0Sub1)
3139                               .addOperand(SrcReg1Sub1);
3140 
3141   unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC);
3142   BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
3143     .addReg(DestSub0)
3144     .addImm(AMDGPU::sub0)
3145     .addReg(DestSub1)
3146     .addImm(AMDGPU::sub1);
3147 
3148   MRI.replaceRegWith(Dest.getReg(), FullDestReg);
3149 
3150   // Try to legalize the operands in case we need to swap the order to keep it
3151   // valid.
3152   legalizeOperands(LoHalf);
3153   legalizeOperands(HiHalf);
3154 
3155   // Move all users of this moved value.
3156   addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
3157 }
3158 
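     // Lower S_BCNT1_I32_B64 by counting each 32-bit half and accumulating,
     // since V_BCNT_U32_B32 adds its second operand to the popcount. Roughly:
     //   %mid:vgpr_32    = V_BCNT_U32_B32_e64 %src.sub0, 0
     //   %result:vgpr_32 = V_BCNT_U32_B32_e64 %src.sub1, %mid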
3159 void SIInstrInfo::splitScalar64BitBCNT(
3160     SmallVectorImpl<MachineInstr *> &Worklist, MachineInstr &Inst) const {
3161   MachineBasicBlock &MBB = *Inst.getParent();
3162   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3163 
3164   MachineBasicBlock::iterator MII = Inst;
3165   DebugLoc DL = Inst.getDebugLoc();
3166 
3167   MachineOperand &Dest = Inst.getOperand(0);
3168   MachineOperand &Src = Inst.getOperand(1);
3169 
3170   const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64);
3171   const TargetRegisterClass *SrcRC = Src.isReg() ?
3172     MRI.getRegClass(Src.getReg()) :
3173     &AMDGPU::SGPR_32RegClass;
3174 
3175   unsigned MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3176   unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3177 
3178   const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0);
3179 
3180   MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
3181                                                       AMDGPU::sub0, SrcSubRC);
3182   MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
3183                                                       AMDGPU::sub1, SrcSubRC);
3184 
3185   BuildMI(MBB, MII, DL, InstDesc, MidReg)
3186     .addOperand(SrcRegSub0)
3187     .addImm(0);
3188 
3189   BuildMI(MBB, MII, DL, InstDesc, ResultReg)
3190     .addOperand(SrcRegSub1)
3191     .addReg(MidReg);
3192 
3193   MRI.replaceRegWith(Dest.getReg(), ResultReg);
3194 
3195   // We don't need to legalize operands here. src0 for either instruction can be
3196   // an SGPR, and the second input is unused or determined here.
3197   addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
3198 }
3199 
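     // Lower S_BFE_I64; only the sext_inreg pattern (offset 0, width <= 32) is
     // handled, as asserted below. Roughly, for a width of 16:
     //   %lo:vgpr_32  = V_BFE_I32 %src.sub0, 0, 16
     //   %hi:vgpr_32  = V_ASHRREV_I32_e32 31, %lo
     //   %dst:vreg_64 = REG_SEQUENCE %lo, sub0, %hi, sub1
     // For a width of exactly 32 the low half is just %src.sub0 and the high
     // half is its sign (an ashr by 31).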
3200 void SIInstrInfo::splitScalar64BitBFE(SmallVectorImpl<MachineInstr *> &Worklist,
3201                                       MachineInstr &Inst) const {
3202   MachineBasicBlock &MBB = *Inst.getParent();
3203   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3204   MachineBasicBlock::iterator MII = Inst;
3205   DebugLoc DL = Inst.getDebugLoc();
3206 
3207   MachineOperand &Dest = Inst.getOperand(0);
3208   uint32_t Imm = Inst.getOperand(2).getImm();
3209   uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
3210   uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
3211 
3212   (void) Offset;
3213 
3214   // Only the sext_inreg cases are handled.
3215   assert(Inst.getOpcode() == AMDGPU::S_BFE_I64 && BitWidth <= 32 &&
3216          Offset == 0 && "Not implemented");
3217 
3218   if (BitWidth < 32) {
3219     unsigned MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3220     unsigned MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3221     unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
3222 
3223     BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32), MidRegLo)
3224         .addReg(Inst.getOperand(1).getReg(), 0, AMDGPU::sub0)
3225         .addImm(0)
3226         .addImm(BitWidth);
3227 
3228     BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi)
3229       .addImm(31)
3230       .addReg(MidRegLo);
3231 
3232     BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
3233       .addReg(MidRegLo)
3234       .addImm(AMDGPU::sub0)
3235       .addReg(MidRegHi)
3236       .addImm(AMDGPU::sub1);
3237 
3238     MRI.replaceRegWith(Dest.getReg(), ResultReg);
3239     addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
3240     return;
3241   }
3242 
3243   MachineOperand &Src = Inst.getOperand(1);
3244   unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3245   unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
3246 
3247   BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg)
3248     .addImm(31)
3249     .addReg(Src.getReg(), 0, AMDGPU::sub0);
3250 
3251   BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
3252     .addReg(Src.getReg(), 0, AMDGPU::sub0)
3253     .addImm(AMDGPU::sub0)
3254     .addReg(TmpReg)
3255     .addImm(AMDGPU::sub1);
3256 
3257   MRI.replaceRegWith(Dest.getReg(), ResultReg);
3258   addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
3259 }
3260 
3261 void SIInstrInfo::addUsersToMoveToVALUWorklist(
3262   unsigned DstReg,
3263   MachineRegisterInfo &MRI,
3264   SmallVectorImpl<MachineInstr *> &Worklist) const {
3265   for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg),
3266          E = MRI.use_end(); I != E; ++I) {
3267     MachineInstr &UseMI = *I->getParent();
3268     if (!canReadVGPR(UseMI, I.getOperandNo())) {
3269       Worklist.push_back(&UseMI);
3270     }
3271   }
3272 }
3273 
3274 void SIInstrInfo::addSCCDefUsersToVALUWorklist(
3275     MachineInstr &SCCDefInst, SmallVectorImpl<MachineInstr *> &Worklist) const {
3276   // This assumes that all the users of SCC are in the same block
3277   // as the SCC def.
3278   for (MachineInstr &MI :
3279        llvm::make_range(MachineBasicBlock::iterator(SCCDefInst),
3280                         SCCDefInst.getParent()->end())) {
3281     // Exit if we find another SCC def.
3282     if (MI.findRegisterDefOperandIdx(AMDGPU::SCC) != -1)
3283       return;
3284 
3285     if (MI.findRegisterUseOperandIdx(AMDGPU::SCC) != -1)
3286       Worklist.push_back(&MI);
3287   }
3288 }
3289 
3290 const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass(
3291   const MachineInstr &Inst) const {
3292   const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0);
3293 
3294   switch (Inst.getOpcode()) {
3295   // For target instructions, getOpRegClass just returns the virtual register
3296   // class associated with the operand, so we need to find an equivalent VGPR
3297   // register class in order to move the instruction to the VALU.
3298   case AMDGPU::COPY:
3299   case AMDGPU::PHI:
3300   case AMDGPU::REG_SEQUENCE:
3301   case AMDGPU::INSERT_SUBREG:
3302     if (RI.hasVGPRs(NewDstRC))
3303       return nullptr;
3304 
3305     NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
3306     if (!NewDstRC)
3307       return nullptr;
3308     return NewDstRC;
3309   default:
3310     return NewDstRC;
3311   }
3312 }
3313 
3314 // Find the one SGPR operand we are allowed to use.
3315 unsigned SIInstrInfo::findUsedSGPR(const MachineInstr &MI,
3316                                    int OpIndices[3]) const {
3317   const MCInstrDesc &Desc = MI.getDesc();
3318 
3319   // Find the one SGPR operand we are allowed to use.
3320   //
3321   // First we need to consider the instruction's operand requirements before
3322   // legalizing. Some operands are required to be SGPRs, such as implicit uses
3323   // of VCC, but we are still bound by the constant bus requirement to only use
3324   // one.
3325   //
3326   // If the operand's class is an SGPR, we can never move it.
3327 
3328   unsigned SGPRReg = findImplicitSGPRRead(MI);
3329   if (SGPRReg != AMDGPU::NoRegister)
3330     return SGPRReg;
3331 
3332   unsigned UsedSGPRs[3] = { AMDGPU::NoRegister };
3333   const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
3334 
3335   for (unsigned i = 0; i < 3; ++i) {
3336     int Idx = OpIndices[i];
3337     if (Idx == -1)
3338       break;
3339 
3340     const MachineOperand &MO = MI.getOperand(Idx);
3341     if (!MO.isReg())
3342       continue;
3343 
3344     // Is this operand statically required to be an SGPR based on the operand
3345     // constraints?
3346     const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass);
3347     bool IsRequiredSGPR = RI.isSGPRClass(OpRC);
3348     if (IsRequiredSGPR)
3349       return MO.getReg();
3350 
3351     // If this could be a VGPR or an SGPR, check the dynamic register class.
3352     unsigned Reg = MO.getReg();
3353     const TargetRegisterClass *RegRC = MRI.getRegClass(Reg);
3354     if (RI.isSGPRClass(RegRC))
3355       UsedSGPRs[i] = Reg;
3356   }
3357 
3358   // We don't have a required SGPR operand, so we have a bit more freedom in
3359   // selecting operands to move.
3360 
3361   // Try to select the most used SGPR. If an SGPR is equal to one of the
3362   // others, we choose that.
3363   //
3364   // e.g.
3365   // V_FMA_F32 v0, s0, s0, s0 -> No moves
3366   // V_FMA_F32 v0, s0, s1, s0 -> Move s1
3367 
3368   // TODO: If some of the operands are 64-bit SGPRs and some 32, we should
3369   // prefer those.
3370 
3371   if (UsedSGPRs[0] != AMDGPU::NoRegister) {
3372     if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2])
3373       SGPRReg = UsedSGPRs[0];
3374   }
3375 
3376   if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) {
3377     if (UsedSGPRs[1] == UsedSGPRs[2])
3378       SGPRReg = UsedSGPRs[1];
3379   }
3380 
3381   return SGPRReg;
3382 }
3383 
3384 MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI,
3385                                              unsigned OperandName) const {
3386   int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName);
3387   if (Idx == -1)
3388     return nullptr;
3389 
3390   return &MI.getOperand(Idx);
3391 }
3392 
3393 uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const {
3394   uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT;
3395   if (ST.isAmdHsaOS()) {
3396     RsrcDataFormat |= (1ULL << 56);
3397 
3398     if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
3399       // Set MTYPE = 2
3400       RsrcDataFormat |= (2ULL << 59);
3401   }
3402 
3403   return RsrcDataFormat;
3404 }
3405 
3406 uint64_t SIInstrInfo::getScratchRsrcWords23() const {
3407   uint64_t Rsrc23 = getDefaultRsrcDataFormat() |
3408                     AMDGPU::RSRC_TID_ENABLE |
3409                     0xffffffff; // Size;
3410 
3411   uint64_t EltSizeValue = Log2_32(ST.getMaxPrivateElementSize()) - 1;
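       // The ELEMENT_SIZE field encodes 2^(n+1) bytes, so e.g. a max private
       // element size of 16 gives Log2_32(16) - 1 = 3.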
3412 
3413   Rsrc23 |= (EltSizeValue << AMDGPU::RSRC_ELEMENT_SIZE_SHIFT) |
3414             // IndexStride = 64
3415             (UINT64_C(3) << AMDGPU::RSRC_INDEX_STRIDE_SHIFT);
3416 
3417   // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17].
3418   // Clear them unless we want a huge stride.
3419   if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
3420     Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT;
3421 
3422   return Rsrc23;
3423 }
3424 
3425 bool SIInstrInfo::isLowLatencyInstruction(const MachineInstr &MI) const {
3426   unsigned Opc = MI.getOpcode();
3427 
3428   return isSMRD(Opc);
3429 }
3430 
3431 bool SIInstrInfo::isHighLatencyInstruction(const MachineInstr &MI) const {
3432   unsigned Opc = MI.getOpcode();
3433 
3434   return isMUBUF(Opc) || isMTBUF(Opc) || isMIMG(Opc);
3435 }
3436 
3437 unsigned SIInstrInfo::isStackAccess(const MachineInstr &MI,
3438                                     int &FrameIndex) const {
3439   const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
3440   if (!Addr || !Addr->isFI())
3441     return AMDGPU::NoRegister;
3442 
3443   assert(!MI.memoperands_empty() &&
3444          (*MI.memoperands_begin())->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS);
3445 
3446   FrameIndex = Addr->getIndex();
3447   return getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg();
3448 }
3449 
3450 unsigned SIInstrInfo::isSGPRStackAccess(const MachineInstr &MI,
3451                                         int &FrameIndex) const {
3452   const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::addr);
3453   assert(Addr && Addr->isFI());
3454   FrameIndex = Addr->getIndex();
3455   return getNamedOperand(MI, AMDGPU::OpName::data)->getReg();
3456 }
3457 
3458 unsigned SIInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
3459                                           int &FrameIndex) const {
3460 
3461   if (!MI.mayLoad())
3462     return AMDGPU::NoRegister;
3463 
3464   if (isMUBUF(MI) || isVGPRSpill(MI))
3465     return isStackAccess(MI, FrameIndex);
3466 
3467   if (isSGPRSpill(MI))
3468     return isSGPRStackAccess(MI, FrameIndex);
3469 
3470   return AMDGPU::NoRegister;
3471 }
3472 
3473 unsigned SIInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
3474                                          int &FrameIndex) const {
3475   if (!MI.mayStore())
3476     return AMDGPU::NoRegister;
3477 
3478   if (isMUBUF(MI) || isVGPRSpill(MI))
3479     return isStackAccess(MI, FrameIndex);
3480 
3481   if (isSGPRSpill(MI))
3482     return isSGPRStackAccess(MI, FrameIndex);
3483 
3484   return AMDGPU::NoRegister;
3485 }
3486 
3487 unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
3488   unsigned Opc = MI.getOpcode();
3489   const MCInstrDesc &Desc = getMCOpcodeFromPseudo(Opc);
3490   unsigned DescSize = Desc.getSize();
3491 
3492   // If we have a definitive size, we can use it. Otherwise we need to inspect
3493   // the operands to know the size.
3494   //
3495   // FIXME: Instructions that have a base 32-bit encoding report their size as
3496   // 4, even though they are really 8 bytes if they have a literal operand.
3497   if (DescSize != 0 && DescSize != 4)
3498     return DescSize;
3499 
3500   if (Opc == AMDGPU::WAVE_BARRIER)
3501     return 0;
3502 
3503   // 4-byte instructions may have a 32-bit literal encoded after them. Check
3504   // operands that could ever be literals.
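       // For example, a VOP2 add whose src0 is a literal, roughly
       //   v_add_f32_e32 v0, 0x12345678, v1
       // is the 4-byte base encoding plus a trailing 32-bit literal, 8 bytes total.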
3505   if (isVALU(MI) || isSALU(MI)) {
3506     if (isFixedSize(MI)) {
3507       assert(DescSize == 4);
3508       return DescSize;
3509     }
3510 
3511     int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
3512     if (Src0Idx == -1)
3513       return 4; // No operands.
3514 
3515     if (isLiteralConstantLike(MI.getOperand(Src0Idx), getOpSize(MI, Src0Idx)))
3516       return 8;
3517 
3518     int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
3519     if (Src1Idx == -1)
3520       return 4;
3521 
3522     if (isLiteralConstantLike(MI.getOperand(Src1Idx), getOpSize(MI, Src1Idx)))
3523       return 8;
3524 
3525     return 4;
3526   }
3527 
3528   if (DescSize == 4)
3529     return 4;
3530 
3531   switch (Opc) {
3532   case AMDGPU::SI_MASK_BRANCH:
3533   case TargetOpcode::IMPLICIT_DEF:
3534   case TargetOpcode::KILL:
3535   case TargetOpcode::DBG_VALUE:
3536   case TargetOpcode::BUNDLE:
3537   case TargetOpcode::EH_LABEL:
3538     return 0;
3539   case TargetOpcode::INLINEASM: {
3540     const MachineFunction *MF = MI.getParent()->getParent();
3541     const char *AsmStr = MI.getOperand(0).getSymbolName();
3542     return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
3543   }
3544   default:
3545     llvm_unreachable("unable to find instruction size");
3546   }
3547 }
3548 
3549 bool SIInstrInfo::mayAccessFlatAddressSpace(const MachineInstr &MI) const {
3550   if (!isFLAT(MI))
3551     return false;
3552 
3553   if (MI.memoperands_empty())
3554     return true;
3555 
3556   for (const MachineMemOperand *MMO : MI.memoperands()) {
3557     if (MMO->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS)
3558       return true;
3559   }
3560   return false;
3561 }
3562 
3563 ArrayRef<std::pair<int, const char *>>
3564 SIInstrInfo::getSerializableTargetIndices() const {
3565   static const std::pair<int, const char *> TargetIndices[] = {
3566       {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"},
3567       {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"},
3568       {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"},
3569       {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"},
3570       {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}};
3571   return makeArrayRef(TargetIndices);
3572 }
3573 
3574 /// This is used by the post-RA scheduler (SchedulePostRAList.cpp).  The
3575 /// post-RA version of misched uses CreateTargetMIHazardRecognizer.
3576 ScheduleHazardRecognizer *
3577 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
3578                                             const ScheduleDAG *DAG) const {
3579   return new GCNHazardRecognizer(DAG->MF);
3580 }
3581 
3582 /// This is the hazard recognizer used at -O0 by the PostRAHazardRecognizer
3583 /// pass.
3584 ScheduleHazardRecognizer *
3585 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const {
3586   return new GCNHazardRecognizer(MF);
3587 }
3588