1 //===-- SIInstrInfo.cpp - SI Instruction Information  ---------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 /// \file
11 /// \brief SI Implementation of TargetInstrInfo.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "SIInstrInfo.h"
16 #include "AMDGPUTargetMachine.h"
17 #include "GCNHazardRecognizer.h"
18 #include "SIDefines.h"
19 #include "SIMachineFunctionInfo.h"
20 #include "llvm/CodeGen/MachineFrameInfo.h"
21 #include "llvm/CodeGen/MachineInstrBuilder.h"
22 #include "llvm/CodeGen/MachineRegisterInfo.h"
23 #include "llvm/CodeGen/ScheduleDAG.h"
24 #include "llvm/IR/Function.h"
25 #include "llvm/CodeGen/RegisterScavenging.h"
26 #include "llvm/MC/MCInstrDesc.h"
27 #include "llvm/Support/Debug.h"
28 
29 using namespace llvm;
30 
31 // Must be at least 4 to be able to branch over minimum unconditional branch
32 // code. This is only for making it possible to write reasonably small tests for
33 // long branches.
34 static cl::opt<unsigned>
35 BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16),
36                  cl::desc("Restrict range of branch instructions (DEBUG)"));
37 
38 SIInstrInfo::SIInstrInfo(const SISubtarget &ST)
39   : AMDGPUInstrInfo(ST), RI(), ST(ST) {}
40 
41 //===----------------------------------------------------------------------===//
42 // TargetInstrInfo callbacks
43 //===----------------------------------------------------------------------===//
44 
45 static unsigned getNumOperandsNoGlue(SDNode *Node) {
46   unsigned N = Node->getNumOperands();
47   while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
48     --N;
49   return N;
50 }
51 
52 static SDValue findChainOperand(SDNode *Load) {
53   SDValue LastOp = Load->getOperand(getNumOperandsNoGlue(Load) - 1);
54   assert(LastOp.getValueType() == MVT::Other && "Chain missing from load node");
55   return LastOp;
56 }
57 
/// \brief Returns true if both nodes have the same value for the given
///        operand \p OpName, or if both nodes do not have this operand.
60 static bool nodesHaveSameOperandValue(SDNode *N0, SDNode* N1, unsigned OpName) {
61   unsigned Opc0 = N0->getMachineOpcode();
62   unsigned Opc1 = N1->getMachineOpcode();
63 
64   int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
65   int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);
66 
67   if (Op0Idx == -1 && Op1Idx == -1)
68     return true;
69 
71   if ((Op0Idx == -1 && Op1Idx != -1) ||
72       (Op1Idx == -1 && Op0Idx != -1))
73     return false;
74 
75   // getNamedOperandIdx returns the index for the MachineInstr's operands,
76   // which includes the result as the first operand. We are indexing into the
77   // MachineSDNode's operands, so we need to skip the result operand to get
78   // the real index.
79   --Op0Idx;
80   --Op1Idx;
81 
82   return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
83 }
84 
85 bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
86                                                     AliasAnalysis *AA) const {
  // TODO: The generic check fails for VALU instructions that should be
  // rematerializable due to implicit reads of exec. We really want all of the
  // generic logic for this except for the exec read check.
90   switch (MI.getOpcode()) {
91   case AMDGPU::V_MOV_B32_e32:
92   case AMDGPU::V_MOV_B32_e64:
93   case AMDGPU::V_MOV_B64_PSEUDO:
94     return true;
95   default:
96     return false;
97   }
98 }
99 
100 bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
101                                           int64_t &Offset0,
102                                           int64_t &Offset1) const {
103   if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
104     return false;
105 
106   unsigned Opc0 = Load0->getMachineOpcode();
107   unsigned Opc1 = Load1->getMachineOpcode();
108 
109   // Make sure both are actually loads.
110   if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
111     return false;
112 
113   if (isDS(Opc0) && isDS(Opc1)) {
114 
115     // FIXME: Handle this case:
116     if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1))
117       return false;
118 
119     // Check base reg.
120     if (Load0->getOperand(1) != Load1->getOperand(1))
121       return false;
122 
123     // Check chain.
124     if (findChainOperand(Load0) != findChainOperand(Load1))
125       return false;
126 
127     // Skip read2 / write2 variants for simplicity.
    // TODO: We should report true if the used offsets are adjacent (excluding
    // st64 versions).
130     if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::data1) != -1 ||
131         AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::data1) != -1)
132       return false;
133 
134     Offset0 = cast<ConstantSDNode>(Load0->getOperand(2))->getZExtValue();
135     Offset1 = cast<ConstantSDNode>(Load1->getOperand(2))->getZExtValue();
136     return true;
137   }
138 
139   if (isSMRD(Opc0) && isSMRD(Opc1)) {
140     assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1));
141 
142     // Check base reg.
143     if (Load0->getOperand(0) != Load1->getOperand(0))
144       return false;
145 
146     const ConstantSDNode *Load0Offset =
147         dyn_cast<ConstantSDNode>(Load0->getOperand(1));
148     const ConstantSDNode *Load1Offset =
149         dyn_cast<ConstantSDNode>(Load1->getOperand(1));
150 
151     if (!Load0Offset || !Load1Offset)
152       return false;
153 
154     // Check chain.
155     if (findChainOperand(Load0) != findChainOperand(Load1))
156       return false;
157 
158     Offset0 = Load0Offset->getZExtValue();
159     Offset1 = Load1Offset->getZExtValue();
160     return true;
161   }
162 
163   // MUBUF and MTBUF can access the same addresses.
164   if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) {
165 
166     // MUBUF and MTBUF have vaddr at different indices.
167     if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||
168         findChainOperand(Load0) != findChainOperand(Load1) ||
169         !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||
170         !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))
171       return false;
172 
173     int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
174     int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);
175 
176     if (OffIdx0 == -1 || OffIdx1 == -1)
177       return false;
178 
    // getNamedOperandIdx returns the index for MachineInstrs. Since they
    // include the output in the operand list, but SDNodes don't, we need to
    // subtract one from the index.
182     --OffIdx0;
183     --OffIdx1;
184 
185     SDValue Off0 = Load0->getOperand(OffIdx0);
186     SDValue Off1 = Load1->getOperand(OffIdx1);
187 
188     // The offset might be a FrameIndexSDNode.
189     if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
190       return false;
191 
192     Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
193     Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
194     return true;
195   }
196 
197   return false;
198 }
199 
200 static bool isStride64(unsigned Opc) {
201   switch (Opc) {
202   case AMDGPU::DS_READ2ST64_B32:
203   case AMDGPU::DS_READ2ST64_B64:
204   case AMDGPU::DS_WRITE2ST64_B32:
205   case AMDGPU::DS_WRITE2ST64_B64:
206     return true;
207   default:
208     return false;
209   }
210 }
211 
212 bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
213                                         int64_t &Offset,
214                                         const TargetRegisterInfo *TRI) const {
215   unsigned Opc = LdSt.getOpcode();
216 
217   if (isDS(LdSt)) {
218     const MachineOperand *OffsetImm =
219         getNamedOperand(LdSt, AMDGPU::OpName::offset);
220     if (OffsetImm) {
221       // Normal, single offset LDS instruction.
222       const MachineOperand *AddrReg =
223           getNamedOperand(LdSt, AMDGPU::OpName::addr);
224 
225       BaseReg = AddrReg->getReg();
226       Offset = OffsetImm->getImm();
227       return true;
228     }
229 
    // The two-offset instructions use offset0 and offset1 instead. We can
    // treat these as a load with a single offset if the two offsets are
    // consecutive. We will use this for some partially aligned loads.
233     const MachineOperand *Offset0Imm =
234         getNamedOperand(LdSt, AMDGPU::OpName::offset0);
235     const MachineOperand *Offset1Imm =
236         getNamedOperand(LdSt, AMDGPU::OpName::offset1);
237 
238     uint8_t Offset0 = Offset0Imm->getImm();
239     uint8_t Offset1 = Offset1Imm->getImm();
240 
241     if (Offset1 > Offset0 && Offset1 - Offset0 == 1) {
      // Each of these offsets is in element-sized units, so we need to convert
      // them to the byte offsets of the individual reads.
244 
245       unsigned EltSize;
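      // A ds_read2 destination covers both elements, so for a load each
      // element is half the size of the destination register class; for a
      // store, use the size of the data0 operand's register class.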
246       if (LdSt.mayLoad())
247         EltSize = getOpRegClass(LdSt, 0)->getSize() / 2;
248       else {
249         assert(LdSt.mayStore());
250         int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
251         EltSize = getOpRegClass(LdSt, Data0Idx)->getSize();
252       }
253 
254       if (isStride64(Opc))
255         EltSize *= 64;
256 
257       const MachineOperand *AddrReg =
258           getNamedOperand(LdSt, AMDGPU::OpName::addr);
259       BaseReg = AddrReg->getReg();
260       Offset = EltSize * Offset0;
261       return true;
262     }
263 
264     return false;
265   }
266 
267   if (isMUBUF(LdSt) || isMTBUF(LdSt)) {
    const MachineOperand *SOffset =
        getNamedOperand(LdSt, AMDGPU::OpName::soffset);
269     if (SOffset && SOffset->isReg())
270       return false;
271 
272     const MachineOperand *AddrReg =
273         getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
274     if (!AddrReg)
275       return false;
276 
277     const MachineOperand *OffsetImm =
278         getNamedOperand(LdSt, AMDGPU::OpName::offset);
279     BaseReg = AddrReg->getReg();
280     Offset = OffsetImm->getImm();
281 
282     if (SOffset) // soffset can be an inline immediate.
283       Offset += SOffset->getImm();
284 
285     return true;
286   }
287 
288   if (isSMRD(LdSt)) {
289     const MachineOperand *OffsetImm =
290         getNamedOperand(LdSt, AMDGPU::OpName::offset);
291     if (!OffsetImm)
292       return false;
293 
294     const MachineOperand *SBaseReg =
295         getNamedOperand(LdSt, AMDGPU::OpName::sbase);
296     BaseReg = SBaseReg->getReg();
297     Offset = OffsetImm->getImm();
298     return true;
299   }
300 
301   if (isFLAT(LdSt)) {
302     const MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
303     BaseReg = AddrReg->getReg();
304     Offset = 0;
305     return true;
306   }
307 
308   return false;
309 }
310 
311 bool SIInstrInfo::shouldClusterMemOps(MachineInstr &FirstLdSt,
312                                       MachineInstr &SecondLdSt,
313                                       unsigned NumLoads) const {
314   const MachineOperand *FirstDst = nullptr;
315   const MachineOperand *SecondDst = nullptr;
316 
317   if ((isMUBUF(FirstLdSt) && isMUBUF(SecondLdSt)) ||
318       (isMTBUF(FirstLdSt) && isMTBUF(SecondLdSt))) {
319     FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdata);
320     SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdata);
321   } else if (isSMRD(FirstLdSt) && isSMRD(SecondLdSt)) {
322     FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::sdst);
323     SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::sdst);
324   } else if (isDS(FirstLdSt) && isDS(SecondLdSt)) {
325     FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdst);
326     SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdst);
327   }
328 
329   if (!FirstDst || !SecondDst)
330     return false;
331 
332   // Try to limit clustering based on the total number of bytes loaded
333   // rather than the number of instructions.  This is done to help reduce
334   // register pressure.  The method used is somewhat inexact, though,
335   // because it assumes that all loads in the cluster will load the
336   // same number of bytes as FirstLdSt.
337 
338   // The unit of this value is bytes.
339   // FIXME: This needs finer tuning.
340   unsigned LoadClusterThreshold = 16;
341 
342   const MachineRegisterInfo &MRI =
343       FirstLdSt.getParent()->getParent()->getRegInfo();
344   const TargetRegisterClass *DstRC = MRI.getRegClass(FirstDst->getReg());
345 
346   return (NumLoads * DstRC->getSize()) <= LoadClusterThreshold;
347 }
348 
349 void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
350                               MachineBasicBlock::iterator MI,
351                               const DebugLoc &DL, unsigned DestReg,
352                               unsigned SrcReg, bool KillSrc) const {
353   const TargetRegisterClass *RC = RI.getPhysRegClass(DestReg);
354 
355   if (RC == &AMDGPU::VGPR_32RegClass) {
356     assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
357            AMDGPU::SReg_32RegClass.contains(SrcReg));
358     BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
359       .addReg(SrcReg, getKillRegState(KillSrc));
360     return;
361   }
362 
363   if (RC == &AMDGPU::SReg_32_XM0RegClass ||
364       RC == &AMDGPU::SReg_32RegClass) {
365     if (SrcReg == AMDGPU::SCC) {
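      // There is no direct move from SCC; materialize it with a conditional
      // select that produces -1 when SCC is set and 0 otherwise.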
366       BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B32), DestReg)
367           .addImm(-1)
368           .addImm(0);
369       return;
370     }
371 
372     assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
373     BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
374             .addReg(SrcReg, getKillRegState(KillSrc));
375     return;
376   }
377 
378   if (RC == &AMDGPU::SReg_64RegClass) {
379     if (DestReg == AMDGPU::VCC) {
380       if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
381         BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
382           .addReg(SrcReg, getKillRegState(KillSrc));
383       } else {
384         // FIXME: Hack until VReg_1 removed.
385         assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
386         BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
387           .addImm(0)
388           .addReg(SrcReg, getKillRegState(KillSrc));
389       }
390 
391       return;
392     }
393 
394     assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
395     BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
396             .addReg(SrcReg, getKillRegState(KillSrc));
397     return;
398   }
399 
400   if (DestReg == AMDGPU::SCC) {
401     assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
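    // There is no direct move into SCC; compare the source against zero so
    // that SCC = (SrcReg != 0).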
402     BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32))
403       .addReg(SrcReg, getKillRegState(KillSrc))
404       .addImm(0);
405     return;
406   }
407 
408   unsigned EltSize = 4;
409   unsigned Opcode = AMDGPU::V_MOV_B32_e32;
410   if (RI.isSGPRClass(RC)) {
411     if (RC->getSize() > 4) {
      Opcode = AMDGPU::S_MOV_B64;
413       EltSize = 8;
414     } else {
415       Opcode = AMDGPU::S_MOV_B32;
416       EltSize = 4;
417     }
418   }
419 
420   ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize);
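  // Pick the copy direction so an overlapping SrcReg is never clobbered
  // before all of its sub-registers have been read.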
421   bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg);
422 
423   for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
424     unsigned SubIdx;
425     if (Forward)
426       SubIdx = SubIndices[Idx];
427     else
428       SubIdx = SubIndices[SubIndices.size() - Idx - 1];
429 
430     MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
431       get(Opcode), RI.getSubReg(DestReg, SubIdx));
432 
433     Builder.addReg(RI.getSubReg(SrcReg, SubIdx));
434 
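    // Kill the full source register on the last sub-copy, implicitly define
    // the full destination on the first, and add an implicit use of SrcReg to
    // every sub-copy so super-register liveness stays correct.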
435     if (Idx == SubIndices.size() - 1)
436       Builder.addReg(SrcReg, getKillRegState(KillSrc) | RegState::Implicit);
437 
438     if (Idx == 0)
439       Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
440 
441     Builder.addReg(SrcReg, RegState::Implicit);
442   }
443 }
444 
445 int SIInstrInfo::commuteOpcode(unsigned Opcode) const {
446   int NewOpc;
447 
448   // Try to map original to commuted opcode
449   NewOpc = AMDGPU::getCommuteRev(Opcode);
450   if (NewOpc != -1)
451     // Check if the commuted (REV) opcode exists on the target.
452     return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;
453 
454   // Try to map commuted to original opcode
455   NewOpc = AMDGPU::getCommuteOrig(Opcode);
456   if (NewOpc != -1)
457     // Check if the original (non-REV) opcode exists on the target.
458     return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;
459 
460   return Opcode;
461 }
462 
unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {
  if (DstRC->getSize() == 4)
    return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  if (DstRC->getSize() == 8 && RI.isSGPRClass(DstRC))
    return AMDGPU::S_MOV_B64;
  if (DstRC->getSize() == 8 && !RI.isSGPRClass(DstRC))
    return AMDGPU::V_MOV_B64_PSEUDO;

  return AMDGPU::COPY;
}
474 
475 static unsigned getSGPRSpillSaveOpcode(unsigned Size) {
476   switch (Size) {
477   case 4:
478     return AMDGPU::SI_SPILL_S32_SAVE;
479   case 8:
480     return AMDGPU::SI_SPILL_S64_SAVE;
481   case 16:
482     return AMDGPU::SI_SPILL_S128_SAVE;
483   case 32:
484     return AMDGPU::SI_SPILL_S256_SAVE;
485   case 64:
486     return AMDGPU::SI_SPILL_S512_SAVE;
487   default:
488     llvm_unreachable("unknown register size");
489   }
490 }
491 
492 static unsigned getVGPRSpillSaveOpcode(unsigned Size) {
493   switch (Size) {
494   case 4:
495     return AMDGPU::SI_SPILL_V32_SAVE;
496   case 8:
497     return AMDGPU::SI_SPILL_V64_SAVE;
498   case 12:
499     return AMDGPU::SI_SPILL_V96_SAVE;
500   case 16:
501     return AMDGPU::SI_SPILL_V128_SAVE;
502   case 32:
503     return AMDGPU::SI_SPILL_V256_SAVE;
504   case 64:
505     return AMDGPU::SI_SPILL_V512_SAVE;
506   default:
507     llvm_unreachable("unknown register size");
508   }
509 }
510 
511 void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
512                                       MachineBasicBlock::iterator MI,
513                                       unsigned SrcReg, bool isKill,
514                                       int FrameIndex,
515                                       const TargetRegisterClass *RC,
516                                       const TargetRegisterInfo *TRI) const {
517   MachineFunction *MF = MBB.getParent();
518   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
519   MachineFrameInfo &FrameInfo = MF->getFrameInfo();
520   DebugLoc DL = MBB.findDebugLoc(MI);
521 
522   unsigned Size = FrameInfo.getObjectSize(FrameIndex);
523   unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
524   MachinePointerInfo PtrInfo
525     = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
526   MachineMemOperand *MMO
527     = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
528                                Size, Align);
529 
530   if (RI.isSGPRClass(RC)) {
531     MFI->setHasSpilledSGPRs();
532 
    // We are only allowed to create one new instruction when spilling
    // registers, so we need to use a pseudo instruction for spilling SGPRs.
535     const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(RC->getSize()));
536 
    // The SGPR spill/restore instructions only work on numbered SGPRs, so we
    // need to make sure we are using the correct register class.
539     if (TargetRegisterInfo::isVirtualRegister(SrcReg) && RC->getSize() == 4) {
540       MachineRegisterInfo &MRI = MF->getRegInfo();
541       MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0RegClass);
542     }
543 
544     MachineInstrBuilder Spill = BuildMI(MBB, MI, DL, OpDesc)
545       .addReg(SrcReg, getKillRegState(isKill)) // data
546       .addFrameIndex(FrameIndex)               // addr
547       .addMemOperand(MMO)
548       .addReg(MFI->getScratchRSrcReg(), RegState::Implicit)
549       .addReg(MFI->getScratchWaveOffsetReg(), RegState::Implicit);
550     // Add the scratch resource registers as implicit uses because we may end up
551     // needing them, and need to ensure that the reserved registers are
552     // correctly handled.
553 
554     if (ST.hasScalarStores()) {
      // m0 is used as the offset when scalar stores are used to spill.
556       Spill.addReg(AMDGPU::M0, RegState::ImplicitDefine);
557     }
558 
559     return;
560   }
561 
562   if (!ST.isVGPRSpillingEnabled(*MF->getFunction())) {
563     LLVMContext &Ctx = MF->getFunction()->getContext();
564     Ctx.emitError("SIInstrInfo::storeRegToStackSlot - Do not know how to"
565                   " spill register");
566     BuildMI(MBB, MI, DL, get(AMDGPU::KILL))
567       .addReg(SrcReg);
568 
569     return;
570   }
571 
572   assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected");
573 
574   unsigned Opcode = getVGPRSpillSaveOpcode(RC->getSize());
575   MFI->setHasSpilledVGPRs();
576   BuildMI(MBB, MI, DL, get(Opcode))
577     .addReg(SrcReg, getKillRegState(isKill)) // data
578     .addFrameIndex(FrameIndex)               // addr
579     .addReg(MFI->getScratchRSrcReg())        // scratch_rsrc
580     .addReg(MFI->getScratchWaveOffsetReg())  // scratch_offset
581     .addImm(0)                               // offset
582     .addMemOperand(MMO);
583 }
584 
585 static unsigned getSGPRSpillRestoreOpcode(unsigned Size) {
586   switch (Size) {
587   case 4:
588     return AMDGPU::SI_SPILL_S32_RESTORE;
589   case 8:
590     return AMDGPU::SI_SPILL_S64_RESTORE;
591   case 16:
592     return AMDGPU::SI_SPILL_S128_RESTORE;
593   case 32:
594     return AMDGPU::SI_SPILL_S256_RESTORE;
595   case 64:
596     return AMDGPU::SI_SPILL_S512_RESTORE;
597   default:
598     llvm_unreachable("unknown register size");
599   }
600 }
601 
602 static unsigned getVGPRSpillRestoreOpcode(unsigned Size) {
603   switch (Size) {
604   case 4:
605     return AMDGPU::SI_SPILL_V32_RESTORE;
606   case 8:
607     return AMDGPU::SI_SPILL_V64_RESTORE;
608   case 12:
609     return AMDGPU::SI_SPILL_V96_RESTORE;
610   case 16:
611     return AMDGPU::SI_SPILL_V128_RESTORE;
612   case 32:
613     return AMDGPU::SI_SPILL_V256_RESTORE;
614   case 64:
615     return AMDGPU::SI_SPILL_V512_RESTORE;
616   default:
617     llvm_unreachable("unknown register size");
618   }
619 }
620 
621 void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
622                                        MachineBasicBlock::iterator MI,
623                                        unsigned DestReg, int FrameIndex,
624                                        const TargetRegisterClass *RC,
625                                        const TargetRegisterInfo *TRI) const {
626   MachineFunction *MF = MBB.getParent();
627   const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
628   MachineFrameInfo &FrameInfo = MF->getFrameInfo();
629   DebugLoc DL = MBB.findDebugLoc(MI);
630   unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
631   unsigned Size = FrameInfo.getObjectSize(FrameIndex);
632 
633   MachinePointerInfo PtrInfo
634     = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
635 
636   MachineMemOperand *MMO = MF->getMachineMemOperand(
637     PtrInfo, MachineMemOperand::MOLoad, Size, Align);
638 
639   if (RI.isSGPRClass(RC)) {
640     // FIXME: Maybe this should not include a memoperand because it will be
641     // lowered to non-memory instructions.
642     const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(RC->getSize()));
643     if (TargetRegisterInfo::isVirtualRegister(DestReg) && RC->getSize() == 4) {
644       MachineRegisterInfo &MRI = MF->getRegInfo();
645       MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0RegClass);
646     }
647 
648     MachineInstrBuilder Spill = BuildMI(MBB, MI, DL, OpDesc, DestReg)
649       .addFrameIndex(FrameIndex) // addr
650       .addMemOperand(MMO)
651       .addReg(MFI->getScratchRSrcReg(), RegState::Implicit)
652       .addReg(MFI->getScratchWaveOffsetReg(), RegState::Implicit);
653 
654     if (ST.hasScalarStores()) {
      // m0 is used as the offset when scalar stores are used to spill.
656       Spill.addReg(AMDGPU::M0, RegState::ImplicitDefine);
657     }
658 
659     return;
660   }
661 
662   if (!ST.isVGPRSpillingEnabled(*MF->getFunction())) {
663     LLVMContext &Ctx = MF->getFunction()->getContext();
664     Ctx.emitError("SIInstrInfo::loadRegFromStackSlot - Do not know how to"
665                   " restore register");
666     BuildMI(MBB, MI, DL, get(AMDGPU::IMPLICIT_DEF), DestReg);
667 
668     return;
669   }
670 
671   assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected");
672 
673   unsigned Opcode = getVGPRSpillRestoreOpcode(RC->getSize());
674   BuildMI(MBB, MI, DL, get(Opcode), DestReg)
675     .addFrameIndex(FrameIndex)              // vaddr
676     .addReg(MFI->getScratchRSrcReg())       // scratch_rsrc
677     .addReg(MFI->getScratchWaveOffsetReg()) // scratch_offset
678     .addImm(0)                              // offset
679     .addMemOperand(MMO);
680 }
681 
/// \param FrameOffset Offset in bytes of the FrameIndex being spilled.
683 unsigned SIInstrInfo::calculateLDSSpillAddress(
684     MachineBasicBlock &MBB, MachineInstr &MI, RegScavenger *RS, unsigned TmpReg,
685     unsigned FrameOffset, unsigned Size) const {
686   MachineFunction *MF = MBB.getParent();
687   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
688   const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
689   const SIRegisterInfo *TRI = ST.getRegisterInfo();
690   DebugLoc DL = MBB.findDebugLoc(MI);
691   unsigned WorkGroupSize = MFI->getMaxFlatWorkGroupSize();
692   unsigned WavefrontSize = ST.getWavefrontSize();
693 
694   unsigned TIDReg = MFI->getTIDReg();
695   if (!MFI->hasCalculatedTID()) {
696     MachineBasicBlock &Entry = MBB.getParent()->front();
697     MachineBasicBlock::iterator Insert = Entry.front();
698     DebugLoc DL = Insert->getDebugLoc();
699 
700     TIDReg = RI.findUnusedRegister(MF->getRegInfo(), &AMDGPU::VGPR_32RegClass,
701                                    *MF);
702     if (TIDReg == AMDGPU::NoRegister)
703       return TIDReg;
704 
705     if (!AMDGPU::isShader(MF->getFunction()->getCallingConv()) &&
706         WorkGroupSize > WavefrontSize) {
707 
708       unsigned TIDIGXReg
709         = TRI->getPreloadedValue(*MF, SIRegisterInfo::WORKGROUP_ID_X);
710       unsigned TIDIGYReg
711         = TRI->getPreloadedValue(*MF, SIRegisterInfo::WORKGROUP_ID_Y);
712       unsigned TIDIGZReg
713         = TRI->getPreloadedValue(*MF, SIRegisterInfo::WORKGROUP_ID_Z);
714       unsigned InputPtrReg =
715           TRI->getPreloadedValue(*MF, SIRegisterInfo::KERNARG_SEGMENT_PTR);
716       for (unsigned Reg : {TIDIGXReg, TIDIGYReg, TIDIGZReg}) {
717         if (!Entry.isLiveIn(Reg))
718           Entry.addLiveIn(Reg);
719       }
720 
721       RS->enterBasicBlock(Entry);
722       // FIXME: Can we scavenge an SReg_64 and access the subregs?
723       unsigned STmp0 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
724       unsigned STmp1 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
725       BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp0)
726               .addReg(InputPtrReg)
727               .addImm(SI::KernelInputOffsets::NGROUPS_Z);
728       BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp1)
729               .addReg(InputPtrReg)
730               .addImm(SI::KernelInputOffsets::NGROUPS_Y);
731 
732       // NGROUPS.X * NGROUPS.Y
733       BuildMI(Entry, Insert, DL, get(AMDGPU::S_MUL_I32), STmp1)
734               .addReg(STmp1)
735               .addReg(STmp0);
736       // (NGROUPS.X * NGROUPS.Y) * TIDIG.X
737       BuildMI(Entry, Insert, DL, get(AMDGPU::V_MUL_U32_U24_e32), TIDReg)
738               .addReg(STmp1)
739               .addReg(TIDIGXReg);
      // NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)
741       BuildMI(Entry, Insert, DL, get(AMDGPU::V_MAD_U32_U24), TIDReg)
742               .addReg(STmp0)
743               .addReg(TIDIGYReg)
744               .addReg(TIDReg);
      // (NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)) + TIDIG.Z
746       BuildMI(Entry, Insert, DL, get(AMDGPU::V_ADD_I32_e32), TIDReg)
747               .addReg(TIDReg)
748               .addReg(TIDIGZReg);
749     } else {
750       // Get the wave id
751       BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_LO_U32_B32_e64),
752               TIDReg)
753               .addImm(-1)
754               .addImm(0);
755 
756       BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_HI_U32_B32_e64),
757               TIDReg)
758               .addImm(-1)
759               .addReg(TIDReg);
760     }
761 
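    // Scale the per-thread index by 4 to form a byte offset (one dword per
    // lane).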
762     BuildMI(Entry, Insert, DL, get(AMDGPU::V_LSHLREV_B32_e32),
763             TIDReg)
764             .addImm(2)
765             .addReg(TIDReg);
766     MFI->setTIDReg(TIDReg);
767   }
768 
769   // Add FrameIndex to LDS offset
770   unsigned LDSOffset = MFI->getLDSSize() + (FrameOffset * WorkGroupSize);
771   BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), TmpReg)
772           .addImm(LDSOffset)
773           .addReg(TIDReg);
774 
775   return TmpReg;
776 }
777 
778 void SIInstrInfo::insertWaitStates(MachineBasicBlock &MBB,
779                                    MachineBasicBlock::iterator MI,
780                                    int Count) const {
781   DebugLoc DL = MBB.findDebugLoc(MI);
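  // s_nop with immediate N inserts N + 1 wait states, so each nop covers at
  // most 8 of the requested wait states.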
782   while (Count > 0) {
783     int Arg;
784     if (Count >= 8)
785       Arg = 7;
786     else
787       Arg = Count - 1;
788     Count -= 8;
789     BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP))
790             .addImm(Arg);
791   }
792 }
793 
794 void SIInstrInfo::insertNoop(MachineBasicBlock &MBB,
795                              MachineBasicBlock::iterator MI) const {
796   insertWaitStates(MBB, MI, 1);
797 }
798 
799 unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) const {
800   switch (MI.getOpcode()) {
801   default: return 1; // FIXME: Do wait states equal cycles?
802 
803   case AMDGPU::S_NOP:
804     return MI.getOperand(0).getImm() + 1;
805   }
806 }
807 
808 bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
809   MachineBasicBlock &MBB = *MI.getParent();
810   DebugLoc DL = MBB.findDebugLoc(MI);
811   switch (MI.getOpcode()) {
812   default: return AMDGPUInstrInfo::expandPostRAPseudo(MI);
813   case AMDGPU::S_MOV_B64_term: {
814     // This is only a terminator to get the correct spill code placement during
815     // register allocation.
816     MI.setDesc(get(AMDGPU::S_MOV_B64));
817     break;
818   }
819   case AMDGPU::S_XOR_B64_term: {
820     // This is only a terminator to get the correct spill code placement during
821     // register allocation.
822     MI.setDesc(get(AMDGPU::S_XOR_B64));
823     break;
824   }
825   case AMDGPU::S_ANDN2_B64_term: {
826     // This is only a terminator to get the correct spill code placement during
827     // register allocation.
828     MI.setDesc(get(AMDGPU::S_ANDN2_B64));
829     break;
830   }
831   case AMDGPU::V_MOV_B64_PSEUDO: {
832     unsigned Dst = MI.getOperand(0).getReg();
833     unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
834     unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
835 
836     const MachineOperand &SrcOp = MI.getOperand(1);
837     // FIXME: Will this work for 64-bit floating point immediates?
838     assert(!SrcOp.isFPImm());
839     if (SrcOp.isImm()) {
840       APInt Imm(64, SrcOp.getImm());
841       BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
842         .addImm(Imm.getLoBits(32).getZExtValue())
843         .addReg(Dst, RegState::Implicit | RegState::Define);
844       BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
845         .addImm(Imm.getHiBits(32).getZExtValue())
846         .addReg(Dst, RegState::Implicit | RegState::Define);
847     } else {
848       assert(SrcOp.isReg());
849       BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
850         .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
851         .addReg(Dst, RegState::Implicit | RegState::Define);
852       BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
853         .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
854         .addReg(Dst, RegState::Implicit | RegState::Define);
855     }
856     MI.eraseFromParent();
857     break;
858   }
859   case AMDGPU::V_MOVRELD_B32_V1:
860   case AMDGPU::V_MOVRELD_B32_V2:
861   case AMDGPU::V_MOVRELD_B32_V4:
862   case AMDGPU::V_MOVRELD_B32_V8:
863   case AMDGPU::V_MOVRELD_B32_V16: {
864     const MCInstrDesc &MovRelDesc = get(AMDGPU::V_MOVRELD_B32_e32);
865     unsigned VecReg = MI.getOperand(0).getReg();
866     bool IsUndef = MI.getOperand(1).isUndef();
867     unsigned SubReg = AMDGPU::sub0 + MI.getOperand(3).getImm();
868     assert(VecReg == MI.getOperand(1).getReg());
869 
870     MachineInstr *MovRel =
871         BuildMI(MBB, MI, DL, MovRelDesc)
872             .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
873             .add(MI.getOperand(2))
874             .addReg(VecReg, RegState::ImplicitDefine)
875             .addReg(VecReg,
876                     RegState::Implicit | (IsUndef ? RegState::Undef : 0));
877 
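    // Tie the implicit def of the vector register to its implicit use so the
    // movrel is treated as a read-modify-write of the whole register.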
878     const int ImpDefIdx =
879         MovRelDesc.getNumOperands() + MovRelDesc.getNumImplicitUses();
880     const int ImpUseIdx = ImpDefIdx + 1;
881     MovRel->tieOperands(ImpDefIdx, ImpUseIdx);
882 
883     MI.eraseFromParent();
884     break;
885   }
886   case AMDGPU::SI_PC_ADD_REL_OFFSET: {
887     MachineFunction &MF = *MBB.getParent();
888     unsigned Reg = MI.getOperand(0).getReg();
889     unsigned RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
890     unsigned RegHi = RI.getSubReg(Reg, AMDGPU::sub1);
891 
892     // Create a bundle so these instructions won't be re-ordered by the
893     // post-RA scheduler.
894     MIBundleBuilder Bundler(MBB, MI);
895     Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg));
896 
897     // Add 32-bit offset from this instruction to the start of the
898     // constant data.
899     Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo)
900                        .addReg(RegLo)
901                        .add(MI.getOperand(1)));
902 
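    // The high half adds in the carry (SCC) produced by the s_add_u32 above.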
903     MachineInstrBuilder MIB = BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi)
904                                   .addReg(RegHi);
905     if (MI.getOperand(2).getTargetFlags() == SIInstrInfo::MO_NONE)
906       MIB.addImm(0);
907     else
908       MIB.add(MI.getOperand(2));
909 
910     Bundler.append(MIB);
911     llvm::finalizeBundle(MBB, Bundler.begin());
912 
913     MI.eraseFromParent();
914     break;
915   }
916   }
917   return true;
918 }
919 
920 bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI,
921                                       MachineOperand &Src0,
922                                       unsigned Src0OpName,
923                                       MachineOperand &Src1,
924                                       unsigned Src1OpName) const {
925   MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName);
926   if (!Src0Mods)
927     return false;
928 
929   MachineOperand *Src1Mods = getNamedOperand(MI, Src1OpName);
930   assert(Src1Mods &&
931          "All commutable instructions have both src0 and src1 modifiers");
932 
933   int Src0ModsVal = Src0Mods->getImm();
934   int Src1ModsVal = Src1Mods->getImm();
935 
936   Src1Mods->setImm(Src0ModsVal);
937   Src0Mods->setImm(Src1ModsVal);
938   return true;
939 }
940 
941 static MachineInstr *swapRegAndNonRegOperand(MachineInstr &MI,
942                                              MachineOperand &RegOp,
943                                              MachineOperand &NonRegOp) {
944   unsigned Reg = RegOp.getReg();
945   unsigned SubReg = RegOp.getSubReg();
946   bool IsKill = RegOp.isKill();
947   bool IsDead = RegOp.isDead();
948   bool IsUndef = RegOp.isUndef();
949   bool IsDebug = RegOp.isDebug();
950 
951   if (NonRegOp.isImm())
952     RegOp.ChangeToImmediate(NonRegOp.getImm());
953   else if (NonRegOp.isFI())
954     RegOp.ChangeToFrameIndex(NonRegOp.getIndex());
955   else
956     return nullptr;
957 
958   NonRegOp.ChangeToRegister(Reg, false, false, IsKill, IsDead, IsUndef, IsDebug);
959   NonRegOp.setSubReg(SubReg);
960 
961   return &MI;
962 }
963 
964 MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
965                                                   unsigned Src0Idx,
966                                                   unsigned Src1Idx) const {
967   assert(!NewMI && "this should never be used");
968 
969   unsigned Opc = MI.getOpcode();
970   int CommutedOpcode = commuteOpcode(Opc);
971   if (CommutedOpcode == -1)
972     return nullptr;
973 
974   assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) ==
975            static_cast<int>(Src0Idx) &&
976          AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) ==
977            static_cast<int>(Src1Idx) &&
978          "inconsistency with findCommutedOpIndices");
979 
980   MachineOperand &Src0 = MI.getOperand(Src0Idx);
981   MachineOperand &Src1 = MI.getOperand(Src1Idx);
982 
983   MachineInstr *CommutedMI = nullptr;
984   if (Src0.isReg() && Src1.isReg()) {
985     if (isOperandLegal(MI, Src1Idx, &Src0)) {
986       // Be sure to copy the source modifiers to the right place.
987       CommutedMI
988         = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, Src0Idx, Src1Idx);
989     }
990 
991   } else if (Src0.isReg() && !Src1.isReg()) {
992     // src0 should always be able to support any operand type, so no need to
993     // check operand legality.
994     CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1);
995   } else if (!Src0.isReg() && Src1.isReg()) {
996     if (isOperandLegal(MI, Src1Idx, &Src0))
997       CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0);
998   } else {
    // FIXME: Found two non-register operands to commute. This does happen.
1000     return nullptr;
1001   }
1002 
1004   if (CommutedMI) {
1005     swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers,
1006                         Src1, AMDGPU::OpName::src1_modifiers);
1007 
1008     CommutedMI->setDesc(get(CommutedOpcode));
1009   }
1010 
1011   return CommutedMI;
1012 }
1013 
1014 // This needs to be implemented because the source modifiers may be inserted
1015 // between the true commutable operands, and the base
1016 // TargetInstrInfo::commuteInstruction uses it.
1017 bool SIInstrInfo::findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx0,
1018                                         unsigned &SrcOpIdx1) const {
1019   if (!MI.isCommutable())
1020     return false;
1021 
1022   unsigned Opc = MI.getOpcode();
1023   int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
1024   if (Src0Idx == -1)
1025     return false;
1026 
1027   int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
1028   if (Src1Idx == -1)
1029     return false;
1030 
1031   return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx);
1032 }
1033 
1034 bool SIInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
1035                                         int64_t BrOffset) const {
1036   // BranchRelaxation should never have to check s_setpc_b64 because its dest
1037   // block is unanalyzable.
1038   assert(BranchOp != AMDGPU::S_SETPC_B64);
1039 
1040   // Convert to dwords.
1041   BrOffset /= 4;
1042 
1043   // The branch instructions do PC += signext(SIMM16 * 4) + 4, so the offset is
1044   // from the next instruction.
1045   BrOffset -= 1;
1046 
1047   return isIntN(BranchOffsetBits, BrOffset);
1048 }
1049 
1050 MachineBasicBlock *SIInstrInfo::getBranchDestBlock(
1051   const MachineInstr &MI) const {
1052   if (MI.getOpcode() == AMDGPU::S_SETPC_B64) {
1053     // This would be a difficult analysis to perform, but can always be legal so
1054     // there's no need to analyze it.
1055     return nullptr;
1056   }
1057 
1058   return MI.getOperand(0).getMBB();
1059 }
1060 
1061 unsigned SIInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
1062                                            MachineBasicBlock &DestBB,
1063                                            const DebugLoc &DL,
1064                                            int64_t BrOffset,
1065                                            RegScavenger *RS) const {
1066   assert(RS && "RegScavenger required for long branching");
1067   assert(MBB.empty() &&
1068          "new block should be inserted for expanding unconditional branch");
1069   assert(MBB.pred_size() == 1);
1070 
1071   MachineFunction *MF = MBB.getParent();
1072   MachineRegisterInfo &MRI = MF->getRegInfo();
1073 
1074   // FIXME: Virtual register workaround for RegScavenger not working with empty
1075   // blocks.
1076   unsigned PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
1077 
1078   auto I = MBB.end();
1079 
  // We need to compute the offset relative to the instruction immediately
  // after s_getpc_b64. Insert the PC arithmetic code before the last
  // terminator.
1082   MachineInstr *GetPC = BuildMI(MBB, I, DL, get(AMDGPU::S_GETPC_B64), PCReg);
1083 
1084   // TODO: Handle > 32-bit block address.
1085   if (BrOffset >= 0) {
1086     BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_U32))
1087       .addReg(PCReg, RegState::Define, AMDGPU::sub0)
1088       .addReg(PCReg, 0, AMDGPU::sub0)
1089       .addMBB(&DestBB, AMDGPU::TF_LONG_BRANCH_FORWARD);
1090     BuildMI(MBB, I, DL, get(AMDGPU::S_ADDC_U32))
1091       .addReg(PCReg, RegState::Define, AMDGPU::sub1)
1092       .addReg(PCReg, 0, AMDGPU::sub1)
1093       .addImm(0);
1094   } else {
1095     // Backwards branch.
1096     BuildMI(MBB, I, DL, get(AMDGPU::S_SUB_U32))
1097       .addReg(PCReg, RegState::Define, AMDGPU::sub0)
1098       .addReg(PCReg, 0, AMDGPU::sub0)
1099       .addMBB(&DestBB, AMDGPU::TF_LONG_BRANCH_BACKWARD);
1100     BuildMI(MBB, I, DL, get(AMDGPU::S_SUBB_U32))
1101       .addReg(PCReg, RegState::Define, AMDGPU::sub1)
1102       .addReg(PCReg, 0, AMDGPU::sub1)
1103       .addImm(0);
1104   }
1105 
1106   // Insert the indirect branch after the other terminator.
1107   BuildMI(&MBB, DL, get(AMDGPU::S_SETPC_B64))
1108     .addReg(PCReg);
1109 
1110   // FIXME: If spilling is necessary, this will fail because this scavenger has
1111   // no emergency stack slots. It is non-trivial to spill in this situation,
1112   // because the restore code needs to be specially placed after the
1113   // jump. BranchRelaxation then needs to be made aware of the newly inserted
1114   // block.
1115   //
1116   // If a spill is needed for the pc register pair, we need to insert a spill
1117   // restore block right before the destination block, and insert a short branch
1118   // into the old destination block's fallthrough predecessor.
1119   // e.g.:
1120   //
1121   // s_cbranch_scc0 skip_long_branch:
1122   //
1123   // long_branch_bb:
1124   //   spill s[8:9]
1125   //   s_getpc_b64 s[8:9]
1126   //   s_add_u32 s8, s8, restore_bb
1127   //   s_addc_u32 s9, s9, 0
1128   //   s_setpc_b64 s[8:9]
1129   //
1130   // skip_long_branch:
1131   //   foo;
1132   //
1133   // .....
1134   //
1135   // dest_bb_fallthrough_predecessor:
1136   // bar;
1137   // s_branch dest_bb
1138   //
1139   // restore_bb:
1140   //  restore s[8:9]
1141   //  fallthrough dest_bb
  //
1143   // dest_bb:
1144   //   buzz;
1145 
1146   RS->enterBasicBlockEnd(MBB);
1147   unsigned Scav = RS->scavengeRegister(&AMDGPU::SReg_64RegClass,
1148                                        MachineBasicBlock::iterator(GetPC), 0);
1149   MRI.replaceRegWith(PCReg, Scav);
1150   MRI.clearVirtRegs();
1151   RS->setRegUsed(Scav);
1152 
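  // Block size: s_getpc_b64 (4 bytes) + s_add_u32 with a 32-bit literal
  // (8 bytes) + s_addc_u32 (4 bytes) + s_setpc_b64 (4 bytes).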
1153   return 4 + 8 + 4 + 4;
1154 }
1155 
1156 unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) {
1157   switch (Cond) {
1158   case SIInstrInfo::SCC_TRUE:
1159     return AMDGPU::S_CBRANCH_SCC1;
1160   case SIInstrInfo::SCC_FALSE:
1161     return AMDGPU::S_CBRANCH_SCC0;
1162   case SIInstrInfo::VCCNZ:
1163     return AMDGPU::S_CBRANCH_VCCNZ;
1164   case SIInstrInfo::VCCZ:
1165     return AMDGPU::S_CBRANCH_VCCZ;
1166   case SIInstrInfo::EXECNZ:
1167     return AMDGPU::S_CBRANCH_EXECNZ;
1168   case SIInstrInfo::EXECZ:
1169     return AMDGPU::S_CBRANCH_EXECZ;
1170   default:
1171     llvm_unreachable("invalid branch predicate");
1172   }
1173 }
1174 
1175 SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) {
1176   switch (Opcode) {
1177   case AMDGPU::S_CBRANCH_SCC0:
1178     return SCC_FALSE;
1179   case AMDGPU::S_CBRANCH_SCC1:
1180     return SCC_TRUE;
1181   case AMDGPU::S_CBRANCH_VCCNZ:
1182     return VCCNZ;
1183   case AMDGPU::S_CBRANCH_VCCZ:
1184     return VCCZ;
1185   case AMDGPU::S_CBRANCH_EXECNZ:
1186     return EXECNZ;
1187   case AMDGPU::S_CBRANCH_EXECZ:
1188     return EXECZ;
1189   default:
1190     return INVALID_BR;
1191   }
1192 }
1193 
1194 bool SIInstrInfo::analyzeBranchImpl(MachineBasicBlock &MBB,
1195                                     MachineBasicBlock::iterator I,
1196                                     MachineBasicBlock *&TBB,
1197                                     MachineBasicBlock *&FBB,
1198                                     SmallVectorImpl<MachineOperand> &Cond,
1199                                     bool AllowModify) const {
1200   if (I->getOpcode() == AMDGPU::S_BRANCH) {
1201     // Unconditional Branch
1202     TBB = I->getOperand(0).getMBB();
1203     return false;
1204   }
1205 
1206   BranchPredicate Pred = getBranchPredicate(I->getOpcode());
1207   if (Pred == INVALID_BR)
1208     return true;
1209 
1210   MachineBasicBlock *CondBB = I->getOperand(0).getMBB();
1211   Cond.push_back(MachineOperand::CreateImm(Pred));
1212   Cond.push_back(I->getOperand(1)); // Save the branch register.
1213 
1214   ++I;
1215 
1216   if (I == MBB.end()) {
1217     // Conditional branch followed by fall-through.
1218     TBB = CondBB;
1219     return false;
1220   }
1221 
1222   if (I->getOpcode() == AMDGPU::S_BRANCH) {
1223     TBB = CondBB;
1224     FBB = I->getOperand(0).getMBB();
1225     return false;
1226   }
1227 
1228   return true;
1229 }
1230 
1231 bool SIInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
1232                                 MachineBasicBlock *&FBB,
1233                                 SmallVectorImpl<MachineOperand> &Cond,
1234                                 bool AllowModify) const {
1235   MachineBasicBlock::iterator I = MBB.getFirstTerminator();
1236   if (I == MBB.end())
1237     return false;
1238 
1239   if (I->getOpcode() != AMDGPU::SI_MASK_BRANCH)
1240     return analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify);
1241 
1242   ++I;
1243 
1244   // TODO: Should be able to treat as fallthrough?
1245   if (I == MBB.end())
1246     return true;
1247 
1248   if (analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify))
1249     return true;
1250 
1251   MachineBasicBlock *MaskBrDest = I->getOperand(0).getMBB();
1252 
1253   // Specifically handle the case where the conditional branch is to the same
1254   // destination as the mask branch. e.g.
1255   //
1256   // si_mask_branch BB8
1257   // s_cbranch_execz BB8
1258   // s_cbranch BB9
1259   //
1260   // This is required to understand divergent loops which may need the branches
1261   // to be relaxed.
1262   if (TBB != MaskBrDest || Cond.empty())
1263     return true;
1264 
1265   auto Pred = Cond[0].getImm();
1266   return (Pred != EXECZ && Pred != EXECNZ);
1267 }
1268 
1269 unsigned SIInstrInfo::removeBranch(MachineBasicBlock &MBB,
1270                                    int *BytesRemoved) const {
1271   MachineBasicBlock::iterator I = MBB.getFirstTerminator();
1272 
1273   unsigned Count = 0;
1274   unsigned RemovedSize = 0;
1275   while (I != MBB.end()) {
1276     MachineBasicBlock::iterator Next = std::next(I);
1277     if (I->getOpcode() == AMDGPU::SI_MASK_BRANCH) {
1278       I = Next;
1279       continue;
1280     }
1281 
1282     RemovedSize += getInstSizeInBytes(*I);
1283     I->eraseFromParent();
1284     ++Count;
1285     I = Next;
1286   }
1287 
1288   if (BytesRemoved)
1289     *BytesRemoved = RemovedSize;
1290 
1291   return Count;
1292 }
1293 
1294 unsigned SIInstrInfo::insertBranch(MachineBasicBlock &MBB,
1295                                    MachineBasicBlock *TBB,
1296                                    MachineBasicBlock *FBB,
1297                                    ArrayRef<MachineOperand> Cond,
1298                                    const DebugLoc &DL,
1299                                    int *BytesAdded) const {
1300 
1301   if (!FBB && Cond.empty()) {
1302     BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
1303       .addMBB(TBB);
1304     if (BytesAdded)
1305       *BytesAdded = 4;
1306     return 1;
1307   }
1308 
1309   assert(TBB && Cond[0].isImm());
1310 
1311   unsigned Opcode
1312     = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm()));
1313 
1314   if (!FBB) {
1316     MachineInstr *CondBr =
1317       BuildMI(&MBB, DL, get(Opcode))
1318       .addMBB(TBB);
1319 
1320     // Copy the flags onto the implicit condition register operand.
1321     MachineOperand &CondReg = CondBr->getOperand(1);
1322     CondReg.setIsUndef(Cond[1].isUndef());
1323     CondReg.setIsKill(Cond[1].isKill());
1324 
1325     if (BytesAdded)
1326       *BytesAdded = 4;
1327     return 1;
1328   }
1329 
1330   assert(TBB && FBB);
1331 
1332   MachineInstr *CondBr =
1333     BuildMI(&MBB, DL, get(Opcode))
1334     .addMBB(TBB);
1335   BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
1336     .addMBB(FBB);
1337 
1338   MachineOperand &CondReg = CondBr->getOperand(1);
1339   CondReg.setIsUndef(Cond[1].isUndef());
1340   CondReg.setIsKill(Cond[1].isKill());
1341 
  if (BytesAdded)
    *BytesAdded = 8;
1344 
1345   return 2;
1346 }
1347 
1348 bool SIInstrInfo::reverseBranchCondition(
1349   SmallVectorImpl<MachineOperand> &Cond) const {
1350   assert(Cond.size() == 2);
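  // BranchPredicate encodes inverse predicates as negated values, so negating
  // the immediate reverses the condition.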
1351   Cond[0].setImm(-Cond[0].getImm());
1352   return false;
1353 }
1354 
1355 static void removeModOperands(MachineInstr &MI) {
1356   unsigned Opc = MI.getOpcode();
1357   int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc,
1358                                               AMDGPU::OpName::src0_modifiers);
1359   int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc,
1360                                               AMDGPU::OpName::src1_modifiers);
1361   int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc,
1362                                               AMDGPU::OpName::src2_modifiers);
1363 
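  // Remove the higher operand indices first so the lower ones remain valid.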
1364   MI.RemoveOperand(Src2ModIdx);
1365   MI.RemoveOperand(Src1ModIdx);
1366   MI.RemoveOperand(Src0ModIdx);
1367 }
1368 
1369 bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
1370                                 unsigned Reg, MachineRegisterInfo *MRI) const {
1371   if (!MRI->hasOneNonDBGUse(Reg))
1372     return false;
1373 
1374   unsigned Opc = UseMI.getOpcode();
1375   if (Opc == AMDGPU::COPY) {
1376     bool isVGPRCopy = RI.isVGPR(*MRI, UseMI.getOperand(0).getReg());
1377     switch (DefMI.getOpcode()) {
1378     default:
1379       return false;
1380     case AMDGPU::S_MOV_B64:
      // TODO: We could fold 64-bit immediates, but this gets complicated
      // when there are sub-registers.
1383       return false;
1384 
1385     case AMDGPU::V_MOV_B32_e32:
1386     case AMDGPU::S_MOV_B32:
1387       break;
1388     }
1389     unsigned NewOpc = isVGPRCopy ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1390     const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0);
1391     assert(ImmOp);
1392     // FIXME: We could handle FrameIndex values here.
1393     if (!ImmOp->isImm()) {
1394       return false;
1395     }
1396     UseMI.setDesc(get(NewOpc));
1397     UseMI.getOperand(1).ChangeToImmediate(ImmOp->getImm());
1398     UseMI.addImplicitDefUseOperands(*UseMI.getParent()->getParent());
1399     return true;
1400   }
1401 
1402   if (Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64 ||
1403       Opc == AMDGPU::V_MAD_F16 || Opc == AMDGPU::V_MAC_F16_e64) {
1404     bool IsF32 = Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64;
1405 
1406     // Don't fold if we are using source modifiers. The new VOP2 instructions
1407     // don't have them.
1408     if (hasModifiersSet(UseMI, AMDGPU::OpName::src0_modifiers) ||
1409         hasModifiersSet(UseMI, AMDGPU::OpName::src1_modifiers) ||
1410         hasModifiersSet(UseMI, AMDGPU::OpName::src2_modifiers)) {
1411       return false;
1412     }
1413 
1414     const MachineOperand &ImmOp = DefMI.getOperand(1);
1415 
1416     // If this is a free constant, there's no reason to do this.
1417     // TODO: We could fold this here instead of letting SIFoldOperands do it
1418     // later.
1419     MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0);
1420 
1421     // Any src operand can be used for the legality check.
1422     if (isInlineConstant(UseMI, *Src0, ImmOp))
1423       return false;
1424 
1425     MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1);
1426     MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2);
1427 
1428     // Multiplied part is the constant: Use v_madmk_{f16, f32}.
1429     // We should only expect these to be on src0 due to canonicalizations.
1430     if (Src0->isReg() && Src0->getReg() == Reg) {
1431       if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))
1432         return false;
1433 
1434       if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg())))
1435         return false;
1436 
1437       // We need to swap operands 0 and 1 since madmk constant is at operand 1.
1438 
1439       const int64_t Imm = DefMI.getOperand(1).getImm();
1440 
1441       // FIXME: This would be a lot easier if we could return a new instruction
1442       // instead of having to modify in place.
1443 
1444       // Remove these first since they are at the end.
1445       UseMI.RemoveOperand(
1446           AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod));
1447       UseMI.RemoveOperand(
1448           AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp));
1449 
1450       unsigned Src1Reg = Src1->getReg();
1451       unsigned Src1SubReg = Src1->getSubReg();
1452       Src0->setReg(Src1Reg);
1453       Src0->setSubReg(Src1SubReg);
1454       Src0->setIsKill(Src1->isKill());
1455 
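      // The V_MAC opcodes tie src2 to vdst; drop the tie since the madmk form
      // does not have it.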
1456       if (Opc == AMDGPU::V_MAC_F32_e64 ||
1457           Opc == AMDGPU::V_MAC_F16_e64)
1458         UseMI.untieRegOperand(
1459             AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
1460 
1461       Src1->ChangeToImmediate(Imm);
1462 
1463       removeModOperands(UseMI);
1464       UseMI.setDesc(get(IsF32 ? AMDGPU::V_MADMK_F32 : AMDGPU::V_MADMK_F16));
1465 
1466       bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
1467       if (DeleteDef)
1468         DefMI.eraseFromParent();
1469 
1470       return true;
1471     }
1472 
1473     // Added part is the constant: Use v_madak_{f16, f32}.
1474     if (Src2->isReg() && Src2->getReg() == Reg) {
1475       // Not allowed to use constant bus for another operand.
1476       // We can however allow an inline immediate as src0.
1477       if (!Src0->isImm() &&
1478           (Src0->isReg() && RI.isSGPRClass(MRI->getRegClass(Src0->getReg()))))
1479         return false;
1480 
1481       if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))
1482         return false;
1483 
1484       const int64_t Imm = DefMI.getOperand(1).getImm();
1485 
1486       // FIXME: This would be a lot easier if we could return a new instruction
1487       // instead of having to modify in place.
1488 
1489       // Remove these first since they are at the end.
1490       UseMI.RemoveOperand(
1491           AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod));
1492       UseMI.RemoveOperand(
1493           AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp));
1494 
1495       if (Opc == AMDGPU::V_MAC_F32_e64 ||
1496           Opc == AMDGPU::V_MAC_F16_e64)
1497         UseMI.untieRegOperand(
1498             AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
1499 
      // ChangeToImmediate adds Src2 back to the instruction.
1501       Src2->ChangeToImmediate(Imm);
1502 
1503       // These come before src2.
1504       removeModOperands(UseMI);
1505       UseMI.setDesc(get(IsF32 ? AMDGPU::V_MADAK_F32 : AMDGPU::V_MADAK_F16));
1506 
1507       bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
1508       if (DeleteDef)
1509         DefMI.eraseFromParent();
1510 
1511       return true;
1512     }
1513   }
1514 
1515   return false;
1516 }
1517 
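// Returns true if the byte range [OffsetA, OffsetA + WidthA) does not overlap
// [OffsetB, OffsetB + WidthB). For example, two dword accesses at offsets 0 and
// 4 (both of width 4) do not overlap, while accesses at offsets 0 and 2 do.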
1518 static bool offsetsDoNotOverlap(int WidthA, int OffsetA,
1519                                 int WidthB, int OffsetB) {
1520   int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
1521   int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
1522   int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
1523   return LowOffset + LowWidth <= HighOffset;
1524 }
1525 
1526 bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr &MIa,
1527                                                MachineInstr &MIb) const {
1528   unsigned BaseReg0, BaseReg1;
1529   int64_t Offset0, Offset1;
1530 
1531   if (getMemOpBaseRegImmOfs(MIa, BaseReg0, Offset0, &RI) &&
1532       getMemOpBaseRegImmOfs(MIb, BaseReg1, Offset1, &RI)) {
1533 
1534     if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) {
1535       // FIXME: Handle ds_read2 / ds_write2.
1536       return false;
1537     }
1538     unsigned Width0 = (*MIa.memoperands_begin())->getSize();
1539     unsigned Width1 = (*MIb.memoperands_begin())->getSize();
1540     if (BaseReg0 == BaseReg1 &&
1541         offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1)) {
1542       return true;
1543     }
1544   }
1545 
1546   return false;
1547 }
1548 
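// Memory accesses are trivially disjoint if alias analysis can prove they do
// not alias, if they access non-overlapping offsets from the same base
// register, or if they target memory that cannot overlap (e.g. DS/LDS accesses
// never alias non-FLAT global accesses).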
1549 bool SIInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr &MIa,
1550                                                   MachineInstr &MIb,
1551                                                   AliasAnalysis *AA) const {
1552   assert((MIa.mayLoad() || MIa.mayStore()) &&
1553          "MIa must load from or modify a memory location");
1554   assert((MIb.mayLoad() || MIb.mayStore()) &&
1555          "MIb must load from or modify a memory location");
1556 
1557   if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects())
1558     return false;
1559 
1560   // XXX - Can we relax this between address spaces?
1561   if (MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
1562     return false;
1563 
1564   if (AA && MIa.hasOneMemOperand() && MIb.hasOneMemOperand()) {
1565     const MachineMemOperand *MMOa = *MIa.memoperands_begin();
1566     const MachineMemOperand *MMOb = *MIb.memoperands_begin();
1567     if (MMOa->getValue() && MMOb->getValue()) {
1568       MemoryLocation LocA(MMOa->getValue(), MMOa->getSize(), MMOa->getAAInfo());
1569       MemoryLocation LocB(MMOb->getValue(), MMOb->getSize(), MMOb->getAAInfo());
1570       if (!AA->alias(LocA, LocB))
1571         return true;
1572     }
1573   }
1574 
1575   // TODO: Should we check the address space from the MachineMemOperand? That
1576   // would allow us to distinguish objects we know don't alias based on the
1577   // underlying address space, even if it was lowered to a different one,
1578   // e.g. private accesses lowered to use MUBUF instructions on a scratch
1579   // buffer.
1580   if (isDS(MIa)) {
1581     if (isDS(MIb))
1582       return checkInstOffsetsDoNotOverlap(MIa, MIb);
1583 
1584     return !isFLAT(MIb);
1585   }
1586 
1587   if (isMUBUF(MIa) || isMTBUF(MIa)) {
1588     if (isMUBUF(MIb) || isMTBUF(MIb))
1589       return checkInstOffsetsDoNotOverlap(MIa, MIb);
1590 
1591     return !isFLAT(MIb) && !isSMRD(MIb);
1592   }
1593 
1594   if (isSMRD(MIa)) {
1595     if (isSMRD(MIb))
1596       return checkInstOffsetsDoNotOverlap(MIa, MIb);
1597 
1598     return !isFLAT(MIb) && !isMUBUF(MIb) && !isMTBUF(MIb);
1599   }
1600 
1601   if (isFLAT(MIa)) {
1602     if (isFLAT(MIb))
1603       return checkInstOffsetsDoNotOverlap(MIa, MIb);
1604 
1605     return false;
1606   }
1607 
1608   return false;
1609 }
1610 
1611 MachineInstr *SIInstrInfo::convertToThreeAddress(MachineFunction::iterator &MBB,
1612                                                  MachineInstr &MI,
1613                                                  LiveVariables *LV) const {
1614   bool IsF16 = false;
1615 
1616   switch (MI.getOpcode()) {
1617   default:
1618     return nullptr;
1619   case AMDGPU::V_MAC_F16_e64:
1620     IsF16 = true; // fall through to the F32 case
1621   case AMDGPU::V_MAC_F32_e64:
1622     break;
1623   case AMDGPU::V_MAC_F16_e32:
1624     IsF16 = true; // fall through to the F32 case
1625   case AMDGPU::V_MAC_F32_e32: {
1626     int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
1627                                              AMDGPU::OpName::src0);
1628     const MachineOperand *Src0 = &MI.getOperand(Src0Idx);
1629     if (Src0->isImm() && !isInlineConstant(MI, Src0Idx, *Src0))
1630       return nullptr;
1631     break;
1632   }
1633   }
1634 
1635   const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
1636   const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0);
1637   const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
1638   const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
1639 
1640   return BuildMI(*MBB, MI, MI.getDebugLoc(),
1641                  get(IsF16 ? AMDGPU::V_MAD_F16 : AMDGPU::V_MAD_F32))
1642       .add(*Dst)
1643       .addImm(0) // Src0 mods
1644       .add(*Src0)
1645       .addImm(0) // Src1 mods
1646       .add(*Src1)
1647       .addImm(0) // Src2 mods
1648       .add(*Src2)
1649       .addImm(0)  // clamp
1650       .addImm(0); // omod
1651 }
1652 
1653 // It's not generally safe to move VALU instructions across these since it will
1654 // start using the register as a base index rather than directly.
1655 // XXX - Why isn't hasSideEffects sufficient for these?
1656 static bool changesVGPRIndexingMode(const MachineInstr &MI) {
1657   switch (MI.getOpcode()) {
1658   case AMDGPU::S_SET_GPR_IDX_ON:
1659   case AMDGPU::S_SET_GPR_IDX_MODE:
1660   case AMDGPU::S_SET_GPR_IDX_OFF:
1661     return true;
1662   default:
1663     return false;
1664   }
1665 }
1666 
1667 bool SIInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
1668                                        const MachineBasicBlock *MBB,
1669                                        const MachineFunction &MF) const {
1670   // XXX - Do we want the SP check in the base implementation?
1671 
1672   // Target-independent instructions do not have an implicit-use of EXEC, even
1673   // when they operate on VGPRs. Treating EXEC modifications as scheduling
1674   // boundaries prevents incorrect movements of such instructions.
1675   return TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF) ||
1676          MI.modifiesRegister(AMDGPU::EXEC, &RI) ||
1677          MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 ||
1678          MI.getOpcode() == AMDGPU::S_SETREG_B32 ||
1679          changesVGPRIndexingMode(MI);
1680 }
1681 
1682 bool SIInstrInfo::isInlineConstant(const APInt &Imm) const {
1683   switch (Imm.getBitWidth()) {
1684   case 32:
1685     return AMDGPU::isInlinableLiteral32(Imm.getSExtValue(),
1686                                         ST.hasInv2PiInlineImm());
1687   case 64:
1688     return AMDGPU::isInlinableLiteral64(Imm.getSExtValue(),
1689                                         ST.hasInv2PiInlineImm());
1690   case 16:
1691     return AMDGPU::isInlinableLiteral16(Imm.getSExtValue(),
1692                                         ST.hasInv2PiInlineImm());
1693   default:
1694     llvm_unreachable("invalid bitwidth");
1695   }
1696 }
1697 
1698 bool SIInstrInfo::isInlineConstant(const MachineOperand &MO,
1699                                    uint8_t OperandType) const {
1700   if (!MO.isImm() || OperandType < MCOI::OPERAND_FIRST_TARGET)
1701     return false;
1702 
1703   // MachineOperand provides no way to tell the true operand size, since it only
1704   // records a 64-bit value. We need to know the size to determine if a 32-bit
1705   // floating point immediate bit pattern is legal for an integer immediate. It
1706   // would be for any 32-bit integer operand, but would not be for a 64-bit one.
1707 
1708   int64_t Imm = MO.getImm();
1709   switch (operandBitWidth(OperandType)) {
1710   case 32: {
1711     int32_t Trunc = static_cast<int32_t>(Imm);
1712     return Trunc == Imm &&
1713            AMDGPU::isInlinableLiteral32(Trunc, ST.hasInv2PiInlineImm());
1714   }
1715   case 64: {
1716     return AMDGPU::isInlinableLiteral64(MO.getImm(),
1717                                         ST.hasInv2PiInlineImm());
1718   }
1719   case 16: {
1720     if (isInt<16>(Imm) || isUInt<16>(Imm)) {
1721       int16_t Trunc = static_cast<int16_t>(Imm);
1722       return AMDGPU::isInlinableLiteral16(Trunc, ST.hasInv2PiInlineImm());
1723     }
1724 
1725     return false;
1726   }
1727   default:
1728     llvm_unreachable("invalid bitwidth");
1729   }
1730 }
1731 
1732 bool SIInstrInfo::isLiteralConstantLike(const MachineOperand &MO,
1733                                         const MCOperandInfo &OpInfo) const {
1734   switch (MO.getType()) {
1735   case MachineOperand::MO_Register:
1736     return false;
1737   case MachineOperand::MO_Immediate:
1738     return !isInlineConstant(MO, OpInfo);
1739   case MachineOperand::MO_FrameIndex:
1740   case MachineOperand::MO_MachineBasicBlock:
1741   case MachineOperand::MO_ExternalSymbol:
1742   case MachineOperand::MO_GlobalAddress:
1743   case MachineOperand::MO_MCSymbol:
1744     return true;
1745   default:
1746     llvm_unreachable("unexpected operand type");
1747   }
1748 }
1749 
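// Returns true if the two operands are of the same kind and refer to the same
// register or the same immediate value.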
1750 static bool compareMachineOp(const MachineOperand &Op0,
1751                              const MachineOperand &Op1) {
1752   if (Op0.getType() != Op1.getType())
1753     return false;
1754 
1755   switch (Op0.getType()) {
1756   case MachineOperand::MO_Register:
1757     return Op0.getReg() == Op1.getReg();
1758   case MachineOperand::MO_Immediate:
1759     return Op0.getImm() == Op1.getImm();
1760   default:
1761     llvm_unreachable("Didn't expect to be comparing these operand types");
1762   }
1763 }
1764 
1765 bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo,
1766                                     const MachineOperand &MO) const {
1767   const MCOperandInfo &OpInfo = get(MI.getOpcode()).OpInfo[OpNo];
1768 
1769   assert(MO.isImm() || MO.isTargetIndex() || MO.isFI());
1770 
1771   if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE)
1772     return true;
1773 
1774   if (OpInfo.RegClass < 0)
1775     return false;
1776 
1777   if (MO.isImm() && isInlineConstant(MO, OpInfo))
1778     return RI.opCanUseInlineConstant(OpInfo.OperandType);
1779 
1780   return RI.opCanUseLiteralConstant(OpInfo.OperandType);
1781 }
1782 
1783 bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const {
1784   int Op32 = AMDGPU::getVOPe32(Opcode);
1785   if (Op32 == -1)
1786     return false;
1787 
1788   return pseudoToMCOpcode(Op32) != -1;
1789 }
1790 
1791 bool SIInstrInfo::hasModifiers(unsigned Opcode) const {
1792   // The src0_modifier operand is present on all instructions
1793   // that have modifiers.
1794 
1795   return AMDGPU::getNamedOperandIdx(Opcode,
1796                                     AMDGPU::OpName::src0_modifiers) != -1;
1797 }
1798 
1799 bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI,
1800                                   unsigned OpName) const {
1801   const MachineOperand *Mods = getNamedOperand(MI, OpName);
1802   return Mods && Mods->getImm();
1803 }
1804 
1805 bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI,
1806                                   const MachineOperand &MO,
1807                                   const MCOperandInfo &OpInfo) const {
1808   // Literal constants use the constant bus.
1809   //if (isLiteralConstantLike(MO, OpInfo))
1810   // return true;
1811   if (MO.isImm())
1812     return !isInlineConstant(MO, OpInfo);
1813 
1814   if (!MO.isReg())
1815     return true; // Misc other operands like FrameIndex
1816 
1817   if (!MO.isUse())
1818     return false;
1819 
1820   if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
1821     return RI.isSGPRClass(MRI.getRegClass(MO.getReg()));
1822 
1823   // FLAT_SCR is just an SGPR pair.
1824   if (!MO.isImplicit() && (MO.getReg() == AMDGPU::FLAT_SCR))
1825     return true;
1826 
1827   // EXEC register uses the constant bus.
1828   if (!MO.isImplicit() && MO.getReg() == AMDGPU::EXEC)
1829     return true;
1830 
1831   // SGPRs use the constant bus
1832   return (MO.getReg() == AMDGPU::VCC || MO.getReg() == AMDGPU::M0 ||
1833           (!MO.isImplicit() &&
1834            (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) ||
1835             AMDGPU::SGPR_64RegClass.contains(MO.getReg()))));
1836 }
1837 
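// Returns the special SGPR (VCC, M0 or FLAT_SCR) implicitly read by \p MI, or
// AMDGPU::NoRegister if none of them is read.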
1838 static unsigned findImplicitSGPRRead(const MachineInstr &MI) {
1839   for (const MachineOperand &MO : MI.implicit_operands()) {
1840     // We only care about reads.
1841     if (MO.isDef())
1842       continue;
1843 
1844     switch (MO.getReg()) {
1845     case AMDGPU::VCC:
1846     case AMDGPU::M0:
1847     case AMDGPU::FLAT_SCR:
1848       return MO.getReg();
1849 
1850     default:
1851       break;
1852     }
1853   }
1854 
1855   return AMDGPU::NoRegister;
1856 }
1857 
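// Returns true if \p MI is expected to carry an implicit use of EXEC. All
// instructions are expected to read it except the readlane/writelane family,
// generic opcodes, SALU instructions and SMRD instructions.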
1858 static bool shouldReadExec(const MachineInstr &MI) {
1859   if (SIInstrInfo::isVALU(MI)) {
1860     switch (MI.getOpcode()) {
1861     case AMDGPU::V_READLANE_B32:
1862     case AMDGPU::V_READLANE_B32_si:
1863     case AMDGPU::V_READLANE_B32_vi:
1864     case AMDGPU::V_WRITELANE_B32:
1865     case AMDGPU::V_WRITELANE_B32_si:
1866     case AMDGPU::V_WRITELANE_B32_vi:
1867       return false;
1868     }
1869 
1870     return true;
1871   }
1872 
1873   if (SIInstrInfo::isGenericOpcode(MI.getOpcode()) ||
1874       SIInstrInfo::isSALU(MI) ||
1875       SIInstrInfo::isSMRD(MI))
1876     return false;
1877 
1878   return true;
1879 }
1880 
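// Returns true if \p SubReg refers to a sub-register of the register in
// \p SuperVec, either as a physical sub-register or via a subregister index on
// the same virtual register.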
1881 static bool isSubRegOf(const SIRegisterInfo &TRI,
1882                        const MachineOperand &SuperVec,
1883                        const MachineOperand &SubReg) {
1884   if (TargetRegisterInfo::isPhysicalRegister(SubReg.getReg()))
1885     return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg());
1886 
1887   return SubReg.getSubReg() != AMDGPU::NoSubRegister &&
1888          SubReg.getReg() == SuperVec.getReg();
1889 }
1890 
1891 bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
1892                                     StringRef &ErrInfo) const {
1893   uint16_t Opcode = MI.getOpcode();
1894   const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
1895   int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
1896   int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
1897   int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
1898 
1899   // Make sure the number of operands is correct.
1900   const MCInstrDesc &Desc = get(Opcode);
1901   if (!Desc.isVariadic() &&
1902       Desc.getNumOperands() != MI.getNumExplicitOperands()) {
1903     ErrInfo = "Instruction has wrong number of operands.";
1904     return false;
1905   }
1906 
1907   if (MI.isInlineAsm()) {
1908     // Verify register classes for inlineasm constraints.
1909     for (unsigned I = InlineAsm::MIOp_FirstOperand, E = MI.getNumOperands();
1910          I != E; ++I) {
1911       const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI);
1912       if (!RC)
1913         continue;
1914 
1915       const MachineOperand &Op = MI.getOperand(I);
1916       if (!Op.isReg())
1917         continue;
1918 
1919       unsigned Reg = Op.getReg();
1920       if (!TargetRegisterInfo::isVirtualRegister(Reg) && !RC->contains(Reg)) {
1921         ErrInfo = "inlineasm operand has incorrect register class.";
1922         return false;
1923       }
1924     }
1925 
1926     return true;
1927   }
1928 
1929   // Make sure the register classes are correct.
1930   for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) {
1931     if (MI.getOperand(i).isFPImm()) {
1932       ErrInfo = "FPImm Machine Operands are not supported. ISel should bitcast "
1933                 "all fp values to integers.";
1934       return false;
1935     }
1936 
1937     int RegClass = Desc.OpInfo[i].RegClass;
1938 
1939     switch (Desc.OpInfo[i].OperandType) {
1940     case MCOI::OPERAND_REGISTER:
1941       if (MI.getOperand(i).isImm()) {
1942         ErrInfo = "Illegal immediate value for operand.";
1943         return false;
1944       }
1945       break;
1946     case AMDGPU::OPERAND_REG_IMM_INT32:
1947     case AMDGPU::OPERAND_REG_IMM_FP32:
1948       break;
1949     case AMDGPU::OPERAND_REG_INLINE_C_INT32:
1950     case AMDGPU::OPERAND_REG_INLINE_C_FP32:
1951     case AMDGPU::OPERAND_REG_INLINE_C_INT64:
1952     case AMDGPU::OPERAND_REG_INLINE_C_FP64:
1953     case AMDGPU::OPERAND_REG_INLINE_C_INT16:
1954     case AMDGPU::OPERAND_REG_INLINE_C_FP16: {
1955       const MachineOperand &MO = MI.getOperand(i);
1956       if (!MO.isReg() && (!MO.isImm() || !isInlineConstant(MI, i))) {
1957         ErrInfo = "Illegal immediate value for operand.";
1958         return false;
1959       }
1960       break;
1961     }
1962     case MCOI::OPERAND_IMMEDIATE:
1963     case AMDGPU::OPERAND_KIMM32:
1964       // Check if this operand is an immediate.
1965       // FrameIndex operands will be replaced by immediates, so they are
1966       // allowed.
1967       if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) {
1968         ErrInfo = "Expected immediate, but got non-immediate";
1969         return false;
1970       }
1971       LLVM_FALLTHROUGH;
1972     default:
1973       continue;
1974     }
1975 
1976     if (!MI.getOperand(i).isReg())
1977       continue;
1978 
1979     if (RegClass != -1) {
1980       unsigned Reg = MI.getOperand(i).getReg();
1981       if (Reg == AMDGPU::NoRegister ||
1982           TargetRegisterInfo::isVirtualRegister(Reg))
1983         continue;
1984 
1985       const TargetRegisterClass *RC = RI.getRegClass(RegClass);
1986       if (!RC->contains(Reg)) {
1987         ErrInfo = "Operand has incorrect register class.";
1988         return false;
1989       }
1990     }
1991   }
1992 
1993   // Verify VOP*
1994   if (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI)) {
1995     // Only look at the true operands. Only a real operand can use the constant
1996     // bus, and we don't want to check pseudo-operands like the source modifier
1997     // flags.
1998     const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };
1999 
2000     unsigned ConstantBusCount = 0;
2001 
2002     if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1)
2003       ++ConstantBusCount;
2004 
2005     unsigned SGPRUsed = findImplicitSGPRRead(MI);
2006     if (SGPRUsed != AMDGPU::NoRegister)
2007       ++ConstantBusCount;
2008 
2009     for (int OpIdx : OpIndices) {
2010       if (OpIdx == -1)
2011         break;
2012       const MachineOperand &MO = MI.getOperand(OpIdx);
2013       if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) {
2014         if (MO.isReg()) {
2015           if (MO.getReg() != SGPRUsed)
2016             ++ConstantBusCount;
2017           SGPRUsed = MO.getReg();
2018         } else {
2019           ++ConstantBusCount;
2020         }
2021       }
2022     }
2023     if (ConstantBusCount > 1) {
2024       ErrInfo = "VOP* instruction uses the constant bus more than once";
2025       return false;
2026     }
2027   }
2028 
2029   // Verify misc. restrictions on specific instructions.
2030   if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 ||
2031       Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) {
2032     const MachineOperand &Src0 = MI.getOperand(Src0Idx);
2033     const MachineOperand &Src1 = MI.getOperand(Src1Idx);
2034     const MachineOperand &Src2 = MI.getOperand(Src2Idx);
2035     if (Src0.isReg() && Src1.isReg() && Src2.isReg()) {
2036       if (!compareMachineOp(Src0, Src1) &&
2037           !compareMachineOp(Src0, Src2)) {
2038         ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2";
2039         return false;
2040       }
2041     }
2042   }
2043 
2044   if (isSOPK(MI)) {
2045     int64_t Imm = getNamedOperand(MI, AMDGPU::OpName::simm16)->getImm();
2046     if (sopkIsZext(MI)) {
2047       if (!isUInt<16>(Imm)) {
2048         ErrInfo = "invalid immediate for SOPK instruction";
2049         return false;
2050       }
2051     } else {
2052       if (!isInt<16>(Imm)) {
2053         ErrInfo = "invalid immediate for SOPK instruction";
2054         return false;
2055       }
2056     }
2057   }
2058 
2059   if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 ||
2060       Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 ||
2061       Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
2062       Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) {
2063     const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
2064                        Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64;
2065 
2066     const unsigned StaticNumOps = Desc.getNumOperands() +
2067       Desc.getNumImplicitUses();
2068     const unsigned NumImplicitOps = IsDst ? 2 : 1;
2069 
2070     // Allow additional implicit operands. This allows a fixup done by the post
2071     // RA scheduler where the main implicit operand is killed and implicit-defs
2072     // are added for sub-registers that remain live after this instruction.
2073     if (MI.getNumOperands() < StaticNumOps + NumImplicitOps) {
2074       ErrInfo = "missing implicit register operands";
2075       return false;
2076     }
2077 
2078     const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
2079     if (IsDst) {
2080       if (!Dst->isUse()) {
2081         ErrInfo = "v_movreld_b32 vdst should be a use operand";
2082         return false;
2083       }
2084 
2085       unsigned UseOpIdx;
2086       if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) ||
2087           UseOpIdx != StaticNumOps + 1) {
2088         ErrInfo = "movrel implicit operands should be tied";
2089         return false;
2090       }
2091     }
2092 
2093     const MachineOperand &Src0 = MI.getOperand(Src0Idx);
2094     const MachineOperand &ImpUse
2095       = MI.getOperand(StaticNumOps + NumImplicitOps - 1);
2096     if (!ImpUse.isReg() || !ImpUse.isUse() ||
2097         !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) {
2098       ErrInfo = "src0 should be subreg of implicit vector use";
2099       return false;
2100     }
2101   }
2102 
2103   // Make sure we aren't losing exec uses in the td files. This mostly requires
2104   // being careful when using let Uses to try to add other use registers.
2105   if (shouldReadExec(MI)) {
2106     if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) {
2107       ErrInfo = "VALU instruction does not implicitly read exec mask";
2108       return false;
2109     }
2110   }
2111 
2112   if (isSMRD(MI)) {
2113     if (MI.mayStore()) {
2114       // The register offset form of scalar stores may only use m0 as the
2115       // soffset register.
2116       const MachineOperand *Soff = getNamedOperand(MI, AMDGPU::OpName::soff);
2117       if (Soff && Soff->getReg() != AMDGPU::M0) {
2118         ErrInfo = "scalar stores must use m0 as offset register";
2119         return false;
2120       }
2121     }
2122   }
2123 
2124   return true;
2125 }
2126 
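// Maps a scalar opcode to the opcode that should be used when the instruction
// has to be moved off the SALU (e.g. S_AND_B32 -> V_AND_B32_e64), or returns
// AMDGPU::INSTRUCTION_LIST_END if there is no direct replacement.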
2127 unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) {
2128   switch (MI.getOpcode()) {
2129   default: return AMDGPU::INSTRUCTION_LIST_END;
2130   case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
2131   case AMDGPU::COPY: return AMDGPU::COPY;
2132   case AMDGPU::PHI: return AMDGPU::PHI;
2133   case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG;
2134   case AMDGPU::S_MOV_B32:
2135     return MI.getOperand(1).isReg() ?
2136            AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
2137   case AMDGPU::S_ADD_I32:
2138   case AMDGPU::S_ADD_U32: return AMDGPU::V_ADD_I32_e32;
2139   case AMDGPU::S_ADDC_U32: return AMDGPU::V_ADDC_U32_e32;
2140   case AMDGPU::S_SUB_I32:
2141   case AMDGPU::S_SUB_U32: return AMDGPU::V_SUB_I32_e32;
2142   case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
2143   case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_I32;
2144   case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64;
2145   case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64;
2146   case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64;
2147   case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64;
2148   case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64;
2149   case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64;
2150   case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64;
2151   case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
2152   case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64;
2153   case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
2154   case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64;
2155   case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
2156   case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64;
2157   case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32;
2158   case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32;
2159   case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32;
2160   case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32;
2161   case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64;
2162   case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32;
2163   case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32;
2164   case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32;
2165   case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32;
2166   case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32;
2167   case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32;
2168   case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32;
2169   case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32;
2170   case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32;
2171   case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e32;
2172   case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e32;
2173   case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e32;
2174   case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e32;
2175   case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e32;
2176   case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e32;
2177   case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e32;
2178   case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e32;
2179   case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64;
2180   case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32;
2181   case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32;
2182   case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64;
2183   case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ;
2184   case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ;
2185   }
2186 }
2187 
2188 bool SIInstrInfo::isSALUOpSupportedOnVALU(const MachineInstr &MI) const {
2189   return getVALUOp(MI) != AMDGPU::INSTRUCTION_LIST_END;
2190 }
2191 
2192 const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
2193                                                       unsigned OpNo) const {
2194   const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
2195   const MCInstrDesc &Desc = get(MI.getOpcode());
2196   if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
2197       Desc.OpInfo[OpNo].RegClass == -1) {
2198     unsigned Reg = MI.getOperand(OpNo).getReg();
2199 
2200     if (TargetRegisterInfo::isVirtualRegister(Reg))
2201       return MRI.getRegClass(Reg);
2202     return RI.getPhysRegClass(Reg);
2203   }
2204 
2205   unsigned RCID = Desc.OpInfo[OpNo].RegClass;
2206   return RI.getRegClass(RCID);
2207 }
2208 
2209 bool SIInstrInfo::canReadVGPR(const MachineInstr &MI, unsigned OpNo) const {
2210   switch (MI.getOpcode()) {
2211   case AMDGPU::COPY:
2212   case AMDGPU::REG_SEQUENCE:
2213   case AMDGPU::PHI:
2214   case AMDGPU::INSERT_SUBREG:
2215     return RI.hasVGPRs(getOpRegClass(MI, 0));
2216   default:
2217     return RI.hasVGPRs(getOpRegClass(MI, OpNo));
2218   }
2219 }
2220 
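// Legalize operand \p OpIdx of \p MI by moving or copying its current value
// into a newly created virtual register and rewriting the operand to use it.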
2221 void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const {
2222   MachineBasicBlock::iterator I = MI;
2223   MachineBasicBlock *MBB = MI.getParent();
2224   MachineOperand &MO = MI.getOperand(OpIdx);
2225   MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
2226   unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass;
2227   const TargetRegisterClass *RC = RI.getRegClass(RCID);
2228   unsigned Opcode = AMDGPU::V_MOV_B32_e32;
2229   if (MO.isReg())
2230     Opcode = AMDGPU::COPY;
2231   else if (RI.isSGPRClass(RC))
2232     Opcode = AMDGPU::S_MOV_B32;
2233 
2234   const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
2235   if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC))
2236     VRC = &AMDGPU::VReg_64RegClass;
2237   else
2238     VRC = &AMDGPU::VGPR_32RegClass;
2239 
2240   unsigned Reg = MRI.createVirtualRegister(VRC);
2241   DebugLoc DL = MBB->findDebugLoc(I);
2242   BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).add(MO);
2243   MO.ChangeToRegister(Reg, false);
2244 }
2245 
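// Extract sub-register \p SubIdx of \p SuperReg into a new virtual register of
// class \p SubRC by inserting COPYs before \p MI, and return the new register.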
2246 unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI,
2247                                          MachineRegisterInfo &MRI,
2248                                          MachineOperand &SuperReg,
2249                                          const TargetRegisterClass *SuperRC,
2250                                          unsigned SubIdx,
2251                                          const TargetRegisterClass *SubRC)
2252                                          const {
2253   MachineBasicBlock *MBB = MI->getParent();
2254   DebugLoc DL = MI->getDebugLoc();
2255   unsigned SubReg = MRI.createVirtualRegister(SubRC);
2256 
2257   if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) {
2258     BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
2259       .addReg(SuperReg.getReg(), 0, SubIdx);
2260     return SubReg;
2261   }
2262 
2263   // Just in case the super register is itself a sub-register, copy it to a new
2264   // value so we don't need to worry about merging its subreg index with the
2265   // SubIdx passed to this function. The register coalescer should be able to
2266   // eliminate this extra copy.
2267   unsigned NewSuperReg = MRI.createVirtualRegister(SuperRC);
2268 
2269   BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg)
2270     .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg());
2271 
2272   BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
2273     .addReg(NewSuperReg, 0, SubIdx);
2274 
2275   return SubReg;
2276 }
2277 
2278 MachineOperand SIInstrInfo::buildExtractSubRegOrImm(
2279   MachineBasicBlock::iterator MII,
2280   MachineRegisterInfo &MRI,
2281   MachineOperand &Op,
2282   const TargetRegisterClass *SuperRC,
2283   unsigned SubIdx,
2284   const TargetRegisterClass *SubRC) const {
2285   if (Op.isImm()) {
2286     if (SubIdx == AMDGPU::sub0)
2287       return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm()));
2288     if (SubIdx == AMDGPU::sub1)
2289       return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm() >> 32));
2290 
2291     llvm_unreachable("Unhandled register index for immediate");
2292   }
2293 
2294   unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC,
2295                                        SubIdx, SubRC);
2296   return MachineOperand::CreateReg(SubReg, false);
2297 }
2298 
2299 // Change the order of operands from (0, 1, 2) to (0, 2, 1)
2300 void SIInstrInfo::swapOperands(MachineInstr &Inst) const {
2301   assert(Inst.getNumExplicitOperands() == 3);
2302   MachineOperand Op1 = Inst.getOperand(1);
2303   Inst.RemoveOperand(1);
2304   Inst.addOperand(Op1);
2305 }
2306 
2307 bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI,
2308                                     const MCOperandInfo &OpInfo,
2309                                     const MachineOperand &MO) const {
2310   if (!MO.isReg())
2311     return false;
2312 
2313   unsigned Reg = MO.getReg();
2314   const TargetRegisterClass *RC =
2315     TargetRegisterInfo::isVirtualRegister(Reg) ?
2316     MRI.getRegClass(Reg) :
2317     RI.getPhysRegClass(Reg);
2318 
2319   const SIRegisterInfo *TRI =
2320       static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo());
2321   RC = TRI->getSubRegClass(RC, MO.getSubReg());
2322 
2323   // In order to be legal, the common sub-class must be equal to the
2324   // class of the current operand.  For example:
2325   //
2326   // v_mov_b32 s0 ; Operand defined as vsrc_b32
2327   //              ; RI.getCommonSubClass(s0,vsrc_b32) = sgpr ; LEGAL
2328   //
2329   // s_sendmsg 0, s0 ; Operand defined as m0reg
2330   //                 ; RI.getCommonSubClass(s0,m0reg) = m0reg ; NOT LEGAL
2331 
2332   return RI.getCommonSubClass(RC, RI.getRegClass(OpInfo.RegClass)) == RC;
2333 }
2334 
2335 bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI,
2336                                      const MCOperandInfo &OpInfo,
2337                                      const MachineOperand &MO) const {
2338   if (MO.isReg())
2339     return isLegalRegOperand(MRI, OpInfo, MO);
2340 
2341   // Handle non-register types that are treated like immediates.
2342   assert(MO.isImm() || MO.isTargetIndex() || MO.isFI());
2343   return true;
2344 }
2345 
2346 bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx,
2347                                  const MachineOperand *MO) const {
2348   const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
2349   const MCInstrDesc &InstDesc = MI.getDesc();
2350   const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx];
2351   const TargetRegisterClass *DefinedRC =
2352       OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr;
2353   if (!MO)
2354     MO = &MI.getOperand(OpIdx);
2355 
2356   if (isVALU(MI) && usesConstantBus(MRI, *MO, OpInfo)) {
2357 
2358     RegSubRegPair SGPRUsed;
2359     if (MO->isReg())
2360       SGPRUsed = RegSubRegPair(MO->getReg(), MO->getSubReg());
2361 
2362     for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
2363       if (i == OpIdx)
2364         continue;
2365       const MachineOperand &Op = MI.getOperand(i);
2366       if (Op.isReg()) {
2367         if ((Op.getReg() != SGPRUsed.Reg || Op.getSubReg() != SGPRUsed.SubReg) &&
2368             usesConstantBus(MRI, Op, InstDesc.OpInfo[i])) {
2369           return false;
2370         }
2371       } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) {
2372         return false;
2373       }
2374     }
2375   }
2376 
2377   if (MO->isReg()) {
2378     assert(DefinedRC);
2379     return isLegalRegOperand(MRI, OpInfo, *MO);
2380   }
2381 
2382   // Handle non-register types that are treated like immediates.
2383   assert(MO->isImm() || MO->isTargetIndex() || MO->isFI());
2384 
2385   if (!DefinedRC) {
2386     // This operand expects an immediate.
2387     return true;
2388   }
2389 
2390   return isImmOperandLegal(MI, OpIdx, *MO);
2391 }
2392 
2393 void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI,
2394                                        MachineInstr &MI) const {
2395   unsigned Opc = MI.getOpcode();
2396   const MCInstrDesc &InstrDesc = get(Opc);
2397 
2398   int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
2399   MachineOperand &Src1 = MI.getOperand(Src1Idx);
2400 
2401   // If there is an implicit SGPR use such as the VCC use for v_addc_u32 /
2402   // v_subb_u32, we need to only have one constant bus use.
2403   //
2404   // Note we do not need to worry about literal constants here. They are
2405   // disabled for the operand type for instructions because they will always
2406   // violate the one constant bus use rule.
2407   bool HasImplicitSGPR = findImplicitSGPRRead(MI) != AMDGPU::NoRegister;
2408   if (HasImplicitSGPR) {
2409     int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
2410     MachineOperand &Src0 = MI.getOperand(Src0Idx);
2411 
2412     if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg()))
2413       legalizeOpWithMove(MI, Src0Idx);
2414   }
2415 
2416   // VOP2 instructions accept all operand types for src0, so we don't need to
2417   // check its legality. If src1 is already legal, we don't need to do anything.
2418   if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1))
2419     return;
2420 
2421   // We do not use commuteInstruction here because it is too aggressive and will
2422   // commute if it is possible. We only want to commute here if it improves
2423   // legality. This can be called a fairly large number of times so don't waste
2424   // compile time pointlessly swapping and checking legality again.
2425   if (HasImplicitSGPR || !MI.isCommutable()) {
2426     legalizeOpWithMove(MI, Src1Idx);
2427     return;
2428   }
2429 
2430   int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
2431   MachineOperand &Src0 = MI.getOperand(Src0Idx);
2432 
2433   // If src0 can be used as src1, commuting will make the operands legal.
2434   // Otherwise we have to give up and insert a move.
2435   //
2436   // TODO: Other immediate-like operand kinds could be commuted if there was a
2437   // MachineOperand::ChangeTo* for them.
2438   if ((!Src1.isImm() && !Src1.isReg()) ||
2439       !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) {
2440     legalizeOpWithMove(MI, Src1Idx);
2441     return;
2442   }
2443 
2444   int CommutedOpc = commuteOpcode(MI);
2445   if (CommutedOpc == -1) {
2446     legalizeOpWithMove(MI, Src1Idx);
2447     return;
2448   }
2449 
2450   MI.setDesc(get(CommutedOpc));
2451 
2452   unsigned Src0Reg = Src0.getReg();
2453   unsigned Src0SubReg = Src0.getSubReg();
2454   bool Src0Kill = Src0.isKill();
2455 
2456   if (Src1.isImm())
2457     Src0.ChangeToImmediate(Src1.getImm());
2458   else if (Src1.isReg()) {
2459     Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill());
2460     Src0.setSubReg(Src1.getSubReg());
2461   } else
2462     llvm_unreachable("Should only have register or immediate operands");
2463 
2464   Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill);
2465   Src1.setSubReg(Src0SubReg);
2466 }
2467 
2468 // Legalize VOP3 operands. Because all operand types are supported for any
2469 // operand, and since literal constants are not allowed and should never be
2470 // seen, we only need to worry about inserting copies if we use multiple SGPR
2471 // operands.
2472 void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI,
2473                                        MachineInstr &MI) const {
2474   unsigned Opc = MI.getOpcode();
2475 
2476   int VOP3Idx[3] = {
2477     AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
2478     AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1),
2479     AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)
2480   };
2481 
2482   // Find the one SGPR operand we are allowed to use.
2483   unsigned SGPRReg = findUsedSGPR(MI, VOP3Idx);
2484 
2485   for (unsigned i = 0; i < 3; ++i) {
2486     int Idx = VOP3Idx[i];
2487     if (Idx == -1)
2488       break;
2489     MachineOperand &MO = MI.getOperand(Idx);
2490 
2491     // We should never see a VOP3 instruction with an illegal immediate operand.
2492     if (!MO.isReg())
2493       continue;
2494 
2495     if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg())))
2496       continue; // VGPRs are legal
2497 
2498     if (SGPRReg == AMDGPU::NoRegister || SGPRReg == MO.getReg()) {
2499       SGPRReg = MO.getReg();
2500       // We can use one SGPR in each VOP3 instruction.
2501       continue;
2502     }
2503 
2504     // If we make it this far, then the operand is not legal and we must
2505     // legalize it.
2506     legalizeOpWithMove(MI, Idx);
2507   }
2508 }
2509 
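// Copy the VGPR \p SrcReg into a newly created SGPR of the equivalent register
// class by emitting one v_readfirstlane_b32 per 32-bit sub-register and
// combining the results with a REG_SEQUENCE. Returns the new SGPR.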
2510 unsigned SIInstrInfo::readlaneVGPRToSGPR(unsigned SrcReg, MachineInstr &UseMI,
2511                                          MachineRegisterInfo &MRI) const {
2512   const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg);
2513   const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC);
2514   unsigned DstReg = MRI.createVirtualRegister(SRC);
2515   unsigned SubRegs = VRC->getSize() / 4;
2516 
2517   SmallVector<unsigned, 8> SRegs;
2518   for (unsigned i = 0; i < SubRegs; ++i) {
2519     unsigned SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2520     BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
2521             get(AMDGPU::V_READFIRSTLANE_B32), SGPR)
2522         .addReg(SrcReg, 0, RI.getSubRegFromChannel(i));
2523     SRegs.push_back(SGPR);
2524   }
2525 
2526   MachineInstrBuilder MIB =
2527       BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
2528               get(AMDGPU::REG_SEQUENCE), DstReg);
2529   for (unsigned i = 0; i < SubRegs; ++i) {
2530     MIB.addReg(SRegs[i]);
2531     MIB.addImm(RI.getSubRegFromChannel(i));
2532   }
2533   return DstReg;
2534 }
2535 
2536 void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI,
2537                                        MachineInstr &MI) const {
2538 
2539   // If the pointer is stored in VGPRs, then we need to move it to
2540   // SGPRs using v_readfirstlane. This is safe because we only select
2541   // loads with uniform pointers to SMRD instructions, so we know the
2542   // pointer value is uniform.
2543   MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase);
2544   if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) {
2545     unsigned SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI);
2546     SBase->setReg(SGPR);
2547   }
2548 }
2549 
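// If \p Op does not already have register class \p DstRC, copy it into a new
// virtual register of \p DstRC at \p I and rewrite \p Op to use the copy. When
// the source is a move-immediate, try to fold the immediate into the copy.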
2550 void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB,
2551                                          MachineBasicBlock::iterator I,
2552                                          const TargetRegisterClass *DstRC,
2553                                          MachineOperand &Op,
2554                                          MachineRegisterInfo &MRI,
2555                                          const DebugLoc &DL) const {
2556 
2557   unsigned OpReg = Op.getReg();
2558   unsigned OpSubReg = Op.getSubReg();
2559 
2560   const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg(
2561       RI.getRegClassForReg(MRI, OpReg), OpSubReg);
2562 
2563   // Check if operand is already the correct register class.
2564   if (DstRC == OpRC)
2565     return;
2566 
2567   unsigned DstReg = MRI.createVirtualRegister(DstRC);
2568   MachineInstr *Copy =
2569       BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg).add(Op);
2570 
2571   Op.setReg(DstReg);
2572   Op.setSubReg(0);
2573 
2574   MachineInstr *Def = MRI.getVRegDef(OpReg);
2575   if (!Def)
2576     return;
2577 
2578   // Try to eliminate the copy if it is copying an immediate value.
2579   if (Def->isMoveImmediate())
2580     FoldImmediate(*Copy, *Def, OpReg, &MRI);
2581 }
2582 
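// Legalize all operands of \p MI in place: dispatch to the VOP2/VOP3/SMRD
// helpers, give REG_SEQUENCE, PHI and INSERT_SUBREG operands consistent
// register classes, and rewrite the resource operands of MIMG and buffer
// instructions (converting MUBUF to the ADDR64 form where required).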
2583 void SIInstrInfo::legalizeOperands(MachineInstr &MI) const {
2584   MachineFunction &MF = *MI.getParent()->getParent();
2585   MachineRegisterInfo &MRI = MF.getRegInfo();
2586 
2587   // Legalize VOP2
2588   if (isVOP2(MI) || isVOPC(MI)) {
2589     legalizeOperandsVOP2(MRI, MI);
2590     return;
2591   }
2592 
2593   // Legalize VOP3
2594   if (isVOP3(MI)) {
2595     legalizeOperandsVOP3(MRI, MI);
2596     return;
2597   }
2598 
2599   // Legalize SMRD
2600   if (isSMRD(MI)) {
2601     legalizeOperandsSMRD(MRI, MI);
2602     return;
2603   }
2604 
2605   // Legalize REG_SEQUENCE and PHI
2606   // The register class of the operands must be the same type as the register
2607   // class of the output.
2608   if (MI.getOpcode() == AMDGPU::PHI) {
2609     const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
2610     for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
2611       if (!MI.getOperand(i).isReg() ||
2612           !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg()))
2613         continue;
2614       const TargetRegisterClass *OpRC =
2615           MRI.getRegClass(MI.getOperand(i).getReg());
2616       if (RI.hasVGPRs(OpRC)) {
2617         VRC = OpRC;
2618       } else {
2619         SRC = OpRC;
2620       }
2621     }
2622 
2623     // If any of the operands are VGPR registers, then they all must be
2624     // VGPRs; otherwise we will create illegal VGPR->SGPR copies when
2625     // legalizing them.
2626     if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) {
2627       if (!VRC) {
2628         assert(SRC);
2629         VRC = RI.getEquivalentVGPRClass(SRC);
2630       }
2631       RC = VRC;
2632     } else {
2633       RC = SRC;
2634     }
2635 
2636     // Update all the operands so they have the same type.
2637     for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
2638       MachineOperand &Op = MI.getOperand(I);
2639       if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
2640         continue;
2641 
2642       // MI is a PHI instruction.
2643       MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB();
2644       MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator();
2645 
2646       // Avoid creating no-op copies with the same src and dst reg class.  These
2647       // confuse some of the machine passes.
2648       legalizeGenericOperand(*InsertBB, Insert, RC, Op, MRI, MI.getDebugLoc());
2649     }
2650   }
2651 
2652   // REG_SEQUENCE doesn't really require operand legalization, but if one has a
2653   // VGPR dest type and SGPR sources, insert copies so all operands are
2654   // VGPRs. This seems to help operand folding / the register coalescer.
2655   if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) {
2656     MachineBasicBlock *MBB = MI.getParent();
2657     const TargetRegisterClass *DstRC = getOpRegClass(MI, 0);
2658     if (RI.hasVGPRs(DstRC)) {
2659       // Update all the operands so they are VGPR register classes. These may
2660       // not be the same register class because REG_SEQUENCE supports mixing
2661       // subregister index types e.g. sub0_sub1 + sub2 + sub3
2662       for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
2663         MachineOperand &Op = MI.getOperand(I);
2664         if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
2665           continue;
2666 
2667         const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg());
2668         const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC);
2669         if (VRC == OpRC)
2670           continue;
2671 
2672         legalizeGenericOperand(*MBB, MI, VRC, Op, MRI, MI.getDebugLoc());
2673         Op.setIsKill();
2674       }
2675     }
2676 
2677     return;
2678   }
2679 
2680   // Legalize INSERT_SUBREG
2681   // src0 must have the same register class as dst
2682   if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) {
2683     unsigned Dst = MI.getOperand(0).getReg();
2684     unsigned Src0 = MI.getOperand(1).getReg();
2685     const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
2686     const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0);
2687     if (DstRC != Src0RC) {
2688       MachineBasicBlock *MBB = MI.getParent();
2689       MachineOperand &Op = MI.getOperand(1);
2690       legalizeGenericOperand(*MBB, MI, DstRC, Op, MRI, MI.getDebugLoc());
2691     }
2692     return;
2693   }
2694 
2695   // Legalize MIMG and MUBUF/MTBUF for shaders.
2696   //
2697   // Shaders only generate MUBUF/MTBUF instructions via intrinsics or via
2698   // scratch memory access. In both cases, the legalization never involves
2699   // conversion to the addr64 form.
2700   if (isMIMG(MI) ||
2701       (AMDGPU::isShader(MF.getFunction()->getCallingConv()) &&
2702        (isMUBUF(MI) || isMTBUF(MI)))) {
2703     MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc);
2704     if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) {
2705       unsigned SGPR = readlaneVGPRToSGPR(SRsrc->getReg(), MI, MRI);
2706       SRsrc->setReg(SGPR);
2707     }
2708 
2709     MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp);
2710     if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg()))) {
2711       unsigned SGPR = readlaneVGPRToSGPR(SSamp->getReg(), MI, MRI);
2712       SSamp->setReg(SGPR);
2713     }
2714     return;
2715   }
2716 
2717   // Legalize MUBUF* instructions by converting to addr64 form.
2718   // FIXME: If we start using the non-addr64 instructions for compute, we
2719   // may need to legalize them as above. This especially applies to the
2720   // buffer_load_format_* variants and variants with idxen (or bothen).
2721   int SRsrcIdx =
2722       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
2723   if (SRsrcIdx != -1) {
2724     // We have an MUBUF instruction
2725     MachineOperand *SRsrc = &MI.getOperand(SRsrcIdx);
2726     unsigned SRsrcRC = get(MI.getOpcode()).OpInfo[SRsrcIdx].RegClass;
2727     if (RI.getCommonSubClass(MRI.getRegClass(SRsrc->getReg()),
2728                                              RI.getRegClass(SRsrcRC))) {
2729       // The operands are legal.
2730       // FIXME: We may need to legalize operands besides srsrc.
2731       return;
2732     }
2733 
2734     MachineBasicBlock &MBB = *MI.getParent();
2735 
2736     // Extract the ptr from the resource descriptor.
2737     unsigned SRsrcPtr = buildExtractSubReg(MI, MRI, *SRsrc,
2738       &AMDGPU::VReg_128RegClass, AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);
2739 
2740     // Create an empty resource descriptor
2741     unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2742     unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2743     unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2744     unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
2745     uint64_t RsrcDataFormat = getDefaultRsrcDataFormat();
2746 
2747     // Zero64 = 0
2748     BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B64), Zero64)
2749         .addImm(0);
2750 
2751     // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
2752     BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B32), SRsrcFormatLo)
2753         .addImm(RsrcDataFormat & 0xFFFFFFFF);
2754 
2755     // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
2756     BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B32), SRsrcFormatHi)
2757         .addImm(RsrcDataFormat >> 32);
2758 
2759     // NewSRsrc = {Zero64, SRsrcFormat}
2760     BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewSRsrc)
2761         .addReg(Zero64)
2762         .addImm(AMDGPU::sub0_sub1)
2763         .addReg(SRsrcFormatLo)
2764         .addImm(AMDGPU::sub2)
2765         .addReg(SRsrcFormatHi)
2766         .addImm(AMDGPU::sub3);
2767 
2768     MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
2769     unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
2770     if (VAddr) {
2771       // This is already an ADDR64 instruction so we need to add the pointer
2772       // extracted from the resource descriptor to the current value of VAddr.
2773       unsigned NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2774       unsigned NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2775 
2776       // NewVaddrLo = SRsrcPtr:sub0 + VAddr:sub0
2777       DebugLoc DL = MI.getDebugLoc();
2778       BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), NewVAddrLo)
2779         .addReg(SRsrcPtr, 0, AMDGPU::sub0)
2780         .addReg(VAddr->getReg(), 0, AMDGPU::sub0);
2781 
2782       // NewVaddrHi = SRsrcPtr:sub1 + VAddr:sub1
2783       BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e32), NewVAddrHi)
2784         .addReg(SRsrcPtr, 0, AMDGPU::sub1)
2785         .addReg(VAddr->getReg(), 0, AMDGPU::sub1);
2786 
2787       // NewVaddr = {NewVaddrHi, NewVaddrLo}
2788       BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
2789           .addReg(NewVAddrLo)
2790           .addImm(AMDGPU::sub0)
2791           .addReg(NewVAddrHi)
2792           .addImm(AMDGPU::sub1);
2793     } else {
2794       // This instruction is the _OFFSET variant, so we need to convert it to
2795       // ADDR64.
2796       assert(MBB.getParent()->getSubtarget<SISubtarget>().getGeneration()
2797              < SISubtarget::VOLCANIC_ISLANDS &&
2798              "FIXME: Need to emit flat atomics here");
2799 
2800       MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata);
2801       MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
2802       MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset);
2803       unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode());
2804 
2805       // Atomics with return have an additional tied operand and are
2806       // missing some of the special bits.
2807       MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in);
2808       MachineInstr *Addr64;
2809 
2810       if (!VDataIn) {
2811         // Regular buffer load / store.
2812         MachineInstrBuilder MIB =
2813             BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
2814                 .add(*VData)
2815                 .addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
2816                 // This will be replaced later
2817                 // with the new value of vaddr.
2818                 .add(*SRsrc)
2819                 .add(*SOffset)
2820                 .add(*Offset);
2821 
2822         // Atomics do not have this operand.
2823         if (const MachineOperand *GLC =
2824                 getNamedOperand(MI, AMDGPU::OpName::glc)) {
2825           MIB.addImm(GLC->getImm());
2826         }
2827 
2828         MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc));
2829 
2830         if (const MachineOperand *TFE =
2831                 getNamedOperand(MI, AMDGPU::OpName::tfe)) {
2832           MIB.addImm(TFE->getImm());
2833         }
2834 
2835         MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
2836         Addr64 = MIB;
2837       } else {
2838         // Atomics with return.
2839         Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
2840                      .add(*VData)
2841                      .add(*VDataIn)
2842                      .addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
2843                      // This will be replaced later
2844                      // with the new value of vaddr.
2845                      .add(*SRsrc)
2846                      .add(*SOffset)
2847                      .add(*Offset)
2848                      .addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc))
2849                      .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
2850       }
2851 
2852       MI.removeFromParent();
2853 
2854       // NewVaddr = the pointer extracted from the resource descriptor.
2855       BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
2856               NewVAddr)
2857           .addReg(SRsrcPtr, 0, AMDGPU::sub0)
2858           .addImm(AMDGPU::sub0)
2859           .addReg(SRsrcPtr, 0, AMDGPU::sub1)
2860           .addImm(AMDGPU::sub1);
2861 
2862       VAddr = getNamedOperand(*Addr64, AMDGPU::OpName::vaddr);
2863       SRsrc = getNamedOperand(*Addr64, AMDGPU::OpName::srsrc);
2864     }
2865 
2866     // Update the instruction to use NewVaddr
2867     VAddr->setReg(NewVAddr);
2868     // Update the instruction to use NewSRsrc
2869     SRsrc->setReg(NewSRsrc);
2870   }
2871 }
2872 
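// Replace the scalar instruction \p TopInst with an equivalent VALU
// instruction, then keep processing a worklist of instructions that must also
// be moved or re-legalized as a result (e.g. users of SCC definitions).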
2873 void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
2874   SmallVector<MachineInstr *, 128> Worklist;
2875   Worklist.push_back(&TopInst);
2876 
2877   while (!Worklist.empty()) {
2878     MachineInstr &Inst = *Worklist.pop_back_val();
2879     MachineBasicBlock *MBB = Inst.getParent();
2880     MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
2881 
2882     unsigned Opcode = Inst.getOpcode();
2883     unsigned NewOpcode = getVALUOp(Inst);
2884 
2885     // Handle some special cases
2886     switch (Opcode) {
2887     default:
2888       break;
2889     case AMDGPU::S_AND_B64:
2890       splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_AND_B32_e64);
2891       Inst.eraseFromParent();
2892       continue;
2893 
2894     case AMDGPU::S_OR_B64:
2895       splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_OR_B32_e64);
2896       Inst.eraseFromParent();
2897       continue;
2898 
2899     case AMDGPU::S_XOR_B64:
2900       splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_XOR_B32_e64);
2901       Inst.eraseFromParent();
2902       continue;
2903 
2904     case AMDGPU::S_NOT_B64:
2905       splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::V_NOT_B32_e32);
2906       Inst.eraseFromParent();
2907       continue;
2908 
2909     case AMDGPU::S_BCNT1_I32_B64:
2910       splitScalar64BitBCNT(Worklist, Inst);
2911       Inst.eraseFromParent();
2912       continue;
2913 
2914     case AMDGPU::S_BFE_I64: {
2915       splitScalar64BitBFE(Worklist, Inst);
2916       Inst.eraseFromParent();
2917       continue;
2918     }
2919 
2920     case AMDGPU::S_LSHL_B32:
2921       if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
2922         NewOpcode = AMDGPU::V_LSHLREV_B32_e64;
2923         swapOperands(Inst);
2924       }
2925       break;
2926     case AMDGPU::S_ASHR_I32:
2927       if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
2928         NewOpcode = AMDGPU::V_ASHRREV_I32_e64;
2929         swapOperands(Inst);
2930       }
2931       break;
2932     case AMDGPU::S_LSHR_B32:
2933       if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
2934         NewOpcode = AMDGPU::V_LSHRREV_B32_e64;
2935         swapOperands(Inst);
2936       }
2937       break;
2938     case AMDGPU::S_LSHL_B64:
2939       if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
2940         NewOpcode = AMDGPU::V_LSHLREV_B64;
2941         swapOperands(Inst);
2942       }
2943       break;
2944     case AMDGPU::S_ASHR_I64:
2945       if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
2946         NewOpcode = AMDGPU::V_ASHRREV_I64;
2947         swapOperands(Inst);
2948       }
2949       break;
2950     case AMDGPU::S_LSHR_B64:
2951       if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
2952         NewOpcode = AMDGPU::V_LSHRREV_B64;
2953         swapOperands(Inst);
2954       }
2955       break;
2956 
2957     case AMDGPU::S_ABS_I32:
2958       lowerScalarAbs(Worklist, Inst);
2959       Inst.eraseFromParent();
2960       continue;
2961 
2962     case AMDGPU::S_CBRANCH_SCC0:
2963     case AMDGPU::S_CBRANCH_SCC1:
2964       // Clear unused bits of vcc
2965       BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B64),
2966               AMDGPU::VCC)
2967           .addReg(AMDGPU::EXEC)
2968           .addReg(AMDGPU::VCC);
2969       break;
2970 
2971     case AMDGPU::S_BFE_U64:
2972     case AMDGPU::S_BFM_B64:
2973       llvm_unreachable("Moving this op to VALU not implemented");
2974     }
2975 
2976     if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
2977       // We cannot move this instruction to the VALU, so we should try to
2978       // legalize its operands instead.
2979       legalizeOperands(Inst);
2980       continue;
2981     }
2982 
2983     // Use the new VALU Opcode.
2984     const MCInstrDesc &NewDesc = get(NewOpcode);
2985     Inst.setDesc(NewDesc);
2986 
    // Remove any references to SCC. Vector instructions can't read from it, and
    // we're about to add the implicit use/defs of VCC anyway, so we don't want
    // both.
2990     for (unsigned i = Inst.getNumOperands() - 1; i > 0; --i) {
2991       MachineOperand &Op = Inst.getOperand(i);
2992       if (Op.isReg() && Op.getReg() == AMDGPU::SCC) {
2993         Inst.RemoveOperand(i);
2994         addSCCDefUsersToVALUWorklist(Inst, Worklist);
2995       }
2996     }
2997 
2998     if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
2999       // We are converting these to a BFE, so we need to add the missing
3000       // operands for the size and offset.
3001       unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
3002       Inst.addOperand(MachineOperand::CreateImm(0));
3003       Inst.addOperand(MachineOperand::CreateImm(Size));
3004 
3005     } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
3006       // The VALU version adds the second operand to the result, so insert an
3007       // extra 0 operand.
3008       Inst.addOperand(MachineOperand::CreateImm(0));
3009     }
3010 
3011     Inst.addImplicitDefUseOperands(*Inst.getParent()->getParent());
3012 
3013     if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
3014       const MachineOperand &OffsetWidthOp = Inst.getOperand(2);
3015       // If we need to move this to VGPRs, we need to unpack the second operand
3016       // back into the 2 separate ones for bit offset and width.
3017       assert(OffsetWidthOp.isImm() &&
3018              "Scalar BFE is only implemented for constant width and offset");
3019       uint32_t Imm = OffsetWidthOp.getImm();
3020 
3021       uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
3022       uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
3023       Inst.RemoveOperand(2);                     // Remove old immediate.
3024       Inst.addOperand(MachineOperand::CreateImm(Offset));
3025       Inst.addOperand(MachineOperand::CreateImm(BitWidth));
3026     }
3027 
3028     bool HasDst = Inst.getOperand(0).isReg() && Inst.getOperand(0).isDef();
3029     unsigned NewDstReg = AMDGPU::NoRegister;
3030     if (HasDst) {
3031       // Update the destination register class.
3032       const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(Inst);
3033       if (!NewDstRC)
3034         continue;
3035 
3036       unsigned DstReg = Inst.getOperand(0).getReg();
3037       if (Inst.isCopy() &&
3038           TargetRegisterInfo::isVirtualRegister(Inst.getOperand(1).getReg()) &&
3039           NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) {
        // Instead of creating a copy where src and dst are the same register
        // class, we just replace all uses of dst with src. These kinds of
        // copies interfere with the heuristics MachineSink uses to decide
        // whether or not to split a critical edge, since the pass assumes
        // that copies will end up as machine instructions and not be
        // eliminated.
3046         addUsersToMoveToVALUWorklist(DstReg, MRI, Worklist);
3047         MRI.replaceRegWith(DstReg, Inst.getOperand(1).getReg());
3048         MRI.clearKillFlags(Inst.getOperand(1).getReg());
3049         Inst.getOperand(0).setReg(DstReg);
3050         continue;
3051       }
3052 
3053       NewDstReg = MRI.createVirtualRegister(NewDstRC);
3054       MRI.replaceRegWith(DstReg, NewDstReg);
3055     }
3056 
3057     // Legalize the operands
3058     legalizeOperands(Inst);
3059 
3060     if (HasDst)
      addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
3062   }
3063 }
3064 
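// Lower S_ABS_I32 to VALU code; roughly:
//   v_sub_i32_e32 tmp, 0, src
//   v_max_i32_e64 dst, src, tmp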
3065 void SIInstrInfo::lowerScalarAbs(SmallVectorImpl<MachineInstr *> &Worklist,
3066                                  MachineInstr &Inst) const {
3067   MachineBasicBlock &MBB = *Inst.getParent();
3068   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3069   MachineBasicBlock::iterator MII = Inst;
3070   DebugLoc DL = Inst.getDebugLoc();
3071 
3072   MachineOperand &Dest = Inst.getOperand(0);
3073   MachineOperand &Src = Inst.getOperand(1);
3074   unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3075   unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3076 
3077   BuildMI(MBB, MII, DL, get(AMDGPU::V_SUB_I32_e32), TmpReg)
3078     .addImm(0)
3079     .addReg(Src.getReg());
3080 
3081   BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg)
3082     .addReg(Src.getReg())
3083     .addReg(TmpReg);
3084 
3085   MRI.replaceRegWith(Dest.getReg(), ResultReg);
3086   addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
3087 }
3088 
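// Split a 64-bit scalar unary operation into two 32-bit VALU operations, one
// per 32-bit half of the source, and recombine the results with a
// REG_SEQUENCE.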
3089 void SIInstrInfo::splitScalar64BitUnaryOp(
3090     SmallVectorImpl<MachineInstr *> &Worklist, MachineInstr &Inst,
3091     unsigned Opcode) const {
3092   MachineBasicBlock &MBB = *Inst.getParent();
3093   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3094 
3095   MachineOperand &Dest = Inst.getOperand(0);
3096   MachineOperand &Src0 = Inst.getOperand(1);
3097   DebugLoc DL = Inst.getDebugLoc();
3098 
3099   MachineBasicBlock::iterator MII = Inst;
3100 
3101   const MCInstrDesc &InstDesc = get(Opcode);
3102   const TargetRegisterClass *Src0RC = Src0.isReg() ?
3103     MRI.getRegClass(Src0.getReg()) :
3104     &AMDGPU::SGPR_32RegClass;
3105 
3106   const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
3107 
3108   MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
3109                                                        AMDGPU::sub0, Src0SubRC);
3110 
3111   const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
3112   const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
3113   const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
3114 
3115   unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
3116   BuildMI(MBB, MII, DL, InstDesc, DestSub0).add(SrcReg0Sub0);
3117 
3118   MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
3119                                                        AMDGPU::sub1, Src0SubRC);
3120 
3121   unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
3122   BuildMI(MBB, MII, DL, InstDesc, DestSub1).add(SrcReg0Sub1);
3123 
3124   unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC);
3125   BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
3126     .addReg(DestSub0)
3127     .addImm(AMDGPU::sub0)
3128     .addReg(DestSub1)
3129     .addImm(AMDGPU::sub1);
3130 
3131   MRI.replaceRegWith(Dest.getReg(), FullDestReg);
3132 
  // We don't need to call legalizeOperands here because, with only a single
  // operand, src0 will accept any kind of input.
3135 
3136   // Move all users of this moved value.
3137   addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
3138 }
3139 
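// Split a 64-bit scalar binary operation into two 32-bit VALU operations on
// the sub0 and sub1 halves of the sources, then tie the two results back
// together with a REG_SEQUENCE.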
3140 void SIInstrInfo::splitScalar64BitBinaryOp(
3141     SmallVectorImpl<MachineInstr *> &Worklist, MachineInstr &Inst,
3142     unsigned Opcode) const {
3143   MachineBasicBlock &MBB = *Inst.getParent();
3144   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3145 
3146   MachineOperand &Dest = Inst.getOperand(0);
3147   MachineOperand &Src0 = Inst.getOperand(1);
3148   MachineOperand &Src1 = Inst.getOperand(2);
3149   DebugLoc DL = Inst.getDebugLoc();
3150 
3151   MachineBasicBlock::iterator MII = Inst;
3152 
3153   const MCInstrDesc &InstDesc = get(Opcode);
3154   const TargetRegisterClass *Src0RC = Src0.isReg() ?
3155     MRI.getRegClass(Src0.getReg()) :
3156     &AMDGPU::SGPR_32RegClass;
3157 
3158   const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
3159   const TargetRegisterClass *Src1RC = Src1.isReg() ?
3160     MRI.getRegClass(Src1.getReg()) :
3161     &AMDGPU::SGPR_32RegClass;
3162 
3163   const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);
3164 
3165   MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
3166                                                        AMDGPU::sub0, Src0SubRC);
3167   MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
3168                                                        AMDGPU::sub0, Src1SubRC);
3169 
3170   const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
3171   const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
3172   const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
3173 
3174   unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
3175   MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0)
3176                               .add(SrcReg0Sub0)
3177                               .add(SrcReg1Sub0);
3178 
3179   MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
3180                                                        AMDGPU::sub1, Src0SubRC);
3181   MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
3182                                                        AMDGPU::sub1, Src1SubRC);
3183 
3184   unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
3185   MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1)
3186                               .add(SrcReg0Sub1)
3187                               .add(SrcReg1Sub1);
3188 
3189   unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC);
3190   BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
3191     .addReg(DestSub0)
3192     .addImm(AMDGPU::sub0)
3193     .addReg(DestSub1)
3194     .addImm(AMDGPU::sub1);
3195 
3196   MRI.replaceRegWith(Dest.getReg(), FullDestReg);
3197 
3198   // Try to legalize the operands in case we need to swap the order to keep it
3199   // valid.
3200   legalizeOperands(LoHalf);
3201   legalizeOperands(HiHalf);
3202 
  // Move all users of this moved value.
3204   addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
3205 }
3206 
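// Lower a 64-bit S_BCNT1 by counting each 32-bit half separately; the second
// V_BCNT uses its add operand to accumulate the count of the first half:
//   v_bcnt_u32_b32 mid, src.sub0, 0
//   v_bcnt_u32_b32 dst, src.sub1, mid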
3207 void SIInstrInfo::splitScalar64BitBCNT(
3208     SmallVectorImpl<MachineInstr *> &Worklist, MachineInstr &Inst) const {
3209   MachineBasicBlock &MBB = *Inst.getParent();
3210   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3211 
3212   MachineBasicBlock::iterator MII = Inst;
3213   DebugLoc DL = Inst.getDebugLoc();
3214 
3215   MachineOperand &Dest = Inst.getOperand(0);
3216   MachineOperand &Src = Inst.getOperand(1);
3217 
3218   const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64);
3219   const TargetRegisterClass *SrcRC = Src.isReg() ?
3220     MRI.getRegClass(Src.getReg()) :
3221     &AMDGPU::SGPR_32RegClass;
3222 
3223   unsigned MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3224   unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3225 
3226   const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0);
3227 
3228   MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
3229                                                       AMDGPU::sub0, SrcSubRC);
3230   MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
3231                                                       AMDGPU::sub1, SrcSubRC);
3232 
3233   BuildMI(MBB, MII, DL, InstDesc, MidReg).add(SrcRegSub0).addImm(0);
3234 
3235   BuildMI(MBB, MII, DL, InstDesc, ResultReg).add(SrcRegSub1).addReg(MidReg);
3236 
3237   MRI.replaceRegWith(Dest.getReg(), ResultReg);
3238 
  // We don't need to legalize operands here. src0 for either instruction can be
3240   // an SGPR, and the second input is unused or determined here.
3241   addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
3242 }
3243 
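// Lower a 64-bit sign-extending S_BFE (sext_inreg with offset 0). For widths
// below 32 the low half is produced with V_BFE_I32 and the high half is the
// replicated sign bit (ashr 31); for a width of exactly 32 the low source
// half is reused directly.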
3244 void SIInstrInfo::splitScalar64BitBFE(SmallVectorImpl<MachineInstr *> &Worklist,
3245                                       MachineInstr &Inst) const {
3246   MachineBasicBlock &MBB = *Inst.getParent();
3247   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3248   MachineBasicBlock::iterator MII = Inst;
3249   DebugLoc DL = Inst.getDebugLoc();
3250 
3251   MachineOperand &Dest = Inst.getOperand(0);
3252   uint32_t Imm = Inst.getOperand(2).getImm();
3253   uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
3254   uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
3255 
3256   (void) Offset;
3257 
3258   // Only sext_inreg cases handled.
3259   assert(Inst.getOpcode() == AMDGPU::S_BFE_I64 && BitWidth <= 32 &&
3260          Offset == 0 && "Not implemented");
3261 
3262   if (BitWidth < 32) {
3263     unsigned MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3264     unsigned MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3265     unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
3266 
3267     BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32), MidRegLo)
3268         .addReg(Inst.getOperand(1).getReg(), 0, AMDGPU::sub0)
3269         .addImm(0)
3270         .addImm(BitWidth);
3271 
3272     BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi)
3273       .addImm(31)
3274       .addReg(MidRegLo);
3275 
3276     BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
3277       .addReg(MidRegLo)
3278       .addImm(AMDGPU::sub0)
3279       .addReg(MidRegHi)
3280       .addImm(AMDGPU::sub1);
3281 
3282     MRI.replaceRegWith(Dest.getReg(), ResultReg);
3283     addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
3284     return;
3285   }
3286 
3287   MachineOperand &Src = Inst.getOperand(1);
3288   unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3289   unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
3290 
3291   BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg)
3292     .addImm(31)
3293     .addReg(Src.getReg(), 0, AMDGPU::sub0);
3294 
3295   BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
3296     .addReg(Src.getReg(), 0, AMDGPU::sub0)
3297     .addImm(AMDGPU::sub0)
3298     .addReg(TmpReg)
3299     .addImm(AMDGPU::sub1);
3300 
3301   MRI.replaceRegWith(Dest.getReg(), ResultReg);
3302   addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
3303 }
3304 
3305 void SIInstrInfo::addUsersToMoveToVALUWorklist(
3306   unsigned DstReg,
3307   MachineRegisterInfo &MRI,
3308   SmallVectorImpl<MachineInstr *> &Worklist) const {
3309   for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg),
3310          E = MRI.use_end(); I != E;) {
3311     MachineInstr &UseMI = *I->getParent();
3312     if (!canReadVGPR(UseMI, I.getOperandNo())) {
3313       Worklist.push_back(&UseMI);
3314 
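      // Skip the remaining uses inside this instruction so it is only added
      // to the worklist once.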
3315       do {
3316         ++I;
3317       } while (I != E && I->getParent() == &UseMI);
3318     } else {
3319       ++I;
3320     }
3321   }
3322 }
3323 
3324 void SIInstrInfo::addSCCDefUsersToVALUWorklist(
3325     MachineInstr &SCCDefInst, SmallVectorImpl<MachineInstr *> &Worklist) const {
3326   // This assumes that all the users of SCC are in the same block
3327   // as the SCC def.
3328   for (MachineInstr &MI :
3329        llvm::make_range(MachineBasicBlock::iterator(SCCDefInst),
3330                         SCCDefInst.getParent()->end())) {
3331     // Exit if we find another SCC def.
3332     if (MI.findRegisterDefOperandIdx(AMDGPU::SCC) != -1)
3333       return;
3334 
3335     if (MI.findRegisterUseOperandIdx(AMDGPU::SCC) != -1)
3336       Worklist.push_back(&MI);
3337   }
3338 }
3339 
3340 const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass(
3341   const MachineInstr &Inst) const {
3342   const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0);
3343 
3344   switch (Inst.getOpcode()) {
3345   // For target instructions, getOpRegClass just returns the virtual register
3346   // class associated with the operand, so we need to find an equivalent VGPR
3347   // register class in order to move the instruction to the VALU.
3348   case AMDGPU::COPY:
3349   case AMDGPU::PHI:
3350   case AMDGPU::REG_SEQUENCE:
3351   case AMDGPU::INSERT_SUBREG:
3352     if (RI.hasVGPRs(NewDstRC))
3353       return nullptr;
3354 
3355     NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
3356     if (!NewDstRC)
3357       return nullptr;
3358     return NewDstRC;
3359   default:
3360     return NewDstRC;
3361   }
3362 }
3363 
3364 // Find the one SGPR operand we are allowed to use.
3365 unsigned SIInstrInfo::findUsedSGPR(const MachineInstr &MI,
3366                                    int OpIndices[3]) const {
3367   const MCInstrDesc &Desc = MI.getDesc();
3368 
3369   // Find the one SGPR operand we are allowed to use.
3370   //
3371   // First we need to consider the instruction's operand requirements before
3372   // legalizing. Some operands are required to be SGPRs, such as implicit uses
3373   // of VCC, but we are still bound by the constant bus requirement to only use
3374   // one.
3375   //
3376   // If the operand's class is an SGPR, we can never move it.
3377 
3378   unsigned SGPRReg = findImplicitSGPRRead(MI);
3379   if (SGPRReg != AMDGPU::NoRegister)
3380     return SGPRReg;
3381 
3382   unsigned UsedSGPRs[3] = { AMDGPU::NoRegister };
3383   const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
3384 
3385   for (unsigned i = 0; i < 3; ++i) {
3386     int Idx = OpIndices[i];
3387     if (Idx == -1)
3388       break;
3389 
3390     const MachineOperand &MO = MI.getOperand(Idx);
3391     if (!MO.isReg())
3392       continue;
3393 
3394     // Is this operand statically required to be an SGPR based on the operand
3395     // constraints?
3396     const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass);
3397     bool IsRequiredSGPR = RI.isSGPRClass(OpRC);
3398     if (IsRequiredSGPR)
3399       return MO.getReg();
3400 
    // If this could be a VGPR or an SGPR, check the dynamic register class.
3402     unsigned Reg = MO.getReg();
3403     const TargetRegisterClass *RegRC = MRI.getRegClass(Reg);
3404     if (RI.isSGPRClass(RegRC))
3405       UsedSGPRs[i] = Reg;
3406   }
3407 
3408   // We don't have a required SGPR operand, so we have a bit more freedom in
3409   // selecting operands to move.
3410 
3411   // Try to select the most used SGPR. If an SGPR is equal to one of the
3412   // others, we choose that.
3413   //
3414   // e.g.
3415   // V_FMA_F32 v0, s0, s0, s0 -> No moves
3416   // V_FMA_F32 v0, s0, s1, s0 -> Move s1
3417 
3418   // TODO: If some of the operands are 64-bit SGPRs and some 32, we should
3419   // prefer those.
3420 
3421   if (UsedSGPRs[0] != AMDGPU::NoRegister) {
3422     if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2])
3423       SGPRReg = UsedSGPRs[0];
3424   }
3425 
3426   if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) {
3427     if (UsedSGPRs[1] == UsedSGPRs[2])
3428       SGPRReg = UsedSGPRs[1];
3429   }
3430 
3431   return SGPRReg;
3432 }
3433 
3434 MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI,
3435                                              unsigned OperandName) const {
3436   int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName);
3437   if (Idx == -1)
3438     return nullptr;
3439 
3440   return &MI.getOperand(Idx);
3441 }
3442 
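// Default data-format portion of a buffer resource descriptor; extra bits are
// set when targeting amdhsa (including MTYPE = 2 on VI and newer).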
3443 uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const {
3444   uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT;
3445   if (ST.isAmdHsaOS()) {
3446     RsrcDataFormat |= (1ULL << 56);
3447 
3448     if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
3449       // Set MTYPE = 2
3450       RsrcDataFormat |= (2ULL << 59);
3451   }
3452 
3453   return RsrcDataFormat;
3454 }
3455 
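// Words 2-3 of the scratch buffer resource descriptor: maximum size,
// TID_ENABLE, the per-lane element size, and an index stride of 64.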
3456 uint64_t SIInstrInfo::getScratchRsrcWords23() const {
3457   uint64_t Rsrc23 = getDefaultRsrcDataFormat() |
3458                     AMDGPU::RSRC_TID_ENABLE |
                    0xffffffff; // Size
3460 
3461   uint64_t EltSizeValue = Log2_32(ST.getMaxPrivateElementSize()) - 1;
3462 
3463   Rsrc23 |= (EltSizeValue << AMDGPU::RSRC_ELEMENT_SIZE_SHIFT) |
3464             // IndexStride = 64
3465             (UINT64_C(3) << AMDGPU::RSRC_INDEX_STRIDE_SHIFT);
3466 
3467   // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17].
3468   // Clear them unless we want a huge stride.
3469   if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
3470     Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT;
3471 
3472   return Rsrc23;
3473 }
3474 
3475 bool SIInstrInfo::isLowLatencyInstruction(const MachineInstr &MI) const {
3476   unsigned Opc = MI.getOpcode();
3477 
3478   return isSMRD(Opc);
3479 }
3480 
3481 bool SIInstrInfo::isHighLatencyInstruction(const MachineInstr &MI) const {
3482   unsigned Opc = MI.getOpcode();
3483 
3484   return isMUBUF(Opc) || isMTBUF(Opc) || isMIMG(Opc);
3485 }
3486 
3487 unsigned SIInstrInfo::isStackAccess(const MachineInstr &MI,
3488                                     int &FrameIndex) const {
3489   const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
3490   if (!Addr || !Addr->isFI())
3491     return AMDGPU::NoRegister;
3492 
3493   assert(!MI.memoperands_empty() &&
3494          (*MI.memoperands_begin())->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS);
3495 
3496   FrameIndex = Addr->getIndex();
3497   return getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg();
3498 }
3499 
3500 unsigned SIInstrInfo::isSGPRStackAccess(const MachineInstr &MI,
3501                                         int &FrameIndex) const {
3502   const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::addr);
3503   assert(Addr && Addr->isFI());
3504   FrameIndex = Addr->getIndex();
3505   return getNamedOperand(MI, AMDGPU::OpName::data)->getReg();
3506 }
3507 
3508 unsigned SIInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
3509                                           int &FrameIndex) const {
3510 
3511   if (!MI.mayLoad())
3512     return AMDGPU::NoRegister;
3513 
3514   if (isMUBUF(MI) || isVGPRSpill(MI))
3515     return isStackAccess(MI, FrameIndex);
3516 
3517   if (isSGPRSpill(MI))
3518     return isSGPRStackAccess(MI, FrameIndex);
3519 
3520   return AMDGPU::NoRegister;
3521 }
3522 
3523 unsigned SIInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
3524                                          int &FrameIndex) const {
3525   if (!MI.mayStore())
3526     return AMDGPU::NoRegister;
3527 
3528   if (isMUBUF(MI) || isVGPRSpill(MI))
3529     return isStackAccess(MI, FrameIndex);
3530 
3531   if (isSGPRSpill(MI))
3532     return isSGPRStackAccess(MI, FrameIndex);
3533 
3534   return AMDGPU::NoRegister;
3535 }
3536 
3537 unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
3538   unsigned Opc = MI.getOpcode();
3539   const MCInstrDesc &Desc = getMCOpcodeFromPseudo(Opc);
3540   unsigned DescSize = Desc.getSize();
3541 
3542   // If we have a definitive size, we can use it. Otherwise we need to inspect
3543   // the operands to know the size.
3544   //
3545   // FIXME: Instructions that have a base 32-bit encoding report their size as
3546   // 4, even though they are really 8 bytes if they have a literal operand.
3547   if (DescSize != 0 && DescSize != 4)
3548     return DescSize;
3549 
3550   if (Opc == AMDGPU::WAVE_BARRIER)
3551     return 0;
3552 
3553   // 4-byte instructions may have a 32-bit literal encoded after them. Check
  // operands that could ever be literals.
3555   if (isVALU(MI) || isSALU(MI)) {
3556     if (isFixedSize(MI)) {
3557       assert(DescSize == 4);
3558       return DescSize;
3559     }
3560 
3561     int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
3562     if (Src0Idx == -1)
3563       return 4; // No operands.
3564 
3565     if (isLiteralConstantLike(MI.getOperand(Src0Idx), Desc.OpInfo[Src0Idx]))
3566       return 8;
3567 
3568     int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
3569     if (Src1Idx == -1)
3570       return 4;
3571 
3572     if (isLiteralConstantLike(MI.getOperand(Src1Idx), Desc.OpInfo[Src1Idx]))
3573       return 8;
3574 
3575     return 4;
3576   }
3577 
3578   if (DescSize == 4)
3579     return 4;
3580 
3581   switch (Opc) {
3582   case AMDGPU::SI_MASK_BRANCH:
3583   case TargetOpcode::IMPLICIT_DEF:
3584   case TargetOpcode::KILL:
3585   case TargetOpcode::DBG_VALUE:
3586   case TargetOpcode::BUNDLE:
3587   case TargetOpcode::EH_LABEL:
3588     return 0;
3589   case TargetOpcode::INLINEASM: {
3590     const MachineFunction *MF = MI.getParent()->getParent();
3591     const char *AsmStr = MI.getOperand(0).getSymbolName();
3592     return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
3593   }
3594   default:
3595     llvm_unreachable("unable to find instruction size");
3596   }
3597 }
3598 
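// A FLAT instruction is conservatively assumed to access the flat address
// space unless every attached memory operand points to a more specific
// address space.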
3599 bool SIInstrInfo::mayAccessFlatAddressSpace(const MachineInstr &MI) const {
3600   if (!isFLAT(MI))
3601     return false;
3602 
3603   if (MI.memoperands_empty())
3604     return true;
3605 
3606   for (const MachineMemOperand *MMO : MI.memoperands()) {
3607     if (MMO->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS)
3608       return true;
3609   }
3610   return false;
3611 }
3612 
3613 ArrayRef<std::pair<int, const char *>>
3614 SIInstrInfo::getSerializableTargetIndices() const {
3615   static const std::pair<int, const char *> TargetIndices[] = {
3616       {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"},
3617       {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"},
3618       {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"},
3619       {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"},
3620       {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}};
3621   return makeArrayRef(TargetIndices);
3622 }
3623 
/// This is used by the post-RA scheduler (PostRASchedulerList.cpp). The
/// post-RA version of misched uses CreateTargetMIHazardRecognizer.
3626 ScheduleHazardRecognizer *
3627 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
3628                                             const ScheduleDAG *DAG) const {
3629   return new GCNHazardRecognizer(DAG->MF);
3630 }
3631 
3632 /// This is the hazard recognizer used at -O0 by the PostRAHazardRecognizer
3633 /// pass.
3634 ScheduleHazardRecognizer *
3635 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const {
3636   return new GCNHazardRecognizer(MF);
3637 }
3638 
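// Treat non-terminator, non-copy instructions that write exec as part of the
// block prologue, e.g. the exec-mask setup emitted at the top of a block.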
3639 bool SIInstrInfo::isBasicBlockPrologue(const MachineInstr &MI) const {
3640   return !MI.isTerminator() && MI.getOpcode() != AMDGPU::COPY &&
3641          MI.modifiesRegister(AMDGPU::EXEC, &RI);
3642 }
3643