//===- SIFixSGPRCopies.cpp - Remove potential VGPR => SGPR copies ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Copies from VGPR to SGPR registers are illegal and the register coalescer
/// will sometimes generate these illegal copies in situations like this:
///
///  Register Class <vsrc> is the union of <vgpr> and <sgpr>
///
/// BB0:
///   %0 <sgpr> = SCALAR_INST
///   %1 <vsrc> = COPY %0 <sgpr>
///    ...
///    BRANCH %cond BB1, BB2
/// BB1:
///   %2 <vgpr> = VECTOR_INST
///   %3 <vsrc> = COPY %2 <vgpr>
/// BB2:
///   %4 <vsrc> = PHI %1 <vsrc>, <%bb.0>, %3 <vsrc>, <%bb.1>
///   %5 <vgpr> = VECTOR_INST %4 <vsrc>
///
/// The coalescer will begin at BB0 and eliminate its copy, and the resulting
/// code will look like this:
///
/// BB0:
///   %0 <sgpr> = SCALAR_INST
///    ...
///    BRANCH %cond BB1, BB2
/// BB1:
///   %2 <vgpr> = VECTOR_INST
///   %3 <vsrc> = COPY %2 <vgpr>
/// BB2:
///   %4 <sgpr> = PHI %0 <sgpr>, <%bb.0>, %3 <vsrc>, <%bb.1>
///   %5 <vgpr> = VECTOR_INST %4 <sgpr>
///
/// Now that the result of the PHI instruction is an SGPR, the register
/// allocator is forced to constrain the register class of %3 to
/// <sgpr> so we end up with final code like this:
///
/// BB0:
///   %0 <sgpr> = SCALAR_INST
///    ...
///    BRANCH %cond BB1, BB2
/// BB1:
///   %2 <vgpr> = VECTOR_INST
///   %3 <sgpr> = COPY %2 <vgpr>
/// BB2:
///   %4 <sgpr> = PHI %0 <sgpr>, <%bb.0>, %3 <sgpr>, <%bb.1>
///   %5 <vgpr> = VECTOR_INST %4 <sgpr>
///
/// Now this code contains an illegal copy from a VGPR to an SGPR.
///
/// In order to avoid this problem, this pass searches for PHI instructions
/// which define a <vsrc> register and constrains their definition class to
/// <vgpr> if a user of the PHI's definition register is a vector instruction.
/// If the PHI's definition class is constrained to <vgpr> then the coalescer
/// will be unable to perform the COPY removal from the above example, which
/// ultimately led to the creation of an illegal COPY.
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>
#include <map>
#include <tuple>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-fix-sgpr-copies"

static cl::opt<bool> EnableM0Merge(
  "amdgpu-enable-merge-m0",
  cl::desc("Merge and hoist M0 initializations"),
  cl::init(false));

namespace {

class SIFixSGPRCopies : public MachineFunctionPass {
  MachineDominatorTree *MDT;

public:
  static char ID;

  SIFixSGPRCopies() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fix SGPR copies"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MachineDominatorTree>();
    AU.addPreserved<MachineDominatorTree>();
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

INITIALIZE_PASS_BEGIN(SIFixSGPRCopies, DEBUG_TYPE,
                      "SI Fix SGPR copies", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(SIFixSGPRCopies, DEBUG_TYPE,
                    "SI Fix SGPR copies", false, false)

char SIFixSGPRCopies::ID = 0;

char &llvm::SIFixSGPRCopiesID = SIFixSGPRCopies::ID;

FunctionPass *llvm::createSIFixSGPRCopiesPass() {
  return new SIFixSGPRCopies();
}

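// Return true if any virtual register operand of \p MI belongs to a register
// class that contains VGPRs.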
static bool hasVGPROperands(const MachineInstr &MI, const SIRegisterInfo *TRI) {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
      continue;

    if (TRI->hasVGPRs(MRI.getRegClass(MO.getReg())))
      return true;
  }
  return false;
}

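// Return the register classes of the source and destination of \p Copy,
// looking up virtual registers in \p MRI and physical registers in \p TRI.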
static std::pair<const TargetRegisterClass *, const TargetRegisterClass *>
getCopyRegClasses(const MachineInstr &Copy,
                  const SIRegisterInfo &TRI,
                  const MachineRegisterInfo &MRI) {
  unsigned DstReg = Copy.getOperand(0).getReg();
  unsigned SrcReg = Copy.getOperand(1).getReg();

  const TargetRegisterClass *SrcRC =
    TargetRegisterInfo::isVirtualRegister(SrcReg) ?
    MRI.getRegClass(SrcReg) :
    TRI.getPhysRegClass(SrcReg);

  // We don't really care about the subregister here.
  // SrcRC = TRI.getSubRegClass(SrcRC, Copy.getOperand(1).getSubReg());

  const TargetRegisterClass *DstRC =
    TargetRegisterInfo::isVirtualRegister(DstReg) ?
    MRI.getRegClass(DstReg) :
    TRI.getPhysRegClass(DstReg);

  return std::make_pair(SrcRC, DstRC);
}

static bool isVGPRToSGPRCopy(const TargetRegisterClass *SrcRC,
                             const TargetRegisterClass *DstRC,
                             const SIRegisterInfo &TRI) {
  return SrcRC != &AMDGPU::VReg_1RegClass && TRI.isSGPRClass(DstRC) &&
         TRI.hasVGPRs(SrcRC);
}

static bool isSGPRToVGPRCopy(const TargetRegisterClass *SrcRC,
                             const TargetRegisterClass *DstRC,
                             const SIRegisterInfo &TRI) {
  return DstRC != &AMDGPU::VReg_1RegClass && TRI.isSGPRClass(SrcRC) &&
         TRI.hasVGPRs(DstRC);
}

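// Try to keep the copy on the SALU by giving the destination of an
// SGPR-to-VGPR copy an SGPR register class instead. This is only legal when
// every non-debug user of the destination is a target instruction in the
// same block that accepts an SGPR in the corresponding operand. For example
// (illustrative):
//
//   %1:vgpr_32 = COPY %0:sreg_32
//   %2:vgpr_32 = V_ADD_U32_e64 %1, ..., implicit $exec
//
// can become
//
//   %1:sreg_32 = COPY %0:sreg_32
//   %2:vgpr_32 = V_ADD_U32_e64 %1, ..., implicit $exec
//
// since VALU instructions can read an SGPR operand directly, subject to the
// constant bus limits checked by isOperandLegal().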
static bool tryChangeVGPRtoSGPRinCopy(MachineInstr &MI,
                                      const SIRegisterInfo *TRI,
                                      const SIInstrInfo *TII) {
  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  auto &Src = MI.getOperand(1);
  unsigned DstReg = MI.getOperand(0).getReg();
  unsigned SrcReg = Src.getReg();
  if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
      !TargetRegisterInfo::isVirtualRegister(DstReg))
    return false;

  for (const auto &MO : MRI.reg_nodbg_operands(DstReg)) {
    const auto *UseMI = MO.getParent();
    if (UseMI == &MI)
      continue;
    if (MO.isDef() || UseMI->getParent() != MI.getParent() ||
        UseMI->getOpcode() <= TargetOpcode::GENERIC_OP_END ||
        !TII->isOperandLegal(*UseMI, UseMI->getOperandNo(&MO), &Src))
      return false;
  }
  // Change VGPR to SGPR destination.
  MRI.setRegClass(DstReg, TRI->getEquivalentSGPRClass(MRI.getRegClass(DstReg)));
  return true;
}

// Distribute an SGPR->VGPR copy of a REG_SEQUENCE into a VGPR REG_SEQUENCE.
//
// SGPRx = ...
// SGPRy = REG_SEQUENCE SGPRx, sub0 ...
// VGPRz = COPY SGPRy
//
// ==>
//
// VGPRx = COPY SGPRx
// VGPRz = REG_SEQUENCE VGPRx, sub0
//
// This exposes immediate folding opportunities when materializing 64-bit
// immediates.
static bool foldVGPRCopyIntoRegSequence(MachineInstr &MI,
                                        const SIRegisterInfo *TRI,
                                        const SIInstrInfo *TII,
                                        MachineRegisterInfo &MRI) {
  assert(MI.isRegSequence());

  unsigned DstReg = MI.getOperand(0).getReg();
  if (!TRI->isSGPRClass(MRI.getRegClass(DstReg)))
    return false;

  if (!MRI.hasOneUse(DstReg))
    return false;

  MachineInstr &CopyUse = *MRI.use_instr_begin(DstReg);
  if (!CopyUse.isCopy())
    return false;

  // It is illegal to have vreg inputs to a physreg defining reg_sequence.
  if (TargetRegisterInfo::isPhysicalRegister(CopyUse.getOperand(0).getReg()))
    return false;

  const TargetRegisterClass *SrcRC, *DstRC;
  std::tie(SrcRC, DstRC) = getCopyRegClasses(CopyUse, *TRI, MRI);

  if (!isSGPRToVGPRCopy(SrcRC, DstRC, *TRI))
    return false;

  if (tryChangeVGPRtoSGPRinCopy(CopyUse, TRI, TII))
    return true;

  // TODO: Could have multiple extracts?
  unsigned SubReg = CopyUse.getOperand(1).getSubReg();
  if (SubReg != AMDGPU::NoSubRegister)
    return false;

  MRI.setRegClass(DstReg, DstRC);

  // SGPRx = ...
  // SGPRy = REG_SEQUENCE SGPRx, sub0 ...
  // VGPRz = COPY SGPRy
  //
  // =>
  //
  // VGPRx = COPY SGPRx
  // VGPRz = REG_SEQUENCE VGPRx, sub0
  MI.getOperand(0).setReg(CopyUse.getOperand(0).getReg());

  for (unsigned I = 1, N = MI.getNumOperands(); I != N; I += 2) {
    unsigned SrcReg = MI.getOperand(I).getReg();
    unsigned SrcSubReg = MI.getOperand(I).getSubReg();

    const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
    assert(TRI->isSGPRClass(SrcRC) &&
           "Expected SGPR REG_SEQUENCE to only have SGPR inputs");

    SrcRC = TRI->getSubRegClass(SrcRC, SrcSubReg);
    const TargetRegisterClass *NewSrcRC = TRI->getEquivalentVGPRClass(SrcRC);

    unsigned TmpReg = MRI.createVirtualRegister(NewSrcRC);

    BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(AMDGPU::COPY),
            TmpReg)
        .add(MI.getOperand(I));

    MI.getOperand(I).setReg(TmpReg);
  }

  CopyUse.eraseFromParent();
  return true;
}

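// Check whether \p Copy just copies the result of the immediate move
// \p MoveImm, so the pair can be rewritten as a single scalar move. On
// success, return the scalar opcode in \p SMovOp and the value in \p Imm.
// For example (illustrative):
//
//   %0:vgpr_32 = V_MOV_B32_e32 42, implicit $exec
//   %1:sreg_32 = COPY %0
//
// folds to
//
//   %1:sreg_32 = S_MOV_B32 42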
static bool isSafeToFoldImmIntoCopy(const MachineInstr *Copy,
                                    const MachineInstr *MoveImm,
                                    const SIInstrInfo *TII,
                                    unsigned &SMovOp,
                                    int64_t &Imm) {
  if (Copy->getOpcode() != AMDGPU::COPY)
    return false;

  if (!MoveImm->isMoveImmediate())
    return false;

  const MachineOperand *ImmOp =
      TII->getNamedOperand(*MoveImm, AMDGPU::OpName::src0);
  if (!ImmOp->isImm())
    return false;

  // FIXME: Handle copies with sub-regs.
  if (Copy->getOperand(0).getSubReg())
    return false;

  switch (MoveImm->getOpcode()) {
  default:
    return false;
  case AMDGPU::V_MOV_B32_e32:
    SMovOp = AMDGPU::S_MOV_B32;
    break;
  case AMDGPU::V_MOV_B64_PSEUDO:
    SMovOp = AMDGPU::S_MOV_B64;
    break;
  }
  Imm = ImmOp->getImm();
  return true;
}

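// Walk the predecessors of \p MBB transitively, stopping at \p CutOff, and
// return true if \p Predicate holds for any visited block. \p MBB itself is
// not tested.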
template <class UnaryPredicate>
bool searchPredecessors(const MachineBasicBlock *MBB,
                        const MachineBasicBlock *CutOff,
                        UnaryPredicate Predicate) {
  if (MBB == CutOff)
    return false;

  DenseSet<const MachineBasicBlock *> Visited;
  SmallVector<MachineBasicBlock *, 4> Worklist(MBB->pred_begin(),
                                               MBB->pred_end());

  while (!Worklist.empty()) {
    MachineBasicBlock *MBB = Worklist.pop_back_val();

    if (!Visited.insert(MBB).second)
      continue;
    if (MBB == CutOff)
      continue;
    if (Predicate(MBB))
      return true;

    Worklist.append(MBB->pred_begin(), MBB->pred_end());
  }

  return false;
}

// Check whether there is a potential path from instruction From to
// instruction To. If CutOff is specified and lies on that path, the portion
// of the path above CutOff is ignored and the instructions are reported as
// not reachable.
static bool isReachable(const MachineInstr *From,
                        const MachineInstr *To,
                        const MachineBasicBlock *CutOff,
                        MachineDominatorTree &MDT) {
  // Either From's block dominates To's block, or the instructions are in the
  // same block and From comes first.
  if (MDT.dominates(From, To))
    return true;

  const MachineBasicBlock *MBBFrom = From->getParent();
  const MachineBasicBlock *MBBTo = To->getParent();
  if (MBBFrom == MBBTo)
    return false;

  // Instructions are in different blocks, do predecessor search.
  // We should almost never get here since we do not usually produce M0 stores
  // other than -1.
  return searchPredecessors(MBBTo, CutOff, [MBBFrom]
           (const MachineBasicBlock *MBB) { return MBB == MBBFrom; });
}

// Hoist and merge identical SGPR initializations into a common predecessor.
// This is intended to combine M0 initializations, but can work with any
// SGPR. A VGPR cannot be processed since we cannot guarantee vector
// execution.
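//
// For example (illustrative, the M0 case this is used for):
//
//   bb.1:
//     $m0 = S_MOV_B32 -1
//     ...
//   bb.2:
//     $m0 = S_MOV_B32 -1
//     ...
//
// with bb.0 as the nearest common dominator and no interfering M0 writes in
// between, becomes a single init:
//
//   bb.0:
//     $m0 = S_MOV_B32 -1
//     ...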
static bool hoistAndMergeSGPRInits(unsigned Reg,
                                   const MachineRegisterInfo &MRI,
                                   MachineDominatorTree &MDT) {
  // List of inits by immediate value.
  using InitListMap = std::map<unsigned, std::list<MachineInstr *>>;
  InitListMap Inits;
  // List of clobbering instructions.
  SmallVector<MachineInstr*, 8> Clobbers;
  bool Changed = false;

  for (auto &MI : MRI.def_instructions(Reg)) {
    MachineOperand *Imm = nullptr;
    for (auto &MO: MI.operands()) {
      if ((MO.isReg() && ((MO.isDef() && MO.getReg() != Reg) || !MO.isDef())) ||
          (!MO.isImm() && !MO.isReg()) || (MO.isImm() && Imm)) {
        Imm = nullptr;
        break;
      } else if (MO.isImm())
        Imm = &MO;
    }
    if (Imm)
      Inits[Imm->getImm()].push_front(&MI);
    else
      Clobbers.push_back(&MI);
  }

  for (auto &Init : Inits) {
    auto &Defs = Init.second;

    for (auto I1 = Defs.begin(), E = Defs.end(); I1 != E; ) {
      MachineInstr *MI1 = *I1;

      for (auto I2 = std::next(I1); I2 != E; ) {
        MachineInstr *MI2 = *I2;

        // Check any possible interference.
        auto interferes = [&](MachineBasicBlock::iterator From,
                              MachineBasicBlock::iterator To) -> bool {
          assert(MDT.dominates(&*To, &*From));

          auto clobberInterferes =
              [&MDT, From, To](MachineInstr *&Clobber) -> bool {
            const MachineBasicBlock *MBBFrom = From->getParent();
            const MachineBasicBlock *MBBTo = To->getParent();
            bool MayClobberFrom = isReachable(Clobber, &*From, MBBTo, MDT);
            bool MayClobberTo = isReachable(Clobber, &*To, MBBTo, MDT);
            if (!MayClobberFrom && !MayClobberTo)
              return false;
            if ((MayClobberFrom && !MayClobberTo) ||
                (!MayClobberFrom && MayClobberTo))
              return true;
            // Both can clobber. This is not an interference only if both are
            // dominated by Clobber and belong to the same block, or if
            // Clobber properly dominates To: since To dominates From, Clobber
            // then dominates both and sits in a common dominator.
            return !((MBBFrom == MBBTo &&
                      MDT.dominates(Clobber, &*From) &&
                      MDT.dominates(Clobber, &*To)) ||
                     MDT.properlyDominates(Clobber->getParent(), MBBTo));
          };

          return (llvm::any_of(Clobbers, clobberInterferes)) ||
                 (llvm::any_of(Inits, [&](InitListMap::value_type &C) {
                    return C.first != Init.first &&
                           llvm::any_of(C.second, clobberInterferes);
                  }));
        };

        if (MDT.dominates(MI1, MI2)) {
          if (!interferes(MI2, MI1)) {
            LLVM_DEBUG(dbgs()
                       << "Erasing from "
                       << printMBBReference(*MI2->getParent()) << " " << *MI2);
            MI2->eraseFromParent();
            Defs.erase(I2++);
            Changed = true;
            continue;
          }
        } else if (MDT.dominates(MI2, MI1)) {
          if (!interferes(MI1, MI2)) {
            LLVM_DEBUG(dbgs()
                       << "Erasing from "
                       << printMBBReference(*MI1->getParent()) << " " << *MI1);
            MI1->eraseFromParent();
            Defs.erase(I1++);
            Changed = true;
            break;
          }
        } else {
          auto *MBB = MDT.findNearestCommonDominator(MI1->getParent(),
                                                     MI2->getParent());
          if (!MBB) {
            ++I2;
            continue;
          }

          MachineBasicBlock::iterator I = MBB->getFirstNonPHI();
          if (!interferes(MI1, I) && !interferes(MI2, I)) {
            LLVM_DEBUG(dbgs()
                       << "Erasing from "
                       << printMBBReference(*MI1->getParent()) << " " << *MI1
                       << "and moving from "
                       << printMBBReference(*MI2->getParent()) << " to "
                       << printMBBReference(*I->getParent()) << " " << *MI2);
            I->getParent()->splice(I, MI2->getParent(), MI2);
            MI1->eraseFromParent();
            Defs.erase(I1++);
            Changed = true;
            break;
          }
        }
        ++I2;
      }
      ++I1;
    }
  }

  if (Changed)
    MRI.clearKillFlags(Reg);

  return Changed;
}

bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  MDT = &getAnalysis<MachineDominatorTree>();

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
                                                  BI != BE; ++BI) {
    MachineBasicBlock &MBB = *BI;
    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         I != E; ++I) {
      MachineInstr &MI = *I;

      switch (MI.getOpcode()) {
      default:
        continue;
      case AMDGPU::COPY:
      case AMDGPU::WQM:
      case AMDGPU::WWM: {
        // If the destination register is a physical register there isn't
        // really much we can do to fix this.
        if (!TargetRegisterInfo::isVirtualRegister(MI.getOperand(0).getReg()))
          continue;

        const TargetRegisterClass *SrcRC, *DstRC;
        std::tie(SrcRC, DstRC) = getCopyRegClasses(MI, *TRI, MRI);
        if (isVGPRToSGPRCopy(SrcRC, DstRC, *TRI)) {
          unsigned SrcReg = MI.getOperand(1).getReg();
          if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) {
            TII->moveToVALU(MI, MDT);
            break;
          }

          MachineInstr *DefMI = MRI.getVRegDef(SrcReg);
          unsigned SMovOp;
          int64_t Imm;
          // If we are just copying an immediate, we can replace the copy with
          // s_mov_b32.
          if (isSafeToFoldImmIntoCopy(&MI, DefMI, TII, SMovOp, Imm)) {
            MI.getOperand(1).ChangeToImmediate(Imm);
            MI.addImplicitDefUseOperands(MF);
            MI.setDesc(TII->get(SMovOp));
            break;
          }
          TII->moveToVALU(MI, MDT);
        } else if (isSGPRToVGPRCopy(SrcRC, DstRC, *TRI)) {
          tryChangeVGPRtoSGPRinCopy(MI, TRI, TII);
        }

        break;
      }
      case AMDGPU::PHI: {
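        // Decide whether this PHI should stay scalar or move to the VALU.
        // Walk the users of the PHI result, looking through copies,
        // reg_sequences and other PHIs, and count the uses that actually
        // require a VGPR; separately, check whether any incoming value is
        // produced in a VGPR. If the result is not already a VGPR and either
        // an input is a VGPR or there is more than one VGPR use, move the
        // PHI to the VALU; otherwise just legalize its operands.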
        unsigned NumVGPRUses = 0;
        SetVector<const MachineInstr *> Worklist;
        Worklist.insert(&MI);
        while (!Worklist.empty()) {
          const MachineInstr *Instr = Worklist.pop_back_val();
          unsigned Reg = Instr->getOperand(0).getReg();
          for (const auto &Use : MRI.use_operands(Reg)) {
            const MachineInstr *UseMI = Use.getParent();
            if (UseMI->isCopy() || UseMI->isRegSequence()) {
              if (UseMI->isCopy() &&
                  TRI->isPhysicalRegister(UseMI->getOperand(0).getReg()) &&
                  !TRI->isSGPRReg(MRI, UseMI->getOperand(0).getReg())) {
                NumVGPRUses++;
              }
              Worklist.insert(UseMI);
              continue;
            }

            if (UseMI->isPHI()) {
              if (!TRI->isSGPRReg(MRI, Use.getReg()))
                NumVGPRUses++;
              continue;
            }

            unsigned OpNo = UseMI->getOperandNo(&Use);
            const MCInstrDesc &Desc = TII->get(UseMI->getOpcode());
            if (!Desc.isPseudo() && Desc.OpInfo &&
                OpNo < Desc.getNumOperands() &&
                Desc.OpInfo[OpNo].RegClass != -1) {
              const TargetRegisterClass *OpRC =
                  TRI->getRegClass(Desc.OpInfo[OpNo].RegClass);
              if (!TRI->isSGPRClass(OpRC) && OpRC != &AMDGPU::VS_32RegClass &&
                  OpRC != &AMDGPU::VS_64RegClass) {
                NumVGPRUses++;
              }
            }
          }
        }

        bool HasVGPRInput = false;
        for (unsigned I = 1; I < MI.getNumOperands(); I += 2) {
          unsigned InputReg = MI.getOperand(I).getReg();
          MachineInstr *Def = MRI.getVRegDef(InputReg);
          if (TRI->isVGPR(MRI, InputReg)) {
            if (Def->isCopy()) {
              unsigned SrcReg = Def->getOperand(1).getReg();
              const TargetRegisterClass *RC =
                  TRI->isVirtualRegister(SrcReg) ? MRI.getRegClass(SrcReg)
                                                 : TRI->getPhysRegClass(SrcReg);
              if (TRI->isSGPRClass(RC))
                continue;
            }
            HasVGPRInput = true;
            break;
          } else if (Def->isCopy() &&
                     TRI->isVGPR(MRI, Def->getOperand(1).getReg())) {
            HasVGPRInput = true;
            break;
          }
        }

        unsigned PHIRes = MI.getOperand(0).getReg();
        const TargetRegisterClass *RC0 = MRI.getRegClass(PHIRes);

        if ((!TRI->isVGPR(MRI, PHIRes) && RC0 != &AMDGPU::VReg_1RegClass) &&
            (HasVGPRInput || NumVGPRUses > 1)) {
          TII->moveToVALU(MI, MDT);
        } else {
          TII->legalizeOperands(MI, MDT);
        }

        break;
      }
      case AMDGPU::REG_SEQUENCE:
        if (TRI->hasVGPRs(TII->getOpRegClass(MI, 0)) ||
            !hasVGPROperands(MI, TRI)) {
          foldVGPRCopyIntoRegSequence(MI, TRI, TII, MRI);
          continue;
        }

        LLVM_DEBUG(dbgs() << "Fixing REG_SEQUENCE: " << MI);

        TII->moveToVALU(MI, MDT);
        break;
      case AMDGPU::INSERT_SUBREG: {
        const TargetRegisterClass *DstRC, *Src0RC, *Src1RC;
        DstRC = MRI.getRegClass(MI.getOperand(0).getReg());
        Src0RC = MRI.getRegClass(MI.getOperand(1).getReg());
        Src1RC = MRI.getRegClass(MI.getOperand(2).getReg());
        if (TRI->isSGPRClass(DstRC) &&
            (TRI->hasVGPRs(Src0RC) || TRI->hasVGPRs(Src1RC))) {
          LLVM_DEBUG(dbgs() << " Fixing INSERT_SUBREG: " << MI);
          TII->moveToVALU(MI, MDT);
        }
        break;
      }
      }
    }
  }

  if (MF.getTarget().getOptLevel() > CodeGenOpt::None && EnableM0Merge)
    hoistAndMergeSGPRInits(AMDGPU::M0, MRI, *MDT);

  return true;
}