//===-- SIShrinkInstructions.cpp - Shrink Instructions --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// The pass tries to use the 32-bit encoding for instructions when possible.
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"

#define DEBUG_TYPE "si-shrink-instructions"

STATISTIC(NumInstructionsShrunk,
          "Number of 64-bit instructions reduced to 32-bit.");
STATISTIC(NumLiteralConstantsFolded,
          "Number of literal constants folded into 32-bit instructions.");

using namespace llvm;

namespace {

class SIShrinkInstructions : public MachineFunctionPass {
public:
  static char ID;

  void shrinkMIMG(MachineInstr &MI);

public:
  SIShrinkInstructions() : MachineFunctionPass(ID) {
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Shrink Instructions"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIShrinkInstructions, DEBUG_TYPE,
                "SI Shrink Instructions", false, false)

char SIShrinkInstructions::ID = 0;

FunctionPass *llvm::createSIShrinkInstructionsPass() {
  return new SIShrinkInstructions();
}

/// This function checks \p MI for operands defined by a move immediate
/// instruction and then folds the literal constant into the instruction if it
/// can. This function assumes that \p MI is a VOP1, VOP2, or VOPC instruction.
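///
/// For example (illustrative MIR, assuming the literal is legal for src0):
///   %0:vgpr_32 = V_MOV_B32_e32 999, implicit $exec
///   %1:vgpr_32 = V_ADD_F32_e32 killed %0, %2, implicit $exec
/// folds to:
///   %1:vgpr_32 = V_ADD_F32_e32 999, %2, implicit $exec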
static bool foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
                           MachineRegisterInfo &MRI, bool TryToCommute = true) {
  assert(TII->isVOP1(MI) || TII->isVOP2(MI) || TII->isVOPC(MI));

  int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);

  // Try to fold Src0
  MachineOperand &Src0 = MI.getOperand(Src0Idx);
  if (Src0.isReg()) {
    Register Reg = Src0.getReg();
    if (Reg.isVirtual() && MRI.hasOneUse(Reg)) {
      MachineInstr *Def = MRI.getUniqueVRegDef(Reg);
      if (Def && Def->isMoveImmediate()) {
        MachineOperand &MovSrc = Def->getOperand(1);
        bool ConstantFolded = false;

        if (TII->isOperandLegal(MI, Src0Idx, &MovSrc)) {
          if (MovSrc.isImm() &&
              (isInt<32>(MovSrc.getImm()) || isUInt<32>(MovSrc.getImm()))) {
            Src0.ChangeToImmediate(MovSrc.getImm());
            ConstantFolded = true;
          } else if (MovSrc.isFI()) {
            Src0.ChangeToFrameIndex(MovSrc.getIndex());
            ConstantFolded = true;
          } else if (MovSrc.isGlobal()) {
            Src0.ChangeToGA(MovSrc.getGlobal(), MovSrc.getOffset(),
                            MovSrc.getTargetFlags());
            ConstantFolded = true;
          }
        }

        if (ConstantFolded) {
          assert(MRI.use_empty(Reg));
          Def->eraseFromParent();
          ++NumLiteralConstantsFolded;
          return true;
        }
      }
    }
  }

  // We have failed to fold src0, so commute the instruction and try again.
  if (TryToCommute && MI.isCommutable()) {
    if (TII->commuteInstruction(MI)) {
      if (foldImmediates(MI, TII, MRI, false))
        return true;

      // Commute back.
      TII->commuteInstruction(MI);
    }
  }

  return false;
}

static bool isKImmOperand(const SIInstrInfo *TII, const MachineOperand &Src) {
  return isInt<16>(Src.getImm()) &&
    !TII->isInlineConstant(*Src.getParent(),
                           Src.getParent()->getOperandNo(&Src));
}

static bool isKUImmOperand(const SIInstrInfo *TII, const MachineOperand &Src) {
  return isUInt<16>(Src.getImm()) &&
    !TII->isInlineConstant(*Src.getParent(),
                           Src.getParent()->getOperandNo(&Src));
}

static bool isKImmOrKUImmOperand(const SIInstrInfo *TII,
                                 const MachineOperand &Src,
                                 bool &IsUnsigned) {
  if (isInt<16>(Src.getImm())) {
    IsUnsigned = false;
    return !TII->isInlineConstant(Src);
  }

  if (isUInt<16>(Src.getImm())) {
    IsUnsigned = true;
    return !TII->isInlineConstant(Src);
  }

  return false;
}

/// \returns true if the constant in \p Src should be replaced with a bitreverse
/// of an inline immediate.
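///
/// For example, 0x80000000 (a lone sign bit) is not an inline immediate, but
/// its bit-reverse is 1, which is; so the value can be materialized with a
/// v_bfrev_b32 of the inline constant instead of a 32-bit literal.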
static bool isReverseInlineImm(const SIInstrInfo *TII,
                               const MachineOperand &Src,
                               int32_t &ReverseImm) {
  if (!isInt<32>(Src.getImm()) || TII->isInlineConstant(Src))
    return false;

  ReverseImm = reverseBits<int32_t>(static_cast<int32_t>(Src.getImm()));
  return ReverseImm >= -16 && ReverseImm <= 64;
}

/// Copy implicit register operands from the specified instruction to this
/// instruction that are not part of the instruction definition.
static void copyExtraImplicitOps(MachineInstr &NewMI, MachineFunction &MF,
                                 const MachineInstr &MI) {
  for (unsigned i = MI.getDesc().getNumOperands() +
         MI.getDesc().getNumImplicitUses() +
         MI.getDesc().getNumImplicitDefs(), e = MI.getNumOperands();
       i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
      NewMI.addOperand(MF, MO);
  }
}

static void shrinkScalarCompare(const SIInstrInfo *TII, MachineInstr &MI) {
  // cmpk instructions do scc = dst <cc op> imm16, so commute the instruction to
  // get constants on the RHS.
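  // For example, s_cmp_gt_i32 s0, 0x1234 can become s_cmpk_gt_i32 s0, 0x1234,
  // dropping the trailing 32-bit literal dword from the encoding.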
  if (!MI.getOperand(0).isReg())
    TII->commuteInstruction(MI, false, 0, 1);

  // cmpk requires src0 to be a register
  const MachineOperand &Src0 = MI.getOperand(0);
  if (!Src0.isReg())
    return;

  const MachineOperand &Src1 = MI.getOperand(1);
  if (!Src1.isImm())
    return;

  int SOPKOpc = AMDGPU::getSOPKOp(MI.getOpcode());
  if (SOPKOpc == -1)
    return;

  // eq/ne is special because the imm16 can be treated as signed or unsigned,
  // and is initially selected to the unsigned versions.
  if (SOPKOpc == AMDGPU::S_CMPK_EQ_U32 || SOPKOpc == AMDGPU::S_CMPK_LG_U32) {
    bool HasUImm;
    if (isKImmOrKUImmOperand(TII, Src1, HasUImm)) {
      if (!HasUImm) {
        SOPKOpc = (SOPKOpc == AMDGPU::S_CMPK_EQ_U32) ?
          AMDGPU::S_CMPK_EQ_I32 : AMDGPU::S_CMPK_LG_I32;
      }

      MI.setDesc(TII->get(SOPKOpc));
    }

    return;
  }

  const MCInstrDesc &NewDesc = TII->get(SOPKOpc);

  if ((TII->sopkIsZext(SOPKOpc) && isKUImmOperand(TII, Src1)) ||
      (!TII->sopkIsZext(SOPKOpc) && isKImmOperand(TII, Src1))) {
    MI.setDesc(NewDesc);
  }
}

// Shrink NSA encoded instructions with contiguous VGPRs to non-NSA encoding.
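// For example (gfx10; operands abbreviated, address held in contiguous
// registers v4-v6):
//   image_sample v[0:3], [v4, v5, v6], s[0:7], s[8:11]   ; NSA encoding
// becomes
//   image_sample v[0:3], v[4:6], s[0:7], s[8:11]         ; non-NSA encoding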
void SIShrinkInstructions::shrinkMIMG(MachineInstr &MI) {
  const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());
  if (!Info || Info->MIMGEncoding != AMDGPU::MIMGEncGfx10NSA)
    return;

  MachineFunction *MF = MI.getParent()->getParent();
  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  int VAddr0Idx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
  unsigned NewAddrDwords = Info->VAddrDwords;
  const TargetRegisterClass *RC;

  if (Info->VAddrDwords == 2) {
    RC = &AMDGPU::VReg_64RegClass;
  } else if (Info->VAddrDwords == 3) {
    RC = &AMDGPU::VReg_96RegClass;
  } else if (Info->VAddrDwords == 4) {
    RC = &AMDGPU::VReg_128RegClass;
  } else if (Info->VAddrDwords == 5) {
    RC = &AMDGPU::VReg_160RegClass;
  } else if (Info->VAddrDwords <= 8) {
    RC = &AMDGPU::VReg_256RegClass;
    NewAddrDwords = 8;
  } else {
    RC = &AMDGPU::VReg_512RegClass;
    NewAddrDwords = 16;
  }

  unsigned VgprBase = 0;
  bool IsUndef = true;
  bool IsKill = NewAddrDwords == Info->VAddrDwords;
  for (unsigned i = 0; i < Info->VAddrDwords; ++i) {
    const MachineOperand &Op = MI.getOperand(VAddr0Idx + i);
    unsigned Vgpr = TRI.getHWRegIndex(Op.getReg());

    if (i == 0) {
      VgprBase = Vgpr;
    } else if (VgprBase + i != Vgpr)
      return;

    if (!Op.isUndef())
      IsUndef = false;
    if (!Op.isKill())
      IsKill = false;
  }

  if (VgprBase + NewAddrDwords > 256)
    return;

  // Further check for implicit tied operands - these may be present if TFE or
  // LWE is enabled.
  int TFEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::tfe);
  int LWEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::lwe);
  unsigned TFEVal = (TFEIdx == -1) ? 0 : MI.getOperand(TFEIdx).getImm();
  unsigned LWEVal = (LWEIdx == -1) ? 0 : MI.getOperand(LWEIdx).getImm();
  int ToUntie = -1;
  if (TFEVal || LWEVal) {
    // TFE/LWE is enabled so we need to deal with an implicit tied operand
    for (unsigned i = LWEIdx + 1, e = MI.getNumOperands(); i != e; ++i) {
      if (MI.getOperand(i).isReg() && MI.getOperand(i).isTied() &&
          MI.getOperand(i).isImplicit()) {
        // This is the tied operand
        assert(
            ToUntie == -1 &&
            "found more than one tied implicit operand when expecting only 1");
        ToUntie = i;
        MI.untieRegOperand(ToUntie);
      }
    }
  }

  unsigned NewOpcode =
      AMDGPU::getMIMGOpcode(Info->BaseOpcode, AMDGPU::MIMGEncGfx10Default,
                            Info->VDataDwords, NewAddrDwords);
  MI.setDesc(TII->get(NewOpcode));
  MI.getOperand(VAddr0Idx).setReg(RC->getRegister(VgprBase));
  MI.getOperand(VAddr0Idx).setIsUndef(IsUndef);
  MI.getOperand(VAddr0Idx).setIsKill(IsKill);

  for (unsigned i = 1; i < Info->VAddrDwords; ++i)
    MI.RemoveOperand(VAddr0Idx + 1);

  if (ToUntie >= 0) {
    MI.tieOperands(
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata),
        ToUntie - (Info->VAddrDwords - 1));
  }
}

/// Attempt to shrink AND/OR/XOR operations requiring non-inlineable literals.
/// For AND or OR, try using S_BITSET{0,1} to clear or set bits.
/// If the inverse of the immediate is legal, use ANDN2, ORN2 or
/// XNOR (as a ^ b == ~(a ^ ~b)).
/// \returns true if the caller should continue the machine function iterator
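///
/// Examples (none of these literals are inlineable, but the inverted value or
/// the bit position is):
///   s_and_b32 s0, s0, 0xffffffef  ->  s_bitset0_b32 s0, 4
///   s_or_b32  s0, s0, 0x00010000  ->  s_bitset1_b32 s0, 16
///   s_xor_b32 s0, s0, 0xffffffc0  ->  s_xnor_b32 s0, s0, 63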
static bool shrinkScalarLogicOp(const GCNSubtarget &ST,
                                MachineRegisterInfo &MRI,
                                const SIInstrInfo *TII,
                                MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  const MachineOperand *Dest = &MI.getOperand(0);
  MachineOperand *Src0 = &MI.getOperand(1);
  MachineOperand *Src1 = &MI.getOperand(2);
  MachineOperand *SrcReg = Src0;
  MachineOperand *SrcImm = Src1;

  if (!SrcImm->isImm() ||
      AMDGPU::isInlinableLiteral32(SrcImm->getImm(), ST.hasInv2PiInlineImm()))
    return false;

  uint32_t Imm = static_cast<uint32_t>(SrcImm->getImm());
  uint32_t NewImm = 0;

  if (Opc == AMDGPU::S_AND_B32) {
    if (isPowerOf2_32(~Imm)) {
      NewImm = countTrailingOnes(Imm);
      Opc = AMDGPU::S_BITSET0_B32;
    } else if (AMDGPU::isInlinableLiteral32(~Imm, ST.hasInv2PiInlineImm())) {
      NewImm = ~Imm;
      Opc = AMDGPU::S_ANDN2_B32;
    }
  } else if (Opc == AMDGPU::S_OR_B32) {
    if (isPowerOf2_32(Imm)) {
      NewImm = countTrailingZeros(Imm);
      Opc = AMDGPU::S_BITSET1_B32;
    } else if (AMDGPU::isInlinableLiteral32(~Imm, ST.hasInv2PiInlineImm())) {
      NewImm = ~Imm;
      Opc = AMDGPU::S_ORN2_B32;
    }
  } else if (Opc == AMDGPU::S_XOR_B32) {
    if (AMDGPU::isInlinableLiteral32(~Imm, ST.hasInv2PiInlineImm())) {
      NewImm = ~Imm;
      Opc = AMDGPU::S_XNOR_B32;
    }
  } else {
    llvm_unreachable("unexpected opcode");
  }

  if ((Opc == AMDGPU::S_ANDN2_B32 || Opc == AMDGPU::S_ORN2_B32) &&
      SrcImm == Src0) {
    if (!TII->commuteInstruction(MI, false, 1, 2))
      NewImm = 0;
  }

  if (NewImm != 0) {
    if (Dest->getReg().isVirtual() && SrcReg->isReg()) {
      MRI.setRegAllocationHint(Dest->getReg(), 0, SrcReg->getReg());
      MRI.setRegAllocationHint(SrcReg->getReg(), 0, Dest->getReg());
      return true;
    }

    if (SrcReg->isReg() && SrcReg->getReg() == Dest->getReg()) {
      const bool IsUndef = SrcReg->isUndef();
      const bool IsKill = SrcReg->isKill();
      MI.setDesc(TII->get(Opc));
      if (Opc == AMDGPU::S_BITSET0_B32 ||
          Opc == AMDGPU::S_BITSET1_B32) {
        Src0->ChangeToImmediate(NewImm);
        // Remove the immediate and add the tied input.
        MI.getOperand(2).ChangeToRegister(Dest->getReg(), /*IsDef*/ false,
                                          /*isImp*/ false, IsKill,
                                          /*isDead*/ false, IsUndef);
        MI.tieOperands(0, 2);
      } else {
        SrcImm->setImm(NewImm);
      }
    }
  }

  return false;
}

// This is the same as MachineInstr::readsRegister/modifiesRegister except
// it takes subregs into account.
static bool instAccessReg(iterator_range<MachineInstr::const_mop_iterator> &&R,
                          Register Reg, unsigned SubReg,
                          const SIRegisterInfo &TRI) {
  for (const MachineOperand &MO : R) {
    if (!MO.isReg())
      continue;

    if (Reg.isPhysical() && MO.getReg().isPhysical()) {
      if (TRI.regsOverlap(Reg, MO.getReg()))
        return true;
    } else if (MO.getReg() == Reg && Reg.isVirtual()) {
      LaneBitmask Overlap = TRI.getSubRegIndexLaneMask(SubReg) &
                            TRI.getSubRegIndexLaneMask(MO.getSubReg());
      if (Overlap.any())
        return true;
    }
  }
  return false;
}

static bool instReadsReg(const MachineInstr *MI,
                         unsigned Reg, unsigned SubReg,
                         const SIRegisterInfo &TRI) {
  return instAccessReg(MI->uses(), Reg, SubReg, TRI);
}

static bool instModifiesReg(const MachineInstr *MI,
                            unsigned Reg, unsigned SubReg,
                            const SIRegisterInfo &TRI) {
  return instAccessReg(MI->defs(), Reg, SubReg, TRI);
}

static TargetInstrInfo::RegSubRegPair
getSubRegForIndex(Register Reg, unsigned Sub, unsigned I,
                  const SIRegisterInfo &TRI, const MachineRegisterInfo &MRI) {
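  // Select the 32-bit channel \p I: for registers wider than 32 bits, take the
  // hardware sub-register for physical registers, or compose the channel with
  // the existing sub-register index for virtual ones.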
  if (TRI.getRegSizeInBits(Reg, MRI) != 32) {
    if (Reg.isPhysical()) {
      Reg = TRI.getSubReg(Reg, TRI.getSubRegFromChannel(I));
    } else {
      Sub = TRI.getSubRegFromChannel(I + TRI.getChannelFromSubReg(Sub));
    }
  }
  return TargetInstrInfo::RegSubRegPair(Reg, Sub);
}

static void dropInstructionKeepingImpDefs(MachineInstr &MI,
                                          const SIInstrInfo *TII) {
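  // Re-materialize any extra implicit defs (operands beyond the static operand
  // list) with IMPLICIT_DEF before erasing MI, so they remain defined.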
  for (unsigned i = MI.getDesc().getNumOperands() +
         MI.getDesc().getNumImplicitUses() +
         MI.getDesc().getNumImplicitDefs(), e = MI.getNumOperands();
       i != e; ++i) {
    const MachineOperand &Op = MI.getOperand(i);
    if (!Op.isDef())
      continue;
    BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
            TII->get(AMDGPU::IMPLICIT_DEF), Op.getReg());
  }

  MI.eraseFromParent();
}

// Match:
// mov t, x
// mov x, y
// mov y, t
//
// =>
//
// mov t, x (t is potentially dead and move eliminated)
// v_swap_b32 x, y
//
// Returns the next valid instruction pointer if it was able to create
// v_swap_b32.
//
// This should not be done too early, so as not to block folding that might
// remove the matched moves entirely. It should preferably run before RA to
// release the saved registers, and possibly also after RA, since RA can
// insert copies too.
//
// This is really just a generic peephole that is not a canonical shrinking,
// although the requirements match the pass placement and it reduces code size
// too.
static MachineInstr* matchSwap(MachineInstr &MovT, MachineRegisterInfo &MRI,
                               const SIInstrInfo *TII) {
  assert(MovT.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
         MovT.getOpcode() == AMDGPU::COPY);

  Register T = MovT.getOperand(0).getReg();
  unsigned Tsub = MovT.getOperand(0).getSubReg();
  MachineOperand &Xop = MovT.getOperand(1);

  if (!Xop.isReg())
    return nullptr;
  Register X = Xop.getReg();
  unsigned Xsub = Xop.getSubReg();

  unsigned Size = TII->getOpSize(MovT, 0) / 4;

  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  if (!TRI.isVGPR(MRI, X))
    return nullptr;

  if (MovT.hasRegisterImplicitUseOperand(AMDGPU::M0))
    return nullptr;

  const unsigned SearchLimit = 16;
  unsigned Count = 0;
  bool KilledT = false;
  for (auto Iter = std::next(MovT.getIterator()),
            E = MovT.getParent()->instr_end();
       Iter != E && Count < SearchLimit && !KilledT; ++Iter, ++Count) {

    MachineInstr *MovY = &*Iter;
    KilledT = MovY->killsRegister(T, &TRI);

    if ((MovY->getOpcode() != AMDGPU::V_MOV_B32_e32 &&
         MovY->getOpcode() != AMDGPU::COPY) ||
        !MovY->getOperand(1).isReg()        ||
        MovY->getOperand(1).getReg() != T   ||
        MovY->getOperand(1).getSubReg() != Tsub ||
        MovY->hasRegisterImplicitUseOperand(AMDGPU::M0))
      continue;

    Register Y = MovY->getOperand(0).getReg();
    unsigned Ysub = MovY->getOperand(0).getSubReg();

    if (!TRI.isVGPR(MRI, Y))
      continue;

    MachineInstr *MovX = nullptr;
    for (auto IY = MovY->getIterator(), I = std::next(MovT.getIterator());
         I != IY; ++I) {
      if (instReadsReg(&*I, X, Xsub, TRI)    ||
          instModifiesReg(&*I, Y, Ysub, TRI) ||
          instModifiesReg(&*I, T, Tsub, TRI) ||
          (MovX && instModifiesReg(&*I, X, Xsub, TRI))) {
        MovX = nullptr;
        break;
      }
      if (!instReadsReg(&*I, Y, Ysub, TRI)) {
        if (!MovX && instModifiesReg(&*I, X, Xsub, TRI)) {
          MovX = nullptr;
          break;
        }
        continue;
      }
      if (MovX ||
          (I->getOpcode() != AMDGPU::V_MOV_B32_e32 &&
           I->getOpcode() != AMDGPU::COPY) ||
          I->getOperand(0).getReg() != X ||
          I->getOperand(0).getSubReg() != Xsub) {
        MovX = nullptr;
        break;
      }
      // Implicit use of M0 is an indirect move.
      if (I->hasRegisterImplicitUseOperand(AMDGPU::M0))
        continue;

      if (Size > 1 && (I->getNumImplicitOperands() > (I->isCopy() ? 0U : 1U)))
        continue;

      MovX = &*I;
    }

    if (!MovX)
      continue;

    LLVM_DEBUG(dbgs() << "Matched v_swap_b32:\n" << MovT << *MovX << *MovY);

    for (unsigned I = 0; I < Size; ++I) {
      TargetInstrInfo::RegSubRegPair X1, Y1;
      X1 = getSubRegForIndex(X, Xsub, I, TRI, MRI);
      Y1 = getSubRegForIndex(Y, Ysub, I, TRI, MRI);
      MachineBasicBlock &MBB = *MovT.getParent();
      auto MIB = BuildMI(MBB, MovX->getIterator(), MovT.getDebugLoc(),
                         TII->get(AMDGPU::V_SWAP_B32))
        .addDef(X1.Reg, 0, X1.SubReg)
        .addDef(Y1.Reg, 0, Y1.SubReg)
        .addReg(Y1.Reg, 0, Y1.SubReg)
        .addReg(X1.Reg, 0, X1.SubReg).getInstr();
      if (MovX->hasRegisterImplicitUseOperand(AMDGPU::EXEC)) {
        // Drop implicit EXEC.
        MIB->RemoveOperand(MIB->getNumExplicitOperands());
        MIB->copyImplicitOps(*MBB.getParent(), *MovX);
      }
    }
    MovX->eraseFromParent();
    dropInstructionKeepingImpDefs(*MovY, TII);
    MachineInstr *Next = &*std::next(MovT.getIterator());

    if (T.isVirtual() && MRI.use_nodbg_empty(T)) {
      dropInstructionKeepingImpDefs(MovT, TII);
    } else {
      Xop.setIsKill(false);
      for (int I = MovT.getNumImplicitOperands() - 1; I >= 0; --I) {
        unsigned OpNo = MovT.getNumExplicitOperands() + I;
        const MachineOperand &Op = MovT.getOperand(OpNo);
        if (Op.isKill() && TRI.regsOverlap(X, Op.getReg()))
          MovT.RemoveOperand(OpNo);
      }
    }

    return Next;
  }

  return nullptr;
}

bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  MachineRegisterInfo &MRI = MF.getRegInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  unsigned VCCReg = ST.isWave32() ? AMDGPU::VCC_LO : AMDGPU::VCC;

  std::vector<unsigned> I1Defs;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
                                                  BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      if (MI.getOpcode() == AMDGPU::V_MOV_B32_e32) {
        // If this has a literal constant source that is the same as the
        // reversed bits of an inline immediate, replace with a bitreverse of
        // that constant. This saves 4 bytes in the common case of materializing
        // sign bits.

        // Test if we are after regalloc. We only want to do this after any
        // optimizations happen because this will confuse them.
        // XXX - not exactly a check for post-regalloc run.
        MachineOperand &Src = MI.getOperand(1);
        if (Src.isImm() && MI.getOperand(0).getReg().isPhysical()) {
          int32_t ReverseImm;
          if (isReverseInlineImm(TII, Src, ReverseImm)) {
            MI.setDesc(TII->get(AMDGPU::V_BFREV_B32_e32));
            Src.setImm(ReverseImm);
            continue;
          }
        }
      }

      if (ST.hasSwap() && (MI.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
                           MI.getOpcode() == AMDGPU::COPY)) {
        if (auto *NextMI = matchSwap(MI, MRI, TII)) {
          Next = NextMI->getIterator();
          continue;
        }
      }

      // FIXME: We also need to consider movs of constant operands since
      // immediate operands are not folded if they have more than one use, and
      // the operand folding pass is unaware if the immediate will be free since
      // it won't know if the src == dest constraint will end up being
      // satisfied.
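      // When the destination is tied to src0 and the other operand is a
      // 16-bit literal, the add/mul can use the SOPK form, e.g.
      //   s_add_i32 s0, s0, 0x1234  ->  s_addk_i32 s0, 0x1234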
      if (MI.getOpcode() == AMDGPU::S_ADD_I32 ||
          MI.getOpcode() == AMDGPU::S_MUL_I32) {
        const MachineOperand *Dest = &MI.getOperand(0);
        MachineOperand *Src0 = &MI.getOperand(1);
        MachineOperand *Src1 = &MI.getOperand(2);

        if (!Src0->isReg() && Src1->isReg()) {
          if (TII->commuteInstruction(MI, false, 1, 2))
            std::swap(Src0, Src1);
        }

        // FIXME: This could work better if hints worked with subregisters. If
        // we have a vector add of a constant, we usually don't get the correct
        // allocation due to the subregister usage.
        if (Dest->getReg().isVirtual() && Src0->isReg()) {
          MRI.setRegAllocationHint(Dest->getReg(), 0, Src0->getReg());
          MRI.setRegAllocationHint(Src0->getReg(), 0, Dest->getReg());
          continue;
        }

        if (Src0->isReg() && Src0->getReg() == Dest->getReg()) {
          if (Src1->isImm() && isKImmOperand(TII, *Src1)) {
            unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_I32) ?
              AMDGPU::S_ADDK_I32 : AMDGPU::S_MULK_I32;

            MI.setDesc(TII->get(Opc));
            MI.tieOperands(0, 1);
          }
        }
      }

      // Try to use s_cmpk_*
      if (MI.isCompare() && TII->isSOPC(MI)) {
        shrinkScalarCompare(TII, MI);
        continue;
      }

      // Try to use S_MOVK_I32, which will save 4 bytes for small immediates.
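      // e.g. s_mov_b32 s0, 0x1234  ->  s_movk_i32 s0, 0x1234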
      if (MI.getOpcode() == AMDGPU::S_MOV_B32) {
        const MachineOperand &Dst = MI.getOperand(0);
        MachineOperand &Src = MI.getOperand(1);

        if (Src.isImm() && Dst.getReg().isPhysical()) {
          int32_t ReverseImm;
          if (isKImmOperand(TII, Src))
            MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
          else if (isReverseInlineImm(TII, Src, ReverseImm)) {
            MI.setDesc(TII->get(AMDGPU::S_BREV_B32));
            Src.setImm(ReverseImm);
          }
        }

        continue;
      }

      // Shrink scalar logic operations.
      if (MI.getOpcode() == AMDGPU::S_AND_B32 ||
          MI.getOpcode() == AMDGPU::S_OR_B32 ||
          MI.getOpcode() == AMDGPU::S_XOR_B32) {
        if (shrinkScalarLogicOp(ST, MRI, TII, MI))
          continue;
      }

      if (TII->isMIMG(MI.getOpcode()) &&
          ST.getGeneration() >= AMDGPUSubtarget::GFX10 &&
          MF.getProperties().hasProperty(
              MachineFunctionProperties::Property::NoVRegs)) {
        shrinkMIMG(MI);
        continue;
      }

      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
        continue;

      if (!TII->canShrink(MI, MRI)) {
        // Try commuting the instruction and see if that enables us to shrink
        // it.
        if (!MI.isCommutable() || !TII->commuteInstruction(MI) ||
            !TII->canShrink(MI, MRI))
          continue;
      }

      // getVOPe32 could be -1 here if we started with an instruction that had
      // a 32-bit encoding and then commuted it to an instruction that did not.
      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
        continue;

      int Op32 = AMDGPU::getVOPe32(MI.getOpcode());

      if (TII->isVOPC(Op32)) {
        Register DstReg = MI.getOperand(0).getReg();
        if (DstReg.isVirtual()) {
          // VOPC instructions can only write to the VCC register. We can't
          // force them to use VCC here, because this is only one register and
          // cannot deal with sequences which would require multiple copies of
          // VCC, e.g. S_AND_B64 (vcc = V_CMP_...), (vcc = V_CMP_...)
          //
          // So, instead of forcing the instruction to write to VCC, we provide
          // a hint to the register allocator to use VCC and then we will run
          // this pass again after RA and shrink it if it outputs to VCC.
          MRI.setRegAllocationHint(MI.getOperand(0).getReg(), 0, VCCReg);
          continue;
        }
        if (DstReg != VCCReg)
          continue;
      }

      if (Op32 == AMDGPU::V_CNDMASK_B32_e32) {
        // We shrink V_CNDMASK_B32_e64 using regalloc hints like we do for VOPC
        // instructions.
        const MachineOperand *Src2 =
            TII->getNamedOperand(MI, AMDGPU::OpName::src2);
        if (!Src2->isReg())
          continue;
        Register SReg = Src2->getReg();
        if (SReg.isVirtual()) {
          MRI.setRegAllocationHint(SReg, 0, VCCReg);
          continue;
        }
        if (SReg != VCCReg)
          continue;
      }

      // Check for the bool flag output for instructions like V_ADD_I32_e64.
      const MachineOperand *SDst = TII->getNamedOperand(MI,
                                                        AMDGPU::OpName::sdst);

      // Check the carry-in operand for v_addc_u32_e64.
      const MachineOperand *Src2 = TII->getNamedOperand(MI,
                                                        AMDGPU::OpName::src2);

      if (SDst) {
        bool Next = false;

        if (SDst->getReg() != VCCReg) {
          if (SDst->getReg().isVirtual())
            MRI.setRegAllocationHint(SDst->getReg(), 0, VCCReg);
          Next = true;
        }

        // All of the instructions with carry outs also have an SGPR input in
        // src2.
        if (Src2 && Src2->getReg() != VCCReg) {
          if (Src2->getReg().isVirtual())
            MRI.setRegAllocationHint(Src2->getReg(), 0, VCCReg);
          Next = true;
        }

        if (Next)
          continue;
      }

      // We can shrink this instruction
      LLVM_DEBUG(dbgs() << "Shrinking " << MI);

      MachineInstr *Inst32 = TII->buildShrunkInst(MI, Op32);
      ++NumInstructionsShrunk;

      // Copy extra operands not present in the instruction definition.
      copyExtraImplicitOps(*Inst32, MF, MI);

      MI.eraseFromParent();
      foldImmediates(*Inst32, TII, MRI);

      LLVM_DEBUG(dbgs() << "e32 MI = " << *Inst32 << '\n');
    }
  }
  return false;
}