//===- SIPeepholeSDWA.cpp - Peephole optimization for SDWA instructions ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This pass tries to apply several peephole SDWA patterns.
///
/// E.g. original:
///   V_LSHRREV_B32_e32 %0, 16, %1
///   V_ADD_CO_U32_e32 %2, %0, %3
///   V_LSHLREV_B32_e32 %4, 16, %2
///
/// Replace:
///   V_ADD_CO_U32_sdwa %4, %1, %3
///       dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
///
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <memory>
#include <unordered_map>

using namespace llvm;

#define DEBUG_TYPE "si-peephole-sdwa"

STATISTIC(NumSDWAPatternsFound, "Number of SDWA patterns found.");
STATISTIC(NumSDWAInstructionsPeepholed,
          "Number of instructions converted to SDWA.");

namespace {

class SDWAOperand;
class SDWADstOperand;

class SIPeepholeSDWA : public MachineFunctionPass {
public:
  using SDWAOperandsVector = SmallVector<SDWAOperand *, 4>;

private:
  MachineRegisterInfo *MRI;
  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;

  MapVector<MachineInstr *, std::unique_ptr<SDWAOperand>> SDWAOperands;
  MapVector<MachineInstr *, SDWAOperandsVector> PotentialMatches;
  SmallVector<MachineInstr *, 8> ConvertedInstructions;

  Optional<int64_t> foldToImm(const MachineOperand &Op) const;

public:
  static char ID;

  SIPeepholeSDWA() : MachineFunctionPass(ID) {
    initializeSIPeepholeSDWAPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;
  void matchSDWAOperands(MachineBasicBlock &MBB);
  std::unique_ptr<SDWAOperand> matchSDWAOperand(MachineInstr &MI);
  bool isConvertibleToSDWA(MachineInstr &MI, const GCNSubtarget &ST) const;
  void pseudoOpConvertToVOP2(MachineInstr &MI,
                             const GCNSubtarget &ST) const;
  bool convertToSDWA(MachineInstr &MI, const SDWAOperandsVector &SDWAOperands);
  void legalizeScalarOperands(MachineInstr &MI, const GCNSubtarget &ST) const;

  StringRef getPassName() const override { return "SI Peephole SDWA"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

class SDWAOperand {
private:
  MachineOperand *Target; // Operand that would be used in converted instruction
  MachineOperand *Replaced; // Operand that would be replaced by Target

public:
  SDWAOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp)
      : Target(TargetOp), Replaced(ReplacedOp) {
    assert(Target->isReg());
    assert(Replaced->isReg());
  }

  virtual ~SDWAOperand() = default;

  virtual MachineInstr *potentialToConvert(const SIInstrInfo *TII) = 0;
  virtual bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) = 0;

  MachineOperand *getTargetOperand() const { return Target; }
  MachineOperand *getReplacedOperand() const { return Replaced; }
  MachineInstr *getParentInst() const { return Target->getParent(); }

  MachineRegisterInfo *getMRI() const {
    return &getParentInst()->getParent()->getParent()->getRegInfo();
  }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  virtual void print(raw_ostream& OS) const = 0;
  void dump() const { print(dbgs()); }
#endif
};

using namespace AMDGPU::SDWA;

class SDWASrcOperand : public SDWAOperand {
private:
  SdwaSel SrcSel;
  bool Abs;
  bool Neg;
  bool Sext;

public:
  SDWASrcOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp,
                 SdwaSel SrcSel_ = DWORD, bool Abs_ = false, bool Neg_ = false,
                 bool Sext_ = false)
      : SDWAOperand(TargetOp, ReplacedOp),
        SrcSel(SrcSel_), Abs(Abs_), Neg(Neg_), Sext(Sext_) {}

  MachineInstr *potentialToConvert(const SIInstrInfo *TII) override;
  bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;

  SdwaSel getSrcSel() const { return SrcSel; }
  bool getAbs() const { return Abs; }
  bool getNeg() const { return Neg; }
  bool getSext() const { return Sext; }

  uint64_t getSrcMods(const SIInstrInfo *TII,
                      const MachineOperand *SrcOp) const;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream& OS) const override;
#endif
};

class SDWADstOperand : public SDWAOperand {
private:
  SdwaSel DstSel;
  DstUnused DstUn;

public:

  SDWADstOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp,
                 SdwaSel DstSel_ = DWORD, DstUnused DstUn_ = UNUSED_PAD)
    : SDWAOperand(TargetOp, ReplacedOp), DstSel(DstSel_), DstUn(DstUn_) {}

  MachineInstr *potentialToConvert(const SIInstrInfo *TII) override;
  bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;

  SdwaSel getDstSel() const { return DstSel; }
  DstUnused getDstUnused() const { return DstUn; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream& OS) const override;
#endif
};

class SDWADstPreserveOperand : public SDWADstOperand {
private:
  MachineOperand *Preserve;

public:
  SDWADstPreserveOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp,
                         MachineOperand *PreserveOp, SdwaSel DstSel_ = DWORD)
      : SDWADstOperand(TargetOp, ReplacedOp, DstSel_, UNUSED_PRESERVE),
        Preserve(PreserveOp) {}

  bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;

  MachineOperand *getPreservedOperand() const { return Preserve; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream& OS) const override;
#endif
};

} // end anonymous namespace

INITIALIZE_PASS(SIPeepholeSDWA, DEBUG_TYPE, "SI Peephole SDWA", false, false)

char SIPeepholeSDWA::ID = 0;

char &llvm::SIPeepholeSDWAID = SIPeepholeSDWA::ID;

FunctionPass *llvm::createSIPeepholeSDWAPass() {
  return new SIPeepholeSDWA();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
static raw_ostream& operator<<(raw_ostream &OS, SdwaSel Sel) {
  switch(Sel) {
  case BYTE_0: OS << "BYTE_0"; break;
  case BYTE_1: OS << "BYTE_1"; break;
  case BYTE_2: OS << "BYTE_2"; break;
  case BYTE_3: OS << "BYTE_3"; break;
  case WORD_0: OS << "WORD_0"; break;
  case WORD_1: OS << "WORD_1"; break;
  case DWORD:  OS << "DWORD"; break;
  }
  return OS;
}

static raw_ostream& operator<<(raw_ostream &OS, const DstUnused &Un) {
  switch(Un) {
  case UNUSED_PAD: OS << "UNUSED_PAD"; break;
  case UNUSED_SEXT: OS << "UNUSED_SEXT"; break;
  case UNUSED_PRESERVE: OS << "UNUSED_PRESERVE"; break;
  }
  return OS;
}

LLVM_DUMP_METHOD
void SDWASrcOperand::print(raw_ostream& OS) const {
  OS << "SDWA src: " << *getTargetOperand()
    << " src_sel:" << getSrcSel()
    << " abs:" << getAbs() << " neg:" << getNeg()
    << " sext:" << getSext() << '\n';
}

LLVM_DUMP_METHOD
void SDWADstOperand::print(raw_ostream& OS) const {
  OS << "SDWA dst: " << *getTargetOperand()
    << " dst_sel:" << getDstSel()
    << " dst_unused:" << getDstUnused() << '\n';
}

LLVM_DUMP_METHOD
void SDWADstPreserveOperand::print(raw_ostream& OS) const {
  OS << "SDWA preserve dst: " << *getTargetOperand()
    << " dst_sel:" << getDstSel()
    << " preserve:" << *getPreservedOperand() << '\n';
}

#endif

static void copyRegOperand(MachineOperand &To, const MachineOperand &From) {
  assert(To.isReg() && From.isReg());
  To.setReg(From.getReg());
  To.setSubReg(From.getSubReg());
  To.setIsUndef(From.isUndef());
  if (To.isUse()) {
    To.setIsKill(From.isKill());
  } else {
    To.setIsDead(From.isDead());
  }
}

static bool isSameReg(const MachineOperand &LHS, const MachineOperand &RHS) {
  return LHS.isReg() &&
         RHS.isReg() &&
         LHS.getReg() == RHS.getReg() &&
         LHS.getSubReg() == RHS.getSubReg();
}

static MachineOperand *findSingleRegUse(const MachineOperand *Reg,
                                        const MachineRegisterInfo *MRI) {
  if (!Reg->isReg() || !Reg->isDef())
    return nullptr;

  MachineOperand *ResMO = nullptr;
  for (MachineOperand &UseMO : MRI->use_nodbg_operands(Reg->getReg())) {
    // If there exists a use of a subreg of Reg, return nullptr.
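    // E.g. (illustrative) if Reg defines %0 and some instruction reads
    // %0.sub0, that use does not compare equal to Reg in isSameReg, so we
    // conservatively give up rather than reason about partial overlaps.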
    if (!isSameReg(UseMO, *Reg))
      return nullptr;

    // Check that there is only one instruction that uses Reg
    if (!ResMO) {
      ResMO = &UseMO;
    } else if (ResMO->getParent() != UseMO.getParent()) {
      return nullptr;
    }
  }

  return ResMO;
}

static MachineOperand *findSingleRegDef(const MachineOperand *Reg,
                                        const MachineRegisterInfo *MRI) {
  if (!Reg->isReg())
    return nullptr;

  MachineInstr *DefInstr = MRI->getUniqueVRegDef(Reg->getReg());
  if (!DefInstr)
    return nullptr;

  for (auto &DefMO : DefInstr->defs()) {
    if (DefMO.isReg() && DefMO.getReg() == Reg->getReg())
      return &DefMO;
  }

  // Ignore implicit defs.
  return nullptr;
}

uint64_t SDWASrcOperand::getSrcMods(const SIInstrInfo *TII,
                                    const MachineOperand *SrcOp) const {
  uint64_t Mods = 0;
  const auto *MI = SrcOp->getParent();
  if (TII->getNamedOperand(*MI, AMDGPU::OpName::src0) == SrcOp) {
    if (auto *Mod = TII->getNamedOperand(*MI, AMDGPU::OpName::src0_modifiers)) {
      Mods = Mod->getImm();
    }
  } else if (TII->getNamedOperand(*MI, AMDGPU::OpName::src1) == SrcOp) {
    if (auto *Mod = TII->getNamedOperand(*MI, AMDGPU::OpName::src1_modifiers)) {
      Mods = Mod->getImm();
    }
  }
  if (Abs || Neg) {
    assert(!Sext &&
           "Float and integer src modifiers can't be set simultaneously");
    Mods |= Abs ? SISrcMods::ABS : 0u;
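    // NEG is toggled (XOR) rather than OR'ed so that folding a negate into an
    // operand that already carries a NEG modifier cancels the two out
    // (-(-x) == x).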
    Mods ^= Neg ? SISrcMods::NEG : 0u;
  } else if (Sext) {
    Mods |= SISrcMods::SEXT;
  }

  return Mods;
}

MachineInstr *SDWASrcOperand::potentialToConvert(const SIInstrInfo *TII) {
  // For an SDWA src operand, the potential instruction is the one that uses
  // the register defined by the parent instruction.
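  // E.g. in the pattern from the file header, the parent instruction
  //   V_LSHRREV_B32_e32 %0, 16, %1
  // defines %0, and its single user
  //   V_ADD_CO_U32_e32 %2, %0, %3
  // is the potential instruction to convert.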
  MachineOperand *PotentialMO = findSingleRegUse(getReplacedOperand(), getMRI());
  if (!PotentialMO)
    return nullptr;

  return PotentialMO->getParent();
}

bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
  // Find the operand in the instruction that matches the replaced operand and
  // replace it with the target operand. Set the corresponding src_sel.
  bool IsPreserveSrc = false;
  MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
  MachineOperand *SrcSel = TII->getNamedOperand(MI, AMDGPU::OpName::src0_sel);
  MachineOperand *SrcMods =
      TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers);
  assert(Src && (Src->isReg() || Src->isImm()));
  if (!isSameReg(*Src, *getReplacedOperand())) {
    // If this is not src0 then it could be src1
    Src = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    SrcSel = TII->getNamedOperand(MI, AMDGPU::OpName::src1_sel);
    SrcMods = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);

    if (!Src ||
        !isSameReg(*Src, *getReplacedOperand())) {
      // It's possible this Src is a tied operand for UNUSED_PRESERVE, in
      // which case we can either abandon the peephole attempt, or, if legal,
      // copy the target operand into the tied slot when the preserve
      // operation would effectively produce the same result by overwriting
      // the rest of the dst.
      MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
      MachineOperand *DstUnused =
        TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);

      if (Dst &&
          DstUnused->getImm() == AMDGPU::SDWA::DstUnused::UNUSED_PRESERVE) {
        // This will work if the tied src is accessing WORD_0, and the dst is
        // writing WORD_1. Modifiers don't matter because all the bits that
        // would be impacted are being overwritten by the dst.
        // Any other case will not work.
        SdwaSel DstSel = static_cast<SdwaSel>(
            TII->getNamedImmOperand(MI, AMDGPU::OpName::dst_sel));
        if (DstSel == AMDGPU::SDWA::SdwaSel::WORD_1 &&
            getSrcSel() == AMDGPU::SDWA::SdwaSel::WORD_0) {
          IsPreserveSrc = true;
          auto DstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                                   AMDGPU::OpName::vdst);
          auto TiedIdx = MI.findTiedOperandIdx(DstIdx);
          Src = &MI.getOperand(TiedIdx);
          SrcSel = nullptr;
          SrcMods = nullptr;
        } else {
          // Not legal to convert this src
          return false;
        }
      }
    }
    assert(Src && Src->isReg());

    if ((MI.getOpcode() == AMDGPU::V_FMAC_F16_sdwa ||
         MI.getOpcode() == AMDGPU::V_FMAC_F32_sdwa ||
         MI.getOpcode() == AMDGPU::V_MAC_F16_sdwa ||
         MI.getOpcode() == AMDGPU::V_MAC_F32_sdwa) &&
         !isSameReg(*Src, *getReplacedOperand())) {
      // In case of v_mac_f16/32_sdwa this pass can try to apply src operand to
      // src2. This is not allowed.
      return false;
    }

    assert(isSameReg(*Src, *getReplacedOperand()) &&
           (IsPreserveSrc || (SrcSel && SrcMods)));
  }
  copyRegOperand(*Src, *getTargetOperand());
  if (!IsPreserveSrc) {
    SrcSel->setImm(getSrcSel());
    SrcMods->setImm(getSrcMods(TII, Src));
  }
  getTargetOperand()->setIsKill(false);
  return true;
}

MachineInstr *SDWADstOperand::potentialToConvert(const SIInstrInfo *TII) {
  // For an SDWA dst operand, the potential instruction is the one that
  // defines the register this operand uses.
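  // E.g. in the pattern from the file header, the parent instruction
  //   V_LSHLREV_B32_e32 %4, 16, %2
  // uses %2, so the potential instruction is the add that defines %2.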
  MachineRegisterInfo *MRI = getMRI();
  MachineInstr *ParentMI = getParentInst();

  MachineOperand *PotentialMO = findSingleRegDef(getReplacedOperand(), MRI);
  if (!PotentialMO)
    return nullptr;

  // Check that ParentMI is the only instruction that uses the replaced register
  for (MachineInstr &UseInst : MRI->use_nodbg_instructions(PotentialMO->getReg())) {
    if (&UseInst != ParentMI)
      return nullptr;
  }

  return PotentialMO->getParent();
}

bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
  // Replace the vdst operand in MI with the target operand. Set dst_sel and
  // dst_unused.
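  // E.g. for the pattern from the file header, this rewrites the add that
  // defines %2 into
  //   V_ADD_CO_U32_sdwa %4, ... dst_sel:WORD_1 dst_unused:UNUSED_PAD
  // and erases the original V_LSHLREV_B32_e32 below.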

  if ((MI.getOpcode() == AMDGPU::V_FMAC_F16_sdwa ||
       MI.getOpcode() == AMDGPU::V_FMAC_F32_sdwa ||
       MI.getOpcode() == AMDGPU::V_MAC_F16_sdwa ||
       MI.getOpcode() == AMDGPU::V_MAC_F32_sdwa) &&
      getDstSel() != AMDGPU::SDWA::DWORD) {
    // v_mac_f16/32_sdwa allows dst_sel to be DWORD only
    return false;
  }

  MachineOperand *Operand = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
  assert(Operand &&
         Operand->isReg() &&
         isSameReg(*Operand, *getReplacedOperand()));
  copyRegOperand(*Operand, *getTargetOperand());
  MachineOperand *DstSel = TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel);
  assert(DstSel);
  DstSel->setImm(getDstSel());
  MachineOperand *DstUnused = TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);
  assert(DstUnused);
  DstUnused->setImm(getDstUnused());

  // Remove the original instruction because it would conflict with our new
  // instruction's register definition.
  getParentInst()->eraseFromParent();
  return true;
}

bool SDWADstPreserveOperand::convertToSDWA(MachineInstr &MI,
                                           const SIInstrInfo *TII) {
  // MI should be moved right before v_or_b32.
  // For this we should clear all kill flags on uses of MI src-operands or else
  // we can encounter a problem with the use of a killed operand.
  for (MachineOperand &MO : MI.uses()) {
    if (!MO.isReg())
      continue;
    getMRI()->clearKillFlags(MO.getReg());
  }

  // Move MI before v_or_b32
  auto MBB = MI.getParent();
  MBB->remove(&MI);
  MBB->insert(getParentInst(), &MI);

  // Add implicit use of the preserved register
  MachineInstrBuilder MIB(*MBB->getParent(), MI);
  MIB.addReg(getPreservedOperand()->getReg(),
             RegState::ImplicitKill,
             getPreservedOperand()->getSubReg());

  // Tie dst to the implicit use
  MI.tieOperands(AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdst),
                 MI.getNumOperands() - 1);

  // Convert MI like any other SDWADstOperand and remove v_or_b32
  return SDWADstOperand::convertToSDWA(MI, TII);
}

Optional<int64_t> SIPeepholeSDWA::foldToImm(const MachineOperand &Op) const {
  if (Op.isImm()) {
    return Op.getImm();
  }

  // If this is not an immediate then it can be a copy of an immediate value,
  // e.g.:
  // %1 = S_MOV_B32 255;
  if (Op.isReg()) {
    for (const MachineOperand &Def : MRI->def_operands(Op.getReg())) {
      if (!isSameReg(Op, Def))
        continue;

      const MachineInstr *DefInst = Def.getParent();
      if (!TII->isFoldableCopy(*DefInst))
        return None;

      const MachineOperand &Copied = DefInst->getOperand(1);
      if (!Copied.isImm())
        return None;

      return Copied.getImm();
    }
  }

  return None;
}

std::unique_ptr<SDWAOperand>
SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
  unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  case AMDGPU::V_LSHRREV_B32_e32:
  case AMDGPU::V_ASHRREV_I32_e32:
  case AMDGPU::V_LSHLREV_B32_e32:
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_LSHLREV_B32_e64: {
    // from: v_lshrrev_b32_e32 v1, 16/24, v0
    // to SDWA src:v0 src_sel:WORD_1/BYTE_3

    // from: v_ashrrev_i32_e32 v1, 16/24, v0
    // to SDWA src:v0 src_sel:WORD_1/BYTE_3 sext:1

    // from: v_lshlrev_b32_e32 v1, 16/24, v0
    // to SDWA dst:v1 dst_sel:WORD_1/BYTE_3 dst_unused:UNUSED_PAD
    MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    auto Imm = foldToImm(*Src0);
    if (!Imm)
      break;

    if (*Imm != 16 && *Imm != 24)
      break;

    MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
    if (Src1->getReg().isPhysical() || Dst->getReg().isPhysical())
      break;

    if (Opcode == AMDGPU::V_LSHLREV_B32_e32 ||
        Opcode == AMDGPU::V_LSHLREV_B32_e64) {
      return std::make_unique<SDWADstOperand>(
          Dst, Src1, *Imm == 16 ? WORD_1 : BYTE_3, UNUSED_PAD);
    } else {
      return std::make_unique<SDWASrcOperand>(
          Src1, Dst, *Imm == 16 ? WORD_1 : BYTE_3, false, false,
          Opcode != AMDGPU::V_LSHRREV_B32_e32 &&
          Opcode != AMDGPU::V_LSHRREV_B32_e64);
    }
    break;
  }

  case AMDGPU::V_LSHRREV_B16_e32:
  case AMDGPU::V_ASHRREV_I16_e32:
  case AMDGPU::V_LSHLREV_B16_e32:
  case AMDGPU::V_LSHRREV_B16_e64:
  case AMDGPU::V_ASHRREV_I16_e64:
  case AMDGPU::V_LSHLREV_B16_e64: {
    // from: v_lshrrev_b16_e32 v1, 8, v0
    // to SDWA src:v0 src_sel:BYTE_1

    // from: v_ashrrev_i16_e32 v1, 8, v0
    // to SDWA src:v0 src_sel:BYTE_1 sext:1

    // from: v_lshlrev_b16_e32 v1, 8, v0
    // to SDWA dst:v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD
    MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    auto Imm = foldToImm(*Src0);
    if (!Imm || *Imm != 8)
      break;

    MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);

    if (Src1->getReg().isPhysical() || Dst->getReg().isPhysical())
      break;

    if (Opcode == AMDGPU::V_LSHLREV_B16_e32 ||
        Opcode == AMDGPU::V_LSHLREV_B16_e64) {
      return std::make_unique<SDWADstOperand>(Dst, Src1, BYTE_1, UNUSED_PAD);
    } else {
      return std::make_unique<SDWASrcOperand>(
            Src1, Dst, BYTE_1, false, false,
            Opcode != AMDGPU::V_LSHRREV_B16_e32 &&
            Opcode != AMDGPU::V_LSHRREV_B16_e64);
    }
    break;
  }

  case AMDGPU::V_BFE_I32:
  case AMDGPU::V_BFE_U32: {
    // e.g.:
    // from: v_bfe_u32 v1, v0, 8, 8
    // to SDWA src:v0 src_sel:BYTE_1

    // offset | width | src_sel
    // ------------------------
    // 0      | 8     | BYTE_0
    // 0      | 16    | WORD_0
    // 0      | 32    | DWORD ?
    // 8      | 8     | BYTE_1
    // 16     | 8     | BYTE_2
    // 16     | 16    | WORD_1
    // 24     | 8     | BYTE_3

    MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    auto Offset = foldToImm(*Src1);
    if (!Offset)
      break;

    MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
    auto Width = foldToImm(*Src2);
    if (!Width)
      break;

    SdwaSel SrcSel = DWORD;

    if (*Offset == 0 && *Width == 8)
      SrcSel = BYTE_0;
    else if (*Offset == 0 && *Width == 16)
      SrcSel = WORD_0;
    else if (*Offset == 0 && *Width == 32)
      SrcSel = DWORD;
    else if (*Offset == 8 && *Width == 8)
      SrcSel = BYTE_1;
    else if (*Offset == 16 && *Width == 8)
      SrcSel = BYTE_2;
    else if (*Offset == 16 && *Width == 16)
      SrcSel = WORD_1;
    else if (*Offset == 24 && *Width == 8)
      SrcSel = BYTE_3;
    else
      break;

    MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);

    if (Src0->getReg().isPhysical() || Dst->getReg().isPhysical())
      break;

    return std::make_unique<SDWASrcOperand>(
          Src0, Dst, SrcSel, false, false, Opcode != AMDGPU::V_BFE_U32);
  }

  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::V_AND_B32_e64: {
    // e.g.:
    // from: v_and_b32_e32 v1, 0x0000ffff/0x000000ff, v0
    // to SDWA src:v0 src_sel:WORD_0/BYTE_0

    MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    auto ValSrc = Src1;
    auto Imm = foldToImm(*Src0);

    if (!Imm) {
      Imm = foldToImm(*Src1);
      ValSrc = Src0;
    }

    if (!Imm || (*Imm != 0x0000ffff && *Imm != 0x000000ff))
      break;

    MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);

    if (ValSrc->getReg().isPhysical() || Dst->getReg().isPhysical())
      break;

    return std::make_unique<SDWASrcOperand>(
        ValSrc, Dst, *Imm == 0x0000ffff ? WORD_0 : BYTE_0);
  }

  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::V_OR_B32_e64: {
    // Patterns for dst_unused:UNUSED_PRESERVE.
    // e.g., from:
    // v_add_f16_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD
    //                           src0_sel:WORD_1 src1_sel:WORD_1
    // v_add_f16_e32 v3, v1, v2
    // v_or_b32_e32 v4, v0, v3
    // to SDWA preserve dst:v4 dst_sel:WORD_1 dst_unused:UNUSED_PRESERVE preserve:v3

    // Check if one of the operands of v_or_b32 is an SDWA instruction
    using CheckRetType = Optional<std::pair<MachineOperand *, MachineOperand *>>;
    auto CheckOROperandsForSDWA =
      [&](const MachineOperand *Op1, const MachineOperand *Op2) -> CheckRetType {
        if (!Op1 || !Op1->isReg() || !Op2 || !Op2->isReg())
          return CheckRetType(None);

        MachineOperand *Op1Def = findSingleRegDef(Op1, MRI);
        if (!Op1Def)
          return CheckRetType(None);

        MachineInstr *Op1Inst = Op1Def->getParent();
        if (!TII->isSDWA(*Op1Inst))
          return CheckRetType(None);

        MachineOperand *Op2Def = findSingleRegDef(Op2, MRI);
        if (!Op2Def)
          return CheckRetType(None);

        return CheckRetType(std::make_pair(Op1Def, Op2Def));
      };

    MachineOperand *OrSDWA = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    MachineOperand *OrOther = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    assert(OrSDWA && OrOther);
    auto Res = CheckOROperandsForSDWA(OrSDWA, OrOther);
    if (!Res) {
      OrSDWA = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
      OrOther = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
      assert(OrSDWA && OrOther);
      Res = CheckOROperandsForSDWA(OrSDWA, OrOther);
      if (!Res)
        break;
    }

    MachineOperand *OrSDWADef = Res->first;
    MachineOperand *OrOtherDef = Res->second;
    assert(OrSDWADef && OrOtherDef);

    MachineInstr *SDWAInst = OrSDWADef->getParent();
    MachineInstr *OtherInst = OrOtherDef->getParent();

    // Check that OtherInst is actually bitwise compatible with SDWAInst,
    // i.e. their destination bit patterns don't overlap. A compatible
    // instruction can be either a regular instruction with compatible
    // bitness or an SDWA instruction with the correct dst_sel:
    // SDWAInst | OtherInst bitness / OtherInst dst_sel
    // -----------------------------------------------------
    // DWORD    | no                    / no
    // WORD_0   | no                    / BYTE_2/3, WORD_1
    // WORD_1   | 8/16-bit instructions / BYTE_0/1, WORD_0
    // BYTE_0   | no                    / BYTE_1/2/3, WORD_1
    // BYTE_1   | 8-bit                 / BYTE_0/2/3, WORD_1
    // BYTE_2   | 8/16-bit              / BYTE_0/1/3, WORD_0
    // BYTE_3   | 8/16/24-bit           / BYTE_0/1/2, WORD_0
    // E.g. if SDWAInst is v_add_f16_sdwa dst_sel:WORD_1 then v_add_f16 is OK
    // but v_add_f32 is not.

    // TODO: add support for non-SDWA instructions as OtherInst.
    // For now this only works with SDWA instructions. For regular instructions
    // there is no way to determine if the instruction writes only 8/16/24 bits
    // out of the full register size, and all registers are at least 32 bits
    // wide.
    if (!TII->isSDWA(*OtherInst))
      break;

    SdwaSel DstSel = static_cast<SdwaSel>(
      TII->getNamedImmOperand(*SDWAInst, AMDGPU::OpName::dst_sel));
    SdwaSel OtherDstSel = static_cast<SdwaSel>(
      TII->getNamedImmOperand(*OtherInst, AMDGPU::OpName::dst_sel));

    bool DstSelAgree = false;
    switch (DstSel) {
    case WORD_0: DstSelAgree = ((OtherDstSel == BYTE_2) ||
                                (OtherDstSel == BYTE_3) ||
                                (OtherDstSel == WORD_1));
      break;
    case WORD_1: DstSelAgree = ((OtherDstSel == BYTE_0) ||
                                (OtherDstSel == BYTE_1) ||
                                (OtherDstSel == WORD_0));
      break;
    case BYTE_0: DstSelAgree = ((OtherDstSel == BYTE_1) ||
                                (OtherDstSel == BYTE_2) ||
                                (OtherDstSel == BYTE_3) ||
                                (OtherDstSel == WORD_1));
      break;
    case BYTE_1: DstSelAgree = ((OtherDstSel == BYTE_0) ||
                                (OtherDstSel == BYTE_2) ||
                                (OtherDstSel == BYTE_3) ||
                                (OtherDstSel == WORD_1));
      break;
    case BYTE_2: DstSelAgree = ((OtherDstSel == BYTE_0) ||
                                (OtherDstSel == BYTE_1) ||
                                (OtherDstSel == BYTE_3) ||
                                (OtherDstSel == WORD_0));
      break;
    case BYTE_3: DstSelAgree = ((OtherDstSel == BYTE_0) ||
                                (OtherDstSel == BYTE_1) ||
                                (OtherDstSel == BYTE_2) ||
                                (OtherDstSel == WORD_0));
      break;
    default: DstSelAgree = false;
    }

    if (!DstSelAgree)
      break;

    // Also OtherInst dst_unused should be UNUSED_PAD
    DstUnused OtherDstUnused = static_cast<DstUnused>(
      TII->getNamedImmOperand(*OtherInst, AMDGPU::OpName::dst_unused));
    if (OtherDstUnused != DstUnused::UNUSED_PAD)
      break;

    // Create DstPreserveOperand
    MachineOperand *OrDst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
    assert(OrDst && OrDst->isReg());

    return std::make_unique<SDWADstPreserveOperand>(
      OrDst, OrSDWADef, OrOtherDef, DstSel);

  }
  }

  return std::unique_ptr<SDWAOperand>(nullptr);
}

#if !defined(NDEBUG)
static raw_ostream& operator<<(raw_ostream &OS, const SDWAOperand &Operand) {
  Operand.print(OS);
  return OS;
}
#endif

void SIPeepholeSDWA::matchSDWAOperands(MachineBasicBlock &MBB) {
  for (MachineInstr &MI : MBB) {
    if (auto Operand = matchSDWAOperand(MI)) {
      LLVM_DEBUG(dbgs() << "Match: " << MI << "To: " << *Operand << '\n');
      SDWAOperands[&MI] = std::move(Operand);
      ++NumSDWAPatternsFound;
    }
  }
}

// Convert the V_ADDC_U32_e64 into V_ADDC_U32_e32, and
// V_ADD_CO_U32_e64 into V_ADD_CO_U32_e32. This allows isConvertibleToSDWA
// to perform its transformation on V_ADD_CO_U32_e32 into V_ADD_CO_U32_sdwa.
//
// We are transforming from a VOP3 into a VOP2 form of the instruction.
//   %19:vgpr_32 = V_AND_B32_e32 255,
//       killed %16:vgpr_32, implicit $exec
//   %47:vgpr_32, %49:sreg_64_xexec = V_ADD_CO_U32_e64
//       %26.sub0:vreg_64, %19:vgpr_32, implicit $exec
//   %48:vgpr_32, dead %50:sreg_64_xexec = V_ADDC_U32_e64
//       %26.sub1:vreg_64, %54:vgpr_32, killed %49:sreg_64_xexec, implicit $exec
//
// becomes
//   %47:vgpr_32 = V_ADD_CO_U32_sdwa
//       0, %26.sub0:vreg_64, 0, killed %16:vgpr_32, 0, 6, 0, 6, 0,
//       implicit-def $vcc, implicit $exec
//   %48:vgpr_32 = V_ADDC_U32_e32
//       0, %26.sub1:vreg_64, implicit-def $vcc, implicit $vcc, implicit $exec
void SIPeepholeSDWA::pseudoOpConvertToVOP2(MachineInstr &MI,
                                           const GCNSubtarget &ST) const {
  int Opc = MI.getOpcode();
  assert((Opc == AMDGPU::V_ADD_CO_U32_e64 || Opc == AMDGPU::V_SUB_CO_U32_e64) &&
         "Currently only handles V_ADD_CO_U32_e64 or V_SUB_CO_U32_e64");

  // Can the candidate MI be shrunk?
  if (!TII->canShrink(MI, *MRI))
    return;
  Opc = AMDGPU::getVOPe32(Opc);
  // Find the related ADD instruction.
  const MachineOperand *Sdst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
  if (!Sdst)
    return;
  MachineOperand *NextOp = findSingleRegUse(Sdst, MRI);
  if (!NextOp)
    return;
  MachineInstr &MISucc = *NextOp->getParent();
  // Can the successor be shrunk?
  if (!TII->canShrink(MISucc, *MRI))
    return;
  int SuccOpc = AMDGPU::getVOPe32(MISucc.getOpcode());
  // Make sure the carry in/out are subsequently unused.
  MachineOperand *CarryIn = TII->getNamedOperand(MISucc, AMDGPU::OpName::src2);
  if (!CarryIn)
    return;
  MachineOperand *CarryOut = TII->getNamedOperand(MISucc, AMDGPU::OpName::sdst);
  if (!CarryOut)
    return;
  if (!MRI->hasOneUse(CarryIn->getReg()) || !MRI->use_empty(CarryOut->getReg()))
    return;
  // Make sure VCC or its subregs are dead before MI.
  MachineBasicBlock &MBB = *MI.getParent();
  auto Liveness = MBB.computeRegisterLiveness(TRI, AMDGPU::VCC, MI, 25);
  if (Liveness != MachineBasicBlock::LQR_Dead)
    return;
  // Check if VCC is referenced in the range (MI,MISucc).
  for (auto I = std::next(MI.getIterator()), E = MISucc.getIterator();
       I != E; ++I) {
    if (I->modifiesRegister(AMDGPU::VCC, TRI))
      return;
  }

  // Make the two new e32 instruction variants.
  // Replace MI with V_{SUB|ADD}_I32_e32
  BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(Opc))
    .add(*TII->getNamedOperand(MI, AMDGPU::OpName::vdst))
    .add(*TII->getNamedOperand(MI, AMDGPU::OpName::src0))
    .add(*TII->getNamedOperand(MI, AMDGPU::OpName::src1))
    .setMIFlags(MI.getFlags());

  MI.eraseFromParent();

  // Replace MISucc with V_{SUBB|ADDC}_U32_e32
  BuildMI(MBB, MISucc, MISucc.getDebugLoc(), TII->get(SuccOpc))
    .add(*TII->getNamedOperand(MISucc, AMDGPU::OpName::vdst))
    .add(*TII->getNamedOperand(MISucc, AMDGPU::OpName::src0))
    .add(*TII->getNamedOperand(MISucc, AMDGPU::OpName::src1))
    .setMIFlags(MISucc.getFlags());

  MISucc.eraseFromParent();
}

bool SIPeepholeSDWA::isConvertibleToSDWA(MachineInstr &MI,
                                         const GCNSubtarget &ST) const {
  // Check if this is already an SDWA instruction
  unsigned Opc = MI.getOpcode();
  if (TII->isSDWA(Opc))
    return true;

  // Check if this instruction has an opcode that supports SDWA
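  // If this opcode has no SDWA counterpart, fall back to its VOP2 (e32) form;
  // e.g. V_ADD_CO_U32_e64 maps through V_ADD_CO_U32_e32 to V_ADD_CO_U32_sdwa
  // (see pseudoOpConvertToVOP2 above).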
  if (AMDGPU::getSDWAOp(Opc) == -1)
    Opc = AMDGPU::getVOPe32(Opc);

  if (AMDGPU::getSDWAOp(Opc) == -1)
    return false;

  if (!ST.hasSDWAOmod() && TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
    return false;

  if (TII->isVOPC(Opc)) {
    if (!ST.hasSDWASdst()) {
      const MachineOperand *SDst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
      if (SDst && (SDst->getReg() != AMDGPU::VCC &&
                   SDst->getReg() != AMDGPU::VCC_LO))
        return false;
    }

    if (!ST.hasSDWAOutModsVOPC() &&
        (TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) ||
         TII->hasModifiersSet(MI, AMDGPU::OpName::omod)))
      return false;

  } else if (TII->getNamedOperand(MI, AMDGPU::OpName::sdst) ||
             !TII->getNamedOperand(MI, AMDGPU::OpName::vdst)) {
    return false;
  }

  if (!ST.hasSDWAMac() && (Opc == AMDGPU::V_FMAC_F16_e32 ||
                           Opc == AMDGPU::V_FMAC_F32_e32 ||
                           Opc == AMDGPU::V_MAC_F16_e32 ||
                           Opc == AMDGPU::V_MAC_F32_e32))
    return false;

  // Check if target supports this SDWA opcode
  if (TII->pseudoToMCOpcode(Opc) == -1)
    return false;

  // FIXME: has SDWA but requires handling of implicit VCC use
  if (Opc == AMDGPU::V_CNDMASK_B32_e32)
    return false;

  if (MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0)) {
    if (!Src0->isReg() && !Src0->isImm())
      return false;
  }

  if (MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1)) {
    if (!Src1->isReg() && !Src1->isImm())
      return false;
  }

  return true;
}

bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
                                   const SDWAOperandsVector &SDWAOperands) {

  LLVM_DEBUG(dbgs() << "Convert instruction:" << MI);

  // Convert to SDWA
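  // E.g. (illustrative) V_ADD_CO_U32_e32 -> V_ADD_CO_U32_sdwa, or, for a VOP3
  // opcode with no direct SDWA form, through its e32 form via getVOPe32.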
  int SDWAOpcode;
  unsigned Opcode = MI.getOpcode();
  if (TII->isSDWA(Opcode)) {
    SDWAOpcode = Opcode;
  } else {
    SDWAOpcode = AMDGPU::getSDWAOp(Opcode);
    if (SDWAOpcode == -1)
      SDWAOpcode = AMDGPU::getSDWAOp(AMDGPU::getVOPe32(Opcode));
  }
  assert(SDWAOpcode != -1);

  const MCInstrDesc &SDWADesc = TII->get(SDWAOpcode);

  // Create the SDWA version of instruction MI and initialize its operands
  MachineInstrBuilder SDWAInst =
    BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), SDWADesc)
    .setMIFlags(MI.getFlags());

  // Copy dst; if it is present in the original then it should also be present
  // in the SDWA instruction
  MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
  if (Dst) {
    assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::vdst) != -1);
    SDWAInst.add(*Dst);
  } else if ((Dst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst))) {
    assert(Dst &&
           AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::sdst) != -1);
    SDWAInst.add(*Dst);
  } else {
    assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::sdst) != -1);
    SDWAInst.addReg(TRI->getVCC(), RegState::Define);
  }

  // Copy src0 and initialize src0_modifiers. All SDWA instructions have src0
  // and src0_modifiers (except for v_nop_sdwa, but it can't get here)
  MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
  assert(
    Src0 &&
    AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src0) != -1 &&
    AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src0_modifiers) != -1);
  if (auto *Mod = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers))
    SDWAInst.addImm(Mod->getImm());
  else
    SDWAInst.addImm(0);
  SDWAInst.add(*Src0);

  // Copy src1 if present, initialize src1_modifiers.
  MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
  if (Src1) {
    assert(
      AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src1) != -1 &&
      AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src1_modifiers) != -1);
    if (auto *Mod = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers))
      SDWAInst.addImm(Mod->getImm());
    else
      SDWAInst.addImm(0);
    SDWAInst.add(*Src1);
  }

  if (SDWAOpcode == AMDGPU::V_FMAC_F16_sdwa ||
      SDWAOpcode == AMDGPU::V_FMAC_F32_sdwa ||
      SDWAOpcode == AMDGPU::V_MAC_F16_sdwa ||
      SDWAOpcode == AMDGPU::V_MAC_F32_sdwa) {
    // v_mac_f16/32 has an additional src2 operand tied to vdst
    MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
    assert(Src2);
    SDWAInst.add(*Src2);
  }

  // Copy clamp if present, initialize otherwise
  assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::clamp) != -1);
  MachineOperand *Clamp = TII->getNamedOperand(MI, AMDGPU::OpName::clamp);
  if (Clamp) {
    SDWAInst.add(*Clamp);
  } else {
    SDWAInst.addImm(0);
  }

  // Copy omod if present, initialize otherwise if needed
  if (AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::omod) != -1) {
    MachineOperand *OMod = TII->getNamedOperand(MI, AMDGPU::OpName::omod);
    if (OMod) {
      SDWAInst.add(*OMod);
    } else {
      SDWAInst.addImm(0);
    }
  }

  // Copy dst_sel if present, initialize otherwise if needed
  if (AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::dst_sel) != -1) {
    MachineOperand *DstSel = TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel);
    if (DstSel) {
      SDWAInst.add(*DstSel);
    } else {
      SDWAInst.addImm(AMDGPU::SDWA::SdwaSel::DWORD);
    }
  }

  // Copy dst_unused if present, initialize otherwise if needed
  if (AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::dst_unused) != -1) {
    MachineOperand *DstUnused = TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);
    if (DstUnused) {
      SDWAInst.add(*DstUnused);
    } else {
      SDWAInst.addImm(AMDGPU::SDWA::DstUnused::UNUSED_PAD);
    }
  }

  // Copy src0_sel if present, initialize otherwise
  assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src0_sel) != -1);
  MachineOperand *Src0Sel = TII->getNamedOperand(MI, AMDGPU::OpName::src0_sel);
  if (Src0Sel) {
    SDWAInst.add(*Src0Sel);
  } else {
    SDWAInst.addImm(AMDGPU::SDWA::SdwaSel::DWORD);
  }

  // Copy src1_sel if present, initialize otherwise if needed
  if (Src1) {
    assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src1_sel) != -1);
    MachineOperand *Src1Sel = TII->getNamedOperand(MI, AMDGPU::OpName::src1_sel);
    if (Src1Sel) {
      SDWAInst.add(*Src1Sel);
    } else {
      SDWAInst.addImm(AMDGPU::SDWA::SdwaSel::DWORD);
    }
  }

  // Check for a preserved register that needs to be copied.
  auto DstUnused = TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);
  if (DstUnused &&
      DstUnused->getImm() == AMDGPU::SDWA::DstUnused::UNUSED_PRESERVE) {
    // We expect, if we are here, that the instruction was already in its SDWA
    // form, with a tied operand.
    assert(Dst && Dst->isTied());
    assert(Opcode == static_cast<unsigned int>(SDWAOpcode));
    // We also expect a vdst, since sdst can't preserve.
    auto PreserveDstIdx = AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::vdst);
    assert(PreserveDstIdx != -1);

    auto TiedIdx = MI.findTiedOperandIdx(PreserveDstIdx);
    auto Tied = MI.getOperand(TiedIdx);

    SDWAInst.add(Tied);
    SDWAInst->tieOperands(PreserveDstIdx, SDWAInst->getNumOperands() - 1);
  }

  // Apply all SDWA operand patterns.
  bool Converted = false;
  for (auto &Operand : SDWAOperands) {
    LLVM_DEBUG(dbgs() << *SDWAInst << "\nOperand: " << *Operand);
    // There should be no intersection between SDWA operands and potential MIs
    // e.g.:
    // v_and_b32 v0, 0xff, v1 -> src:v1 sel:BYTE_0
    // v_and_b32 v2, 0xff, v0 -> src:v0 sel:BYTE_0
    // v_add_u32 v3, v4, v2
    //
    // In that example it is possible that we would fold the 2nd instruction
    // into the 3rd (v_add_u32_sdwa) and then try to fold the 1st instruction
    // into the 2nd (which was already destroyed). So if an SDWAOperand is also
    // a potential MI then do not apply it.
    if (PotentialMatches.count(Operand->getParentInst()) == 0)
      Converted |= Operand->convertToSDWA(*SDWAInst, TII);
  }
  if (Converted) {
    ConvertedInstructions.push_back(SDWAInst);
  } else {
    SDWAInst->eraseFromParent();
    return false;
  }

  LLVM_DEBUG(dbgs() << "\nInto:" << *SDWAInst << '\n');
  ++NumSDWAInstructionsPeepholed;

  MI.eraseFromParent();
  return true;
}

// If an instruction was converted to SDWA it should not have immediates or
// SGPR operands (except for one SGPR operand, which is allowed on GFX9).
// Copy its scalar operands into VGPRs.
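// E.g. (illustrative):
//   %2 = V_ADD_F16_sdwa %0, 42, ...
// becomes
//   %3 = V_MOV_B32_e32 42
//   %2 = V_ADD_F16_sdwa %0, %3, ...
// with the immediate materialized into a fresh VGPR.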
void SIPeepholeSDWA::legalizeScalarOperands(MachineInstr &MI,
                                            const GCNSubtarget &ST) const {
  const MCInstrDesc &Desc = TII->get(MI.getOpcode());
  unsigned ConstantBusCount = 0;
  for (MachineOperand &Op : MI.explicit_uses()) {
    if (!Op.isImm() && !(Op.isReg() && !TRI->isVGPR(*MRI, Op.getReg())))
      continue;

    unsigned I = MI.getOperandNo(&Op);
    if (Desc.OpInfo[I].RegClass == -1 ||
        !TRI->hasVGPRs(TRI->getRegClass(Desc.OpInfo[I].RegClass)))
      continue;

    if (ST.hasSDWAScalar() && ConstantBusCount == 0 && Op.isReg() &&
        TRI->isSGPRReg(*MRI, Op.getReg())) {
      ++ConstantBusCount;
      continue;
    }

    Register VGPR = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    auto Copy = BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
                        TII->get(AMDGPU::V_MOV_B32_e32), VGPR);
    if (Op.isImm())
      Copy.addImm(Op.getImm());
    else if (Op.isReg())
      Copy.addReg(Op.getReg(), Op.isKill() ? RegState::Kill : 0,
                  Op.getSubReg());
    Op.ChangeToRegister(VGPR, false);
  }
}

bool SIPeepholeSDWA::runOnMachineFunction(MachineFunction &MF) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();

  if (!ST.hasSDWA() || skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  TRI = ST.getRegisterInfo();
  TII = ST.getInstrInfo();

  // Find all SDWA operands in MF.
  bool Ret = false;
  for (MachineBasicBlock &MBB : MF) {
    bool Changed = false;
    do {
      // Preprocess the ADD/SUB pairs so they could be SDWA'ed.
      // Look for a possible ADD or SUB that resulted from a previously lowered
      // V_{ADD|SUB}_U64_PSEUDO. The function pseudoOpConvertToVOP2
      // lowers the pair of instructions into e32 form.
      matchSDWAOperands(MBB);
      for (const auto &OperandPair : SDWAOperands) {
        const auto &Operand = OperandPair.second;
        MachineInstr *PotentialMI = Operand->potentialToConvert(TII);
        if (PotentialMI &&
           (PotentialMI->getOpcode() == AMDGPU::V_ADD_CO_U32_e64 ||
            PotentialMI->getOpcode() == AMDGPU::V_SUB_CO_U32_e64))
          pseudoOpConvertToVOP2(*PotentialMI, ST);
      }
      SDWAOperands.clear();

      // Generate potential match list.
      matchSDWAOperands(MBB);

      for (const auto &OperandPair : SDWAOperands) {
        const auto &Operand = OperandPair.second;
        MachineInstr *PotentialMI = Operand->potentialToConvert(TII);
        if (PotentialMI && isConvertibleToSDWA(*PotentialMI, ST)) {
          PotentialMatches[PotentialMI].push_back(Operand.get());
        }
      }

      for (auto &PotentialPair : PotentialMatches) {
        MachineInstr &PotentialMI = *PotentialPair.first;
        convertToSDWA(PotentialMI, PotentialPair.second);
      }

      PotentialMatches.clear();
      SDWAOperands.clear();

      Changed = !ConvertedInstructions.empty();

      if (Changed)
        Ret = true;
      while (!ConvertedInstructions.empty())
        legalizeScalarOperands(*ConvertedInstructions.pop_back_val(), ST);
    } while (Changed);
  }

  return Ret;
}