//===- SIPeepholeSDWA.cpp - Peephole optimization for SDWA instructions ---===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file This pass tries to apply several peephole SDWA patterns.
///
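/// SDWA (Sub-DWord Addressing) variants of VALU instructions can select an
/// individual byte or word of each 32-bit source and destination operand, so
/// shift/mask sequences that merely move sub-dword values around can often be
/// folded into a neighbouring instruction.
///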
/// E.g. original:
///   V_LSHRREV_B32_e32 %0, 16, %1
///   V_ADD_I32_e32 %2, %0, %3
///   V_LSHLREV_B32_e32 %4, 16, %2
///
/// Replace:
///   V_ADD_I32_sdwa %4, %1, %3
///       dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
///
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <memory>
#include <unordered_map>

using namespace llvm;

#define DEBUG_TYPE "si-peephole-sdwa"

STATISTIC(NumSDWAPatternsFound, "Number of SDWA patterns found.");
STATISTIC(NumSDWAInstructionsPeepholed,
          "Number of instructions converted to SDWA.");

namespace {

class SDWAOperand;
class SDWADstOperand;

class SIPeepholeSDWA : public MachineFunctionPass {
public:
  using SDWAOperandsVector = SmallVector<SDWAOperand *, 4>;

private:
  MachineRegisterInfo *MRI;
  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;

  std::unordered_map<MachineInstr *, std::unique_ptr<SDWAOperand>> SDWAOperands;
  std::unordered_map<MachineInstr *, SDWAOperandsVector> PotentialMatches;
  SmallVector<MachineInstr *, 8> ConvertedInstructions;

  Optional<int64_t> foldToImm(const MachineOperand &Op) const;

public:
  static char ID;

  SIPeepholeSDWA() : MachineFunctionPass(ID) {
    initializeSIPeepholeSDWAPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;
  void matchSDWAOperands(MachineBasicBlock &MBB);
  std::unique_ptr<SDWAOperand> matchSDWAOperand(MachineInstr &MI);
  bool isConvertibleToSDWA(MachineInstr &MI, const GCNSubtarget &ST) const;
  void pseudoOpConvertToVOP2(MachineInstr &MI,
                             const GCNSubtarget &ST) const;
  bool convertToSDWA(MachineInstr &MI, const SDWAOperandsVector &SDWAOperands);
  void legalizeScalarOperands(MachineInstr &MI, const GCNSubtarget &ST) const;

  StringRef getPassName() const override { return "SI Peephole SDWA"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

class SDWAOperand {
private:
  MachineOperand *Target; // Operand that would be used in converted instruction
  MachineOperand *Replaced; // Operand that would be replaced by Target
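  // E.g. for the example in the file header, when V_LSHRREV_B32_e32 %0, 16, %1
  // is matched as an SDWA src operand, Target is the use of %1 (to be read
  // with src_sel:WORD_1) and Replaced is %0, whose uses the converted
  // instruction takes over.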

public:
  SDWAOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp)
      : Target(TargetOp), Replaced(ReplacedOp) {
    assert(Target->isReg());
    assert(Replaced->isReg());
  }

  virtual ~SDWAOperand() = default;

  virtual MachineInstr *potentialToConvert(const SIInstrInfo *TII) = 0;
  virtual bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) = 0;

  MachineOperand *getTargetOperand() const { return Target; }
  MachineOperand *getReplacedOperand() const { return Replaced; }
  MachineInstr *getParentInst() const { return Target->getParent(); }

  MachineRegisterInfo *getMRI() const {
    return &getParentInst()->getParent()->getParent()->getRegInfo();
  }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  virtual void print(raw_ostream& OS) const = 0;
  void dump() const { print(dbgs()); }
#endif
};

using namespace AMDGPU::SDWA;

class SDWASrcOperand : public SDWAOperand {
private:
  SdwaSel SrcSel;
  bool Abs;
  bool Neg;
  bool Sext;

public:
  SDWASrcOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp,
                 SdwaSel SrcSel_ = DWORD, bool Abs_ = false, bool Neg_ = false,
                 bool Sext_ = false)
      : SDWAOperand(TargetOp, ReplacedOp),
        SrcSel(SrcSel_), Abs(Abs_), Neg(Neg_), Sext(Sext_) {}

  MachineInstr *potentialToConvert(const SIInstrInfo *TII) override;
  bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;

  SdwaSel getSrcSel() const { return SrcSel; }
  bool getAbs() const { return Abs; }
  bool getNeg() const { return Neg; }
  bool getSext() const { return Sext; }

  uint64_t getSrcMods(const SIInstrInfo *TII,
                      const MachineOperand *SrcOp) const;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream& OS) const override;
#endif
};

class SDWADstOperand : public SDWAOperand {
private:
  SdwaSel DstSel;
  DstUnused DstUn;

public:

  SDWADstOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp,
                 SdwaSel DstSel_ = DWORD, DstUnused DstUn_ = UNUSED_PAD)
    : SDWAOperand(TargetOp, ReplacedOp), DstSel(DstSel_), DstUn(DstUn_) {}

  MachineInstr *potentialToConvert(const SIInstrInfo *TII) override;
  bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;

  SdwaSel getDstSel() const { return DstSel; }
  DstUnused getDstUnused() const { return DstUn; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream& OS) const override;
#endif
};

class SDWADstPreserveOperand : public SDWADstOperand {
private:
  MachineOperand *Preserve;

public:
  SDWADstPreserveOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp,
                         MachineOperand *PreserveOp, SdwaSel DstSel_ = DWORD)
      : SDWADstOperand(TargetOp, ReplacedOp, DstSel_, UNUSED_PRESERVE),
        Preserve(PreserveOp) {}

  bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;

  MachineOperand *getPreservedOperand() const { return Preserve; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream& OS) const override;
#endif
};

} // end anonymous namespace

INITIALIZE_PASS(SIPeepholeSDWA, DEBUG_TYPE, "SI Peephole SDWA", false, false)

char SIPeepholeSDWA::ID = 0;

char &llvm::SIPeepholeSDWAID = SIPeepholeSDWA::ID;

FunctionPass *llvm::createSIPeepholeSDWAPass() {
  return new SIPeepholeSDWA();
}


#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
static raw_ostream& operator<<(raw_ostream &OS, SdwaSel Sel) {
  switch(Sel) {
  case BYTE_0: OS << "BYTE_0"; break;
  case BYTE_1: OS << "BYTE_1"; break;
  case BYTE_2: OS << "BYTE_2"; break;
  case BYTE_3: OS << "BYTE_3"; break;
  case WORD_0: OS << "WORD_0"; break;
  case WORD_1: OS << "WORD_1"; break;
  case DWORD:  OS << "DWORD"; break;
  }
  return OS;
}

static raw_ostream& operator<<(raw_ostream &OS, const DstUnused &Un) {
  switch(Un) {
  case UNUSED_PAD: OS << "UNUSED_PAD"; break;
  case UNUSED_SEXT: OS << "UNUSED_SEXT"; break;
  case UNUSED_PRESERVE: OS << "UNUSED_PRESERVE"; break;
  }
  return OS;
}

static raw_ostream& operator<<(raw_ostream &OS, const SDWAOperand &Operand) {
  Operand.print(OS);
  return OS;
}

LLVM_DUMP_METHOD
void SDWASrcOperand::print(raw_ostream& OS) const {
  OS << "SDWA src: " << *getTargetOperand()
     << " src_sel:" << getSrcSel()
     << " abs:" << getAbs() << " neg:" << getNeg()
     << " sext:" << getSext() << '\n';
}

LLVM_DUMP_METHOD
void SDWADstOperand::print(raw_ostream& OS) const {
  OS << "SDWA dst: " << *getTargetOperand()
     << " dst_sel:" << getDstSel()
     << " dst_unused:" << getDstUnused() << '\n';
}

LLVM_DUMP_METHOD
void SDWADstPreserveOperand::print(raw_ostream& OS) const {
  OS << "SDWA preserve dst: " << *getTargetOperand()
     << " dst_sel:" << getDstSel()
     << " preserve:" << *getPreservedOperand() << '\n';
}

#endif

static void copyRegOperand(MachineOperand &To, const MachineOperand &From) {
  assert(To.isReg() && From.isReg());
  To.setReg(From.getReg());
  To.setSubReg(From.getSubReg());
  To.setIsUndef(From.isUndef());
  if (To.isUse()) {
    To.setIsKill(From.isKill());
  } else {
    To.setIsDead(From.isDead());
  }
}

static bool isSameReg(const MachineOperand &LHS, const MachineOperand &RHS) {
  return LHS.isReg() &&
         RHS.isReg() &&
         LHS.getReg() == RHS.getReg() &&
         LHS.getSubReg() == RHS.getSubReg();
}

static MachineOperand *findSingleRegUse(const MachineOperand *Reg,
                                        const MachineRegisterInfo *MRI) {
  if (!Reg->isReg() || !Reg->isDef())
    return nullptr;

  MachineOperand *ResMO = nullptr;
  for (MachineOperand &UseMO : MRI->use_nodbg_operands(Reg->getReg())) {
    // If there exists a use of a subreg of Reg then return nullptr
    if (!isSameReg(UseMO, *Reg))
      return nullptr;

    // Check that there is only one instruction that uses Reg
    if (!ResMO) {
      ResMO = &UseMO;
    } else if (ResMO->getParent() != UseMO.getParent()) {
      return nullptr;
    }
  }

  return ResMO;
}
findSingleRegDef(const MachineOperand * Reg,const MachineRegisterInfo * MRI)317 static MachineOperand *findSingleRegDef(const MachineOperand *Reg,
318 const MachineRegisterInfo *MRI) {
319 if (!Reg->isReg())
320 return nullptr;
321
322 MachineInstr *DefInstr = MRI->getUniqueVRegDef(Reg->getReg());
323 if (!DefInstr)
324 return nullptr;
325
326 for (auto &DefMO : DefInstr->defs()) {
327 if (DefMO.isReg() && DefMO.getReg() == Reg->getReg())
328 return &DefMO;
329 }
330
331 // Ignore implicit defs.
332 return nullptr;
333 }
334
uint64_t SDWASrcOperand::getSrcMods(const SIInstrInfo *TII,
                                    const MachineOperand *SrcOp) const {
  uint64_t Mods = 0;
  const auto *MI = SrcOp->getParent();
  if (TII->getNamedOperand(*MI, AMDGPU::OpName::src0) == SrcOp) {
    if (auto *Mod = TII->getNamedOperand(*MI, AMDGPU::OpName::src0_modifiers)) {
      Mods = Mod->getImm();
    }
  } else if (TII->getNamedOperand(*MI, AMDGPU::OpName::src1) == SrcOp) {
    if (auto *Mod = TII->getNamedOperand(*MI, AMDGPU::OpName::src1_modifiers)) {
      Mods = Mod->getImm();
    }
  }
  if (Abs || Neg) {
    assert(!Sext &&
           "Float and integer src modifiers can't be set simultaneously");
    Mods |= Abs ? SISrcMods::ABS : 0;
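    // XOR rather than OR for NEG: if the operand already carried a NEG
    // modifier, folding in our negation must toggle it off rather than leave
    // it set.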
    Mods ^= Neg ? SISrcMods::NEG : 0;
  } else if (Sext) {
    Mods |= SISrcMods::SEXT;
  }

  return Mods;
}

MachineInstr *SDWASrcOperand::potentialToConvert(const SIInstrInfo *TII) {
  // For an SDWA src operand, the potential instruction is the one that uses
  // the register defined by the parent instruction
  MachineOperand *PotentialMO = findSingleRegUse(getReplacedOperand(), getMRI());
  if (!PotentialMO)
    return nullptr;

  return PotentialMO->getParent();
}

bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
  // Find the operand in the instruction that matches the replaced operand and
  // replace it with the target operand. Set the corresponding src_sel
  bool IsPreserveSrc = false;
  MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
  MachineOperand *SrcSel = TII->getNamedOperand(MI, AMDGPU::OpName::src0_sel);
  MachineOperand *SrcMods =
      TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers);
  assert(Src && (Src->isReg() || Src->isImm()));
  if (!isSameReg(*Src, *getReplacedOperand())) {
    // If this is not src0 then it could be src1
    Src = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    SrcSel = TII->getNamedOperand(MI, AMDGPU::OpName::src1_sel);
    SrcMods = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);

    if (!Src ||
        !isSameReg(*Src, *getReplacedOperand())) {
      // It's possible this Src is a tied operand for
      // UNUSED_PRESERVE, in which case we can either
      // abandon the peephole attempt, or if legal we can
      // copy the target operand into the tied slot
      // if the preserve operation will effectively cause the same
      // result by overwriting the rest of the dst.
      MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
      MachineOperand *DstUnused =
        TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);

      if (Dst &&
          DstUnused->getImm() == AMDGPU::SDWA::DstUnused::UNUSED_PRESERVE) {
        // This will work if the tied src is accessing WORD_0, and the dst is
        // writing WORD_1. Modifiers don't matter because all the bits that
        // would be impacted are being overwritten by the dst.
        // Any other case will not work.
        SdwaSel DstSel = static_cast<SdwaSel>(
            TII->getNamedImmOperand(MI, AMDGPU::OpName::dst_sel));
        if (DstSel == AMDGPU::SDWA::SdwaSel::WORD_1 &&
            getSrcSel() == AMDGPU::SDWA::SdwaSel::WORD_0) {
          IsPreserveSrc = true;
          auto DstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                                   AMDGPU::OpName::vdst);
          auto TiedIdx = MI.findTiedOperandIdx(DstIdx);
          Src = &MI.getOperand(TiedIdx);
          SrcSel = nullptr;
          SrcMods = nullptr;
        } else {
          // Not legal to convert this src
          return false;
        }
      }
    }
    assert(Src && Src->isReg());

    if ((MI.getOpcode() == AMDGPU::V_MAC_F16_sdwa ||
         MI.getOpcode() == AMDGPU::V_MAC_F32_sdwa) &&
        !isSameReg(*Src, *getReplacedOperand())) {
      // In case of v_mac_f16/32_sdwa this pass can try to apply src operand to
      // src2. This is not allowed.
      return false;
    }

    assert(isSameReg(*Src, *getReplacedOperand()) &&
           (IsPreserveSrc || (SrcSel && SrcMods)));
  }
  copyRegOperand(*Src, *getTargetOperand());
  if (!IsPreserveSrc) {
    SrcSel->setImm(getSrcSel());
    SrcMods->setImm(getSrcMods(TII, Src));
  }
  getTargetOperand()->setIsKill(false);
  return true;
}

MachineInstr *SDWADstOperand::potentialToConvert(const SIInstrInfo *TII) {
  // For an SDWA dst operand, the potential instruction is the one that defines
  // the register that this operand uses
  MachineRegisterInfo *MRI = getMRI();
  MachineInstr *ParentMI = getParentInst();

  MachineOperand *PotentialMO = findSingleRegDef(getReplacedOperand(), MRI);
  if (!PotentialMO)
    return nullptr;

  // Check that ParentMI is the only instruction that uses the replaced register
  for (MachineInstr &UseInst : MRI->use_nodbg_instructions(PotentialMO->getReg())) {
    if (&UseInst != ParentMI)
      return nullptr;
  }

  return PotentialMO->getParent();
}

bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
  // Replace vdst operand in MI with target operand. Set dst_sel and dst_unused

  if ((MI.getOpcode() == AMDGPU::V_MAC_F16_sdwa ||
       MI.getOpcode() == AMDGPU::V_MAC_F32_sdwa) &&
      getDstSel() != AMDGPU::SDWA::DWORD) {
    // v_mac_f16/32_sdwa allow dst_sel to be equal only to DWORD
    return false;
  }

  MachineOperand *Operand = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
  assert(Operand &&
         Operand->isReg() &&
         isSameReg(*Operand, *getReplacedOperand()));
  copyRegOperand(*Operand, *getTargetOperand());
  MachineOperand *DstSel = TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel);
  assert(DstSel);
  DstSel->setImm(getDstSel());
  MachineOperand *DstUnused = TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);
  assert(DstUnused);
  DstUnused->setImm(getDstUnused());

  // Remove original instruction because it would conflict with our new
  // instruction by register definition
  getParentInst()->eraseFromParent();
  return true;
}

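// Illustrative shape of the UNUSED_PRESERVE conversion (cf. the v_or_b32
// pattern in matchSDWAOperand):
//   v_add_f16_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD ...
//   v_add_f16_e32  v3, v1, v2
//   v_or_b32_e32   v4, v0, v3
// becomes a single SDWA instruction writing v4 with
// dst_unused:UNUSED_PRESERVE and v3 tied to vdst as an implicit use.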
bool SDWADstPreserveOperand::convertToSDWA(MachineInstr &MI,
                                           const SIInstrInfo *TII) {
  // MI should be moved right before v_or_b32.
  // For this we should clear all kill flags on uses of MI src-operands or else
  // we can encounter a problem with the use of a killed operand.
  for (MachineOperand &MO : MI.uses()) {
    if (!MO.isReg())
      continue;
    getMRI()->clearKillFlags(MO.getReg());
  }

  // Move MI before v_or_b32
  auto MBB = MI.getParent();
  MBB->remove(&MI);
  MBB->insert(getParentInst(), &MI);

  // Add implicit use of preserved register
  MachineInstrBuilder MIB(*MBB->getParent(), MI);
  MIB.addReg(getPreservedOperand()->getReg(),
             RegState::ImplicitKill,
             getPreservedOperand()->getSubReg());

  // Tie dst to implicit use
  MI.tieOperands(AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdst),
                 MI.getNumOperands() - 1);

  // Convert MI as any other SDWADstOperand and remove v_or_b32
  return SDWADstOperand::convertToSDWA(MI, TII);
}

Optional<int64_t> SIPeepholeSDWA::foldToImm(const MachineOperand &Op) const {
  if (Op.isImm()) {
    return Op.getImm();
  }

  // If this is not an immediate then it can be a copy of an immediate value,
  // e.g.:
  // %1 = S_MOV_B32 255;
  if (Op.isReg()) {
    for (const MachineOperand &Def : MRI->def_operands(Op.getReg())) {
      if (!isSameReg(Op, Def))
        continue;

      const MachineInstr *DefInst = Def.getParent();
      if (!TII->isFoldableCopy(*DefInst))
        return None;

      const MachineOperand &Copied = DefInst->getOperand(1);
      if (!Copied.isImm())
        return None;

      return Copied.getImm();
    }
  }

  return None;
}

std::unique_ptr<SDWAOperand>
SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
  unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  case AMDGPU::V_LSHRREV_B32_e32:
  case AMDGPU::V_ASHRREV_I32_e32:
  case AMDGPU::V_LSHLREV_B32_e32:
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_LSHLREV_B32_e64: {
    // from: v_lshrrev_b32_e32 v1, 16/24, v0
    // to SDWA src:v0 src_sel:WORD_1/BYTE_3

    // from: v_ashrrev_i32_e32 v1, 16/24, v0
    // to SDWA src:v0 src_sel:WORD_1/BYTE_3 sext:1

    // from: v_lshlrev_b32_e32 v1, 16/24, v0
    // to SDWA dst:v1 dst_sel:WORD_1/BYTE_3 dst_unused:UNUSED_PAD
    MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    auto Imm = foldToImm(*Src0);
    if (!Imm)
      break;

    if (*Imm != 16 && *Imm != 24)
      break;

    MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
    if (TRI->isPhysicalRegister(Src1->getReg()) ||
        TRI->isPhysicalRegister(Dst->getReg()))
      break;

    if (Opcode == AMDGPU::V_LSHLREV_B32_e32 ||
        Opcode == AMDGPU::V_LSHLREV_B32_e64) {
      return make_unique<SDWADstOperand>(
          Dst, Src1, *Imm == 16 ? WORD_1 : BYTE_3, UNUSED_PAD);
    } else {
      return make_unique<SDWASrcOperand>(
          Src1, Dst, *Imm == 16 ? WORD_1 : BYTE_3, false, false,
          Opcode != AMDGPU::V_LSHRREV_B32_e32 &&
          Opcode != AMDGPU::V_LSHRREV_B32_e64);
    }
    break;
  }

  case AMDGPU::V_LSHRREV_B16_e32:
  case AMDGPU::V_ASHRREV_I16_e32:
  case AMDGPU::V_LSHLREV_B16_e32:
  case AMDGPU::V_LSHRREV_B16_e64:
  case AMDGPU::V_ASHRREV_I16_e64:
  case AMDGPU::V_LSHLREV_B16_e64: {
    // from: v_lshrrev_b16_e32 v1, 8, v0
    // to SDWA src:v0 src_sel:BYTE_1

    // from: v_ashrrev_i16_e32 v1, 8, v0
    // to SDWA src:v0 src_sel:BYTE_1 sext:1

    // from: v_lshlrev_b16_e32 v1, 8, v0
    // to SDWA dst:v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD
    MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    auto Imm = foldToImm(*Src0);
    if (!Imm || *Imm != 8)
      break;

    MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);

    if (TRI->isPhysicalRegister(Src1->getReg()) ||
        TRI->isPhysicalRegister(Dst->getReg()))
      break;

    if (Opcode == AMDGPU::V_LSHLREV_B16_e32 ||
        Opcode == AMDGPU::V_LSHLREV_B16_e64) {
      return make_unique<SDWADstOperand>(Dst, Src1, BYTE_1, UNUSED_PAD);
    } else {
      return make_unique<SDWASrcOperand>(
          Src1, Dst, BYTE_1, false, false,
          Opcode != AMDGPU::V_LSHRREV_B16_e32 &&
          Opcode != AMDGPU::V_LSHRREV_B16_e64);
    }
    break;
  }

  case AMDGPU::V_BFE_I32:
  case AMDGPU::V_BFE_U32: {
    // e.g.:
    // from: v_bfe_u32 v1, v0, 8, 8
    // to SDWA src:v0 src_sel:BYTE_1

    // offset | width | src_sel
    // ------------------------
    // 0      | 8     | BYTE_0
    // 0      | 16    | WORD_0
    // 0      | 32    | DWORD ?
    // 8      | 8     | BYTE_1
    // 16     | 8     | BYTE_2
    // 16     | 16    | WORD_1
    // 24     | 8     | BYTE_3

    MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    auto Offset = foldToImm(*Src1);
    if (!Offset)
      break;

    MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
    auto Width = foldToImm(*Src2);
    if (!Width)
      break;

    SdwaSel SrcSel = DWORD;

    if (*Offset == 0 && *Width == 8)
      SrcSel = BYTE_0;
    else if (*Offset == 0 && *Width == 16)
      SrcSel = WORD_0;
    else if (*Offset == 0 && *Width == 32)
      SrcSel = DWORD;
    else if (*Offset == 8 && *Width == 8)
      SrcSel = BYTE_1;
    else if (*Offset == 16 && *Width == 8)
      SrcSel = BYTE_2;
    else if (*Offset == 16 && *Width == 16)
      SrcSel = WORD_1;
    else if (*Offset == 24 && *Width == 8)
      SrcSel = BYTE_3;
    else
      break;

    MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);

    if (TRI->isPhysicalRegister(Src0->getReg()) ||
        TRI->isPhysicalRegister(Dst->getReg()))
      break;

    return make_unique<SDWASrcOperand>(
        Src0, Dst, SrcSel, false, false, Opcode != AMDGPU::V_BFE_U32);
  }

  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::V_AND_B32_e64: {
    // e.g.:
    // from: v_and_b32_e32 v1, 0x0000ffff/0x000000ff, v0
    // to SDWA src:v0 src_sel:WORD_0/BYTE_0

    MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    auto ValSrc = Src1;
    auto Imm = foldToImm(*Src0);

    if (!Imm) {
      Imm = foldToImm(*Src1);
      ValSrc = Src0;
    }

    if (!Imm || (*Imm != 0x0000ffff && *Imm != 0x000000ff))
      break;

    MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);

    if (TRI->isPhysicalRegister(ValSrc->getReg()) ||
        TRI->isPhysicalRegister(Dst->getReg()))
      break;

    return make_unique<SDWASrcOperand>(
        ValSrc, Dst, *Imm == 0x0000ffff ? WORD_0 : BYTE_0);
  }

  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::V_OR_B32_e64: {
    // Patterns for dst_unused:UNUSED_PRESERVE.
    // e.g., from:
    // v_add_f16_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD
    //                           src1_sel:WORD_1 src2_sel:WORD_1
    // v_add_f16_e32 v3, v1, v2
    // v_or_b32_e32 v4, v0, v3
    // to SDWA preserve dst:v4 dst_sel:WORD_1 dst_unused:UNUSED_PRESERVE preserve:v3

    // Check if one of the operands of v_or_b32 is an SDWA instruction
    using CheckRetType = Optional<std::pair<MachineOperand *, MachineOperand *>>;
    auto CheckOROperandsForSDWA =
      [&](const MachineOperand *Op1, const MachineOperand *Op2) -> CheckRetType {
        if (!Op1 || !Op1->isReg() || !Op2 || !Op2->isReg())
          return CheckRetType(None);

        MachineOperand *Op1Def = findSingleRegDef(Op1, MRI);
        if (!Op1Def)
          return CheckRetType(None);

        MachineInstr *Op1Inst = Op1Def->getParent();
        if (!TII->isSDWA(*Op1Inst))
          return CheckRetType(None);

        MachineOperand *Op2Def = findSingleRegDef(Op2, MRI);
        if (!Op2Def)
          return CheckRetType(None);

        return CheckRetType(std::make_pair(Op1Def, Op2Def));
      };

    MachineOperand *OrSDWA = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    MachineOperand *OrOther = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    assert(OrSDWA && OrOther);
    auto Res = CheckOROperandsForSDWA(OrSDWA, OrOther);
    if (!Res) {
      OrSDWA = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
      OrOther = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
      assert(OrSDWA && OrOther);
      Res = CheckOROperandsForSDWA(OrSDWA, OrOther);
      if (!Res)
        break;
    }

    MachineOperand *OrSDWADef = Res->first;
    MachineOperand *OrOtherDef = Res->second;
    assert(OrSDWADef && OrOtherDef);

    MachineInstr *SDWAInst = OrSDWADef->getParent();
    MachineInstr *OtherInst = OrOtherDef->getParent();

    // Check that OtherInst is actually bitwise compatible with SDWAInst, i.e.
    // that their destination patterns don't overlap. A compatible instruction
    // can be either a regular instruction with compatible bitness or an SDWA
    // instruction with a correct dst_sel
    // SDWAInst | OtherInst bitness / OtherInst dst_sel
    // -----------------------------------------------------
    // DWORD    | no                    / no
    // WORD_0   | no                    / BYTE_2/3, WORD_1
    // WORD_1   | 8/16-bit instructions / BYTE_0/1, WORD_0
    // BYTE_0   | no                    / BYTE_1/2/3, WORD_1
    // BYTE_1   | 8-bit                 / BYTE_0/2/3, WORD_1
    // BYTE_2   | 8/16-bit              / BYTE_0/1/3, WORD_0
    // BYTE_3   | 8/16/24-bit           / BYTE_0/1/2, WORD_0
    // E.g. if SDWAInst is v_add_f16_sdwa dst_sel:WORD_1 then v_add_f16 is OK
    // but v_add_f32 is not.

    // TODO: add support for non-SDWA instructions as OtherInst.
    // For now this only works with SDWA instructions. For regular instructions
    // there is no way to determine if the instruction writes only 8/16/24-bit
    // out of the full register size, and all registers are at least 32 bits
    // wide.
    if (!TII->isSDWA(*OtherInst))
      break;

    SdwaSel DstSel = static_cast<SdwaSel>(
        TII->getNamedImmOperand(*SDWAInst, AMDGPU::OpName::dst_sel));
    SdwaSel OtherDstSel = static_cast<SdwaSel>(
        TII->getNamedImmOperand(*OtherInst, AMDGPU::OpName::dst_sel));

    bool DstSelAgree = false;
    switch (DstSel) {
    case WORD_0: DstSelAgree = ((OtherDstSel == BYTE_2) ||
                                (OtherDstSel == BYTE_3) ||
                                (OtherDstSel == WORD_1));
      break;
    case WORD_1: DstSelAgree = ((OtherDstSel == BYTE_0) ||
                                (OtherDstSel == BYTE_1) ||
                                (OtherDstSel == WORD_0));
      break;
    case BYTE_0: DstSelAgree = ((OtherDstSel == BYTE_1) ||
                                (OtherDstSel == BYTE_2) ||
                                (OtherDstSel == BYTE_3) ||
                                (OtherDstSel == WORD_1));
      break;
    case BYTE_1: DstSelAgree = ((OtherDstSel == BYTE_0) ||
                                (OtherDstSel == BYTE_2) ||
                                (OtherDstSel == BYTE_3) ||
                                (OtherDstSel == WORD_1));
      break;
    case BYTE_2: DstSelAgree = ((OtherDstSel == BYTE_0) ||
                                (OtherDstSel == BYTE_1) ||
                                (OtherDstSel == BYTE_3) ||
                                (OtherDstSel == WORD_0));
      break;
    case BYTE_3: DstSelAgree = ((OtherDstSel == BYTE_0) ||
                                (OtherDstSel == BYTE_1) ||
                                (OtherDstSel == BYTE_2) ||
                                (OtherDstSel == WORD_0));
      break;
    default: DstSelAgree = false;
    }

    if (!DstSelAgree)
      break;

    // Also OtherInst dst_unused should be UNUSED_PAD
    DstUnused OtherDstUnused = static_cast<DstUnused>(
        TII->getNamedImmOperand(*OtherInst, AMDGPU::OpName::dst_unused));
    if (OtherDstUnused != DstUnused::UNUSED_PAD)
      break;

    // Create DstPreserveOperand
    MachineOperand *OrDst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
    assert(OrDst && OrDst->isReg());

    return make_unique<SDWADstPreserveOperand>(
        OrDst, OrSDWADef, OrOtherDef, DstSel);

  }
  }

  return std::unique_ptr<SDWAOperand>(nullptr);
}

void SIPeepholeSDWA::matchSDWAOperands(MachineBasicBlock &MBB) {
  for (MachineInstr &MI : MBB) {
    if (auto Operand = matchSDWAOperand(MI)) {
      LLVM_DEBUG(dbgs() << "Match: " << MI << "To: " << *Operand << '\n');
      SDWAOperands[&MI] = std::move(Operand);
      ++NumSDWAPatternsFound;
    }
  }
}

// Convert the V_ADDC_U32_e64 into V_ADDC_U32_e32, and
// V_ADD_I32_e64 into V_ADD_I32_e32. This allows isConvertibleToSDWA
// to perform its transformation of V_ADD_I32_e32 into V_ADD_I32_sdwa.
//
// We are transforming from a VOP3 into a VOP2 form of the instruction.
//   %19:vgpr_32 = V_AND_B32_e32 255,
//       killed %16:vgpr_32, implicit $exec
//   %47:vgpr_32, %49:sreg_64_xexec = V_ADD_I32_e64
//       %26.sub0:vreg_64, %19:vgpr_32, implicit $exec
//   %48:vgpr_32, dead %50:sreg_64_xexec = V_ADDC_U32_e64
//       %26.sub1:vreg_64, %54:vgpr_32, killed %49:sreg_64_xexec, implicit $exec
//
// becomes
//   %47:vgpr_32 = V_ADD_I32_sdwa
//       0, %26.sub0:vreg_64, 0, killed %16:vgpr_32, 0, 6, 0, 6, 0,
//       implicit-def $vcc, implicit $exec
//   %48:vgpr_32 = V_ADDC_U32_e32
//       0, %26.sub1:vreg_64, implicit-def $vcc, implicit $vcc, implicit $exec
void SIPeepholeSDWA::pseudoOpConvertToVOP2(MachineInstr &MI,
                                           const GCNSubtarget &ST) const {
  int Opc = MI.getOpcode();
  assert((Opc == AMDGPU::V_ADD_I32_e64 || Opc == AMDGPU::V_SUB_I32_e64) &&
         "Currently only handles V_ADD_I32_e64 or V_SUB_I32_e64");

  // Can the candidate MI be shrunk?
  if (!TII->canShrink(MI, *MRI))
    return;
  Opc = AMDGPU::getVOPe32(Opc);
  // Find the related ADD instruction.
  const MachineOperand *Sdst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
  if (!Sdst)
    return;
  MachineOperand *NextOp = findSingleRegUse(Sdst, MRI);
  if (!NextOp)
    return;
  MachineInstr &MISucc = *NextOp->getParent();
  // Can the successor be shrunk?
  if (!TII->canShrink(MISucc, *MRI))
    return;
  int SuccOpc = AMDGPU::getVOPe32(MISucc.getOpcode());
  // Make sure the carry in/out are subsequently unused.
  MachineOperand *CarryIn = TII->getNamedOperand(MISucc, AMDGPU::OpName::src2);
  if (!CarryIn)
    return;
  MachineOperand *CarryOut = TII->getNamedOperand(MISucc, AMDGPU::OpName::sdst);
  if (!CarryOut)
    return;
  if (!MRI->hasOneUse(CarryIn->getReg()) || !MRI->use_empty(CarryOut->getReg()))
    return;
  // Make sure VCC or its subregs are dead before MI.
  MachineBasicBlock &MBB = *MI.getParent();
  auto Liveness = MBB.computeRegisterLiveness(TRI, AMDGPU::VCC, MI, 25);
  if (Liveness != MachineBasicBlock::LQR_Dead)
    return;
  // Check if VCC is referenced in range of (MI,MISucc].
  for (auto I = std::next(MI.getIterator()), E = MISucc.getIterator();
       I != E; ++I) {
    if (I->modifiesRegister(AMDGPU::VCC, TRI))
      return;
  }
  // Make the two new e32 instruction variants.
  // Replace MI with V_{SUB|ADD}_I32_e32
  auto NewMI = BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(Opc));
  NewMI.add(*TII->getNamedOperand(MI, AMDGPU::OpName::vdst));
  NewMI.add(*TII->getNamedOperand(MI, AMDGPU::OpName::src0));
  NewMI.add(*TII->getNamedOperand(MI, AMDGPU::OpName::src1));
  MI.eraseFromParent();
  // Replace MISucc with V_{SUBB|ADDC}_U32_e32
  auto NewInst = BuildMI(MBB, MISucc, MISucc.getDebugLoc(), TII->get(SuccOpc));
  NewInst.add(*TII->getNamedOperand(MISucc, AMDGPU::OpName::vdst));
  NewInst.add(*TII->getNamedOperand(MISucc, AMDGPU::OpName::src0));
  NewInst.add(*TII->getNamedOperand(MISucc, AMDGPU::OpName::src1));
  MISucc.eraseFromParent();
}

bool SIPeepholeSDWA::isConvertibleToSDWA(MachineInstr &MI,
                                         const GCNSubtarget &ST) const {
  // Check if this is already an SDWA instruction
  unsigned Opc = MI.getOpcode();
  if (TII->isSDWA(Opc))
    return true;

  // Check if this instruction has an opcode that supports SDWA
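  // (e.g. a VOP3 encoding such as V_ADD_F32_e64 has no SDWA form of its own,
  // but its VOP2 counterpart V_ADD_F32_e32 does, so the shrunk opcode is
  // tried as well).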
  if (AMDGPU::getSDWAOp(Opc) == -1)
    Opc = AMDGPU::getVOPe32(Opc);

  if (AMDGPU::getSDWAOp(Opc) == -1)
    return false;

  if (!ST.hasSDWAOmod() && TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
    return false;

  if (TII->isVOPC(Opc)) {
    if (!ST.hasSDWASdst()) {
      const MachineOperand *SDst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
      if (SDst && SDst->getReg() != AMDGPU::VCC)
        return false;
    }

    if (!ST.hasSDWAOutModsVOPC() &&
        (TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) ||
         TII->hasModifiersSet(MI, AMDGPU::OpName::omod)))
      return false;

  } else if (TII->getNamedOperand(MI, AMDGPU::OpName::sdst) ||
             !TII->getNamedOperand(MI, AMDGPU::OpName::vdst)) {
    return false;
  }

  if (!ST.hasSDWAMac() && (Opc == AMDGPU::V_MAC_F16_e32 ||
                           Opc == AMDGPU::V_MAC_F32_e32))
    return false;

  // FIXME: has SDWA but requires handling of implicit VCC use
  if (Opc == AMDGPU::V_CNDMASK_B32_e32)
    return false;

  return true;
}

bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
                                   const SDWAOperandsVector &SDWAOperands) {

  LLVM_DEBUG(dbgs() << "Convert instruction:" << MI);

  // Convert to sdwa
  int SDWAOpcode;
  unsigned Opcode = MI.getOpcode();
  if (TII->isSDWA(Opcode)) {
    SDWAOpcode = Opcode;
  } else {
    SDWAOpcode = AMDGPU::getSDWAOp(Opcode);
    if (SDWAOpcode == -1)
      SDWAOpcode = AMDGPU::getSDWAOp(AMDGPU::getVOPe32(Opcode));
  }
  assert(SDWAOpcode != -1);

  const MCInstrDesc &SDWADesc = TII->get(SDWAOpcode);

  // Create SDWA version of instruction MI and initialize its operands
  MachineInstrBuilder SDWAInst =
      BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), SDWADesc);

  // Copy dst; if it is present in the original instruction it should also be
  // present in the SDWA one
  MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
  if (Dst) {
    assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::vdst) != -1);
    SDWAInst.add(*Dst);
  } else if ((Dst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst))) {
    assert(Dst &&
           AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::sdst) != -1);
    SDWAInst.add(*Dst);
  } else {
    assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::sdst) != -1);
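    // A VOPC instruction with no explicit destination writes its result to
    // VCC, so make that explicit on the SDWA form.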
    SDWAInst.addReg(AMDGPU::VCC, RegState::Define);
  }

  // Copy src0 and initialize src0_modifiers. All sdwa instructions have src0
  // and src0_modifiers (except for v_nop_sdwa, but it can't get here)
  MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
  assert(
    Src0 &&
    AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src0) != -1 &&
    AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src0_modifiers) != -1);
  if (auto *Mod = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers))
    SDWAInst.addImm(Mod->getImm());
  else
    SDWAInst.addImm(0);
  SDWAInst.add(*Src0);

  // Copy src1 if present, initialize src1_modifiers.
  MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
  if (Src1) {
    assert(
      AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src1) != -1 &&
      AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src1_modifiers) != -1);
    if (auto *Mod = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers))
      SDWAInst.addImm(Mod->getImm());
    else
      SDWAInst.addImm(0);
    SDWAInst.add(*Src1);
  }

  if (SDWAOpcode == AMDGPU::V_MAC_F16_sdwa ||
      SDWAOpcode == AMDGPU::V_MAC_F32_sdwa) {
    // v_mac_f16/32 has an additional src2 operand tied to vdst
    MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
    assert(Src2);
    SDWAInst.add(*Src2);
  }

  // Copy clamp if present, initialize otherwise
  assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::clamp) != -1);
  MachineOperand *Clamp = TII->getNamedOperand(MI, AMDGPU::OpName::clamp);
  if (Clamp) {
    SDWAInst.add(*Clamp);
  } else {
    SDWAInst.addImm(0);
  }

  // Copy omod if present, initialize otherwise if needed
  if (AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::omod) != -1) {
    MachineOperand *OMod = TII->getNamedOperand(MI, AMDGPU::OpName::omod);
    if (OMod) {
      SDWAInst.add(*OMod);
    } else {
      SDWAInst.addImm(0);
    }
  }

  // Copy dst_sel if present, initialize otherwise if needed
  if (AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::dst_sel) != -1) {
    MachineOperand *DstSel = TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel);
    if (DstSel) {
      SDWAInst.add(*DstSel);
    } else {
      SDWAInst.addImm(AMDGPU::SDWA::SdwaSel::DWORD);
    }
  }

  // Copy dst_unused if present, initialize otherwise if needed
  if (AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::dst_unused) != -1) {
    MachineOperand *DstUnused = TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);
    if (DstUnused) {
      SDWAInst.add(*DstUnused);
    } else {
      SDWAInst.addImm(AMDGPU::SDWA::DstUnused::UNUSED_PAD);
    }
  }

  // Copy src0_sel if present, initialize otherwise
  assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src0_sel) != -1);
  MachineOperand *Src0Sel = TII->getNamedOperand(MI, AMDGPU::OpName::src0_sel);
  if (Src0Sel) {
    SDWAInst.add(*Src0Sel);
  } else {
    SDWAInst.addImm(AMDGPU::SDWA::SdwaSel::DWORD);
  }

  // Copy src1_sel if present, initialize otherwise if needed
  if (Src1) {
    assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src1_sel) != -1);
    MachineOperand *Src1Sel = TII->getNamedOperand(MI, AMDGPU::OpName::src1_sel);
    if (Src1Sel) {
      SDWAInst.add(*Src1Sel);
    } else {
      SDWAInst.addImm(AMDGPU::SDWA::SdwaSel::DWORD);
    }
  }

  // Check for a preserved register that needs to be copied.
  auto DstUnused = TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);
  if (DstUnused &&
      DstUnused->getImm() == AMDGPU::SDWA::DstUnused::UNUSED_PRESERVE) {
    // We expect, if we are here, that the instruction was already in its SDWA
    // form, with a tied operand.
    assert(Dst && Dst->isTied());
    assert(Opcode == static_cast<unsigned int>(SDWAOpcode));
    // We also expect a vdst, since sdst can't preserve.
    auto PreserveDstIdx = AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::vdst);
    assert(PreserveDstIdx != -1);

    auto TiedIdx = MI.findTiedOperandIdx(PreserveDstIdx);
    auto Tied = MI.getOperand(TiedIdx);

    SDWAInst.add(Tied);
    SDWAInst->tieOperands(PreserveDstIdx, SDWAInst->getNumOperands() - 1);
  }

  // Apply all sdwa operand patterns.
  bool Converted = false;
  for (auto &Operand : SDWAOperands) {
    LLVM_DEBUG(dbgs() << *SDWAInst << "\nOperand: " << *Operand);
    // There should be no intersection between SDWA operands and potential MIs
    // e.g.:
    // v_and_b32 v0, 0xff, v1 -> src:v1 sel:BYTE_0
    // v_and_b32 v2, 0xff, v0 -> src:v0 sel:BYTE_0
    // v_add_u32 v3, v4, v2
    //
    // In that example it is possible that we would fold the 2nd instruction
    // into the 3rd (v_add_u32_sdwa) and then try to fold the 1st instruction
    // into the 2nd (that was already destroyed). So if SDWAOperand is also a
    // potential MI then do not apply it.
    if (PotentialMatches.count(Operand->getParentInst()) == 0)
      Converted |= Operand->convertToSDWA(*SDWAInst, TII);
  }
  if (Converted) {
    ConvertedInstructions.push_back(SDWAInst);
  } else {
    SDWAInst->eraseFromParent();
    return false;
  }

  LLVM_DEBUG(dbgs() << "\nInto:" << *SDWAInst << '\n');
  ++NumSDWAInstructionsPeepholed;

  MI.eraseFromParent();
  return true;
}

// If an instruction was converted to SDWA it should not have immediates or
// SGPR operands (one SGPR operand is allowed on GFX9). Copy its scalar
// operands into VGPRs.
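// Illustrative shape of the fix-up (register names hypothetical):
//   V_ADD_F32_sdwa %dst, %sgpr0, %vgpr1 ...
// becomes
//   %tmp:vgpr_32 = V_MOV_B32_e32 %sgpr0
//   V_ADD_F32_sdwa %dst, %tmp, %vgpr1 ...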
void SIPeepholeSDWA::legalizeScalarOperands(MachineInstr &MI,
                                            const GCNSubtarget &ST) const {
  const MCInstrDesc &Desc = TII->get(MI.getOpcode());
  unsigned ConstantBusCount = 0;
  for (MachineOperand &Op : MI.explicit_uses()) {
    if (!Op.isImm() && !(Op.isReg() && !TRI->isVGPR(*MRI, Op.getReg())))
      continue;

    unsigned I = MI.getOperandNo(&Op);
    if (Desc.OpInfo[I].RegClass == -1 ||
        !TRI->hasVGPRs(TRI->getRegClass(Desc.OpInfo[I].RegClass)))
      continue;

    if (ST.hasSDWAScalar() && ConstantBusCount == 0 && Op.isReg() &&
        TRI->isSGPRReg(*MRI, Op.getReg())) {
      ++ConstantBusCount;
      continue;
    }

    unsigned VGPR = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    auto Copy = BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
                        TII->get(AMDGPU::V_MOV_B32_e32), VGPR);
    if (Op.isImm())
      Copy.addImm(Op.getImm());
    else if (Op.isReg())
      Copy.addReg(Op.getReg(), Op.isKill() ? RegState::Kill : 0,
                  Op.getSubReg());
    Op.ChangeToRegister(VGPR, false);
  }
}

bool SIPeepholeSDWA::runOnMachineFunction(MachineFunction &MF) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();

  if (!ST.hasSDWA() || skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  TRI = ST.getRegisterInfo();
  TII = ST.getInstrInfo();

  // Find all SDWA operands in MF.
  bool Ret = false;
  for (MachineBasicBlock &MBB : MF) {
    bool Changed = false;
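    // Iterate to a fixpoint: converting an instruction to SDWA can expose new
    // matchable patterns in the same block.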
    do {
      // Preprocess the ADD/SUB pairs so they could be SDWA'ed.
      // Look for a possible ADD or SUB that resulted from a previously lowered
      // V_{ADD|SUB}_U64_PSEUDO. The function pseudoOpConvertToVOP2
      // lowers the pair of instructions into e32 form.
      matchSDWAOperands(MBB);
      for (const auto &OperandPair : SDWAOperands) {
        const auto &Operand = OperandPair.second;
        MachineInstr *PotentialMI = Operand->potentialToConvert(TII);
        if (PotentialMI &&
            (PotentialMI->getOpcode() == AMDGPU::V_ADD_I32_e64 ||
             PotentialMI->getOpcode() == AMDGPU::V_SUB_I32_e64))
          pseudoOpConvertToVOP2(*PotentialMI, ST);
      }
      SDWAOperands.clear();

      // Generate potential match list.
      matchSDWAOperands(MBB);

      for (const auto &OperandPair : SDWAOperands) {
        const auto &Operand = OperandPair.second;
        MachineInstr *PotentialMI = Operand->potentialToConvert(TII);
        if (PotentialMI && isConvertibleToSDWA(*PotentialMI, ST)) {
          PotentialMatches[PotentialMI].push_back(Operand.get());
        }
      }

      for (auto &PotentialPair : PotentialMatches) {
        MachineInstr &PotentialMI = *PotentialPair.first;
        convertToSDWA(PotentialMI, PotentialPair.second);
      }

      PotentialMatches.clear();
      SDWAOperands.clear();

      Changed = !ConvertedInstructions.empty();

      if (Changed)
        Ret = true;
      while (!ConvertedInstructions.empty())
        legalizeScalarOperands(*ConvertedInstructions.pop_back_val(), ST);
    } while (Changed);
  }

  return Ret;
}