1 //===-- GCNHazardRecognizers.cpp - GCN Hazard Recognizer Impls ------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements hazard recognizers for scheduling on GCN processors.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "GCNHazardRecognizer.h"
14 #include "GCNSubtarget.h"
15 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
16 #include "SIMachineFunctionInfo.h"
17 #include "llvm/CodeGen/MachineFunction.h"
18 #include "llvm/CodeGen/ScheduleDAG.h"
19 #include "llvm/Support/TargetParser.h"
20
21 using namespace llvm;
22
23 namespace {
24
25 struct MFMAPaddingRatioParser : public cl::parser<unsigned> {
26 MFMAPaddingRatioParser(cl::Option &O) : cl::parser<unsigned>(O) {}
27
28 bool parse(cl::Option &O, StringRef ArgName, StringRef Arg, unsigned &Value) {
29 if (Arg.getAsInteger(0, Value))
30 return O.error("'" + Arg + "' value invalid for uint argument!");
31
32 if (Value > 100)
33 return O.error("'" + Arg + "' value must be in the range [0, 100]!");
34
35 return false;
36 }
37 };
38
39 } // end anonymous namespace
40
41 static cl::opt<unsigned, false, MFMAPaddingRatioParser>
42 MFMAPaddingRatio("amdgpu-mfma-padding-ratio", cl::init(0), cl::Hidden,
43 cl::desc("Fill a percentage of the latency between "
44 "neighboring MFMA with s_nops."));
45
46 //===----------------------------------------------------------------------===//
47 // Hazard Recognizer Implementation
48 //===----------------------------------------------------------------------===//
49
50 static bool shouldRunLdsBranchVmemWARHazardFixup(const MachineFunction &MF,
51 const GCNSubtarget &ST);
52
53 GCNHazardRecognizer::GCNHazardRecognizer(const MachineFunction &MF) :
54 IsHazardRecognizerMode(false),
55 CurrCycleInstr(nullptr),
56 MF(MF),
57 ST(MF.getSubtarget<GCNSubtarget>()),
58 TII(*ST.getInstrInfo()),
59 TRI(TII.getRegisterInfo()),
60 ClauseUses(TRI.getNumRegUnits()),
61 ClauseDefs(TRI.getNumRegUnits()) {
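// Functions that touch AGPRs may contain MFMA hazards, which need a much
// deeper look-ahead window than the other hazard kinds; 5 wait states is
// sufficient otherwise.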
62 MaxLookAhead = MF.getRegInfo().isPhysRegUsed(AMDGPU::AGPR0) ? 19 : 5;
63 TSchedModel.init(&ST);
64 RunLdsBranchVmemWARHazardFixup = shouldRunLdsBranchVmemWARHazardFixup(MF, ST);
65 }
66
67 void GCNHazardRecognizer::Reset() {
68 EmittedInstrs.clear();
69 }
70
71 void GCNHazardRecognizer::EmitInstruction(SUnit *SU) {
72 EmitInstruction(SU->getInstr());
73 }
74
75 void GCNHazardRecognizer::EmitInstruction(MachineInstr *MI) {
76 CurrCycleInstr = MI;
77 }
78
79 static bool isDivFMas(unsigned Opcode) {
80 return Opcode == AMDGPU::V_DIV_FMAS_F32_e64 || Opcode == AMDGPU::V_DIV_FMAS_F64_e64;
81 }
82
83 static bool isSGetReg(unsigned Opcode) {
84 return Opcode == AMDGPU::S_GETREG_B32;
85 }
86
87 static bool isSSetReg(unsigned Opcode) {
88 switch (Opcode) {
89 case AMDGPU::S_SETREG_B32:
90 case AMDGPU::S_SETREG_B32_mode:
91 case AMDGPU::S_SETREG_IMM32_B32:
92 case AMDGPU::S_SETREG_IMM32_B32_mode:
93 return true;
94 }
95 return false;
96 }
97
98 static bool isRWLane(unsigned Opcode) {
99 return Opcode == AMDGPU::V_READLANE_B32 || Opcode == AMDGPU::V_WRITELANE_B32;
100 }
101
102 static bool isRFE(unsigned Opcode) {
103 return Opcode == AMDGPU::S_RFE_B64;
104 }
105
106 static bool isSMovRel(unsigned Opcode) {
107 switch (Opcode) {
108 case AMDGPU::S_MOVRELS_B32:
109 case AMDGPU::S_MOVRELS_B64:
110 case AMDGPU::S_MOVRELD_B32:
111 case AMDGPU::S_MOVRELD_B64:
112 return true;
113 default:
114 return false;
115 }
116 }
117
118 static bool isDGEMM(unsigned Opcode) {
119 return AMDGPU::getMAIIsDGEMM(Opcode);
120 }
121
isXDL(const GCNSubtarget & ST,const MachineInstr & MI)122 static bool isXDL(const GCNSubtarget &ST, const MachineInstr &MI) {
123 unsigned Opcode = MI.getOpcode();
124
125 if (!SIInstrInfo::isMAI(MI) ||
126 isDGEMM(Opcode) ||
127 Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_e64 ||
128 Opcode == AMDGPU::V_ACCVGPR_READ_B32_e64)
129 return false;
130
131 if (!ST.hasGFX940Insts())
132 return true;
133
134 return AMDGPU::getMAIIsGFX940XDL(Opcode);
135 }
136
137 static bool isSendMsgTraceDataOrGDS(const SIInstrInfo &TII,
138 const MachineInstr &MI) {
139 if (TII.isAlwaysGDS(MI.getOpcode()))
140 return true;
141
142 switch (MI.getOpcode()) {
143 case AMDGPU::S_SENDMSG:
144 case AMDGPU::S_SENDMSGHALT:
145 case AMDGPU::S_TTRACEDATA:
146 return true;
147 // These DS opcodes don't support GDS.
148 case AMDGPU::DS_NOP:
149 case AMDGPU::DS_PERMUTE_B32:
150 case AMDGPU::DS_BPERMUTE_B32:
151 return false;
152 default:
153 if (TII.isDS(MI.getOpcode())) {
154 int GDS = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
155 AMDGPU::OpName::gds);
156 if (MI.getOperand(GDS).getImm())
157 return true;
158 }
159 return false;
160 }
161 }
162
isPermlane(const MachineInstr & MI)163 static bool isPermlane(const MachineInstr &MI) {
164 unsigned Opcode = MI.getOpcode();
165 return Opcode == AMDGPU::V_PERMLANE16_B32_e64 ||
166 Opcode == AMDGPU::V_PERMLANEX16_B32_e64;
167 }
168
isLdsDma(const MachineInstr & MI)169 static bool isLdsDma(const MachineInstr &MI) {
170 return SIInstrInfo::isVALU(MI) &&
171 (SIInstrInfo::isMUBUF(MI) || SIInstrInfo::isFLAT(MI));
172 }
173
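// Extract the hardware register ID encoded in the simm16 operand of an
// s_setreg/s_getreg instruction.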
174 static unsigned getHWReg(const SIInstrInfo *TII, const MachineInstr &RegInstr) {
175 const MachineOperand *RegOp = TII->getNamedOperand(RegInstr,
176 AMDGPU::OpName::simm16);
177 return RegOp->getImm() & AMDGPU::Hwreg::ID_MASK_;
178 }
179
180 ScheduleHazardRecognizer::HazardType
181 GCNHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
182 MachineInstr *MI = SU->getInstr();
183 // If we are not in "HazardRecognizerMode" and therefore not being run from
184 // the scheduler, track possible stalls from hazards but don't insert noops.
185 auto HazardType = IsHazardRecognizerMode ? NoopHazard : Hazard;
186
187 if (MI->isBundle())
188 return NoHazard;
189
190 if (SIInstrInfo::isSMRD(*MI) && checkSMRDHazards(MI) > 0)
191 return HazardType;
192
193 if (ST.hasNSAtoVMEMBug() && checkNSAtoVMEMHazard(MI) > 0)
194 return HazardType;
195
196 if (checkFPAtomicToDenormModeHazard(MI) > 0)
197 return HazardType;
198
199 if (ST.hasNoDataDepHazard())
200 return NoHazard;
201
202 // FIXME: Should flat be considered vmem?
203 if ((SIInstrInfo::isVMEM(*MI) ||
204 SIInstrInfo::isFLAT(*MI))
205 && checkVMEMHazards(MI) > 0)
206 return HazardType;
207
208 if (SIInstrInfo::isVALU(*MI) && checkVALUHazards(MI) > 0)
209 return HazardType;
210
211 if (SIInstrInfo::isDPP(*MI) && checkDPPHazards(MI) > 0)
212 return HazardType;
213
214 if (isDivFMas(MI->getOpcode()) && checkDivFMasHazards(MI) > 0)
215 return HazardType;
216
217 if (isRWLane(MI->getOpcode()) && checkRWLaneHazards(MI) > 0)
218 return HazardType;
219
220 if ((SIInstrInfo::isVALU(*MI) || SIInstrInfo::isVMEM(*MI) ||
221 SIInstrInfo::isFLAT(*MI) || SIInstrInfo::isDS(*MI) ||
222 SIInstrInfo::isEXP(*MI)) && checkMAIVALUHazards(MI) > 0)
223 return HazardType;
224
225 if (isSGetReg(MI->getOpcode()) && checkGetRegHazards(MI) > 0)
226 return HazardType;
227
228 if (isSSetReg(MI->getOpcode()) && checkSetRegHazards(MI) > 0)
229 return HazardType;
230
231 if (isRFE(MI->getOpcode()) && checkRFEHazards(MI) > 0)
232 return HazardType;
233
234 if (((ST.hasReadM0MovRelInterpHazard() &&
235 (TII.isVINTRP(*MI) || isSMovRel(MI->getOpcode()))) ||
236 (ST.hasReadM0SendMsgHazard() && isSendMsgTraceDataOrGDS(TII, *MI)) ||
237 (ST.hasReadM0LdsDmaHazard() && isLdsDma(*MI)) ||
238 (ST.hasReadM0LdsDirectHazard() &&
239 MI->readsRegister(AMDGPU::LDS_DIRECT))) &&
240 checkReadM0Hazards(MI) > 0)
241 return HazardType;
242
243 if (SIInstrInfo::isMAI(*MI) && checkMAIHazards(MI) > 0)
244 return HazardType;
245
246 if ((SIInstrInfo::isVMEM(*MI) ||
247 SIInstrInfo::isFLAT(*MI) ||
248 SIInstrInfo::isDS(*MI)) && checkMAILdStHazards(MI) > 0)
249 return HazardType;
250
251 if (MI->isInlineAsm() && checkInlineAsmHazards(MI) > 0)
252 return HazardType;
253
254 return NoHazard;
255 }
256
257 static void insertNoopsInBundle(MachineInstr *MI, const SIInstrInfo &TII,
258 unsigned Quantity) {
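// S_NOP's immediate encodes (wait states - 1) and covers at most 8 wait
// states per instruction, so emit as many S_NOPs as needed to reach Quantity.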
259 while (Quantity > 0) {
260 unsigned Arg = std::min(Quantity, 8u);
261 Quantity -= Arg;
262 BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII.get(AMDGPU::S_NOP))
263 .addImm(Arg - 1);
264 }
265 }
266
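// Number of wait states the MFMA pipeline is considered busy for \p MI, taken
// from the first write ProcRes entry of its scheduling class.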
267 unsigned
268 GCNHazardRecognizer::getMFMAPipelineWaitStates(const MachineInstr &MI) const {
269 const MCSchedClassDesc *SC = TSchedModel.resolveSchedClass(&MI);
270 assert(TSchedModel.getWriteProcResBegin(SC) !=
271 TSchedModel.getWriteProcResEnd(SC));
272 return TSchedModel.getWriteProcResBegin(SC)->Cycles;
273 }
274
275 void GCNHazardRecognizer::processBundle() {
276 MachineBasicBlock::instr_iterator MI = std::next(CurrCycleInstr->getIterator());
277 MachineBasicBlock::instr_iterator E = CurrCycleInstr->getParent()->instr_end();
278 // Check bundled MachineInstr's for hazards.
279 for (; MI != E && MI->isInsideBundle(); ++MI) {
280 CurrCycleInstr = &*MI;
281 unsigned WaitStates = PreEmitNoopsCommon(CurrCycleInstr);
282
283 if (IsHazardRecognizerMode) {
284 fixHazards(CurrCycleInstr);
285
286 insertNoopsInBundle(CurrCycleInstr, TII, WaitStates);
287 }
288
289 // It’s unnecessary to track more than MaxLookAhead instructions. Since we
290 // include the bundled MI directly after, only add a maximum of
291 // (MaxLookAhead - 1) noops to EmittedInstrs.
292 for (unsigned i = 0, e = std::min(WaitStates, MaxLookAhead - 1); i < e; ++i)
293 EmittedInstrs.push_front(nullptr);
294
295 EmittedInstrs.push_front(CurrCycleInstr);
296 EmittedInstrs.resize(MaxLookAhead);
297 }
298 CurrCycleInstr = nullptr;
299 }
300
301 unsigned GCNHazardRecognizer::PreEmitNoops(MachineInstr *MI) {
302 IsHazardRecognizerMode = true;
303 CurrCycleInstr = MI;
304 unsigned W = PreEmitNoopsCommon(MI);
305 fixHazards(MI);
306 CurrCycleInstr = nullptr;
307 return W;
308 }
309
310 unsigned GCNHazardRecognizer::PreEmitNoopsCommon(MachineInstr *MI) {
311 if (MI->isBundle())
312 return 0;
313
314 int WaitStates = 0;
315
316 if (SIInstrInfo::isSMRD(*MI))
317 return std::max(WaitStates, checkSMRDHazards(MI));
318
319 if (ST.hasNSAtoVMEMBug())
320 WaitStates = std::max(WaitStates, checkNSAtoVMEMHazard(MI));
321
322 WaitStates = std::max(WaitStates, checkFPAtomicToDenormModeHazard(MI));
323
324 if (ST.hasNoDataDepHazard())
325 return WaitStates;
326
327 if (SIInstrInfo::isVMEM(*MI) || SIInstrInfo::isFLAT(*MI))
328 WaitStates = std::max(WaitStates, checkVMEMHazards(MI));
329
330 if (SIInstrInfo::isVALU(*MI))
331 WaitStates = std::max(WaitStates, checkVALUHazards(MI));
332
333 if (SIInstrInfo::isDPP(*MI))
334 WaitStates = std::max(WaitStates, checkDPPHazards(MI));
335
336 if (isDivFMas(MI->getOpcode()))
337 WaitStates = std::max(WaitStates, checkDivFMasHazards(MI));
338
339 if (isRWLane(MI->getOpcode()))
340 WaitStates = std::max(WaitStates, checkRWLaneHazards(MI));
341
342 if ((SIInstrInfo::isVALU(*MI) || SIInstrInfo::isVMEM(*MI) ||
343 SIInstrInfo::isFLAT(*MI) || SIInstrInfo::isDS(*MI) ||
344 SIInstrInfo::isEXP(*MI)) && checkMAIVALUHazards(MI) > 0)
345 WaitStates = std::max(WaitStates, checkMAIVALUHazards(MI));
346
347 if (MI->isInlineAsm())
348 return std::max(WaitStates, checkInlineAsmHazards(MI));
349
350 if (isSGetReg(MI->getOpcode()))
351 return std::max(WaitStates, checkGetRegHazards(MI));
352
353 if (isSSetReg(MI->getOpcode()))
354 return std::max(WaitStates, checkSetRegHazards(MI));
355
356 if (isRFE(MI->getOpcode()))
357 return std::max(WaitStates, checkRFEHazards(MI));
358
359 if ((ST.hasReadM0MovRelInterpHazard() &&
360 (TII.isVINTRP(*MI) || isSMovRel(MI->getOpcode()))) ||
361 (ST.hasReadM0SendMsgHazard() && isSendMsgTraceDataOrGDS(TII, *MI)) ||
362 (ST.hasReadM0LdsDmaHazard() && isLdsDma(*MI)) ||
363 (ST.hasReadM0LdsDirectHazard() && MI->readsRegister(AMDGPU::LDS_DIRECT)))
364 return std::max(WaitStates, checkReadM0Hazards(MI));
365
366 if (SIInstrInfo::isMAI(*MI))
367 return std::max(WaitStates, checkMAIHazards(MI));
368
369 if (SIInstrInfo::isVMEM(*MI) ||
370 SIInstrInfo::isFLAT(*MI) ||
371 SIInstrInfo::isDS(*MI))
372 return std::max(WaitStates, checkMAILdStHazards(MI));
373
374 return WaitStates;
375 }
376
377 void GCNHazardRecognizer::EmitNoop() {
378 EmittedInstrs.push_front(nullptr);
379 }
380
381 void GCNHazardRecognizer::AdvanceCycle() {
382 // When the scheduler detects a stall, it will call AdvanceCycle() without
383 // emitting any instructions.
384 if (!CurrCycleInstr) {
385 EmittedInstrs.push_front(nullptr);
386 return;
387 }
388
389 if (CurrCycleInstr->isBundle()) {
390 processBundle();
391 return;
392 }
393
394 unsigned NumWaitStates = TII.getNumWaitStates(*CurrCycleInstr);
395 if (!NumWaitStates) {
396 CurrCycleInstr = nullptr;
397 return;
398 }
399
400 // Keep track of emitted instructions
401 EmittedInstrs.push_front(CurrCycleInstr);
402
403 // Add a nullptr for each additional wait state after the first. Make sure
404 // not to add more than getMaxLookAhead() items to the list, since we
405 // truncate the list to that size right after this loop.
406 for (unsigned i = 1, e = std::min(NumWaitStates, getMaxLookAhead());
407 i < e; ++i) {
408 EmittedInstrs.push_front(nullptr);
409 }
410
411 // getMaxLookahead() is the largest number of wait states we will ever need
412 // to insert, so there is no point in keeping track of more than that many
413 // wait states.
414 EmittedInstrs.resize(getMaxLookAhead());
415
416 CurrCycleInstr = nullptr;
417 }
418
419 void GCNHazardRecognizer::RecedeCycle() {
420 llvm_unreachable("hazard recognizer does not support bottom-up scheduling.");
421 }
422
423 //===----------------------------------------------------------------------===//
424 // Helper Functions
425 //===----------------------------------------------------------------------===//
426
427 typedef enum { HazardFound, HazardExpired, NoHazardFound } HazardFnResult;
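// HazardFound: a hazard was identified; HazardExpired: the search window
// closed without finding one; NoHazardFound: keep scanning backwards.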
428
429 typedef function_ref<bool(const MachineInstr &, int WaitStates)> IsExpiredFn;
430 typedef function_ref<unsigned int(const MachineInstr &)> GetNumWaitStatesFn;
431
432 // Search for a hazard in a block and its predecessors.
433 template <typename StateT>
434 static bool
435 hasHazard(StateT State,
436 function_ref<HazardFnResult(StateT &, const MachineInstr &)> IsHazard,
437 function_ref<void(StateT &, const MachineInstr &)> UpdateState,
438 const MachineBasicBlock *MBB,
439 MachineBasicBlock::const_reverse_instr_iterator I,
440 DenseSet<const MachineBasicBlock *> &Visited) {
441 for (auto E = MBB->instr_rend(); I != E; ++I) {
442 // No need to look at parent BUNDLE instructions.
443 if (I->isBundle())
444 continue;
445
446 switch (IsHazard(State, *I)) {
447 case HazardFound:
448 return true;
449 case HazardExpired:
450 return false;
451 default:
452 // Continue search
453 break;
454 }
455
456 if (I->isInlineAsm() || I->isMetaInstruction())
457 continue;
458
459 UpdateState(State, *I);
460 }
461
462 for (MachineBasicBlock *Pred : MBB->predecessors()) {
463 if (!Visited.insert(Pred).second)
464 continue;
465
466 if (hasHazard(State, IsHazard, UpdateState, Pred, Pred->instr_rbegin(),
467 Visited))
468 return true;
469 }
470
471 return false;
472 }
473
474 // Returns the minimum number of wait states since \p I, walking all
475 // predecessors. Only scans until \p IsExpired returns true.
476 // Can only be run in hazard recognizer mode.
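// When a block has several predecessors the result is the minimum over all
// incoming paths, i.e. the closest possible producer is assumed.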
477 static int getWaitStatesSince(
478 GCNHazardRecognizer::IsHazardFn IsHazard, const MachineBasicBlock *MBB,
479 MachineBasicBlock::const_reverse_instr_iterator I, int WaitStates,
480 IsExpiredFn IsExpired, DenseSet<const MachineBasicBlock *> &Visited,
481 GetNumWaitStatesFn GetNumWaitStates = SIInstrInfo::getNumWaitStates) {
482 for (auto E = MBB->instr_rend(); I != E; ++I) {
483 // Don't add WaitStates for parent BUNDLE instructions.
484 if (I->isBundle())
485 continue;
486
487 if (IsHazard(*I))
488 return WaitStates;
489
490 if (I->isInlineAsm())
491 continue;
492
493 WaitStates += GetNumWaitStates(*I);
494
495 if (IsExpired(*I, WaitStates))
496 return std::numeric_limits<int>::max();
497 }
498
499 int MinWaitStates = std::numeric_limits<int>::max();
500 for (MachineBasicBlock *Pred : MBB->predecessors()) {
501 if (!Visited.insert(Pred).second)
502 continue;
503
504 int W = getWaitStatesSince(IsHazard, Pred, Pred->instr_rbegin(), WaitStates,
505 IsExpired, Visited, GetNumWaitStates);
506
507 MinWaitStates = std::min(MinWaitStates, W);
508 }
509
510 return MinWaitStates;
511 }
512
513 static int getWaitStatesSince(GCNHazardRecognizer::IsHazardFn IsHazard,
514 const MachineInstr *MI, IsExpiredFn IsExpired) {
515 DenseSet<const MachineBasicBlock *> Visited;
516 return getWaitStatesSince(IsHazard, MI->getParent(),
517 std::next(MI->getReverseIterator()),
518 0, IsExpired, Visited);
519 }
520
521 int GCNHazardRecognizer::getWaitStatesSince(IsHazardFn IsHazard, int Limit) {
522 if (IsHazardRecognizerMode) {
523 auto IsExpiredFn = [Limit](const MachineInstr &, int WaitStates) {
524 return WaitStates >= Limit;
525 };
526 return ::getWaitStatesSince(IsHazard, CurrCycleInstr, IsExpiredFn);
527 }
528
529 int WaitStates = 0;
530 for (MachineInstr *MI : EmittedInstrs) {
531 if (MI) {
532 if (IsHazard(*MI))
533 return WaitStates;
534
535 if (MI->isInlineAsm())
536 continue;
537 }
538 ++WaitStates;
539
540 if (WaitStates >= Limit)
541 break;
542 }
543 return std::numeric_limits<int>::max();
544 }
545
546 int GCNHazardRecognizer::getWaitStatesSinceDef(unsigned Reg,
547 IsHazardFn IsHazardDef,
548 int Limit) {
549 const SIRegisterInfo *TRI = ST.getRegisterInfo();
550
551 auto IsHazardFn = [IsHazardDef, TRI, Reg](const MachineInstr &MI) {
552 return IsHazardDef(MI) && MI.modifiesRegister(Reg, TRI);
553 };
554
555 return getWaitStatesSince(IsHazardFn, Limit);
556 }
557
558 int GCNHazardRecognizer::getWaitStatesSinceSetReg(IsHazardFn IsHazard,
559 int Limit) {
560 auto IsHazardFn = [IsHazard](const MachineInstr &MI) {
561 return isSSetReg(MI.getOpcode()) && IsHazard(MI);
562 };
563
564 return getWaitStatesSince(IsHazardFn, Limit);
565 }
566
567 //===----------------------------------------------------------------------===//
568 // No-op Hazard Detection
569 //===----------------------------------------------------------------------===//
570
addRegUnits(const SIRegisterInfo & TRI,BitVector & BV,MCRegister Reg)571 static void addRegUnits(const SIRegisterInfo &TRI, BitVector &BV,
572 MCRegister Reg) {
573 for (MCRegUnitIterator RUI(Reg, &TRI); RUI.isValid(); ++RUI)
574 BV.set(*RUI);
575 }
576
addRegsToSet(const SIRegisterInfo & TRI,iterator_range<MachineInstr::const_mop_iterator> Ops,BitVector & Set)577 static void addRegsToSet(const SIRegisterInfo &TRI,
578 iterator_range<MachineInstr::const_mop_iterator> Ops,
579 BitVector &Set) {
580 for (const MachineOperand &Op : Ops) {
581 if (Op.isReg())
582 addRegUnits(TRI, Set, Op.getReg().asMCReg());
583 }
584 }
585
addClauseInst(const MachineInstr & MI)586 void GCNHazardRecognizer::addClauseInst(const MachineInstr &MI) {
587 // XXX: Do we need to worry about implicit operands?
588 addRegsToSet(TRI, MI.defs(), ClauseDefs);
589 addRegsToSet(TRI, MI.uses(), ClauseUses);
590 }
591
592 static bool breaksSMEMSoftClause(MachineInstr *MI) {
593 return !SIInstrInfo::isSMRD(*MI);
594 }
595
596 static bool breaksVMEMSoftClause(MachineInstr *MI) {
597 return !SIInstrInfo::isVMEM(*MI) && !SIInstrInfo::isFLAT(*MI);
598 }
599
600 int GCNHazardRecognizer::checkSoftClauseHazards(MachineInstr *MEM) {
601 // SMEM soft clauses are only present on VI+, and only matter if xnack is
602 // enabled.
603 if (!ST.isXNACKEnabled())
604 return 0;
605
606 bool IsSMRD = TII.isSMRD(*MEM);
607
608 resetClause();
609
610 // A soft-clause is any group of consecutive SMEM instructions. The
611 // instructions in this group may return out of order and/or may be
612 // replayed (i.e. the same instruction issued more than once).
613 //
614 // In order to handle these situations correctly we need to make sure that
615 // when a clause has more than one instruction, no instruction in the clause
616 // writes to a register that is read by another instruction in the clause
617 // (including itself). If we encounter this situation, we need to break the
618 // clause by inserting a non-SMEM instruction.
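//
// A purely illustrative example of such an intra-clause dependency:
//   s_load_dwordx2 s[0:1], s[4:5], 0x0
//   s_load_dword   s2, s[0:1], 0x0    ; reads s[0:1], written by the previous
//                                     ; clause member, so the clause must be
//                                     ; broken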
619
620 for (MachineInstr *MI : EmittedInstrs) {
621 // When we hit a non-SMEM instruction then we have passed the start of the
622 // clause and we can stop.
623 if (!MI)
624 break;
625
626 if (IsSMRD ? breaksSMEMSoftClause(MI) : breaksVMEMSoftClause(MI))
627 break;
628
629 addClauseInst(*MI);
630 }
631
632 if (ClauseDefs.none())
633 return 0;
634
635 // We need to make sure not to put loads and stores in the same clause if they
636 // use the same address. For now, just start a new clause whenever we see a
637 // store.
638 if (MEM->mayStore())
639 return 1;
640
641 addClauseInst(*MEM);
642
643 // If the set of defs and uses intersect then we cannot add this instruction
644 // to the clause, so we have a hazard.
645 return ClauseDefs.anyCommon(ClauseUses) ? 1 : 0;
646 }
647
648 int GCNHazardRecognizer::checkSMRDHazards(MachineInstr *SMRD) {
649 int WaitStatesNeeded = 0;
650
651 WaitStatesNeeded = checkSoftClauseHazards(SMRD);
652
653 // This SMRD hazard only affects SI.
654 if (!ST.hasSMRDReadVALUDefHazard())
655 return WaitStatesNeeded;
656
657 // A read of an SGPR by an SMRD instruction requires 4 wait states when the
658 // SGPR was written by a VALU instruction.
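// A minimal sketch of the sequence being padded here (illustrative only):
//   v_readfirstlane_b32 s0, v0        ; VALU writes SGPR s0
//   s_nop 3                           ; 4 wait states
//   s_load_dword s1, s[2:3], s0       ; SMRD reads s0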
659 int SmrdSgprWaitStates = 4;
660 auto IsHazardDefFn = [this](const MachineInstr &MI) {
661 return TII.isVALU(MI);
662 };
663 auto IsBufferHazardDefFn = [this](const MachineInstr &MI) {
664 return TII.isSALU(MI);
665 };
666
667 bool IsBufferSMRD = TII.isBufferSMRD(*SMRD);
668
669 for (const MachineOperand &Use : SMRD->uses()) {
670 if (!Use.isReg())
671 continue;
672 int WaitStatesNeededForUse =
673 SmrdSgprWaitStates - getWaitStatesSinceDef(Use.getReg(), IsHazardDefFn,
674 SmrdSgprWaitStates);
675 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
676
677 // This fixes what appears to be undocumented hardware behavior in SI where
678 // s_mov writing a descriptor and s_buffer_load_dword reading the descriptor
679 // need some number of nops in between. We don't know how many we need, but
680 // let's use 4. This wasn't discovered before probably because the only
681 // case when this happens is when we expand a 64-bit pointer into a full
682 // descriptor and use s_buffer_load_dword instead of s_load_dword, which was
683 // probably never encountered in closed-source land.
684 if (IsBufferSMRD) {
685 int WaitStatesNeededForUse =
686 SmrdSgprWaitStates - getWaitStatesSinceDef(Use.getReg(),
687 IsBufferHazardDefFn,
688 SmrdSgprWaitStates);
689 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
690 }
691 }
692
693 return WaitStatesNeeded;
694 }
695
696 int GCNHazardRecognizer::checkVMEMHazards(MachineInstr* VMEM) {
697 if (!ST.hasVMEMReadSGPRVALUDefHazard())
698 return 0;
699
700 int WaitStatesNeeded = checkSoftClauseHazards(VMEM);
701
702 // A read of an SGPR by a VMEM instruction requires 5 wait states when the
703 // SGPR was written by a VALU instruction.
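// A minimal sketch (illustrative, MUBUF soffset case):
//   v_readfirstlane_b32 s4, v0                 ; VALU writes SGPR s4
//   s_nop 4                                    ; 5 wait states
//   buffer_load_dword v1, off, s[8:11], s4     ; VMEM reads s4 as soffset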
704 const int VmemSgprWaitStates = 5;
705 auto IsHazardDefFn = [this](const MachineInstr &MI) {
706 return TII.isVALU(MI);
707 };
708 for (const MachineOperand &Use : VMEM->uses()) {
709 if (!Use.isReg() || TRI.isVectorRegister(MF.getRegInfo(), Use.getReg()))
710 continue;
711
712 int WaitStatesNeededForUse =
713 VmemSgprWaitStates - getWaitStatesSinceDef(Use.getReg(), IsHazardDefFn,
714 VmemSgprWaitStates);
715 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
716 }
717 return WaitStatesNeeded;
718 }
719
720 int GCNHazardRecognizer::checkDPPHazards(MachineInstr *DPP) {
721 const SIRegisterInfo *TRI = ST.getRegisterInfo();
722 const SIInstrInfo *TII = ST.getInstrInfo();
723
724 // Check for DPP VGPR read after VALU VGPR write and EXEC write.
725 int DppVgprWaitStates = 2;
726 int DppExecWaitStates = 5;
727 int WaitStatesNeeded = 0;
728 auto IsHazardDefFn = [TII](const MachineInstr &MI) {
729 return TII->isVALU(MI);
730 };
731
732 for (const MachineOperand &Use : DPP->uses()) {
733 if (!Use.isReg() || !TRI->isVGPR(MF.getRegInfo(), Use.getReg()))
734 continue;
735 int WaitStatesNeededForUse =
736 DppVgprWaitStates - getWaitStatesSinceDef(
737 Use.getReg(),
738 [](const MachineInstr &) { return true; },
739 DppVgprWaitStates);
740 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
741 }
742
743 WaitStatesNeeded = std::max(
744 WaitStatesNeeded,
745 DppExecWaitStates - getWaitStatesSinceDef(AMDGPU::EXEC, IsHazardDefFn,
746 DppExecWaitStates));
747
748 return WaitStatesNeeded;
749 }
750
751 int GCNHazardRecognizer::checkDivFMasHazards(MachineInstr *DivFMas) {
752 const SIInstrInfo *TII = ST.getInstrInfo();
753
754 // v_div_fmas requires 4 wait states after a write to vcc from a VALU
755 // instruction.
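// A minimal sketch (illustrative only):
//   v_cmp_gt_f32 vcc, v0, v1          ; VALU writes VCC
//   s_nop 3                           ; 4 wait states
//   v_div_fmas_f32 v2, v3, v4, v5     ; implicitly reads VCC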
756 const int DivFMasWaitStates = 4;
757 auto IsHazardDefFn = [TII](const MachineInstr &MI) {
758 return TII->isVALU(MI);
759 };
760 int WaitStatesNeeded = getWaitStatesSinceDef(AMDGPU::VCC, IsHazardDefFn,
761 DivFMasWaitStates);
762
763 return DivFMasWaitStates - WaitStatesNeeded;
764 }
765
766 int GCNHazardRecognizer::checkGetRegHazards(MachineInstr *GetRegInstr) {
767 const SIInstrInfo *TII = ST.getInstrInfo();
768 unsigned GetRegHWReg = getHWReg(TII, *GetRegInstr);
769
770 const int GetRegWaitStates = 2;
771 auto IsHazardFn = [TII, GetRegHWReg](const MachineInstr &MI) {
772 return GetRegHWReg == getHWReg(TII, MI);
773 };
774 int WaitStatesNeeded = getWaitStatesSinceSetReg(IsHazardFn, GetRegWaitStates);
775
776 return GetRegWaitStates - WaitStatesNeeded;
777 }
778
779 int GCNHazardRecognizer::checkSetRegHazards(MachineInstr *SetRegInstr) {
780 const SIInstrInfo *TII = ST.getInstrInfo();
781 unsigned HWReg = getHWReg(TII, *SetRegInstr);
782
783 const int SetRegWaitStates = ST.getSetRegWaitStates();
784 auto IsHazardFn = [TII, HWReg](const MachineInstr &MI) {
785 return HWReg == getHWReg(TII, MI);
786 };
787 int WaitStatesNeeded = getWaitStatesSinceSetReg(IsHazardFn, SetRegWaitStates);
788 return SetRegWaitStates - WaitStatesNeeded;
789 }
790
createsVALUHazard(const MachineInstr & MI)791 int GCNHazardRecognizer::createsVALUHazard(const MachineInstr &MI) {
792 if (!MI.mayStore())
793 return -1;
794
795 const SIInstrInfo *TII = ST.getInstrInfo();
796 unsigned Opcode = MI.getOpcode();
797 const MCInstrDesc &Desc = MI.getDesc();
798
799 int VDataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata);
800 int VDataRCID = -1;
801 if (VDataIdx != -1)
802 VDataRCID = Desc.OpInfo[VDataIdx].RegClass;
803
804 if (TII->isMUBUF(MI) || TII->isMTBUF(MI)) {
805 // There is no hazard if the instruction does not use vector regs
806 // (like wbinvl1)
807 if (VDataIdx == -1)
808 return -1;
809 // For MUBUF/MTBUF instructions this hazard only exists if the
810 // instruction is not using a register in the soffset field.
811 const MachineOperand *SOffset =
812 TII->getNamedOperand(MI, AMDGPU::OpName::soffset);
813 // If we have no soffset operand, then assume this field has been
814 // hardcoded to zero.
815 if (AMDGPU::getRegBitWidth(VDataRCID) > 64 &&
816 (!SOffset || !SOffset->isReg()))
817 return VDataIdx;
818 }
819
820 // MIMG instructions create a hazard if they don't use a 256-bit T# and
821 // the store size is greater than 8 bytes and they have more than two bits
822 // of their dmask set.
823 // All our MIMG definitions use a 256-bit T#, so we can skip checking for them.
824 if (TII->isMIMG(MI)) {
825 int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::srsrc);
826 assert(SRsrcIdx != -1 &&
827 AMDGPU::getRegBitWidth(Desc.OpInfo[SRsrcIdx].RegClass) == 256);
828 (void)SRsrcIdx;
829 }
830
831 if (TII->isFLAT(MI)) {
832 int DataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata);
833 if (AMDGPU::getRegBitWidth(Desc.OpInfo[DataIdx].RegClass) > 64)
834 return DataIdx;
835 }
836
837 return -1;
838 }
839
840 int
checkVALUHazardsHelper(const MachineOperand & Def,const MachineRegisterInfo & MRI)841 GCNHazardRecognizer::checkVALUHazardsHelper(const MachineOperand &Def,
842 const MachineRegisterInfo &MRI) {
843 // Helper to check for the hazard where VMEM instructions that store more
844 // than 8 bytes can have their store data overwritten by the next instruction.
845 const SIRegisterInfo *TRI = ST.getRegisterInfo();
846
847 const int VALUWaitStates = ST.hasGFX940Insts() ? 2 : 1;
848 int WaitStatesNeeded = 0;
849
850 if (!TRI->isVectorRegister(MRI, Def.getReg()))
851 return WaitStatesNeeded;
852 Register Reg = Def.getReg();
853 auto IsHazardFn = [this, Reg, TRI](const MachineInstr &MI) {
854 int DataIdx = createsVALUHazard(MI);
855 return DataIdx >= 0 &&
856 TRI->regsOverlap(MI.getOperand(DataIdx).getReg(), Reg);
857 };
858 int WaitStatesNeededForDef =
859 VALUWaitStates - getWaitStatesSince(IsHazardFn, VALUWaitStates);
860 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForDef);
861
862 return WaitStatesNeeded;
863 }
864
865 int GCNHazardRecognizer::checkVALUHazards(MachineInstr *VALU) {
866 int WaitStatesNeeded = 0;
867
868 if (ST.hasTransForwardingHazard() && !SIInstrInfo::isTRANS(*VALU)) {
869 const int TransDefWaitstates = 1;
870
871 auto IsTransDefFn = [this, VALU](const MachineInstr &MI) {
872 if (!SIInstrInfo::isTRANS(MI))
873 return false;
874 const SIRegisterInfo *TRI = ST.getRegisterInfo();
875 const SIInstrInfo *TII = ST.getInstrInfo();
876 Register Def = TII->getNamedOperand(MI, AMDGPU::OpName::vdst)->getReg();
877
878 for (const MachineOperand &Use : VALU->explicit_uses()) {
879 if (Use.isReg() && TRI->regsOverlap(Def, Use.getReg()))
880 return true;
881 }
882
883 return false;
884 };
885
886 int WaitStatesNeededForDef =
887 TransDefWaitstates -
888 getWaitStatesSince(IsTransDefFn, TransDefWaitstates);
889 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForDef);
890 }
891
892 if (ST.hasDstSelForwardingHazard()) {
893 const int Shift16DefWaitstates = 1;
894
895 auto IsShift16BitDefFn = [this, VALU](const MachineInstr &MI) {
896 if (!SIInstrInfo::isVALU(MI))
897 return false;
898 const SIInstrInfo *TII = ST.getInstrInfo();
899 if (SIInstrInfo::isSDWA(MI)) {
900 if (auto *DstSel = TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel))
901 if (DstSel->getImm() == AMDGPU::SDWA::DWORD)
902 return false;
903 } else {
904 if ((AMDGPU::getNamedOperandIdx(MI.getOpcode(),
905 AMDGPU::OpName::op_sel) == -1) ||
906 !(TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)
907 ->getImm() &
908 SISrcMods::DST_OP_SEL))
909 return false;
910 }
911 const SIRegisterInfo *TRI = ST.getRegisterInfo();
912 if (auto *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst)) {
913 Register Def = Dst->getReg();
914
915 for (const MachineOperand &Use : VALU->explicit_uses()) {
916 if (Use.isReg() && TRI->regsOverlap(Def, Use.getReg()))
917 return true;
918 }
919 }
920
921 return false;
922 };
923
924 int WaitStatesNeededForDef =
925 Shift16DefWaitstates -
926 getWaitStatesSince(IsShift16BitDefFn, Shift16DefWaitstates);
927 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForDef);
928 }
929
930 if (ST.hasVDecCoExecHazard()) {
931 const int VALUWriteSGPRVALUReadWaitstates = 2;
932 const int VALUWriteEXECRWLane = 4;
933 const int VALUWriteVGPRReadlaneRead = 1;
934
935 const SIRegisterInfo *TRI = ST.getRegisterInfo();
936 const MachineRegisterInfo &MRI = MF.getRegInfo();
937 Register UseReg;
938 auto IsVALUDefSGPRFn = [&UseReg, TRI](const MachineInstr &MI) {
939 if (!SIInstrInfo::isVALU(MI))
940 return false;
941 return MI.modifiesRegister(UseReg, TRI);
942 };
943
944 for (const MachineOperand &Use : VALU->explicit_uses()) {
945 if (!Use.isReg())
946 continue;
947
948 UseReg = Use.getReg();
949 if (TRI->isSGPRReg(MRI, UseReg)) {
950 int WaitStatesNeededForDef =
951 VALUWriteSGPRVALUReadWaitstates -
952 getWaitStatesSince(IsVALUDefSGPRFn,
953 VALUWriteSGPRVALUReadWaitstates);
954 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForDef);
955 }
956 }
957
958 if (VALU->readsRegister(AMDGPU::VCC, TRI)) {
959 UseReg = AMDGPU::VCC;
960 int WaitStatesNeededForDef =
961 VALUWriteSGPRVALUReadWaitstates -
962 getWaitStatesSince(IsVALUDefSGPRFn, VALUWriteSGPRVALUReadWaitstates);
963 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForDef);
964 }
965
966 switch (VALU->getOpcode()) {
967 case AMDGPU::V_READLANE_B32:
968 case AMDGPU::V_READFIRSTLANE_B32: {
969 MachineOperand *Src = TII.getNamedOperand(*VALU, AMDGPU::OpName::src0);
970 UseReg = Src->getReg();
971 int WaitStatesNeededForDef =
972 VALUWriteVGPRReadlaneRead -
973 getWaitStatesSince(IsVALUDefSGPRFn, VALUWriteVGPRReadlaneRead);
974 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForDef);
975 }
976 LLVM_FALLTHROUGH;
977 case AMDGPU::V_WRITELANE_B32: {
978 UseReg = AMDGPU::EXEC;
979 int WaitStatesNeededForDef =
980 VALUWriteEXECRWLane -
981 getWaitStatesSince(IsVALUDefSGPRFn, VALUWriteEXECRWLane);
982 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForDef);
983 break;
984 }
985 default:
986 break;
987 }
988 }
989
990 // This checks for the hazard where VMEM instructions that store more than
991 // 8 bytes can have their store data overwritten by the next instruction.
992 if (!ST.has12DWordStoreHazard())
993 return WaitStatesNeeded;
994
995 const MachineRegisterInfo &MRI = MF.getRegInfo();
996
997 for (const MachineOperand &Def : VALU->defs()) {
998 WaitStatesNeeded = std::max(WaitStatesNeeded, checkVALUHazardsHelper(Def, MRI));
999 }
1000
1001 return WaitStatesNeeded;
1002 }
1003
1004 int GCNHazardRecognizer::checkInlineAsmHazards(MachineInstr *IA) {
1005 // This checks for hazards associated with inline asm statements.
1006 // Since inline asms can contain just about anything, we use this
1007 // to call/leverage other check*Hazard routines. Note that
1008 // this function doesn't attempt to address all possible inline asm
1009 // hazards (good luck), but is a collection of what has been
1010 // problematic thus far.
1011
1012 // see checkVALUHazards()
1013 if (!ST.has12DWordStoreHazard())
1014 return 0;
1015
1016 const MachineRegisterInfo &MRI = MF.getRegInfo();
1017 int WaitStatesNeeded = 0;
1018
1019 for (unsigned I = InlineAsm::MIOp_FirstOperand, E = IA->getNumOperands();
1020 I != E; ++I) {
1021 const MachineOperand &Op = IA->getOperand(I);
1022 if (Op.isReg() && Op.isDef()) {
1023 WaitStatesNeeded = std::max(WaitStatesNeeded, checkVALUHazardsHelper(Op, MRI));
1024 }
1025 }
1026
1027 return WaitStatesNeeded;
1028 }
1029
1030 int GCNHazardRecognizer::checkRWLaneHazards(MachineInstr *RWLane) {
1031 const SIInstrInfo *TII = ST.getInstrInfo();
1032 const SIRegisterInfo *TRI = ST.getRegisterInfo();
1033 const MachineRegisterInfo &MRI = MF.getRegInfo();
1034
1035 const MachineOperand *LaneSelectOp =
1036 TII->getNamedOperand(*RWLane, AMDGPU::OpName::src1);
1037
1038 if (!LaneSelectOp->isReg() || !TRI->isSGPRReg(MRI, LaneSelectOp->getReg()))
1039 return 0;
1040
1041 Register LaneSelectReg = LaneSelectOp->getReg();
1042 auto IsHazardFn = [TII](const MachineInstr &MI) { return TII->isVALU(MI); };
1043
1044 const int RWLaneWaitStates = 4;
1045 int WaitStatesSince = getWaitStatesSinceDef(LaneSelectReg, IsHazardFn,
1046 RWLaneWaitStates);
1047 return RWLaneWaitStates - WaitStatesSince;
1048 }
1049
1050 int GCNHazardRecognizer::checkRFEHazards(MachineInstr *RFE) {
1051 if (!ST.hasRFEHazards())
1052 return 0;
1053
1054 const SIInstrInfo *TII = ST.getInstrInfo();
1055
1056 const int RFEWaitStates = 1;
1057
1058 auto IsHazardFn = [TII](const MachineInstr &MI) {
1059 return getHWReg(TII, MI) == AMDGPU::Hwreg::ID_TRAPSTS;
1060 };
1061 int WaitStatesNeeded = getWaitStatesSinceSetReg(IsHazardFn, RFEWaitStates);
1062 return RFEWaitStates - WaitStatesNeeded;
1063 }
1064
1065 int GCNHazardRecognizer::checkReadM0Hazards(MachineInstr *MI) {
1066 const SIInstrInfo *TII = ST.getInstrInfo();
1067 const int ReadM0WaitStates = 1;
1068 auto IsHazardFn = [TII](const MachineInstr &MI) { return TII->isSALU(MI); };
1069 return ReadM0WaitStates -
1070 getWaitStatesSinceDef(AMDGPU::M0, IsHazardFn, ReadM0WaitStates);
1071 }
1072
1073 void GCNHazardRecognizer::fixHazards(MachineInstr *MI) {
1074 fixVMEMtoScalarWriteHazards(MI);
1075 fixVcmpxPermlaneHazards(MI);
1076 fixSMEMtoVectorWriteHazards(MI);
1077 fixVcmpxExecWARHazard(MI);
1078 fixLdsBranchVmemWARHazard(MI);
1079 if (ST.hasLdsDirect()) {
1080 fixLdsDirectVALUHazard(MI);
1081 fixLdsDirectVMEMHazard(MI);
1082 }
1083 fixVALUPartialForwardingHazard(MI);
1084 fixVALUTransUseHazard(MI);
1085 fixWMMAHazards(MI);
1086 }
1087
1088 bool GCNHazardRecognizer::fixVcmpxPermlaneHazards(MachineInstr *MI) {
1089 if (!ST.hasVcmpxPermlaneHazard() || !isPermlane(*MI))
1090 return false;
1091
1092 const SIInstrInfo *TII = ST.getInstrInfo();
1093 const SIRegisterInfo *TRI = ST.getRegisterInfo();
1094 auto IsHazardFn = [TII, TRI](const MachineInstr &MI) {
1095 return (TII->isVOPC(MI) ||
1096 ((TII->isVOP3(MI) || TII->isSDWA(MI)) && MI.isCompare())) &&
1097 MI.modifiesRegister(AMDGPU::EXEC, TRI);
1098 };
1099
1100 auto IsExpiredFn = [](const MachineInstr &MI, int) {
1101 unsigned Opc = MI.getOpcode();
1102 return SIInstrInfo::isVALU(MI) && Opc != AMDGPU::V_NOP_e32 &&
1103 Opc != AMDGPU::V_NOP_e64 && Opc != AMDGPU::V_NOP_sdwa;
1104 };
1105
1106 if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
1107 std::numeric_limits<int>::max())
1108 return false;
1109
1110 // V_NOP will be discarded by SQ.
1111 // Use V_MOV_B32 v?, v?. Register must be alive so use src0 of V_PERMLANE*
1112 // which is always a VGPR and available.
1113 auto *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
1114 Register Reg = Src0->getReg();
1115 bool IsUndef = Src0->isUndef();
1116 BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
1117 TII->get(AMDGPU::V_MOV_B32_e32))
1118 .addReg(Reg, RegState::Define | (IsUndef ? RegState::Dead : 0))
1119 .addReg(Reg, IsUndef ? RegState::Undef : RegState::Kill);
1120
1121 return true;
1122 }
1123
1124 bool GCNHazardRecognizer::fixVMEMtoScalarWriteHazards(MachineInstr *MI) {
1125 if (!ST.hasVMEMtoScalarWriteHazard())
1126 return false;
1127
1128 if (!SIInstrInfo::isSALU(*MI) && !SIInstrInfo::isSMRD(*MI))
1129 return false;
1130
1131 if (MI->getNumDefs() == 0)
1132 return false;
1133
1134 const SIRegisterInfo *TRI = ST.getRegisterInfo();
1135
1136 auto IsHazardFn = [TRI, MI](const MachineInstr &I) {
1137 if (!SIInstrInfo::isVMEM(I) && !SIInstrInfo::isDS(I) &&
1138 !SIInstrInfo::isFLAT(I))
1139 return false;
1140
1141 for (const MachineOperand &Def : MI->defs()) {
1142 const MachineOperand *Op =
1143 I.findRegisterUseOperand(Def.getReg(), false, TRI);
1144 if (!Op)
1145 continue;
1146 return true;
1147 }
1148 return false;
1149 };
1150
1151 auto IsExpiredFn = [](const MachineInstr &MI, int) {
1152 return SIInstrInfo::isVALU(MI) ||
1153 (MI.getOpcode() == AMDGPU::S_WAITCNT &&
1154 !MI.getOperand(0).getImm()) ||
1155 (MI.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR &&
1156 MI.getOperand(0).getImm() == 0xffe3);
1157 };
1158
1159 if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
1160 std::numeric_limits<int>::max())
1161 return false;
1162
1163 const SIInstrInfo *TII = ST.getInstrInfo();
1164 BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
1165 TII->get(AMDGPU::S_WAITCNT_DEPCTR))
1166 .addImm(0xffe3);
1167 return true;
1168 }
1169
1170 bool GCNHazardRecognizer::fixSMEMtoVectorWriteHazards(MachineInstr *MI) {
1171 if (!ST.hasSMEMtoVectorWriteHazard())
1172 return false;
1173
1174 if (!SIInstrInfo::isVALU(*MI))
1175 return false;
1176
1177 unsigned SDSTName;
1178 switch (MI->getOpcode()) {
1179 case AMDGPU::V_READLANE_B32:
1180 case AMDGPU::V_READFIRSTLANE_B32:
1181 SDSTName = AMDGPU::OpName::vdst;
1182 break;
1183 default:
1184 SDSTName = AMDGPU::OpName::sdst;
1185 break;
1186 }
1187
1188 const SIInstrInfo *TII = ST.getInstrInfo();
1189 const SIRegisterInfo *TRI = ST.getRegisterInfo();
1190 const AMDGPU::IsaVersion IV = AMDGPU::getIsaVersion(ST.getCPU());
1191 const MachineOperand *SDST = TII->getNamedOperand(*MI, SDSTName);
1192 if (!SDST) {
1193 for (const auto &MO : MI->implicit_operands()) {
1194 if (MO.isDef() && TRI->isSGPRClass(TRI->getPhysRegClass(MO.getReg()))) {
1195 SDST = &MO;
1196 break;
1197 }
1198 }
1199 }
1200
1201 if (!SDST)
1202 return false;
1203
1204 const Register SDSTReg = SDST->getReg();
1205 auto IsHazardFn = [SDSTReg, TRI](const MachineInstr &I) {
1206 return SIInstrInfo::isSMRD(I) && I.readsRegister(SDSTReg, TRI);
1207 };
1208
1209 auto IsExpiredFn = [TII, IV](const MachineInstr &MI, int) {
1210 if (TII->isSALU(MI)) {
1211 switch (MI.getOpcode()) {
1212 case AMDGPU::S_SETVSKIP:
1213 case AMDGPU::S_VERSION:
1214 case AMDGPU::S_WAITCNT_VSCNT:
1215 case AMDGPU::S_WAITCNT_VMCNT:
1216 case AMDGPU::S_WAITCNT_EXPCNT:
1217 // These instructions cannot mitigate the hazard.
1218 return false;
1219 case AMDGPU::S_WAITCNT_LGKMCNT:
1220 // Reducing lgkmcnt count to 0 always mitigates the hazard.
1221 return (MI.getOperand(1).getImm() == 0) &&
1222 (MI.getOperand(0).getReg() == AMDGPU::SGPR_NULL);
1223 case AMDGPU::S_WAITCNT: {
1224 const int64_t Imm = MI.getOperand(0).getImm();
1225 AMDGPU::Waitcnt Decoded = AMDGPU::decodeWaitcnt(IV, Imm);
1226 return (Decoded.LgkmCnt == 0);
1227 }
1228 default:
1229 // SOPP instructions cannot mitigate the hazard.
1230 if (TII->isSOPP(MI))
1231 return false;
1232 // At this point the SALU can be assumed to mitigate the hazard
1233 // because either:
1234 // (a) it is independent of the at risk SMEM (breaking chain),
1235 // or
1236 // (b) it is dependent on the SMEM, in which case an appropriate
1237 // s_waitcnt lgkmcnt _must_ exist between it and the at risk
1238 // SMEM instruction.
1239 return true;
1240 }
1241 }
1242 return false;
1243 };
1244
1245 if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
1246 std::numeric_limits<int>::max())
1247 return false;
1248
1249 BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
1250 TII->get(AMDGPU::S_MOV_B32), AMDGPU::SGPR_NULL)
1251 .addImm(0);
1252 return true;
1253 }
1254
1255 bool GCNHazardRecognizer::fixVcmpxExecWARHazard(MachineInstr *MI) {
1256 if (!ST.hasVcmpxExecWARHazard() || !SIInstrInfo::isVALU(*MI))
1257 return false;
1258
1259 const SIRegisterInfo *TRI = ST.getRegisterInfo();
1260 if (!MI->modifiesRegister(AMDGPU::EXEC, TRI))
1261 return false;
1262
1263 auto IsHazardFn = [TRI](const MachineInstr &I) {
1264 if (SIInstrInfo::isVALU(I))
1265 return false;
1266 return I.readsRegister(AMDGPU::EXEC, TRI);
1267 };
1268
1269 const SIInstrInfo *TII = ST.getInstrInfo();
1270 auto IsExpiredFn = [TII, TRI](const MachineInstr &MI, int) {
1271 if (SIInstrInfo::isVALU(MI)) {
1272 if (TII->getNamedOperand(MI, AMDGPU::OpName::sdst))
1273 return true;
1274 for (auto MO : MI.implicit_operands())
1275 if (MO.isDef() && TRI->isSGPRClass(TRI->getPhysRegClass(MO.getReg())))
1276 return true;
1277 }
1278 if (MI.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR &&
1279 (MI.getOperand(0).getImm() & 0xfffe) == 0xfffe)
1280 return true;
1281 return false;
1282 };
1283
1284 if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
1285 std::numeric_limits<int>::max())
1286 return false;
1287
1288 BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
1289 TII->get(AMDGPU::S_WAITCNT_DEPCTR))
1290 .addImm(0xfffe);
1291 return true;
1292 }
1293
1294 static bool shouldRunLdsBranchVmemWARHazardFixup(const MachineFunction &MF,
1295 const GCNSubtarget &ST) {
1296 if (!ST.hasLdsBranchVmemWARHazard())
1297 return false;
1298
1299 // Check if the necessary condition for the hazard is met: both LDS and VMEM
1300 // instructions need to appear in the same function.
1301 bool HasLds = false;
1302 bool HasVmem = false;
1303 for (auto &MBB : MF) {
1304 for (auto &MI : MBB) {
1305 HasLds |= SIInstrInfo::isDS(MI);
1306 HasVmem |=
1307 SIInstrInfo::isVMEM(MI) || SIInstrInfo::isSegmentSpecificFLAT(MI);
1308 if (HasLds && HasVmem)
1309 return true;
1310 }
1311 }
1312 return false;
1313 }
1314
1315 bool GCNHazardRecognizer::fixLdsBranchVmemWARHazard(MachineInstr *MI) {
1316 if (!RunLdsBranchVmemWARHazardFixup)
1317 return false;
1318
1319 assert(ST.hasLdsBranchVmemWARHazard());
1320
1321 auto IsHazardInst = [](const MachineInstr &MI) {
1322 if (SIInstrInfo::isDS(MI))
1323 return 1;
1324 if (SIInstrInfo::isVMEM(MI) || SIInstrInfo::isSegmentSpecificFLAT(MI))
1325 return 2;
1326 return 0;
1327 };
1328
1329 auto InstType = IsHazardInst(*MI);
1330 if (!InstType)
1331 return false;
1332
1333 auto IsExpiredFn = [&IsHazardInst](const MachineInstr &I, int) {
1334 return IsHazardInst(I) || (I.getOpcode() == AMDGPU::S_WAITCNT_VSCNT &&
1335 I.getOperand(0).getReg() == AMDGPU::SGPR_NULL &&
1336 !I.getOperand(1).getImm());
1337 };
1338
1339 auto IsHazardFn = [InstType, &IsHazardInst](const MachineInstr &I) {
1340 if (!I.isBranch())
1341 return false;
1342
1343 auto IsHazardFn = [InstType, IsHazardInst](const MachineInstr &I) {
1344 auto InstType2 = IsHazardInst(I);
1345 return InstType2 && InstType != InstType2;
1346 };
1347
1348 auto IsExpiredFn = [InstType, &IsHazardInst](const MachineInstr &I, int) {
1349 auto InstType2 = IsHazardInst(I);
1350 if (InstType == InstType2)
1351 return true;
1352
1353 return I.getOpcode() == AMDGPU::S_WAITCNT_VSCNT &&
1354 I.getOperand(0).getReg() == AMDGPU::SGPR_NULL &&
1355 !I.getOperand(1).getImm();
1356 };
1357
1358 return ::getWaitStatesSince(IsHazardFn, &I, IsExpiredFn) !=
1359 std::numeric_limits<int>::max();
1360 };
1361
1362 if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
1363 std::numeric_limits<int>::max())
1364 return false;
1365
1366 const SIInstrInfo *TII = ST.getInstrInfo();
1367 BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
1368 TII->get(AMDGPU::S_WAITCNT_VSCNT))
1369 .addReg(AMDGPU::SGPR_NULL, RegState::Undef)
1370 .addImm(0);
1371
1372 return true;
1373 }
1374
1375 bool GCNHazardRecognizer::fixLdsDirectVALUHazard(MachineInstr *MI) {
1376 if (!SIInstrInfo::isLDSDIR(*MI))
1377 return false;
1378
1379 const int NoHazardWaitStates = 15;
1380 const MachineOperand *VDST = TII.getNamedOperand(*MI, AMDGPU::OpName::vdst);
1381 const Register VDSTReg = VDST->getReg();
1382
1383 bool VisitedTrans = false;
1384 auto IsHazardFn = [this, VDSTReg, &VisitedTrans](const MachineInstr &I) {
1385 if (!SIInstrInfo::isVALU(I))
1386 return false;
1387 VisitedTrans = VisitedTrans || SIInstrInfo::isTRANS(I);
1388 // Cover both WAR and WAW
1389 return I.readsRegister(VDSTReg, &TRI) || I.modifiesRegister(VDSTReg, &TRI);
1390 };
1391 auto IsExpiredFn = [&](const MachineInstr &I, int WaitStates) {
1392 if (WaitStates >= NoHazardWaitStates)
1393 return true;
1394 // Instructions which cause va_vdst==0 expire hazard
1395 return SIInstrInfo::isVMEM(I) || SIInstrInfo::isFLAT(I) ||
1396 SIInstrInfo::isDS(I) || SIInstrInfo::isEXP(I);
1397 };
1398 auto GetWaitStatesFn = [](const MachineInstr &MI) {
1399 return SIInstrInfo::isVALU(MI) ? 1 : 0;
1400 };
1401
1402 DenseSet<const MachineBasicBlock *> Visited;
1403 auto Count = ::getWaitStatesSince(IsHazardFn, MI->getParent(),
1404 std::next(MI->getReverseIterator()), 0,
1405 IsExpiredFn, Visited, GetWaitStatesFn);
1406
1407 // Transcendentals can execute in parallel to other VALUs.
1408 // This makes va_vdst count unusable with a mixture of VALU and TRANS.
1409 if (VisitedTrans)
1410 Count = 0;
1411
1412 MachineOperand *WaitVdstOp =
1413 TII.getNamedOperand(*MI, AMDGPU::OpName::waitvdst);
1414 WaitVdstOp->setImm(std::min(Count, NoHazardWaitStates));
1415
1416 return true;
1417 }
1418
1419 bool GCNHazardRecognizer::fixLdsDirectVMEMHazard(MachineInstr *MI) {
1420 if (!SIInstrInfo::isLDSDIR(*MI))
1421 return false;
1422
1423 const MachineOperand *VDST = TII.getNamedOperand(*MI, AMDGPU::OpName::vdst);
1424 const Register VDSTReg = VDST->getReg();
1425
1426 auto IsHazardFn = [this, VDSTReg](const MachineInstr &I) {
1427 if (!SIInstrInfo::isVMEM(I) && !SIInstrInfo::isFLAT(I) &&
1428 !SIInstrInfo::isDS(I))
1429 return false;
1430 return I.readsRegister(VDSTReg, &TRI) || I.modifiesRegister(VDSTReg, &TRI);
1431 };
1432 auto IsExpiredFn = [](const MachineInstr &I, int) {
1433 return SIInstrInfo::isVALU(I) || SIInstrInfo::isEXP(I) ||
1434 (I.getOpcode() == AMDGPU::S_WAITCNT && !I.getOperand(0).getImm()) ||
1435 (I.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR &&
1436 I.getOperand(0).getImm() == 0xffe3);
1437 };
1438
1439 if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
1440 std::numeric_limits<int>::max())
1441 return false;
1442
1443 BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
1444 TII.get(AMDGPU::S_WAITCNT_DEPCTR))
1445 .addImm(0xffe3);
1446
1447 return true;
1448 }
1449
1450 bool GCNHazardRecognizer::fixVALUPartialForwardingHazard(MachineInstr *MI) {
1451 if (!ST.isWave64())
1452 return false;
1453 if (!ST.hasVALUPartialForwardingHazard())
1454 return false;
1455 if (!SIInstrInfo::isVALU(*MI))
1456 return false;
1457
1458 SmallSetVector<Register, 4> SrcVGPRs;
1459
1460 for (const MachineOperand &Use : MI->explicit_uses()) {
1461 if (Use.isReg() && TRI.isVGPR(MF.getRegInfo(), Use.getReg()))
1462 SrcVGPRs.insert(Use.getReg());
1463 }
1464
1465 // Only applies with >= 2 unique VGPR sources
1466 if (SrcVGPRs.size() <= 1)
1467 return false;
1468
1469 // Look for the following pattern:
1470 // Va <- VALU [PreExecPos]
1471 // intv1
1472 // Exec <- SALU [ExecPos]
1473 // intv2
1474 // Vb <- VALU [PostExecPos]
1475 // intv3
1476 // MI Va, Vb (WaitState = 0)
1477 //
1478 // Where:
1479 // intv1 + intv2 <= 2 VALUs
1480 // intv3 <= 4 VALUs
1481 //
1482 // If found, insert an appropriate S_WAITCNT_DEPCTR before MI.
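// The mitigation inserts an s_waitcnt_depctr whose 0x0fff immediate forces
// the va_vdst field to zero, i.e. it waits for all outstanding VALU writes
// before MI issues.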
1483
1484 const int Intv1plus2MaxVALUs = 2;
1485 const int Intv3MaxVALUs = 4;
1486 const int IntvMaxVALUs = 6;
1487 const int NoHazardVALUWaitStates = IntvMaxVALUs + 2;
1488
1489 struct StateType {
1490 SmallDenseMap<Register, int, 4> DefPos;
1491 int ExecPos = std::numeric_limits<int>::max();
1492 int VALUs = 0;
1493 };
1494
1495 StateType State;
1496
1497 // This folds expiry testing into the hazard detection callback itself.
1498 auto IsHazardFn = [&, this](StateType &State, const MachineInstr &I) {
1499 // Too many VALU states have passed
1500 if (State.VALUs > NoHazardVALUWaitStates)
1501 return HazardExpired;
1502
1503 // Instructions which cause va_vdst==0 expire hazard
1504 if (SIInstrInfo::isVMEM(I) || SIInstrInfo::isFLAT(I) ||
1505 SIInstrInfo::isDS(I) || SIInstrInfo::isEXP(I) ||
1506 (I.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR &&
1507 I.getOperand(0).getImm() == 0x0fff))
1508 return HazardExpired;
1509
1510 // Track register writes
1511 bool Changed = false;
1512 if (SIInstrInfo::isVALU(I)) {
1513 for (Register Src : SrcVGPRs) {
1514 if (!State.DefPos.count(Src) && I.modifiesRegister(Src, &TRI)) {
1515 State.DefPos[Src] = State.VALUs;
1516 Changed = true;
1517 }
1518 }
1519 } else if (SIInstrInfo::isSALU(I)) {
1520 if (State.ExecPos == std::numeric_limits<int>::max()) {
1521 if (!State.DefPos.empty() && I.modifiesRegister(AMDGPU::EXEC, &TRI)) {
1522 State.ExecPos = State.VALUs;
1523 Changed = true;
1524 }
1525 }
1526 }
1527
1528 // Early expiration: too many VALUs in intv3
1529 if (State.VALUs > Intv3MaxVALUs && State.DefPos.empty())
1530 return HazardExpired;
1531
1532 // Only evaluate state if something changed
1533 if (!Changed)
1534 return NoHazardFound;
1535
1536 // Determine positions of VALUs pre/post exec change
1537 if (State.ExecPos == std::numeric_limits<int>::max())
1538 return NoHazardFound;
1539
1540 int PreExecPos = std::numeric_limits<int>::max();
1541 int PostExecPos = std::numeric_limits<int>::max();
1542
1543 for (auto Entry : State.DefPos) {
1544 int DefVALUs = Entry.second;
1545 if (DefVALUs != std::numeric_limits<int>::max()) {
1546 if (DefVALUs >= State.ExecPos)
1547 PreExecPos = std::min(PreExecPos, DefVALUs);
1548 else if (DefVALUs < State.ExecPos)
1549 PostExecPos = std::min(PostExecPos, DefVALUs);
1550 }
1551 }
1552
1553 // Need a VALU post exec change
1554 if (PostExecPos == std::numeric_limits<int>::max())
1555 return NoHazardFound;
1556
1557 // Too many VALUs in intv3?
1558 int Intv3VALUs = PostExecPos;
1559 if (Intv3VALUs > Intv3MaxVALUs)
1560 return HazardExpired;
1561
1562 // Too many VALUs in intv2?
1563 int Intv2VALUs = (State.ExecPos - PostExecPos) - 1;
1564 if (Intv2VALUs > Intv1plus2MaxVALUs)
1565 return HazardExpired;
1566
1567 // Need a VALU pre exec change
1568 if (PreExecPos == std::numeric_limits<int>::max())
1569 return NoHazardFound;
1570
1571 // Too many VALUs in intv1?
1572 int Intv1VALUs = PreExecPos - State.ExecPos;
1573 if (Intv1VALUs > Intv1plus2MaxVALUs)
1574 return HazardExpired;
1575
1576 // Too many VALUs in intv1 + intv2
1577 if (Intv1VALUs + Intv2VALUs > Intv1plus2MaxVALUs)
1578 return HazardExpired;
1579
1580 return HazardFound;
1581 };
1582 auto UpdateStateFn = [](StateType &State, const MachineInstr &MI) {
1583 if (SIInstrInfo::isVALU(MI))
1584 State.VALUs += 1;
1585 };
1586
1587 DenseSet<const MachineBasicBlock *> Visited;
1588 if (!hasHazard<StateType>(State, IsHazardFn, UpdateStateFn, MI->getParent(),
1589 std::next(MI->getReverseIterator()), Visited))
1590 return false;
1591
1592 BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
1593 TII.get(AMDGPU::S_WAITCNT_DEPCTR))
1594 .addImm(0x0fff);
1595
1596 return true;
1597 }
1598
1599 bool GCNHazardRecognizer::fixVALUTransUseHazard(MachineInstr *MI) {
1600 if (!ST.hasVALUTransUseHazard())
1601 return false;
1602 if (!SIInstrInfo::isVALU(*MI))
1603 return false;
1604
1605 SmallSet<Register, 4> SrcVGPRs;
1606
1607 for (const MachineOperand &Use : MI->explicit_uses()) {
1608 if (Use.isReg() && TRI.isVGPR(MF.getRegInfo(), Use.getReg()))
1609 SrcVGPRs.insert(Use.getReg());
1610 }
1611
1612 // Look for the following pattern:
1613 // Va <- TRANS VALU
1614 // intv
1615 // MI Va (WaitState = 0)
1616 //
1617 // Where:
1618 // intv <= 5 VALUs / 1 TRANS
1619 //
1620 // If found, insert an appropriate S_WAITCNT_DEPCTR before MI.
1621
1622 const int IntvMaxVALUs = 5;
1623 const int IntvMaxTRANS = 1;
1624
1625 struct StateType {
1626 int VALUs = 0;
1627 int TRANS = 0;
1628 };
1629
1630 StateType State;
1631
1632 // This folds expiry testing into the hazard detection callback itself.
1633 auto IsHazardFn = [&, this](StateType &State, const MachineInstr &I) {
1634 // Too many VALU states have passed
1635 if (State.VALUs > IntvMaxVALUs || State.TRANS > IntvMaxTRANS)
1636 return HazardExpired;
1637
1638 // Instructions which force va_vdst==0 expire the hazard
1639 if (SIInstrInfo::isVMEM(I) || SIInstrInfo::isFLAT(I) ||
1640 SIInstrInfo::isDS(I) || SIInstrInfo::isEXP(I) ||
1641 (I.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR &&
1642 I.getOperand(0).getImm() == 0x0fff))
1643 return HazardExpired;
1644
1645 // Track register writes
1646 if (SIInstrInfo::isTRANS(I)) {
1647 for (Register Src : SrcVGPRs) {
1648 if (I.modifiesRegister(Src, &TRI)) {
1649 return HazardFound;
1650 }
1651 }
1652 }
1653
1654 return NoHazardFound;
1655 };
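// Track how many VALU and TRANS instructions separate the candidate TRANS
// def from MI while scanning backwards.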
1656 auto UpdateStateFn = [](StateType &State, const MachineInstr &MI) {
1657 if (SIInstrInfo::isVALU(MI))
1658 State.VALUs += 1;
1659 if (SIInstrInfo::isTRANS(MI))
1660 State.TRANS += 1;
1661 };
1662
1663 DenseSet<const MachineBasicBlock *> Visited;
1664 if (!hasHazard<StateType>(State, IsHazardFn, UpdateStateFn, MI->getParent(),
1665 std::next(MI->getReverseIterator()), Visited))
1666 return false;
1667
1668 // Hazard is observed - insert a wait on the va_vdst counter to ensure the
1669 // hazard is avoided (mask 0x0fff achieves this).
1670 BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
1671 TII.get(AMDGPU::S_WAITCNT_DEPCTR))
1672 .addImm(0x0fff);
1673
1674 return true;
1675 }
1676
1677 bool GCNHazardRecognizer::fixWMMAHazards(MachineInstr *MI) {
1678 if (!SIInstrInfo::isWMMA(*MI))
1679 return false;
1680
1681 const SIInstrInfo *TII = ST.getInstrInfo();
1682 const SIRegisterInfo *TRI = ST.getRegisterInfo();
1683
1684 auto IsHazardFn = [MI, TII, TRI](const MachineInstr &I) {
1685 if (!SIInstrInfo::isWMMA(I))
1686 return false;
1687
1688 // Src0 or Src1 of the current wmma instruction overlaps with the dest of
1689 // the previous wmma.
1690 const Register CurSrc0Reg =
1691 TII->getNamedOperand(*MI, AMDGPU::OpName::src0)->getReg();
1692 const Register CurSrc1Reg =
1693 TII->getNamedOperand(*MI, AMDGPU::OpName::src1)->getReg();
1694
1695 const Register PrevDstReg =
1696 TII->getNamedOperand(I, AMDGPU::OpName::vdst)->getReg();
1697
1698 if (TRI->regsOverlap(PrevDstReg, CurSrc0Reg) ||
1699 TRI->regsOverlap(PrevDstReg, CurSrc1Reg)) {
1700 return true;
1701 }
1702
1703 // Src2 of the current wmma instruction overlaps with the dest of the
1704 // previous wmma.
1705 const MachineOperand *Src2 =
1706 TII->getNamedOperand(*MI, AMDGPU::OpName::src2);
1707 const Register CurSrc2Reg = Src2->isReg() ? Src2->getReg() : Register();
1708
1709 if (CurSrc2Reg != AMDGPU::NoRegister &&
1710 TRI->regsOverlap(PrevDstReg, CurSrc2Reg)) {
1711
1712 const MachineOperand *Src2Mods =
1713 TII->getNamedOperand(*MI, AMDGPU::OpName::src2_modifiers);
1714 const bool NoSrc2Mods =
1715 (Src2Mods->getImm() & (SISrcMods::NEG | SISrcMods::NEG_HI)) == 0;
1716 // Exception: there is no hazard if the wmma instructions are of the same
1717 // type and there is no input modifier on src2 of the current instruction.
1718 return !(NoSrc2Mods && (TII->pseudoToMCOpcode(I.getOpcode()) ==
1719 TII->pseudoToMCOpcode(MI->getOpcode())));
1720 }
1721
1722 return false;
1723 };
1724
1725 auto IsExpiredFn = [](const MachineInstr &I, int) {
1726 return SIInstrInfo::isVALU(I);
1727 };
1728
1729 if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
1730 std::numeric_limits<int>::max())
1731 return false;
1732
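// A hazard was found: insert a V_NOP to separate the dependent WMMA
// instructions.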
1733 BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII->get(AMDGPU::V_NOP_e32));
1734
1735 return true;
1736 }
1737
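// A MIMG instruction using the gfx10 NSA encoding (>= 16 bytes long) followed
// by a MUBUF/MTBUF whose immediate offset has bit 1 or bit 2 set requires one
// intervening wait state on subtargets with the NSA-to-VMEM bug.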
1738 int GCNHazardRecognizer::checkNSAtoVMEMHazard(MachineInstr *MI) {
1739 int NSAtoVMEMWaitStates = 1;
1740
1741 if (!ST.hasNSAtoVMEMBug())
1742 return 0;
1743
1744 if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isMTBUF(*MI))
1745 return 0;
1746
1747 const SIInstrInfo *TII = ST.getInstrInfo();
1748 const auto *Offset = TII->getNamedOperand(*MI, AMDGPU::OpName::offset);
1749 if (!Offset || (Offset->getImm() & 6) == 0)
1750 return 0;
1751
1752 auto IsHazardFn = [TII](const MachineInstr &I) {
1753 if (!SIInstrInfo::isMIMG(I))
1754 return false;
1755 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(I.getOpcode());
1756 return Info->MIMGEncoding == AMDGPU::MIMGEncGfx10NSA &&
1757 TII->getInstSizeInBytes(I) >= 16;
1758 };
1759
1760 return NSAtoVMEMWaitStates - getWaitStatesSince(IsHazardFn, 1);
1761 }
1762
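// S_DENORM_MODE issued too soon after a floating-point atomic VMEM/FLAT
// instruction is hazardous; three wait states (or an intervening VALU or
// s_waitcnt variant) resolve it.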
1763 int GCNHazardRecognizer::checkFPAtomicToDenormModeHazard(MachineInstr *MI) {
1764 int FPAtomicToDenormModeWaitStates = 3;
1765
1766 if (MI->getOpcode() != AMDGPU::S_DENORM_MODE)
1767 return 0;
1768
1769 auto IsHazardFn = [](const MachineInstr &I) {
1770 if (!SIInstrInfo::isVMEM(I) && !SIInstrInfo::isFLAT(I))
1771 return false;
1772 return SIInstrInfo::isFPAtomic(I);
1773 };
1774
1775 auto IsExpiredFn = [](const MachineInstr &MI, int WaitStates) {
1776 if (WaitStates >= 3 || SIInstrInfo::isVALU(MI))
1777 return true;
1778
1779 switch (MI.getOpcode()) {
1780 case AMDGPU::S_WAITCNT:
1781 case AMDGPU::S_WAITCNT_VSCNT:
1782 case AMDGPU::S_WAITCNT_VMCNT:
1783 case AMDGPU::S_WAITCNT_EXPCNT:
1784 case AMDGPU::S_WAITCNT_LGKMCNT:
1785 case AMDGPU::S_WAIT_IDLE:
1786 return true;
1787 default:
1788 break;
1789 }
1790
1791 return false;
1792 };
1793
1794 return FPAtomicToDenormModeWaitStates -
1795 ::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn);
1796 }
1797
1798 int GCNHazardRecognizer::checkMAIHazards(MachineInstr *MI) {
1799 assert(SIInstrInfo::isMAI(*MI));
1800
1801 return ST.hasGFX90AInsts() ? checkMAIHazards90A(MI) : checkMAIHazards908(MI);
1802 }
1803
1804 int GCNHazardRecognizer::checkMFMAPadding(MachineInstr *MI) {
1805 // Early exit if no padding is requested.
1806 if (MFMAPaddingRatio == 0)
1807 return 0;
1808
1809 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1810 if (!SIInstrInfo::isMFMA(*MI) || MFI->getOccupancy() < 2)
1811 return 0;
1812
1813 int NeighborMFMALatency = 0;
1814 auto IsNeighboringMFMA = [&NeighborMFMALatency,
1815 this](const MachineInstr &MI) {
1816 if (!SIInstrInfo::isMFMA(MI))
1817 return false;
1818
1819 NeighborMFMALatency = this->getMFMAPipelineWaitStates(MI);
1820 return true;
1821 };
1822
1823 const int MaxMFMAPipelineWaitStates = 16;
1824 int WaitStatesSinceNeighborMFMA =
1825 getWaitStatesSince(IsNeighboringMFMA, MaxMFMAPipelineWaitStates);
1826
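// For example (hypothetical values): with --amdgpu-mfma-padding-ratio=50, a
// neighboring MFMA of latency 16 that issued 3 wait states ago would request
// 16 * 50 / 100 - 3 = 5 additional wait states of padding.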
1827 int NeighborMFMAPaddingNeeded =
1828 (NeighborMFMALatency * MFMAPaddingRatio / 100) -
1829 WaitStatesSinceNeighborMFMA;
1830
1831 return std::max(0, NeighborMFMAPaddingNeeded);
1832 }
1833
1834 int GCNHazardRecognizer::checkMAIHazards908(MachineInstr *MI) {
1835 int WaitStatesNeeded = 0;
1836 unsigned Opc = MI->getOpcode();
1837
1838 auto IsVALUFn = [](const MachineInstr &MI) {
1839 return SIInstrInfo::isVALU(MI);
1840 };
1841
1842 if (Opc != AMDGPU::V_ACCVGPR_READ_B32_e64) { // MFMA or v_accvgpr_write
1843 const int LegacyVALUWritesVGPRWaitStates = 2;
1844 const int VALUWritesExecWaitStates = 4;
1845 const int MaxWaitStates = 4;
1846
1847 int WaitStatesNeededForUse = VALUWritesExecWaitStates -
1848 getWaitStatesSinceDef(AMDGPU::EXEC, IsVALUFn, MaxWaitStates);
1849 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
1850
1851 if (WaitStatesNeeded < MaxWaitStates) {
1852 for (const MachineOperand &Use : MI->explicit_uses()) {
1853 const int MaxWaitStates = 2;
1854
1855 if (!Use.isReg() || !TRI.isVGPR(MF.getRegInfo(), Use.getReg()))
1856 continue;
1857
1858 int WaitStatesNeededForUse = LegacyVALUWritesVGPRWaitStates -
1859 getWaitStatesSinceDef(Use.getReg(), IsVALUFn, MaxWaitStates);
1860 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
1861
1862 if (WaitStatesNeeded == MaxWaitStates)
1863 break;
1864 }
1865 }
1866 }
1867
1868 for (const MachineOperand &Op : MI->explicit_operands()) {
1869 if (!Op.isReg() || !TRI.isAGPR(MF.getRegInfo(), Op.getReg()))
1870 continue;
1871
1872 if (Op.isDef() && Opc != AMDGPU::V_ACCVGPR_WRITE_B32_e64)
1873 continue;
1874
1875 const int MFMAWritesAGPROverlappedSrcABWaitStates = 4;
1876 const int MFMAWritesAGPROverlappedSrcCWaitStates = 2;
1877 const int MFMA4x4WritesAGPRAccVgprReadWaitStates = 4;
1878 const int MFMA16x16WritesAGPRAccVgprReadWaitStates = 10;
1879 const int MFMA32x32WritesAGPRAccVgprReadWaitStates = 18;
1880 const int MFMA4x4WritesAGPRAccVgprWriteWaitStates = 1;
1881 const int MFMA16x16WritesAGPRAccVgprWriteWaitStates = 7;
1882 const int MFMA32x32WritesAGPRAccVgprWriteWaitStates = 15;
1883 const int MaxWaitStates = 18;
1884 Register Reg = Op.getReg();
1885 unsigned HazardDefLatency = 0;
1886
1887 auto IsOverlappedMFMAFn = [Reg, &HazardDefLatency,
1888 this](const MachineInstr &MI) {
1889 if (!SIInstrInfo::isMFMA(MI))
1890 return false;
1891 Register DstReg = MI.getOperand(0).getReg();
1892 if (DstReg == Reg)
1893 return false;
1894 HazardDefLatency =
1895 std::max(HazardDefLatency, TSchedModel.computeInstrLatency(&MI));
1896 return TRI.regsOverlap(DstReg, Reg);
1897 };
1898
1899 int WaitStatesSinceDef = getWaitStatesSinceDef(Reg, IsOverlappedMFMAFn,
1900 MaxWaitStates);
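// The producer MFMA's latency (2, 8 or 16 cycles, corresponding to the 4x4,
// 16x16 and 32x32 variants) selects the wait-state count below.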
1901 int NeedWaitStates = MFMAWritesAGPROverlappedSrcABWaitStates;
1902 int SrcCIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
1903 int OpNo = MI->getOperandNo(&Op);
1904 if (OpNo == SrcCIdx) {
1905 NeedWaitStates = MFMAWritesAGPROverlappedSrcCWaitStates;
1906 } else if (Opc == AMDGPU::V_ACCVGPR_READ_B32_e64) {
1907 switch (HazardDefLatency) {
1908 case 2: NeedWaitStates = MFMA4x4WritesAGPRAccVgprReadWaitStates;
1909 break;
1910 case 8: NeedWaitStates = MFMA16x16WritesAGPRAccVgprReadWaitStates;
1911 break;
1912 case 16: LLVM_FALLTHROUGH;
1913 default: NeedWaitStates = MFMA32x32WritesAGPRAccVgprReadWaitStates;
1914 break;
1915 }
1916 } else if (Opc == AMDGPU::V_ACCVGPR_WRITE_B32_e64) {
1917 switch (HazardDefLatency) {
1918 case 2: NeedWaitStates = MFMA4x4WritesAGPRAccVgprWriteWaitStates;
1919 break;
1920 case 8: NeedWaitStates = MFMA16x16WritesAGPRAccVgprWriteWaitStates;
1921 break;
1922 case 16: LLVM_FALLTHROUGH;
1923 default: NeedWaitStates = MFMA32x32WritesAGPRAccVgprWriteWaitStates;
1924 break;
1925 }
1926 }
1927
1928 int WaitStatesNeededForUse = NeedWaitStates - WaitStatesSinceDef;
1929 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
1930
1931 if (WaitStatesNeeded == MaxWaitStates)
1932 return WaitStatesNeeded; // Early exit.
1933
1934 auto IsAccVgprWriteFn = [Reg, this](const MachineInstr &MI) {
1935 if (MI.getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32_e64)
1936 return false;
1937 Register DstReg = MI.getOperand(0).getReg();
1938 return TRI.regsOverlap(Reg, DstReg);
1939 };
1940
1941 const int AccVGPRWriteMFMAReadSrcCWaitStates = 1;
1942 const int AccVGPRWriteMFMAReadSrcABWaitStates = 3;
1943 const int AccVGPRWriteAccVgprReadWaitStates = 3;
1944 NeedWaitStates = AccVGPRWriteMFMAReadSrcABWaitStates;
1945 if (OpNo == SrcCIdx)
1946 NeedWaitStates = AccVGPRWriteMFMAReadSrcCWaitStates;
1947 else if (Opc == AMDGPU::V_ACCVGPR_READ_B32_e64)
1948 NeedWaitStates = AccVGPRWriteAccVgprReadWaitStates;
1949
1950 WaitStatesNeededForUse = NeedWaitStates -
1951 getWaitStatesSinceDef(Reg, IsAccVgprWriteFn, MaxWaitStates);
1952 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
1953
1954 if (WaitStatesNeeded == MaxWaitStates)
1955 return WaitStatesNeeded; // Early exit.
1956 }
1957
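// Check for a preceding MFMA that still reads this v_accvgpr_write's
// destination as src2; the required separation again depends on the MFMA's
// latency.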
1958 if (Opc == AMDGPU::V_ACCVGPR_WRITE_B32_e64) {
1959 const int MFMA4x4ReadSrcCAccVgprWriteWaitStates = 0;
1960 const int MFMA16x16ReadSrcCAccVgprWriteWaitStates = 5;
1961 const int MFMA32x32ReadSrcCAccVgprWriteWaitStates = 13;
1962 const int MaxWaitStates = 13;
1963 Register DstReg = MI->getOperand(0).getReg();
1964 unsigned HazardDefLatency = 0;
1965
1966 auto IsSrcCMFMAFn = [DstReg, &HazardDefLatency,
1967 this](const MachineInstr &MI) {
1968 if (!SIInstrInfo::isMFMA(MI))
1969 return false;
1970 Register Reg = TII.getNamedOperand(MI, AMDGPU::OpName::src2)->getReg();
1971 HazardDefLatency =
1972 std::max(HazardDefLatency, TSchedModel.computeInstrLatency(&MI));
1973 return TRI.regsOverlap(Reg, DstReg);
1974 };
1975
1976 int WaitStatesSince = getWaitStatesSince(IsSrcCMFMAFn, MaxWaitStates);
1977 int NeedWaitStates;
1978 switch (HazardDefLatency) {
1979 case 2: NeedWaitStates = MFMA4x4ReadSrcCAccVgprWriteWaitStates;
1980 break;
1981 case 8: NeedWaitStates = MFMA16x16ReadSrcCAccVgprWriteWaitStates;
1982 break;
1983 case 16: LLVM_FALLTHROUGH;
1984 default: NeedWaitStates = MFMA32x32ReadSrcCAccVgprWriteWaitStates;
1985 break;
1986 }
1987
1988 int WaitStatesNeededForUse = NeedWaitStates - WaitStatesSince;
1989 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
1990 }
1991
1992 // Pad neighboring MFMA with noops for better inter-wave performance.
1993 WaitStatesNeeded = std::max(WaitStatesNeeded, checkMFMAPadding(MI));
1994
1995 return WaitStatesNeeded;
1996 }
1997
1998 int GCNHazardRecognizer::checkMAIHazards90A(MachineInstr *MI) {
1999 int WaitStatesNeeded = 0;
2000 unsigned Opc = MI->getOpcode();
2001
2002 auto IsLegacyVALUFn = [](const MachineInstr &MI) {
2003 return SIInstrInfo::isVALU(MI) && !SIInstrInfo::isMFMA(MI);
2004 };
2005
2006 auto IsLegacyVALUNotDotFn = [](const MachineInstr &MI) {
2007 return SIInstrInfo::isVALU(MI) && !SIInstrInfo::isMFMA(MI) &&
2008 !SIInstrInfo::isDOT(MI);
2009 };
2010
2011 if (!SIInstrInfo::isMFMA(*MI))
2012 return WaitStatesNeeded;
2013
2014 const int VALUWritesExecWaitStates = 4;
2015 int WaitStatesNeededForUse = VALUWritesExecWaitStates -
2016 getWaitStatesSinceDef(AMDGPU::EXEC, IsLegacyVALUFn,
2017 VALUWritesExecWaitStates);
2018 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
2019
2020 int SrcCIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
2021
2022 // Loop over the uses; this covers both DGEMM and S/HGEMM as the 2nd instruction.
2023 for (const MachineOperand &Use : MI->explicit_uses()) {
2024 const int LegacyVALUNotDotWritesVGPRWaitStates = 2;
2025 const int SMFMA4x4WritesVGPROverlappedSMFMASrcCWaitStates = 2;
2026 const int GFX940_XDL2PassWritesVGPROverlappedSMFMASrcCWaitStates = 3;
2027 const int GFX940_XDL4PassWritesVGPROverlappedSMFMASrcCWaitStates = 5;
2028 const int GFX940_SMFMA4PassWritesVGPROverlappedSMFMASrcCWaitStates = 4;
2029 const int GFX940_XDL8PassWritesVGPROverlappedSMFMASrcCWaitStates = 9;
2030 const int GFX940_SMFMA8PassWritesVGPROverlappedSMFMASrcCWaitStates = 8;
2031 const int GFX940_XDL16PassWritesVGPROverlappedSMFMASrcCWaitStates = 17;
2032 const int GFX940_SMFMA16PassWritesVGPROverlappedSMFMASrcCWaitStates = 16;
2033 const int SMFMA16x16WritesVGPROverlappedSMFMASrcCWaitStates = 8;
2034 const int SMFMA32x32WritesVGPROverlappedSMFMASrcCWaitStates = 16;
2035 const int SMFMA4x4WritesVGPROverlappedDMFMASrcCWaitStates = 3;
2036 const int SMFMA16x16WritesVGPROverlappedDMFMASrcCWaitStates = 9;
2037 const int SMFMA32x32WritesVGPROverlappedDMFMASrcCWaitStates = 17;
2038 const int DMFMA16x16WritesVGPROverlappedSrcCWaitStates = 9;
2039 const int DMFMA4x4WritesVGPROverlappedSrcCWaitStates = 4;
2040 const int SMFMA4x4WritesVGPROverlappedSrcABWaitStates = 5;
2041 const int SMFMA16x16WritesVGPROverlappedSrcABWaitStates = 11;
2042 const int SMFMA32x32WritesVGPROverlappedSrcABWaitStates = 19;
2043 const int GFX940_SMFMA2PassWritesVGPROverlappedSrcABWaitStates = 4;
2044 const int GFX940_SMFMA4PassWritesVGPROverlappedSrcABWaitStates = 6;
2045 const int GFX940_SMFMA8PassWritesVGPROverlappedSrcABWaitStates = 10;
2046 const int GFX940_SMFMA16PassWritesVGPROverlappedSrcABWaitStates = 18;
2047 const int GFX940_XDL2PassWritesVGPROverlappedSrcABWaitStates = 5;
2048 const int GFX940_XDL4PassWritesVGPROverlappedSrcABWaitStates = 7;
2049 const int GFX940_XDL8PassWritesVGPROverlappedSrcABWaitStates = 11;
2050 const int GFX940_XDL16PassWritesVGPROverlappedSrcABWaitStates = 19;
2051 const int DMFMA4x4WritesVGPROverlappedMFMASrcABWaitStates = 6;
2052 const int DMFMA16x16WritesVGPROverlappedMFMASrcABWaitStates = 11;
2053 const int DMFMA4x4WritesVGPRFullSrcCWaitStates = 4;
2054 const int GFX940_SMFMA4x4WritesVGPRFullSrcCWaitStates = 2;
2055 const int MaxWaitStates = 19;
2056
2057 if (!Use.isReg())
2058 continue;
2059 Register Reg = Use.getReg();
2060 bool FullReg;
2061 const MachineInstr *MI1;
2062
2063 auto IsOverlappedMFMAFn = [Reg, &FullReg, &MI1,
2064 this](const MachineInstr &MI) {
2065 if (!SIInstrInfo::isMFMA(MI))
2066 return false;
2067 Register DstReg = MI.getOperand(0).getReg();
2068 FullReg = (DstReg == Reg);
2069 MI1 = &MI;
2070 return TRI.regsOverlap(DstReg, Reg);
2071 };
2072
2073 WaitStatesNeededForUse = LegacyVALUNotDotWritesVGPRWaitStates -
2074 getWaitStatesSinceDef(Reg, IsLegacyVALUNotDotFn, MaxWaitStates);
2075 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
2076
2077 int NumWaitStates =
2078 getWaitStatesSinceDef(Reg, IsOverlappedMFMAFn, MaxWaitStates);
2079 if (NumWaitStates == std::numeric_limits<int>::max())
2080 continue;
2081
2082 int OpNo = MI->getOperandNo(&Use);
2083 unsigned Opc1 = MI1->getOpcode();
2084 int NeedWaitStates = 0;
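// The required wait states depend on whether the overlapped use is src2 (the
// accumulator) or src0/src1, on the producer's pass count, and on whether
// either instruction is a DGEMM or XDL operation.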
2085 if (OpNo == SrcCIdx) {
2086 if (!isDGEMM(Opc) && (!ST.hasGFX940Insts() && isDGEMM(Opc1))) {
2087 NeedWaitStates = 0;
2088 } else if (FullReg) {
2089 if ((Opc == AMDGPU::V_MFMA_F64_4X4X4F64_e64 ||
2090 Opc == AMDGPU::V_MFMA_F64_4X4X4F64_vgprcd_e64) &&
2091 (Opc1 == AMDGPU::V_MFMA_F64_4X4X4F64_e64 ||
2092 Opc1 == AMDGPU::V_MFMA_F64_4X4X4F64_vgprcd_e64))
2093 NeedWaitStates = DMFMA4x4WritesVGPRFullSrcCWaitStates;
2094 else if (ST.hasGFX940Insts() &&
2095 TSchedModel.computeInstrLatency(MI1) == 2)
2096 NeedWaitStates = GFX940_SMFMA4x4WritesVGPRFullSrcCWaitStates;
2097 } else {
2098 switch (Opc1) {
2099 case AMDGPU::V_MFMA_F64_16X16X4F64_e64:
2100 case AMDGPU::V_MFMA_F64_16X16X4F64_vgprcd_e64:
2101 case AMDGPU::V_MFMA_F64_16X16X4F64_mac_e64:
2102 case AMDGPU::V_MFMA_F64_16X16X4F64_mac_vgprcd_e64:
2103 if (!isXDL(ST, *MI))
2104 NeedWaitStates = DMFMA16x16WritesVGPROverlappedSrcCWaitStates;
2105 break;
2106 case AMDGPU::V_MFMA_F64_4X4X4F64_e64:
2107 case AMDGPU::V_MFMA_F64_4X4X4F64_vgprcd_e64:
2108 if (!isXDL(ST, *MI))
2109 NeedWaitStates = DMFMA4x4WritesVGPROverlappedSrcCWaitStates;
2110 break;
2111 default:
2112 if (ST.hasGFX940Insts() && isXDL(ST, *MI) && !isXDL(ST, *MI1))
2113 break;
2114 switch (TSchedModel.computeInstrLatency(MI1)) {
2115 case 2:
2116 NeedWaitStates = ST.hasGFX940Insts()
2117 ? isXDL(ST, *MI1)
2118 ? GFX940_XDL2PassWritesVGPROverlappedSMFMASrcCWaitStates
2119 : SMFMA4x4WritesVGPROverlappedSMFMASrcCWaitStates
2120 : isDGEMM(Opc)
2121 ? SMFMA4x4WritesVGPROverlappedDMFMASrcCWaitStates
2122 : SMFMA4x4WritesVGPROverlappedSMFMASrcCWaitStates;
2123 break;
2124 case 4:
2125 assert(ST.hasGFX940Insts());
2126 NeedWaitStates = isXDL(ST, *MI1)
2127 ? GFX940_XDL4PassWritesVGPROverlappedSMFMASrcCWaitStates
2128 : GFX940_SMFMA4PassWritesVGPROverlappedSMFMASrcCWaitStates;
2129 break;
2130 case 8:
2131 NeedWaitStates = ST.hasGFX940Insts()
2132 ? isXDL(ST, *MI1)
2133 ? GFX940_XDL8PassWritesVGPROverlappedSMFMASrcCWaitStates
2134 : GFX940_SMFMA8PassWritesVGPROverlappedSMFMASrcCWaitStates
2135 : isDGEMM(Opc)
2136 ? SMFMA16x16WritesVGPROverlappedDMFMASrcCWaitStates
2137 : SMFMA16x16WritesVGPROverlappedSMFMASrcCWaitStates;
2138 break;
2139 case 16: LLVM_FALLTHROUGH;
2140 default:
2141 NeedWaitStates = ST.hasGFX940Insts()
2142 ? isXDL(ST, *MI1)
2143 ? GFX940_XDL16PassWritesVGPROverlappedSMFMASrcCWaitStates
2144 : GFX940_SMFMA16PassWritesVGPROverlappedSMFMASrcCWaitStates
2145 : isDGEMM(Opc)
2146 ? SMFMA32x32WritesVGPROverlappedDMFMASrcCWaitStates
2147 : SMFMA32x32WritesVGPROverlappedSMFMASrcCWaitStates;
2148 }
2149 }
2150 }
2151 } else {
2152 switch (Opc1) {
2153 case AMDGPU::V_MFMA_F64_16X16X4F64_e64:
2154 case AMDGPU::V_MFMA_F64_16X16X4F64_vgprcd_e64:
2155 case AMDGPU::V_MFMA_F64_16X16X4F64_mac_e64:
2156 case AMDGPU::V_MFMA_F64_16X16X4F64_mac_vgprcd_e64:
2157 NeedWaitStates = DMFMA16x16WritesVGPROverlappedMFMASrcABWaitStates;
2158 break;
2159 case AMDGPU::V_MFMA_F64_4X4X4F64_e64:
2160 case AMDGPU::V_MFMA_F64_4X4X4F64_vgprcd_e64:
2161 NeedWaitStates = DMFMA4x4WritesVGPROverlappedMFMASrcABWaitStates;
2162 break;
2163 default:
2164 switch (TSchedModel.computeInstrLatency(MI1)) {
2165 case 2:
2166 NeedWaitStates = ST.hasGFX940Insts()
2167 ? isXDL(ST, *MI1)
2168 ? GFX940_XDL2PassWritesVGPROverlappedSrcABWaitStates
2169 : GFX940_SMFMA2PassWritesVGPROverlappedSrcABWaitStates
2170 : SMFMA4x4WritesVGPROverlappedSrcABWaitStates;
2171 break;
2172 case 4:
2173 assert(ST.hasGFX940Insts());
2174 NeedWaitStates = isXDL(ST, *MI1)
2175 ? GFX940_XDL4PassWritesVGPROverlappedSrcABWaitStates
2176 : GFX940_SMFMA4PassWritesVGPROverlappedSrcABWaitStates;
2177 break;
2178 case 8:
2179 NeedWaitStates = ST.hasGFX940Insts()
2180 ? isXDL(ST, *MI1)
2181 ? GFX940_XDL8PassWritesVGPROverlappedSrcABWaitStates
2182 : GFX940_SMFMA8PassWritesVGPROverlappedSrcABWaitStates
2183 : SMFMA16x16WritesVGPROverlappedSrcABWaitStates;
2184 break;
2185 case 16: LLVM_FALLTHROUGH;
2186 default:
2187 NeedWaitStates = ST.hasGFX940Insts()
2188 ? isXDL(ST, *MI1)
2189 ? GFX940_XDL16PassWritesVGPROverlappedSrcABWaitStates
2190 : GFX940_SMFMA16PassWritesVGPROverlappedSrcABWaitStates
2191 : SMFMA32x32WritesVGPROverlappedSrcABWaitStates;
2192 }
2193 }
2194 }
2195 if (WaitStatesNeeded >= NeedWaitStates)
2196 continue;
2197
2198 WaitStatesNeededForUse = NeedWaitStates - NumWaitStates;
2199 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
2200
2201 if (WaitStatesNeeded == MaxWaitStates)
2202 break;
2203 }
2204
2205 return WaitStatesNeeded;
2206 }
2207
2208 int GCNHazardRecognizer::checkMAILdStHazards(MachineInstr *MI) {
2209 // On gfx90a+, the relevant hazards are checked in checkMAIVALUHazards().
2210 if (!ST.hasMAIInsts() || ST.hasGFX90AInsts())
2211 return 0;
2212
2213 int WaitStatesNeeded = 0;
2214
2215 auto IsAccVgprReadFn = [](const MachineInstr &MI) {
2216 return MI.getOpcode() == AMDGPU::V_ACCVGPR_READ_B32_e64;
2217 };
2218
2219 for (const MachineOperand &Op : MI->explicit_uses()) {
2220 if (!Op.isReg() || !TRI.isVGPR(MF.getRegInfo(), Op.getReg()))
2221 continue;
2222
2223 Register Reg = Op.getReg();
2224
2225 const int AccVgprReadLdStWaitStates = 2;
2226 const int VALUWriteAccVgprRdWrLdStDepVALUWaitStates = 1;
2227 const int MaxWaitStates = 2;
2228
2229 int WaitStatesNeededForUse = AccVgprReadLdStWaitStates -
2230 getWaitStatesSinceDef(Reg, IsAccVgprReadFn, MaxWaitStates);
2231 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
2232
2233 if (WaitStatesNeeded == MaxWaitStates)
2234 return WaitStatesNeeded; // Early exit.
2235
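// Also account for a V_ACCVGPR_READ/WRITE near this load/store when the VGPR
// operand was itself recently written by a plain (non-MAI) VALU.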
2236 auto IsVALUAccVgprRdWrCheckFn = [Reg, this](const MachineInstr &MI) {
2237 if (MI.getOpcode() != AMDGPU::V_ACCVGPR_READ_B32_e64 &&
2238 MI.getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32_e64)
2239 return false;
2240 auto IsVALUFn = [](const MachineInstr &MI) {
2241 return SIInstrInfo::isVALU(MI) && !SIInstrInfo::isMAI(MI);
2242 };
2243 return getWaitStatesSinceDef(Reg, IsVALUFn, 2 /*MaxWaitStates*/) <
2244 std::numeric_limits<int>::max();
2245 };
2246
2247 WaitStatesNeededForUse = VALUWriteAccVgprRdWrLdStDepVALUWaitStates -
2248 getWaitStatesSince(IsVALUAccVgprRdWrCheckFn, MaxWaitStates);
2249 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
2250 }
2251
2252 return WaitStatesNeeded;
2253 }
2254
2255 int GCNHazardRecognizer::checkMAIVALUHazards(MachineInstr *MI) {
2256 if (!ST.hasGFX90AInsts())
2257 return 0;
2258
2259 auto IsDGEMMFn = [](const MachineInstr &MI) -> bool {
2260 return isDGEMM(MI.getOpcode());
2261 };
2262
2263 // This is checked in checkMAIHazards90A()
2264 if (SIInstrInfo::isMFMA(*MI))
2265 return 0;
2266
2267 int WaitStatesNeeded = 0;
2268
2269 bool IsMemOrExport = SIInstrInfo::isVMEM(*MI) ||
2270 SIInstrInfo::isFLAT(*MI) ||
2271 SIInstrInfo::isDS(*MI) ||
2272 SIInstrInfo::isEXP(*MI);
2273 bool IsVALU = SIInstrInfo::isVALU(*MI);
2274
2275 const MachineInstr *MFMA = nullptr;
2276 unsigned Reg;
2277 auto IsMFMAWriteFn = [&Reg, &MFMA, this](const MachineInstr &MI) {
2278 if (!SIInstrInfo::isMFMA(MI) ||
2279 !TRI.regsOverlap(MI.getOperand(0).getReg(), Reg))
2280 return false;
2281 MFMA = &MI;
2282 return true;
2283 };
2284
2285 const MachineInstr *DOT = nullptr;
2286 auto IsDotWriteFn = [&Reg, &DOT, this](const MachineInstr &MI) {
2287 if (!SIInstrInfo::isDOT(MI) ||
2288 !TRI.regsOverlap(MI.getOperand(0).getReg(), Reg))
2289 return false;
2290 DOT = &MI;
2291 return true;
2292 };
2293
2294 int SrcCIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
2295 AMDGPU::OpName::src2);
2296
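// RAW checks: wait states for VGPR sources of this VALU / memory / export
// instruction that were recently written by a DOT or MFMA instruction.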
2297 if (IsMemOrExport || IsVALU) {
2298 const int SMFMA4x4WriteVgprVALUMemExpReadWaitStates = 5;
2299 const int SMFMA16x16WriteVgprVALUMemExpReadWaitStates = 11;
2300 const int SMFMA32x32WriteVgprVALUMemExpReadWaitStates = 19;
2301 const int GFX940_SMFMA2PassWriteVgprVALUMemExpReadWaitStates = 4;
2302 const int GFX940_SMFMA4PassWriteVgprVALUMemExpReadWaitStates = 6;
2303 const int GFX940_SMFMA8PassWriteVgprVALUMemExpReadWaitStates = 10;
2304 const int GFX940_SMFMA16PassWriteVgprVALUMemExpReadWaitStates = 18;
2305 const int GFX940_XDL2PassWriteVgprVALUMemExpReadWaitStates = 5;
2306 const int GFX940_XDL4PassWriteVgprVALUMemExpReadWaitStates = 7;
2307 const int GFX940_XDL8PassWriteVgprVALUMemExpReadWaitStates = 11;
2308 const int GFX940_XDL16PassWriteVgprVALUMemExpReadWaitStates = 19;
2309 const int DMFMA4x4WriteVgprMemExpReadWaitStates = 9;
2310 const int DMFMA16x16WriteVgprMemExpReadWaitStates = 18;
2311 const int DMFMA4x4WriteVgprVALUReadWaitStates = 6;
2312 const int DMFMA16x16WriteVgprVALUReadWaitStates = 11;
2313 const int DotWriteSameDotReadSrcAB = 3;
2314 const int DotWriteDifferentVALURead = 3;
2315 const int MaxWaitStates = 19;
2316
2317 for (const MachineOperand &Use : MI->explicit_uses()) {
2318 if (!Use.isReg())
2319 continue;
2320 Reg = Use.getReg();
2321
2322 DOT = nullptr;
2323 int WaitStatesSinceDef = getWaitStatesSinceDef(Reg, IsDotWriteFn,
2324 MaxWaitStates);
2325 if (DOT) {
2326 int NeedWaitStates = 0;
2327 if (DOT->getOpcode() == MI->getOpcode()) {
2328 if (&Use - &MI->getOperand(0) != SrcCIdx)
2329 NeedWaitStates = DotWriteSameDotReadSrcAB;
2330 } else {
2331 NeedWaitStates = DotWriteDifferentVALURead;
2332 }
2333
2334 int WaitStatesNeededForUse = NeedWaitStates - WaitStatesSinceDef;
2335 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
2336 }
2337
2338 MFMA = nullptr;
2339 WaitStatesSinceDef =
2340 getWaitStatesSinceDef(Reg, IsMFMAWriteFn, MaxWaitStates);
2341 if (!MFMA)
2342 continue;
2343
2344 unsigned HazardDefLatency = TSchedModel.computeInstrLatency(MFMA);
2345 int NeedWaitStates = MaxWaitStates;
2346 switch (HazardDefLatency) {
2347 case 2:
2348 NeedWaitStates =
2349 ST.hasGFX940Insts()
2350 ? isXDL(ST, *MFMA)
2351 ? GFX940_XDL2PassWriteVgprVALUMemExpReadWaitStates
2352 : GFX940_SMFMA2PassWriteVgprVALUMemExpReadWaitStates
2353 : SMFMA4x4WriteVgprVALUMemExpReadWaitStates;
2354 break;
2355 case 4:
2356 assert(isDGEMM(MFMA->getOpcode()) || ST.hasGFX940Insts());
2357 NeedWaitStates =
2358 isDGEMM(MFMA->getOpcode())
2359 ? IsMemOrExport ? DMFMA4x4WriteVgprMemExpReadWaitStates
2360 : DMFMA4x4WriteVgprVALUReadWaitStates
2361 : isXDL(ST, *MFMA)
2362 ? GFX940_XDL4PassWriteVgprVALUMemExpReadWaitStates
2363 : GFX940_SMFMA4PassWriteVgprVALUMemExpReadWaitStates;
2364 break;
2365 case 8:
2366 NeedWaitStates =
2367 ST.hasGFX940Insts()
2368 ? isXDL(ST, *MFMA)
2369 ? GFX940_XDL8PassWriteVgprVALUMemExpReadWaitStates
2370 : GFX940_SMFMA8PassWriteVgprVALUMemExpReadWaitStates
2371 : SMFMA16x16WriteVgprVALUMemExpReadWaitStates;
2372 break;
2373 case 16: LLVM_FALLTHROUGH;
2374 default:
2375 NeedWaitStates =
2376 isDGEMM(MFMA->getOpcode())
2377 ? IsMemOrExport ? DMFMA16x16WriteVgprMemExpReadWaitStates
2378 : DMFMA16x16WriteVgprVALUReadWaitStates
2379 : ST.hasGFX940Insts()
2380 ? isXDL(ST, *MFMA)
2381 ? GFX940_XDL16PassWriteVgprVALUMemExpReadWaitStates
2382 : GFX940_SMFMA16PassWriteVgprVALUMemExpReadWaitStates
2383 : SMFMA32x32WriteVgprVALUMemExpReadWaitStates;
2384 break;
2385 }
2386
2387 int WaitStatesNeededForUse = NeedWaitStates - WaitStatesSinceDef;
2388 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
2389
2390 if (WaitStatesNeeded == MaxWaitStates)
2391 break;
2392 }
2393 }
2394
2395 unsigned Opc = MI->getOpcode();
2396 const int DMFMAToFMA64WaitStates = 2;
2397 if ((Opc == AMDGPU::V_FMA_F64_e64 ||
2398 Opc == AMDGPU::V_FMAC_F64_e32 || Opc == AMDGPU::V_FMAC_F64_e64 ||
2399 Opc == AMDGPU::V_FMAC_F64_dpp) &&
2400 WaitStatesNeeded < DMFMAToFMA64WaitStates) {
2401 int WaitStatesNeededForUse = DMFMAToFMA64WaitStates -
2402 getWaitStatesSince(IsDGEMMFn, DMFMAToFMA64WaitStates);
2403 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
2404 }
2405
2406 if (!IsVALU && !IsMemOrExport)
2407 return WaitStatesNeeded;
2408
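// WAW and WAR checks on this instruction's VGPR destinations follow.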
2409 for (const MachineOperand &Def : MI->defs()) {
2410 const int SMFMA4x4WriteVgprVALUWawWaitStates = 5;
2411 const int SMFMA16x16WriteVgprVALUWawWaitStates = 11;
2412 const int SMFMA32x32WriteVgprVALUWawWaitStates = 19;
2413 const int GFX940_SMFMA2PassWriteVgprVALUWawWaitStates = 4;
2414 const int GFX940_SMFMA4PassWriteVgprVALUWawWaitStates = 6;
2415 const int GFX940_SMFMA8PassWriteVgprVALUWawWaitStates = 10;
2416 const int GFX940_SMFMA16PassWriteVgprVALUWawWaitStates = 18;
2417 const int GFX940_XDL2PassWriteVgprVALUWawWaitStates = 5;
2418 const int GFX940_XDL4PassWriteVgprVALUWawWaitStates = 7;
2419 const int GFX940_XDL8PassWriteVgprVALUWawWaitStates = 11;
2420 const int GFX940_XDL16PassWriteVgprVALUWawWaitStates = 19;
2421 const int SMFMA4x4ReadVgprVALUWarWaitStates = 1;
2422 const int GFX940_XDL4PassReadVgprVALUWarWaitStates = 3;
2423 const int SMFMA16x16ReadVgprVALUWarWaitStates = 7;
2424 const int SMFMA32x32ReadVgprVALUWarWaitStates = 15;
2425 const int DMFMA4x4WriteVgprVALUWriteWaitStates = 6;
2426 const int DMFMA16x16WriteVgprVALUWriteWaitStates = 11;
2427 const int DotWriteDifferentVALUWrite = 3;
2428 const int MaxWaitStates = 19;
2429 const int MaxWarWaitStates = 15;
2430
2431 Reg = Def.getReg();
2432
2433 DOT = nullptr;
2434 int WaitStatesSinceDef = getWaitStatesSinceDef(Reg, IsDotWriteFn,
2435 MaxWaitStates);
2436 if (DOT && DOT->getOpcode() != MI->getOpcode())
2437 WaitStatesNeeded = std::max(WaitStatesNeeded, DotWriteDifferentVALUWrite -
2438 WaitStatesSinceDef);
2439
2440 MFMA = nullptr;
2441 WaitStatesSinceDef =
2442 getWaitStatesSinceDef(Reg, IsMFMAWriteFn, MaxWaitStates);
2443 if (MFMA) {
2444 int NeedWaitStates = MaxWaitStates;
2445 switch (TSchedModel.computeInstrLatency(MFMA)) {
2446 case 2:
2447 NeedWaitStates = ST.hasGFX940Insts()
2448 ? isXDL(ST, *MFMA)
2449 ? GFX940_XDL2PassWriteVgprVALUWawWaitStates
2450 : GFX940_SMFMA2PassWriteVgprVALUWawWaitStates
2451 : SMFMA4x4WriteVgprVALUWawWaitStates;
2452 break;
2453 case 4:
2454 assert(isDGEMM(MFMA->getOpcode()) || ST.hasGFX940Insts());
2455 NeedWaitStates = isDGEMM(MFMA->getOpcode())
2456 ? DMFMA4x4WriteVgprVALUWriteWaitStates
2457 : isXDL(ST, *MFMA)
2458 ? GFX940_XDL4PassWriteVgprVALUWawWaitStates
2459 : GFX940_SMFMA4PassWriteVgprVALUWawWaitStates;
2460 break;
2461 case 8:
2462 NeedWaitStates = ST.hasGFX940Insts()
2463 ? isXDL(ST, *MFMA)
2464 ? GFX940_XDL8PassWriteVgprVALUWawWaitStates
2465 : GFX940_SMFMA8PassWriteVgprVALUWawWaitStates
2466 : SMFMA16x16WriteVgprVALUWawWaitStates;
2467 break;
2468 case 16: LLVM_FALLTHROUGH;
2469 default:
2470 NeedWaitStates = isDGEMM(MFMA->getOpcode())
2471 ? DMFMA16x16WriteVgprVALUWriteWaitStates
2472 : ST.hasGFX940Insts()
2473 ? isXDL(ST, *MFMA)
2474 ? GFX940_XDL16PassWriteVgprVALUWawWaitStates
2475 : GFX940_SMFMA16PassWriteVgprVALUWawWaitStates
2476 : SMFMA32x32WriteVgprVALUWawWaitStates;
2477 break;
2478 }
2479
2480 int WaitStatesNeededForUse = NeedWaitStates - WaitStatesSinceDef;
2481 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
2482
2483 if (WaitStatesNeeded == MaxWaitStates)
2484 break;
2485 }
2486
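// WAR check: a preceding SMFMA/XDL instruction (not a DGEMM) that still reads
// this def as its src2 accumulator requires wait states chosen by its latency
// below.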
2487 auto IsSMFMAReadAsCFn = [&Reg, &MFMA, this](const MachineInstr &MI) {
2488 if (!SIInstrInfo::isMFMA(MI) || isDGEMM(MI.getOpcode()) ||
2489 !MI.readsRegister(Reg, &TRI))
2490 return false;
2491
2492 if (ST.hasGFX940Insts() && !isXDL(ST, MI))
2493 return false;
2494
2495 const MachineOperand *SrcC =
2496 TII.getNamedOperand(MI, AMDGPU::OpName::src2);
2497 assert(SrcC);
2498 if (!SrcC->isReg() || !TRI.regsOverlap(SrcC->getReg(), Reg))
2499 return false;
2500
2501 MFMA = &MI;
2502 return true;
2503 };
2504
2505 MFMA = nullptr;
2506 int WaitStatesSinceUse = getWaitStatesSince(IsSMFMAReadAsCFn,
2507 MaxWarWaitStates);
2508 if (!MFMA)
2509 continue;
2510
2511 unsigned HazardDefLatency = TSchedModel.computeInstrLatency(MFMA);
2512 int NeedWaitStates = MaxWaitStates;
2513 switch (HazardDefLatency) {
2514 case 2: NeedWaitStates = SMFMA4x4ReadVgprVALUWarWaitStates;
2515 break;
2516 case 4: assert(ST.hasGFX940Insts());
2517 NeedWaitStates = GFX940_XDL4PassReadVgprVALUWarWaitStates;
2518 break;
2519 case 8: NeedWaitStates = SMFMA16x16ReadVgprVALUWarWaitStates;
2520 break;
2521 case 16: LLVM_FALLTHROUGH;
2522 default: NeedWaitStates = SMFMA32x32ReadVgprVALUWarWaitStates;
2523 break;
2524 }
2525
2526 int WaitStatesNeededForUse = NeedWaitStates - WaitStatesSinceUse;
2527 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
2528 }
2529
2530 return WaitStatesNeeded;
2531 }
2532
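// Tell the scheduler to prefer another candidate when this MFMA would issue
// while an earlier MFMA is still within its pipeline latency.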
2533 bool GCNHazardRecognizer::ShouldPreferAnother(SUnit *SU) {
2534 if (!SU->isInstr())
2535 return false;
2536
2537 const MachineInstr *MAI = nullptr;
2538
2539 auto IsMFMAFn = [&MAI](const MachineInstr &MI) {
2540 MAI = nullptr;
2541 if (SIInstrInfo::isMFMA(MI))
2542 MAI = &MI;
2543 return MAI != nullptr;
2544 };
2545
2546 MachineInstr *MI = SU->getInstr();
2547 if (IsMFMAFn(*MI)) {
2548 int W = getWaitStatesSince(IsMFMAFn, 16);
2549 if (MAI)
2550 return W < (int)TSchedModel.computeInstrLatency(MAI);
2551 }
2552
2553 return false;
2554 }
2555