1 //===- SIInsertWaitcnts.cpp - Insert Wait Instructions --------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 /// \file
11 /// Insert wait instructions for memory reads and writes.
12 ///
13 /// Memory reads and writes are issued asynchronously, so we need to insert
14 /// S_WAITCNT instructions when we want to access any of their results or
15 /// overwrite any register that's used asynchronously.
16 ///
17 /// TODO: This pass currently keeps one timeline per hardware counter. A more
18 /// finely-grained approach that keeps one timeline per event type could
19 /// sometimes get away with generating weaker s_waitcnt instructions. For
20 /// example, when both SMEM and LDS are in flight and we need to wait for
21 /// the i-th-last LDS instruction, then an lgkmcnt(i) is actually sufficient,
22 /// but the pass will currently generate a conservative lgkmcnt(0) because
23 /// multiple event types are in flight.
24 //
25 //===----------------------------------------------------------------------===//
26 
27 #include "AMDGPU.h"
28 #include "AMDGPUSubtarget.h"
29 #include "SIDefines.h"
30 #include "SIInstrInfo.h"
31 #include "SIMachineFunctionInfo.h"
32 #include "SIRegisterInfo.h"
33 #include "Utils/AMDGPUBaseInfo.h"
34 #include "llvm/ADT/DenseMap.h"
35 #include "llvm/ADT/DenseSet.h"
36 #include "llvm/ADT/PostOrderIterator.h"
37 #include "llvm/ADT/STLExtras.h"
38 #include "llvm/ADT/SmallVector.h"
39 #include "llvm/CodeGen/MachineBasicBlock.h"
40 #include "llvm/CodeGen/MachineFunction.h"
41 #include "llvm/CodeGen/MachineFunctionPass.h"
42 #include "llvm/CodeGen/MachineInstr.h"
43 #include "llvm/CodeGen/MachineInstrBuilder.h"
44 #include "llvm/CodeGen/MachineMemOperand.h"
45 #include "llvm/CodeGen/MachineOperand.h"
46 #include "llvm/CodeGen/MachineRegisterInfo.h"
47 #include "llvm/IR/DebugLoc.h"
48 #include "llvm/Pass.h"
49 #include "llvm/Support/Debug.h"
50 #include "llvm/Support/DebugCounter.h"
51 #include "llvm/Support/ErrorHandling.h"
52 #include "llvm/Support/raw_ostream.h"
53 #include <algorithm>
54 #include <cassert>
55 #include <cstdint>
56 #include <cstring>
57 #include <memory>
58 #include <utility>
59 #include <vector>
60 
61 using namespace llvm;
62 
63 #define DEBUG_TYPE "si-insert-waitcnts"
64 
65 DEBUG_COUNTER(ForceExpCounter, DEBUG_TYPE"-forceexp",
66               "Force emit s_waitcnt expcnt(0) instrs");
67 DEBUG_COUNTER(ForceLgkmCounter, DEBUG_TYPE"-forcelgkm",
68               "Force emit s_waitcnt lgkmcnt(0) instrs");
69 DEBUG_COUNTER(ForceVMCounter, DEBUG_TYPE"-forcevm",
70               "Force emit s_waitcnt vmcnt(0) instrs");
71 
72 static cl::opt<unsigned> ForceEmitZeroFlag(
73   "amdgpu-waitcnt-forcezero",
74   cl::desc("Force all waitcnt instrs to be emitted as s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)"),
75   cl::init(0), cl::Hidden);
76 
77 namespace {
78 
79 template <typename EnumT>
80 class enum_iterator
81     : public iterator_facade_base<enum_iterator<EnumT>,
82                                   std::forward_iterator_tag, const EnumT> {
83   EnumT Value;
84 public:
85   enum_iterator() = default;
86   enum_iterator(EnumT Value) : Value(Value) {}
87 
88   enum_iterator &operator++() {
89     Value = static_cast<EnumT>(Value + 1);
90     return *this;
91   }
92 
93   bool operator==(const enum_iterator &RHS) const { return Value == RHS.Value; }
94 
95   EnumT operator*() const { return Value; }
96 };
97 
// Class of object that encapsulates the latest instruction counter score
// associated with the operand.  Used for determining whether an
// s_waitcnt instruction needs to be emitted.
101 
102 #define CNT_MASK(t) (1u << (t))
103 
104 enum InstCounterType { VM_CNT = 0, LGKM_CNT, EXP_CNT, NUM_INST_CNTS };
105 
106 iterator_range<enum_iterator<InstCounterType>> inst_counter_types() {
107   return make_range(enum_iterator<InstCounterType>(VM_CNT),
108                     enum_iterator<InstCounterType>(NUM_INST_CNTS));
109 }
110 
111 using RegInterval = std::pair<signed, signed>;
112 
113 struct {
114   uint32_t VmcntMax;
115   uint32_t ExpcntMax;
116   uint32_t LgkmcntMax;
117   int32_t NumVGPRsMax;
118   int32_t NumSGPRsMax;
119 } HardwareLimits;
120 
121 struct {
122   unsigned VGPR0;
123   unsigned VGPRL;
124   unsigned SGPR0;
125   unsigned SGPRL;
126 } RegisterEncoding;
127 
128 enum WaitEventType {
129   VMEM_ACCESS,      // vector-memory read & write
130   LDS_ACCESS,       // lds read & write
131   GDS_ACCESS,       // gds read & write
132   SQ_MESSAGE,       // send message
133   SMEM_ACCESS,      // scalar-memory read & write
134   EXP_GPR_LOCK,     // export holding on its data src
135   GDS_GPR_LOCK,     // GDS holding on its data and addr src
136   EXP_POS_ACCESS,   // write to export position
137   EXP_PARAM_ACCESS, // write to export parameter
138   VMW_GPR_LOCK,     // vector-memory write holding on its data src
139   NUM_WAIT_EVENTS,
140 };
141 
142 static const uint32_t WaitEventMaskForInst[NUM_INST_CNTS] = {
143   (1 << VMEM_ACCESS),
144   (1 << SMEM_ACCESS) | (1 << LDS_ACCESS) | (1 << GDS_ACCESS) |
145       (1 << SQ_MESSAGE),
146   (1 << EXP_GPR_LOCK) | (1 << GDS_GPR_LOCK) | (1 << VMW_GPR_LOCK) |
147       (1 << EXP_PARAM_ACCESS) | (1 << EXP_POS_ACCESS),
148 };
149 
150 // The mapping is:
151 //  0                .. SQ_MAX_PGM_VGPRS-1               real VGPRs
152 //  SQ_MAX_PGM_VGPRS .. NUM_ALL_VGPRS-1                  extra VGPR-like slots
153 //  NUM_ALL_VGPRS    .. NUM_ALL_VGPRS+SQ_MAX_PGM_SGPRS-1 real SGPRs
154 // We reserve a fixed number of VGPR slots in the scoring tables for
155 // special tokens like SCMEM_LDS (needed for buffer load to LDS).
156 enum RegisterMapping {
157   SQ_MAX_PGM_VGPRS = 256, // Maximum programmable VGPRs across all targets.
158   SQ_MAX_PGM_SGPRS = 256, // Maximum programmable SGPRs across all targets.
159   NUM_EXTRA_VGPRS = 1,    // A reserved slot for DS.
160   EXTRA_VGPR_LDS = 0,     // This is a placeholder the Shader algorithm uses.
161   NUM_ALL_VGPRS = SQ_MAX_PGM_VGPRS + NUM_EXTRA_VGPRS, // Where SGPR starts.
162 };
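
// For illustration (assuming the register encodings line up with the table
// above): VGPR v5 maps to slot 5, the LDS tracking token maps to slot
// SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS == 256, and SGPR s3 maps to slot
// NUM_ALL_VGPRS + 3 == 260 in the scoring tables.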
163 
164 void addWait(AMDGPU::Waitcnt &Wait, InstCounterType T, unsigned Count) {
165   switch (T) {
166   case VM_CNT:
167     Wait.VmCnt = std::min(Wait.VmCnt, Count);
168     break;
169   case EXP_CNT:
170     Wait.ExpCnt = std::min(Wait.ExpCnt, Count);
171     break;
172   case LGKM_CNT:
173     Wait.LgkmCnt = std::min(Wait.LgkmCnt, Count);
174     break;
175   default:
176     llvm_unreachable("bad InstCounterType");
177   }
178 }
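
// A small usage sketch: counts combine toward the strictest requirement.
// Starting from a default-constructed AMDGPU::Waitcnt (all counts at their
// "no wait needed" maximum), addWait(Wait, VM_CNT, 2) followed by
// addWait(Wait, VM_CNT, 0) leaves Wait.VmCnt == 0; the stronger wait wins
// because std::min is used for the merge.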
179 
// This object maintains the current score brackets of each wait counter, and
// a per-register scoreboard for each wait counter.
//
// We also maintain the latest score for every event type that can change the
// waitcnt in order to know whether there are multiple types of events within
// the brackets. When multiple event types are pending within a bracket, the
// wait count may get decremented out of order, so we need to emit an
// "s_waitcnt 0" before the use.
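//
// Illustrative scenario: if an SMEM load and an LDS read are both still
// outstanding, both are tracked by LGKM_CNT but may retire in either order,
// so a dependent use of either result conservatively needs lgkmcnt(0).
// With only LDS reads pending, lgkmcnt(N) waits precisely for all but the
// N youngest of them.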
188 class WaitcntBrackets {
189 public:
190   WaitcntBrackets(const GCNSubtarget *SubTarget) : ST(SubTarget) {
191     for (auto T : inst_counter_types())
192       memset(VgprScores[T], 0, sizeof(VgprScores[T]));
193   }
194 
195   static uint32_t getWaitCountMax(InstCounterType T) {
196     switch (T) {
197     case VM_CNT:
198       return HardwareLimits.VmcntMax;
199     case LGKM_CNT:
200       return HardwareLimits.LgkmcntMax;
201     case EXP_CNT:
202       return HardwareLimits.ExpcntMax;
203     default:
204       break;
205     }
206     return 0;
207   }
208 
209   uint32_t getScoreLB(InstCounterType T) const {
210     assert(T < NUM_INST_CNTS);
211     if (T >= NUM_INST_CNTS)
212       return 0;
213     return ScoreLBs[T];
214   }
215 
216   uint32_t getScoreUB(InstCounterType T) const {
217     assert(T < NUM_INST_CNTS);
218     if (T >= NUM_INST_CNTS)
219       return 0;
220     return ScoreUBs[T];
221   }
222 
223   // Mapping from event to counter.
224   InstCounterType eventCounter(WaitEventType E) {
225     if (E == VMEM_ACCESS)
226       return VM_CNT;
227     if (WaitEventMaskForInst[LGKM_CNT] & (1 << E))
228       return LGKM_CNT;
229     assert(WaitEventMaskForInst[EXP_CNT] & (1 << E));
230     return EXP_CNT;
231   }
232 
233   uint32_t getRegScore(int GprNo, InstCounterType T) {
234     if (GprNo < NUM_ALL_VGPRS) {
235       return VgprScores[T][GprNo];
236     }
237     assert(T == LGKM_CNT);
238     return SgprScores[GprNo - NUM_ALL_VGPRS];
239   }
240 
241   void clear() {
242     memset(ScoreLBs, 0, sizeof(ScoreLBs));
243     memset(ScoreUBs, 0, sizeof(ScoreUBs));
244     PendingEvents = 0;
245     memset(MixedPendingEvents, 0, sizeof(MixedPendingEvents));
246     for (auto T : inst_counter_types())
247       memset(VgprScores[T], 0, sizeof(VgprScores[T]));
248     memset(SgprScores, 0, sizeof(SgprScores));
249   }
250 
251   bool merge(const WaitcntBrackets &Other);
252 
253   RegInterval getRegInterval(const MachineInstr *MI, const SIInstrInfo *TII,
254                              const MachineRegisterInfo *MRI,
255                              const SIRegisterInfo *TRI, unsigned OpNo,
256                              bool Def) const;
257 
258   int32_t getMaxVGPR() const { return VgprUB; }
259   int32_t getMaxSGPR() const { return SgprUB; }
260 
261   bool counterOutOfOrder(InstCounterType T) const;
262   bool simplifyWaitcnt(AMDGPU::Waitcnt &Wait) const;
263   bool simplifyWaitcnt(InstCounterType T, unsigned &Count) const;
264   void determineWait(InstCounterType T, uint32_t ScoreToWait,
265                      AMDGPU::Waitcnt &Wait) const;
266   void applyWaitcnt(const AMDGPU::Waitcnt &Wait);
267   void applyWaitcnt(InstCounterType T, unsigned Count);
268   void updateByEvent(const SIInstrInfo *TII, const SIRegisterInfo *TRI,
269                      const MachineRegisterInfo *MRI, WaitEventType E,
270                      MachineInstr &MI);
271 
272   bool hasPending() const { return PendingEvents != 0; }
273   bool hasPendingEvent(WaitEventType E) const {
274     return PendingEvents & (1 << E);
275   }
276 
277   bool hasPendingFlat() const {
278     return ((LastFlat[LGKM_CNT] > ScoreLBs[LGKM_CNT] &&
279              LastFlat[LGKM_CNT] <= ScoreUBs[LGKM_CNT]) ||
280             (LastFlat[VM_CNT] > ScoreLBs[VM_CNT] &&
281              LastFlat[VM_CNT] <= ScoreUBs[VM_CNT]));
282   }
283 
284   void setPendingFlat() {
285     LastFlat[VM_CNT] = ScoreUBs[VM_CNT];
286     LastFlat[LGKM_CNT] = ScoreUBs[LGKM_CNT];
287   }
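
  // Note on FLAT tracking (illustrative): a FLAT access may resolve to either
  // VMEM or LDS, so it is accounted against both the vmcnt and lgkmcnt
  // bookkeeping. Because the two counters drain independently, a later
  // dependence on such an access can only be satisfied by waiting for 0 on
  // the affected counter(s); see determineWait().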
288 
289   void print(raw_ostream &);
290   void dump() { print(dbgs()); }
291 
292 private:
293   struct MergeInfo {
294     uint32_t OldLB;
295     uint32_t OtherLB;
296     uint32_t MyShift;
297     uint32_t OtherShift;
298   };
299   static bool mergeScore(const MergeInfo &M, uint32_t &Score,
300                          uint32_t OtherScore);
301 
302   void setScoreLB(InstCounterType T, uint32_t Val) {
303     assert(T < NUM_INST_CNTS);
304     if (T >= NUM_INST_CNTS)
305       return;
306     ScoreLBs[T] = Val;
307   }
308 
309   void setScoreUB(InstCounterType T, uint32_t Val) {
310     assert(T < NUM_INST_CNTS);
311     if (T >= NUM_INST_CNTS)
312       return;
313     ScoreUBs[T] = Val;
314     if (T == EXP_CNT) {
315       uint32_t UB = ScoreUBs[T] - getWaitCountMax(EXP_CNT);
316       if (ScoreLBs[T] < UB && UB < ScoreUBs[T])
317         ScoreLBs[T] = UB;
318     }
319   }
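
  // Illustrative example for the EXP_CNT adjustment above: with an expcnt
  // capacity of, say, 7, at most 7 export-class events can be outstanding at
  // once. If ScoreUBs[EXP_CNT] reaches 20, every event with score <= 13 must
  // already have completed, so the lower bound is advanced to 20 - 7 = 13.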
320 
321   void setRegScore(int GprNo, InstCounterType T, uint32_t Val) {
322     if (GprNo < NUM_ALL_VGPRS) {
323       if (GprNo > VgprUB) {
324         VgprUB = GprNo;
325       }
326       VgprScores[T][GprNo] = Val;
327     } else {
328       assert(T == LGKM_CNT);
329       if (GprNo - NUM_ALL_VGPRS > SgprUB) {
330         SgprUB = GprNo - NUM_ALL_VGPRS;
331       }
332       SgprScores[GprNo - NUM_ALL_VGPRS] = Val;
333     }
334   }
335 
336   void setExpScore(const MachineInstr *MI, const SIInstrInfo *TII,
337                    const SIRegisterInfo *TRI, const MachineRegisterInfo *MRI,
338                    unsigned OpNo, uint32_t Val);
339 
340   const GCNSubtarget *ST = nullptr;
341   uint32_t ScoreLBs[NUM_INST_CNTS] = {0};
342   uint32_t ScoreUBs[NUM_INST_CNTS] = {0};
343   uint32_t PendingEvents = 0;
344   bool MixedPendingEvents[NUM_INST_CNTS] = {false};
345   // Remember the last flat memory operation.
346   uint32_t LastFlat[NUM_INST_CNTS] = {0};
347   // wait_cnt scores for every vgpr.
348   // Keep track of the VgprUB and SgprUB to make merge at join efficient.
349   int32_t VgprUB = 0;
350   int32_t SgprUB = 0;
351   uint32_t VgprScores[NUM_INST_CNTS][NUM_ALL_VGPRS];
352   // Wait cnt scores for every sgpr, only lgkmcnt is relevant.
353   uint32_t SgprScores[SQ_MAX_PGM_SGPRS] = {0};
354 };
355 
356 class SIInsertWaitcnts : public MachineFunctionPass {
357 private:
358   const GCNSubtarget *ST = nullptr;
359   const SIInstrInfo *TII = nullptr;
360   const SIRegisterInfo *TRI = nullptr;
361   const MachineRegisterInfo *MRI = nullptr;
362   AMDGPU::IsaVersion IV;
363 
364   DenseSet<MachineInstr *> TrackedWaitcntSet;
365   DenseSet<MachineInstr *> VCCZBugHandledSet;
366 
367   struct BlockInfo {
368     MachineBasicBlock *MBB;
369     std::unique_ptr<WaitcntBrackets> Incoming;
370     bool Dirty = true;
371 
372     explicit BlockInfo(MachineBasicBlock *MBB) : MBB(MBB) {}
373   };
374 
375   std::vector<BlockInfo> BlockInfos; // by reverse post-order traversal index
376   DenseMap<MachineBasicBlock *, unsigned> RpotIdxMap;
377 
  // ForceEmitZeroWaitcnts: force all waitcnt instrs to be emitted as
  // s_waitcnt 0 because of the amdgpu-waitcnt-forcezero flag.
380   bool ForceEmitZeroWaitcnts;
381   bool ForceEmitWaitcnt[NUM_INST_CNTS];
382 
383 public:
384   static char ID;
385 
386   SIInsertWaitcnts() : MachineFunctionPass(ID) {
387     (void)ForceExpCounter;
388     (void)ForceLgkmCounter;
389     (void)ForceVMCounter;
390   }
391 
392   bool runOnMachineFunction(MachineFunction &MF) override;
393 
394   StringRef getPassName() const override {
395     return "SI insert wait instructions";
396   }
397 
398   void getAnalysisUsage(AnalysisUsage &AU) const override {
399     AU.setPreservesCFG();
400     MachineFunctionPass::getAnalysisUsage(AU);
401   }
402 
403   bool isForceEmitWaitcnt() const {
404     for (auto T : inst_counter_types())
405       if (ForceEmitWaitcnt[T])
406         return true;
407     return false;
408   }
409 
410   void setForceEmitWaitcnt() {
411 // For non-debug builds, ForceEmitWaitcnt has been initialized to false;
412 // For debug builds, get the debug counter info and adjust if need be
413 #ifndef NDEBUG
414     if (DebugCounter::isCounterSet(ForceExpCounter) &&
415         DebugCounter::shouldExecute(ForceExpCounter)) {
416       ForceEmitWaitcnt[EXP_CNT] = true;
417     } else {
418       ForceEmitWaitcnt[EXP_CNT] = false;
419     }
420 
421     if (DebugCounter::isCounterSet(ForceLgkmCounter) &&
422          DebugCounter::shouldExecute(ForceLgkmCounter)) {
423       ForceEmitWaitcnt[LGKM_CNT] = true;
424     } else {
425       ForceEmitWaitcnt[LGKM_CNT] = false;
426     }
427 
428     if (DebugCounter::isCounterSet(ForceVMCounter) &&
429         DebugCounter::shouldExecute(ForceVMCounter)) {
430       ForceEmitWaitcnt[VM_CNT] = true;
431     } else {
432       ForceEmitWaitcnt[VM_CNT] = false;
433     }
434 #endif // NDEBUG
435   }
436 
437   bool mayAccessLDSThroughFlat(const MachineInstr &MI) const;
438   bool generateWaitcntInstBefore(MachineInstr &MI,
439                                  WaitcntBrackets &ScoreBrackets,
440                                  MachineInstr *OldWaitcntInstr);
441   void updateEventWaitcntAfter(MachineInstr &Inst,
442                                WaitcntBrackets *ScoreBrackets);
443   bool insertWaitcntInBlock(MachineFunction &MF, MachineBasicBlock &Block,
444                             WaitcntBrackets &ScoreBrackets);
445 };
446 
447 } // end anonymous namespace
448 
449 RegInterval WaitcntBrackets::getRegInterval(const MachineInstr *MI,
450                                             const SIInstrInfo *TII,
451                                             const MachineRegisterInfo *MRI,
452                                             const SIRegisterInfo *TRI,
453                                             unsigned OpNo, bool Def) const {
454   const MachineOperand &Op = MI->getOperand(OpNo);
455   if (!Op.isReg() || !TRI->isInAllocatableClass(Op.getReg()) ||
456       (Def && !Op.isDef()))
457     return {-1, -1};
458 
  // A use via a PW (partial-write) operand does not need a waitcnt;
  // a partial write is not a WAW hazard.
461   assert(!Op.getSubReg() || !Op.isUndef());
462 
463   RegInterval Result;
464   const MachineRegisterInfo &MRIA = *MRI;
465 
466   unsigned Reg = TRI->getEncodingValue(Op.getReg());
467 
468   if (TRI->isVGPR(MRIA, Op.getReg())) {
469     assert(Reg >= RegisterEncoding.VGPR0 && Reg <= RegisterEncoding.VGPRL);
470     Result.first = Reg - RegisterEncoding.VGPR0;
471     assert(Result.first >= 0 && Result.first < SQ_MAX_PGM_VGPRS);
472   } else if (TRI->isSGPRReg(MRIA, Op.getReg())) {
473     assert(Reg >= RegisterEncoding.SGPR0 && Reg < SQ_MAX_PGM_SGPRS);
474     Result.first = Reg - RegisterEncoding.SGPR0 + NUM_ALL_VGPRS;
475     assert(Result.first >= NUM_ALL_VGPRS &&
476            Result.first < SQ_MAX_PGM_SGPRS + NUM_ALL_VGPRS);
477   }
478   // TODO: Handle TTMP
479   // else if (TRI->isTTMP(MRIA, Reg.getReg())) ...
480   else
481     return {-1, -1};
482 
483   const MachineInstr &MIA = *MI;
484   const TargetRegisterClass *RC = TII->getOpRegClass(MIA, OpNo);
485   unsigned Size = TRI->getRegSizeInBits(*RC);
486   Result.second = Result.first + (Size / 32);
487 
488   return Result;
489 }
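
// For illustration: a 128-bit VGPR tuple operand such as v[4:7] yields the
// half-open interval {4, 8}, i.e. Result.second is one past the last slot,
// which is how the callers' "RegNo < Interval.second" loops iterate over it.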
490 
491 void WaitcntBrackets::setExpScore(const MachineInstr *MI,
492                                   const SIInstrInfo *TII,
493                                   const SIRegisterInfo *TRI,
494                                   const MachineRegisterInfo *MRI, unsigned OpNo,
495                                   uint32_t Val) {
496   RegInterval Interval = getRegInterval(MI, TII, MRI, TRI, OpNo, false);
497   LLVM_DEBUG({
498     const MachineOperand &Opnd = MI->getOperand(OpNo);
499     assert(TRI->isVGPR(*MRI, Opnd.getReg()));
500   });
501   for (signed RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
502     setRegScore(RegNo, EXP_CNT, Val);
503   }
504 }
505 
506 void WaitcntBrackets::updateByEvent(const SIInstrInfo *TII,
507                                     const SIRegisterInfo *TRI,
508                                     const MachineRegisterInfo *MRI,
509                                     WaitEventType E, MachineInstr &Inst) {
510   const MachineRegisterInfo &MRIA = *MRI;
511   InstCounterType T = eventCounter(E);
512   uint32_t CurrScore = getScoreUB(T) + 1;
513   if (CurrScore == 0)
514     report_fatal_error("InsertWaitcnt score wraparound");
  // PendingEvents and ScoreUB need to be updated regardless of whether this
  // event changes the score of a register or not; for example, vm_cnt is
  // affected by a buffer-store and lgkm_cnt by a send-message.
518   if (!hasPendingEvent(E)) {
519     if (PendingEvents & WaitEventMaskForInst[T])
520       MixedPendingEvents[T] = true;
521     PendingEvents |= 1 << E;
522   }
523   setScoreUB(T, CurrScore);
524 
525   if (T == EXP_CNT) {
526     // Put score on the source vgprs. If this is a store, just use those
527     // specific register(s).
528     if (TII->isDS(Inst) && (Inst.mayStore() || Inst.mayLoad())) {
529       // All GDS operations must protect their address register (same as
530       // export.)
531       if (Inst.getOpcode() != AMDGPU::DS_APPEND &&
532           Inst.getOpcode() != AMDGPU::DS_CONSUME) {
533         setExpScore(
534             &Inst, TII, TRI, MRI,
535             AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::addr),
536             CurrScore);
537       }
538       if (Inst.mayStore()) {
539         setExpScore(
540             &Inst, TII, TRI, MRI,
541             AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data0),
542             CurrScore);
543         if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
544                                        AMDGPU::OpName::data1) != -1) {
545           setExpScore(&Inst, TII, TRI, MRI,
546                       AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
547                                                  AMDGPU::OpName::data1),
548                       CurrScore);
549         }
550       } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1 &&
551                  Inst.getOpcode() != AMDGPU::DS_GWS_INIT &&
552                  Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_V &&
553                  Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_BR &&
554                  Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_P &&
555                  Inst.getOpcode() != AMDGPU::DS_GWS_BARRIER &&
556                  Inst.getOpcode() != AMDGPU::DS_APPEND &&
557                  Inst.getOpcode() != AMDGPU::DS_CONSUME &&
558                  Inst.getOpcode() != AMDGPU::DS_ORDERED_COUNT) {
559         for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
560           const MachineOperand &Op = Inst.getOperand(I);
561           if (Op.isReg() && !Op.isDef() && TRI->isVGPR(MRIA, Op.getReg())) {
562             setExpScore(&Inst, TII, TRI, MRI, I, CurrScore);
563           }
564         }
565       }
566     } else if (TII->isFLAT(Inst)) {
567       if (Inst.mayStore()) {
568         setExpScore(
569             &Inst, TII, TRI, MRI,
570             AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
571             CurrScore);
572       } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1) {
573         setExpScore(
574             &Inst, TII, TRI, MRI,
575             AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
576             CurrScore);
577       }
578     } else if (TII->isMIMG(Inst)) {
579       if (Inst.mayStore()) {
580         setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
581       } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1) {
582         setExpScore(
583             &Inst, TII, TRI, MRI,
584             AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
585             CurrScore);
586       }
587     } else if (TII->isMTBUF(Inst)) {
588       if (Inst.mayStore()) {
589         setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
590       }
591     } else if (TII->isMUBUF(Inst)) {
592       if (Inst.mayStore()) {
593         setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
594       } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1) {
595         setExpScore(
596             &Inst, TII, TRI, MRI,
597             AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
598             CurrScore);
599       }
600     } else {
601       if (TII->isEXP(Inst)) {
602         // For export the destination registers are really temps that
603         // can be used as the actual source after export patching, so
604         // we need to treat them like sources and set the EXP_CNT
605         // score.
606         for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
607           MachineOperand &DefMO = Inst.getOperand(I);
608           if (DefMO.isReg() && DefMO.isDef() &&
609               TRI->isVGPR(MRIA, DefMO.getReg())) {
610             setRegScore(TRI->getEncodingValue(DefMO.getReg()), EXP_CNT,
611                         CurrScore);
612           }
613         }
614       }
615       for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
616         MachineOperand &MO = Inst.getOperand(I);
617         if (MO.isReg() && !MO.isDef() && TRI->isVGPR(MRIA, MO.getReg())) {
618           setExpScore(&Inst, TII, TRI, MRI, I, CurrScore);
619         }
620       }
621     }
622 #if 0 // TODO: check if this is handled by MUBUF code above.
623   } else if (Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORD ||
624        Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORDX2 ||
625        Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORDX4) {
626     MachineOperand *MO = TII->getNamedOperand(Inst, AMDGPU::OpName::data);
627     unsigned OpNo;//TODO: find the OpNo for this operand;
628     RegInterval Interval = getRegInterval(&Inst, TII, MRI, TRI, OpNo, false);
629     for (signed RegNo = Interval.first; RegNo < Interval.second;
630     ++RegNo) {
631       setRegScore(RegNo + NUM_ALL_VGPRS, t, CurrScore);
632     }
633 #endif
634   } else {
635     // Match the score to the destination registers.
636     for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
637       RegInterval Interval = getRegInterval(&Inst, TII, MRI, TRI, I, true);
638       if (T == VM_CNT && Interval.first >= NUM_ALL_VGPRS)
639         continue;
640       for (signed RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
641         setRegScore(RegNo, T, CurrScore);
642       }
643     }
644     if (TII->isDS(Inst) && Inst.mayStore()) {
645       setRegScore(SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS, T, CurrScore);
646     }
647   }
648 }
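
// A concrete example of the bookkeeping above (illustrative): for a
// "buffer_load_dword v5, ..." the VMEM_ACCESS event bumps ScoreUBs[VM_CNT]
// to some new score S and records VgprScores[VM_CNT][5] = S, so a later
// reader of v5 knows exactly how many younger VMEM operations it may leave
// in flight.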
649 
650 void WaitcntBrackets::print(raw_ostream &OS) {
651   OS << '\n';
652   for (auto T : inst_counter_types()) {
653     uint32_t LB = getScoreLB(T);
654     uint32_t UB = getScoreUB(T);
655 
656     switch (T) {
657     case VM_CNT:
658       OS << "    VM_CNT(" << UB - LB << "): ";
659       break;
660     case LGKM_CNT:
661       OS << "    LGKM_CNT(" << UB - LB << "): ";
662       break;
663     case EXP_CNT:
664       OS << "    EXP_CNT(" << UB - LB << "): ";
665       break;
666     default:
667       OS << "    UNKNOWN(" << UB - LB << "): ";
668       break;
669     }
670 
671     if (LB < UB) {
672       // Print vgpr scores.
673       for (int J = 0; J <= getMaxVGPR(); J++) {
674         uint32_t RegScore = getRegScore(J, T);
675         if (RegScore <= LB)
676           continue;
677         uint32_t RelScore = RegScore - LB - 1;
678         if (J < SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS) {
679           OS << RelScore << ":v" << J << " ";
680         } else {
681           OS << RelScore << ":ds ";
682         }
683       }
684       // Also need to print sgpr scores for lgkm_cnt.
685       if (T == LGKM_CNT) {
686         for (int J = 0; J <= getMaxSGPR(); J++) {
687           uint32_t RegScore = getRegScore(J + NUM_ALL_VGPRS, LGKM_CNT);
688           if (RegScore <= LB)
689             continue;
690           uint32_t RelScore = RegScore - LB - 1;
691           OS << RelScore << ":s" << J << " ";
692         }
693       }
694     }
695     OS << '\n';
696   }
697   OS << '\n';
698 }
699 
700 /// Simplify the waitcnt, in the sense of removing redundant counts, and return
701 /// whether a waitcnt instruction is needed at all.
702 bool WaitcntBrackets::simplifyWaitcnt(AMDGPU::Waitcnt &Wait) const {
703   return simplifyWaitcnt(VM_CNT, Wait.VmCnt) |
704          simplifyWaitcnt(EXP_CNT, Wait.ExpCnt) |
705          simplifyWaitcnt(LGKM_CNT, Wait.LgkmCnt);
706 }
707 
708 bool WaitcntBrackets::simplifyWaitcnt(InstCounterType T,
709                                       unsigned &Count) const {
710   const uint32_t LB = getScoreLB(T);
711   const uint32_t UB = getScoreUB(T);
712   if (Count < UB && UB - Count > LB)
713     return true;
714 
715   Count = ~0u;
716   return false;
717 }
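
// Worked example (illustrative): with getScoreLB(VM_CNT) == 2 and
// getScoreUB(VM_CNT) == 5, a requested vmcnt(1) is meaningful because
// 5 - 1 == 4 lies inside the bracket (above the lower bound).  A requested
// vmcnt(3) is redundant, since 5 - 3 == 2 is already guaranteed by the
// bracket, so that count is reset to ~0u ("no wait").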
718 
719 void WaitcntBrackets::determineWait(InstCounterType T, uint32_t ScoreToWait,
720                                     AMDGPU::Waitcnt &Wait) const {
721   // If the score of src_operand falls within the bracket, we need an
722   // s_waitcnt instruction.
723   const uint32_t LB = getScoreLB(T);
724   const uint32_t UB = getScoreUB(T);
725   if ((UB >= ScoreToWait) && (ScoreToWait > LB)) {
726     if ((T == VM_CNT || T == LGKM_CNT) &&
727         hasPendingFlat() &&
728         !ST->hasFlatLgkmVMemCountInOrder()) {
729       // If there is a pending FLAT operation, and this is a VMem or LGKM
730       // waitcnt and the target can report early completion, then we need
731       // to force a waitcnt 0.
732       addWait(Wait, T, 0);
733     } else if (counterOutOfOrder(T)) {
      // The counter can get decremented out of order when there are multiple
      // event types in the bracket, so also emit an s_waitcnt with a
      // conservative value of 0 for this counter.
737       addWait(Wait, T, 0);
738     } else {
739       addWait(Wait, T, UB - ScoreToWait);
740     }
741   }
742 }
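
// Worked example (illustrative): if three VMEM loads have been issued since
// the last wait, the VM_CNT bracket is (LB=0, UB=3).  A use of the register
// written by the first load has ScoreToWait == 1, so determineWait() records
// Wait.VmCnt = UB - ScoreToWait = 2, i.e. "s_waitcnt vmcnt(2)", allowing the
// two younger loads to remain outstanding.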
743 
744 void WaitcntBrackets::applyWaitcnt(const AMDGPU::Waitcnt &Wait) {
745   applyWaitcnt(VM_CNT, Wait.VmCnt);
746   applyWaitcnt(EXP_CNT, Wait.ExpCnt);
747   applyWaitcnt(LGKM_CNT, Wait.LgkmCnt);
748 }
749 
750 void WaitcntBrackets::applyWaitcnt(InstCounterType T, unsigned Count) {
751   const uint32_t UB = getScoreUB(T);
752   if (Count >= UB)
753     return;
754   if (Count != 0) {
755     if (counterOutOfOrder(T))
756       return;
757     setScoreLB(T, std::max(getScoreLB(T), UB - Count));
758   } else {
759     setScoreLB(T, UB);
760     MixedPendingEvents[T] = false;
761     PendingEvents &= ~WaitEventMaskForInst[T];
762   }
763 }
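
// Illustrative effect of the above: applying vmcnt(2) against a bracket of
// (LB=0, UB=5) raises the lower bound to 5 - 2 = 3 (all but the two youngest
// VMEM events are now known to have completed), while applying a count of 0
// collapses the bracket and clears the pending events for that counter.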
764 
765 // Where there are multiple types of event in the bracket of a counter,
766 // the decrement may go out of order.
767 bool WaitcntBrackets::counterOutOfOrder(InstCounterType T) const {
  // Scalar memory reads can always complete out of order.
769   if (T == LGKM_CNT && hasPendingEvent(SMEM_ACCESS))
770     return true;
771   return MixedPendingEvents[T];
772 }
773 
774 INITIALIZE_PASS_BEGIN(SIInsertWaitcnts, DEBUG_TYPE, "SI Insert Waitcnts", false,
775                       false)
776 INITIALIZE_PASS_END(SIInsertWaitcnts, DEBUG_TYPE, "SI Insert Waitcnts", false,
777                     false)
778 
779 char SIInsertWaitcnts::ID = 0;
780 
781 char &llvm::SIInsertWaitcntsID = SIInsertWaitcnts::ID;
782 
783 FunctionPass *llvm::createSIInsertWaitcntsPass() {
784   return new SIInsertWaitcnts();
785 }
786 
787 static bool readsVCCZ(const MachineInstr &MI) {
788   unsigned Opc = MI.getOpcode();
789   return (Opc == AMDGPU::S_CBRANCH_VCCNZ || Opc == AMDGPU::S_CBRANCH_VCCZ) &&
790          !MI.getOperand(1).isUndef();
791 }
792 
///  Generate an s_waitcnt instruction to be placed before \p MI, if needed.
///  Instructions of a given type return their results in order,
///  but instructions of different types can complete out of order.
///  We rely on this in-order completion
///  and simply assign a score to each memory access instruction.
///  We keep track of the active "score bracket" to determine
///  whether an access to a memory-read result requires an s_waitcnt,
///  and if so, what the value of each counter should be.
///  The "score bracket" is bounded by the lower-bound and upper-bound
///  scores (ScoreLBs and ScoreUBs respectively).
803 bool SIInsertWaitcnts::generateWaitcntInstBefore(
804     MachineInstr &MI, WaitcntBrackets &ScoreBrackets,
805     MachineInstr *OldWaitcntInstr) {
806   setForceEmitWaitcnt();
807   bool IsForceEmitWaitcnt = isForceEmitWaitcnt();
808 
809   if (MI.isDebugInstr())
810     return false;
811 
812   AMDGPU::Waitcnt Wait;
813 
814   // See if this instruction has a forced S_WAITCNT VM.
815   // TODO: Handle other cases of NeedsWaitcntVmBefore()
816   if (MI.getOpcode() == AMDGPU::BUFFER_WBINVL1 ||
817       MI.getOpcode() == AMDGPU::BUFFER_WBINVL1_SC ||
818       MI.getOpcode() == AMDGPU::BUFFER_WBINVL1_VOL) {
819     Wait.VmCnt = 0;
820   }
821 
822   // All waits must be resolved at call return.
823   // NOTE: this could be improved with knowledge of all call sites or
824   //   with knowledge of the called routines.
825   if (MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG ||
826       MI.getOpcode() == AMDGPU::S_SETPC_B64_return) {
827     Wait = AMDGPU::Waitcnt::allZero();
828   }
829   // Resolve vm waits before gs-done.
830   else if ((MI.getOpcode() == AMDGPU::S_SENDMSG ||
831             MI.getOpcode() == AMDGPU::S_SENDMSGHALT) &&
832            ((MI.getOperand(0).getImm() & AMDGPU::SendMsg::ID_MASK_) ==
833             AMDGPU::SendMsg::ID_GS_DONE)) {
834     Wait.VmCnt = 0;
835   }
836 #if 0 // TODO: the following blocks of logic when we have fence.
837   else if (MI.getOpcode() == SC_FENCE) {
838     const unsigned int group_size =
839       context->shader_info->GetMaxThreadGroupSize();
840     // group_size == 0 means thread group size is unknown at compile time
841     const bool group_is_multi_wave =
842       (group_size == 0 || group_size > target_info->GetWaveFrontSize());
843     const bool fence_is_global = !((SCInstInternalMisc*)Inst)->IsGroupFence();
844 
845     for (unsigned int i = 0; i < Inst->NumSrcOperands(); i++) {
846       SCRegType src_type = Inst->GetSrcType(i);
847       switch (src_type) {
848         case SCMEM_LDS:
849           if (group_is_multi_wave ||
850             context->OptFlagIsOn(OPT_R1100_LDSMEM_FENCE_CHICKEN_BIT)) {
851             EmitWaitcnt |= ScoreBrackets->updateByWait(LGKM_CNT,
852                                ScoreBrackets->getScoreUB(LGKM_CNT));
853             // LDS may have to wait for VM_CNT after buffer load to LDS
854             if (target_info->HasBufferLoadToLDS()) {
855               EmitWaitcnt |= ScoreBrackets->updateByWait(VM_CNT,
856                                  ScoreBrackets->getScoreUB(VM_CNT));
857             }
858           }
859           break;
860 
861         case SCMEM_GDS:
862           if (group_is_multi_wave || fence_is_global) {
863             EmitWaitcnt |= ScoreBrackets->updateByWait(EXP_CNT,
864               ScoreBrackets->getScoreUB(EXP_CNT));
865             EmitWaitcnt |= ScoreBrackets->updateByWait(LGKM_CNT,
866               ScoreBrackets->getScoreUB(LGKM_CNT));
867           }
868           break;
869 
870         case SCMEM_UAV:
871         case SCMEM_TFBUF:
872         case SCMEM_RING:
873         case SCMEM_SCATTER:
874           if (group_is_multi_wave || fence_is_global) {
875             EmitWaitcnt |= ScoreBrackets->updateByWait(EXP_CNT,
876               ScoreBrackets->getScoreUB(EXP_CNT));
877             EmitWaitcnt |= ScoreBrackets->updateByWait(VM_CNT,
878               ScoreBrackets->getScoreUB(VM_CNT));
879           }
880           break;
881 
882         case SCMEM_SCRATCH:
883         default:
884           break;
885       }
886     }
887   }
888 #endif
889 
890   // Export & GDS instructions do not read the EXEC mask until after the export
891   // is granted (which can occur well after the instruction is issued).
892   // The shader program must flush all EXP operations on the export-count
893   // before overwriting the EXEC mask.
894   else {
895     if (MI.modifiesRegister(AMDGPU::EXEC, TRI)) {
896       // Export and GDS are tracked individually, either may trigger a waitcnt
897       // for EXEC.
898       if (ScoreBrackets.hasPendingEvent(EXP_GPR_LOCK) ||
899           ScoreBrackets.hasPendingEvent(EXP_PARAM_ACCESS) ||
900           ScoreBrackets.hasPendingEvent(EXP_POS_ACCESS) ||
901           ScoreBrackets.hasPendingEvent(GDS_GPR_LOCK)) {
902         Wait.ExpCnt = 0;
903       }
904     }
905 
906 #if 0 // TODO: the following code to handle CALL.
907     // The argument passing for CALLs should suffice for VM_CNT and LGKM_CNT.
908     // However, there is a problem with EXP_CNT, because the call cannot
909     // easily tell if a register is used in the function, and if it did, then
910     // the referring instruction would have to have an S_WAITCNT, which is
911     // dependent on all call sites. So Instead, force S_WAITCNT for EXP_CNTs
912     // before the call.
913     if (MI.getOpcode() == SC_CALL) {
914       if (ScoreBrackets->getScoreUB(EXP_CNT) >
915         ScoreBrackets->getScoreLB(EXP_CNT)) {
916         ScoreBrackets->setScoreLB(EXP_CNT, ScoreBrackets->getScoreUB(EXP_CNT));
917         EmitWaitcnt |= CNT_MASK(EXP_CNT);
918       }
919     }
920 #endif
921 
922     // FIXME: Should not be relying on memoperands.
923     // Look at the source operands of every instruction to see if
924     // any of them results from a previous memory operation that affects
925     // its current usage. If so, an s_waitcnt instruction needs to be
926     // emitted.
927     // If the source operand was defined by a load, add the s_waitcnt
928     // instruction.
929     for (const MachineMemOperand *Memop : MI.memoperands()) {
930       unsigned AS = Memop->getAddrSpace();
931       if (AS != AMDGPUAS::LOCAL_ADDRESS)
932         continue;
933       unsigned RegNo = SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS;
934       // VM_CNT is only relevant to vgpr or LDS.
935       ScoreBrackets.determineWait(
936           VM_CNT, ScoreBrackets.getRegScore(RegNo, VM_CNT), Wait);
937     }
938 
939     for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
940       const MachineOperand &Op = MI.getOperand(I);
941       const MachineRegisterInfo &MRIA = *MRI;
942       RegInterval Interval =
943           ScoreBrackets.getRegInterval(&MI, TII, MRI, TRI, I, false);
944       for (signed RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
945         if (TRI->isVGPR(MRIA, Op.getReg())) {
946           // VM_CNT is only relevant to vgpr or LDS.
947           ScoreBrackets.determineWait(
948               VM_CNT, ScoreBrackets.getRegScore(RegNo, VM_CNT), Wait);
949         }
950         ScoreBrackets.determineWait(
951             LGKM_CNT, ScoreBrackets.getRegScore(RegNo, LGKM_CNT), Wait);
952       }
953     }
954     // End of for loop that looks at all source operands to decide vm_wait_cnt
955     // and lgk_wait_cnt.
956 
957     // Two cases are handled for destination operands:
958     // 1) If the destination operand was defined by a load, add the s_waitcnt
959     // instruction to guarantee the right WAW order.
    // 2) If the destination operand was used by a recent export/store
    // instruction, add an s_waitcnt on exp_cnt to guarantee WAR order.
962     if (MI.mayStore()) {
963       // FIXME: Should not be relying on memoperands.
964       for (const MachineMemOperand *Memop : MI.memoperands()) {
965         unsigned AS = Memop->getAddrSpace();
966         if (AS != AMDGPUAS::LOCAL_ADDRESS)
967           continue;
968         unsigned RegNo = SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS;
969         ScoreBrackets.determineWait(
970             VM_CNT, ScoreBrackets.getRegScore(RegNo, VM_CNT), Wait);
971         ScoreBrackets.determineWait(
972             EXP_CNT, ScoreBrackets.getRegScore(RegNo, EXP_CNT), Wait);
973       }
974     }
975     for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
976       MachineOperand &Def = MI.getOperand(I);
977       const MachineRegisterInfo &MRIA = *MRI;
978       RegInterval Interval =
979           ScoreBrackets.getRegInterval(&MI, TII, MRI, TRI, I, true);
980       for (signed RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
981         if (TRI->isVGPR(MRIA, Def.getReg())) {
982           ScoreBrackets.determineWait(
983               VM_CNT, ScoreBrackets.getRegScore(RegNo, VM_CNT), Wait);
984           ScoreBrackets.determineWait(
985               EXP_CNT, ScoreBrackets.getRegScore(RegNo, EXP_CNT), Wait);
986         }
987         ScoreBrackets.determineWait(
988             LGKM_CNT, ScoreBrackets.getRegScore(RegNo, LGKM_CNT), Wait);
989       }
990     } // End of for loop that looks at all dest operands.
991   }
992 
993   // Check to see if this is an S_BARRIER, and if an implicit S_WAITCNT 0
994   // occurs before the instruction. Doing it here prevents any additional
995   // S_WAITCNTs from being emitted if the instruction was marked as
996   // requiring a WAITCNT beforehand.
997   if (MI.getOpcode() == AMDGPU::S_BARRIER &&
998       !ST->hasAutoWaitcntBeforeBarrier()) {
999     Wait = AMDGPU::Waitcnt::allZero();
1000   }
1001 
1002   // TODO: Remove this work-around, enable the assert for Bug 457939
1003   //       after fixing the scheduler. Also, the Shader Compiler code is
1004   //       independent of target.
1005   if (readsVCCZ(MI) && ST->getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS) {
1006     if (ScoreBrackets.getScoreLB(LGKM_CNT) <
1007             ScoreBrackets.getScoreUB(LGKM_CNT) &&
1008         ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {
1009       Wait.LgkmCnt = 0;
1010     }
1011   }
1012 
1013   // Early-out if no wait is indicated.
1014   if (!ScoreBrackets.simplifyWaitcnt(Wait) && !IsForceEmitWaitcnt) {
1015     bool Modified = false;
1016     if (OldWaitcntInstr) {
1017       if (TrackedWaitcntSet.count(OldWaitcntInstr)) {
1018         TrackedWaitcntSet.erase(OldWaitcntInstr);
1019         OldWaitcntInstr->eraseFromParent();
1020         Modified = true;
1021       } else {
1022         int64_t Imm = OldWaitcntInstr->getOperand(0).getImm();
1023         ScoreBrackets.applyWaitcnt(AMDGPU::decodeWaitcnt(IV, Imm));
1024       }
1025       Modified = true;
1026     }
1027     return Modified;
1028   }
1029 
1030   if (ForceEmitZeroWaitcnts)
1031     Wait = AMDGPU::Waitcnt::allZero();
1032 
1033   if (ForceEmitWaitcnt[VM_CNT])
1034     Wait.VmCnt = 0;
1035   if (ForceEmitWaitcnt[EXP_CNT])
1036     Wait.ExpCnt = 0;
1037   if (ForceEmitWaitcnt[LGKM_CNT])
1038     Wait.LgkmCnt = 0;
1039 
1040   ScoreBrackets.applyWaitcnt(Wait);
1041 
1042   AMDGPU::Waitcnt OldWait;
1043   if (OldWaitcntInstr) {
1044     OldWait =
1045         AMDGPU::decodeWaitcnt(IV, OldWaitcntInstr->getOperand(0).getImm());
1046   }
1047   if (OldWait.dominates(Wait))
1048     return false;
1049 
1050   if (OldWaitcntInstr && !TrackedWaitcntSet.count(OldWaitcntInstr))
1051     Wait = Wait.combined(OldWait);
1052 
1053   unsigned Enc = AMDGPU::encodeWaitcnt(IV, Wait);
1054   if (OldWaitcntInstr) {
1055     OldWaitcntInstr->getOperand(0).setImm(Enc);
1056 
1057     LLVM_DEBUG(dbgs() << "updateWaitcntInBlock\n"
1058                       << "Old Instr: " << MI << '\n'
1059                       << "New Instr: " << *OldWaitcntInstr << '\n');
1060   } else {
1061     auto SWaitInst = BuildMI(*MI.getParent(), MI.getIterator(),
1062                              MI.getDebugLoc(), TII->get(AMDGPU::S_WAITCNT))
1063                          .addImm(Enc);
1064     TrackedWaitcntSet.insert(SWaitInst);
1065 
1066     LLVM_DEBUG(dbgs() << "insertWaitcntInBlock\n"
1067                       << "Old Instr: " << MI << '\n'
1068                       << "New Instr: " << *SWaitInst << '\n');
1069   }
1070 
1071   return true;
1072 }
1073 
// This is a flat memory operation. Check to see if it has a memory token
// for either LDS or the flat address space; if so (or if there are no memory
// tokens at all), conservatively treat it as possibly accessing LDS.
1076 bool SIInsertWaitcnts::mayAccessLDSThroughFlat(const MachineInstr &MI) const {
1077   if (MI.memoperands_empty())
1078     return true;
1079 
1080   for (const MachineMemOperand *Memop : MI.memoperands()) {
1081     unsigned AS = Memop->getAddrSpace();
1082     if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS)
1083       return true;
1084   }
1085 
1086   return false;
1087 }
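
// For example, a flat_load_dword whose pointer provably points to global
// memory carries a global-address memoperand and returns false here, while
// one with no memoperands at all is conservatively assumed to possibly hit
// LDS (an illustration of the policy above, not an exhaustive list).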
1088 
1089 void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst,
1090                                                WaitcntBrackets *ScoreBrackets) {
1091   // Now look at the instruction opcode. If it is a memory access
1092   // instruction, update the upper-bound of the appropriate counter's
1093   // bracket and the destination operand scores.
1094   // TODO: Use the (TSFlags & SIInstrFlags::LGKM_CNT) property everywhere.
1095   if (TII->isDS(Inst) && TII->usesLGKM_CNT(Inst)) {
1096     if (TII->hasModifiersSet(Inst, AMDGPU::OpName::gds)) {
1097       ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_ACCESS, Inst);
1098       ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_GPR_LOCK, Inst);
1099     } else {
1100       ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);
1101     }
1102   } else if (TII->isFLAT(Inst)) {
1103     assert(Inst.mayLoad() || Inst.mayStore());
1104 
1105     if (TII->usesVM_CNT(Inst))
1106       ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_ACCESS, Inst);
1107 
1108     if (TII->usesLGKM_CNT(Inst)) {
1109       ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);
1110 
1111       // This is a flat memory operation, so note it - it will require
1112       // that both the VM and LGKM be flushed to zero if it is pending when
1113       // a VM or LGKM dependency occurs.
1114       if (mayAccessLDSThroughFlat(Inst))
1115         ScoreBrackets->setPendingFlat();
1116     }
1117   } else if (SIInstrInfo::isVMEM(Inst) &&
1118              // TODO: get a better carve out.
1119              Inst.getOpcode() != AMDGPU::BUFFER_WBINVL1 &&
1120              Inst.getOpcode() != AMDGPU::BUFFER_WBINVL1_SC &&
1121              Inst.getOpcode() != AMDGPU::BUFFER_WBINVL1_VOL) {
1122     ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_ACCESS, Inst);
1123     if (ST->vmemWriteNeedsExpWaitcnt() &&
1124         (Inst.mayStore() || AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1)) {
1125       ScoreBrackets->updateByEvent(TII, TRI, MRI, VMW_GPR_LOCK, Inst);
1126     }
1127   } else if (TII->isSMRD(Inst)) {
1128     ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst);
1129   } else {
1130     switch (Inst.getOpcode()) {
1131     case AMDGPU::S_SENDMSG:
1132     case AMDGPU::S_SENDMSGHALT:
1133       ScoreBrackets->updateByEvent(TII, TRI, MRI, SQ_MESSAGE, Inst);
1134       break;
1135     case AMDGPU::EXP:
1136     case AMDGPU::EXP_DONE: {
1137       int Imm = TII->getNamedOperand(Inst, AMDGPU::OpName::tgt)->getImm();
1138       if (Imm >= 32 && Imm <= 63)
1139         ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_PARAM_ACCESS, Inst);
1140       else if (Imm >= 12 && Imm <= 15)
1141         ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_POS_ACCESS, Inst);
1142       else
1143         ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_GPR_LOCK, Inst);
1144       break;
1145     }
1146     case AMDGPU::S_MEMTIME:
1147     case AMDGPU::S_MEMREALTIME:
1148       ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst);
1149       break;
1150     default:
1151       break;
1152     }
1153   }
1154 }
1155 
1156 bool WaitcntBrackets::mergeScore(const MergeInfo &M, uint32_t &Score,
1157                                  uint32_t OtherScore) {
1158   uint32_t MyShifted = Score <= M.OldLB ? 0 : Score + M.MyShift;
1159   uint32_t OtherShifted =
1160       OtherScore <= M.OtherLB ? 0 : OtherScore + M.OtherShift;
1161   Score = std::max(MyShifted, OtherShifted);
1162   return OtherShifted > MyShifted;
1163 }
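
// A sketch of the rebasing that merge() performs (illustrative numbers): if
// this block ends with bracket (LB=1, UB=3) and the other predecessor ends
// with (LB=0, UB=4), the pending ranges are 2 and 4 events respectively, so
// this side's scores are shifted up by MyShift = 2 to align the upper bounds.
// mergeScore() then takes the max of the two rebased scores per register,
// treating anything at or below its old lower bound as already complete (0).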
1164 
/// Merge the pending events and associated score brackets of \p Other into
/// this bracket's status.
1167 ///
1168 /// Returns whether the merge resulted in a change that requires tighter waits
1169 /// (i.e. the merged brackets strictly dominate the original brackets).
1170 bool WaitcntBrackets::merge(const WaitcntBrackets &Other) {
1171   bool StrictDom = false;
1172 
1173   for (auto T : inst_counter_types()) {
1174     // Merge event flags for this counter
1175     const bool OldOutOfOrder = counterOutOfOrder(T);
1176     const uint32_t OldEvents = PendingEvents & WaitEventMaskForInst[T];
1177     const uint32_t OtherEvents = Other.PendingEvents & WaitEventMaskForInst[T];
1178     if (OtherEvents & ~OldEvents)
1179       StrictDom = true;
1180     if (Other.MixedPendingEvents[T] ||
1181         (OldEvents && OtherEvents && OldEvents != OtherEvents))
1182       MixedPendingEvents[T] = true;
1183     PendingEvents |= OtherEvents;
1184 
1185     // Merge scores for this counter
1186     const uint32_t MyPending = ScoreUBs[T] - ScoreLBs[T];
1187     const uint32_t OtherPending = Other.ScoreUBs[T] - Other.ScoreLBs[T];
1188     MergeInfo M;
1189     M.OldLB = ScoreLBs[T];
1190     M.OtherLB = Other.ScoreLBs[T];
1191     M.MyShift = OtherPending > MyPending ? OtherPending - MyPending : 0;
1192     M.OtherShift = ScoreUBs[T] - Other.ScoreUBs[T] + M.MyShift;
1193 
1194     const uint32_t NewUB = ScoreUBs[T] + M.MyShift;
1195     if (NewUB < ScoreUBs[T])
1196       report_fatal_error("waitcnt score overflow");
1197     ScoreUBs[T] = NewUB;
1198     ScoreLBs[T] = std::min(M.OldLB + M.MyShift, M.OtherLB + M.OtherShift);
1199 
1200     StrictDom |= mergeScore(M, LastFlat[T], Other.LastFlat[T]);
1201 
1202     bool RegStrictDom = false;
1203     for (int J = 0, E = std::max(getMaxVGPR(), Other.getMaxVGPR()) + 1; J != E;
1204          J++) {
1205       RegStrictDom |= mergeScore(M, VgprScores[T][J], Other.VgprScores[T][J]);
1206     }
1207 
1208     if (T == LGKM_CNT) {
1209       for (int J = 0, E = std::max(getMaxSGPR(), Other.getMaxSGPR()) + 1;
1210            J != E; J++) {
1211         RegStrictDom |= mergeScore(M, SgprScores[J], Other.SgprScores[J]);
1212       }
1213     }
1214 
1215     if (RegStrictDom && !OldOutOfOrder)
1216       StrictDom = true;
1217   }
1218 
1219   VgprUB = std::max(getMaxVGPR(), Other.getMaxVGPR());
1220   SgprUB = std::max(getMaxSGPR(), Other.getMaxSGPR());
1221 
1222   return StrictDom;
1223 }
1224 
1225 // Generate s_waitcnt instructions where needed.
1226 bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
1227                                             MachineBasicBlock &Block,
1228                                             WaitcntBrackets &ScoreBrackets) {
1229   bool Modified = false;
1230 
1231   LLVM_DEBUG({
1232     dbgs() << "*** Block" << Block.getNumber() << " ***";
1233     ScoreBrackets.dump();
1234   });
1235 
1236   // Walk over the instructions.
1237   MachineInstr *OldWaitcntInstr = nullptr;
1238 
1239   for (MachineBasicBlock::iterator Iter = Block.begin(), E = Block.end();
1240        Iter != E;) {
1241     MachineInstr &Inst = *Iter;
1242 
1243     // Remove any previously existing waitcnts.
1244     if (Inst.getOpcode() == AMDGPU::S_WAITCNT) {
1245       if (OldWaitcntInstr) {
1246         if (TrackedWaitcntSet.count(OldWaitcntInstr)) {
1247           TrackedWaitcntSet.erase(OldWaitcntInstr);
1248           OldWaitcntInstr->eraseFromParent();
1249           OldWaitcntInstr = nullptr;
1250         } else if (!TrackedWaitcntSet.count(&Inst)) {
1251           // Two successive s_waitcnt's, both of which are pre-existing and
1252           // are therefore preserved.
1253           int64_t Imm = OldWaitcntInstr->getOperand(0).getImm();
1254           ScoreBrackets.applyWaitcnt(AMDGPU::decodeWaitcnt(IV, Imm));
1255         } else {
1256           ++Iter;
1257           Inst.eraseFromParent();
1258           Modified = true;
1259           continue;
1260         }
1261       }
1262 
1263       OldWaitcntInstr = &Inst;
1264       ++Iter;
1265       continue;
1266     }
1267 
1268     bool VCCZBugWorkAround = false;
1269     if (readsVCCZ(Inst) &&
1270         (!VCCZBugHandledSet.count(&Inst))) {
1271       if (ScoreBrackets.getScoreLB(LGKM_CNT) <
1272               ScoreBrackets.getScoreUB(LGKM_CNT) &&
1273           ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {
1274         if (ST->getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS)
1275           VCCZBugWorkAround = true;
1276       }
1277     }
1278 
    // Generate an s_waitcnt instruction to be placed before Inst, if needed.
1281     Modified |= generateWaitcntInstBefore(Inst, ScoreBrackets, OldWaitcntInstr);
1282     OldWaitcntInstr = nullptr;
1283 
1284     updateEventWaitcntAfter(Inst, &ScoreBrackets);
1285 
1286 #if 0 // TODO: implement resource type check controlled by options with ub = LB.
1287     // If this instruction generates a S_SETVSKIP because it is an
1288     // indexed resource, and we are on Tahiti, then it will also force
1289     // an S_WAITCNT vmcnt(0)
1290     if (RequireCheckResourceType(Inst, context)) {
1291       // Force the score to as if an S_WAITCNT vmcnt(0) is emitted.
1292       ScoreBrackets->setScoreLB(VM_CNT,
1293       ScoreBrackets->getScoreUB(VM_CNT));
1294     }
1295 #endif
1296 
1297     LLVM_DEBUG({
1298       Inst.print(dbgs());
1299       ScoreBrackets.dump();
1300     });
1301 
1302     // Check to see if this is a GWS instruction. If so, and if this is CI or
1303     // VI, then the generated code sequence will include an S_WAITCNT 0.
1304     // TODO: Are these the only GWS instructions?
1305     if (Inst.getOpcode() == AMDGPU::DS_GWS_INIT ||
1306         Inst.getOpcode() == AMDGPU::DS_GWS_SEMA_V ||
1307         Inst.getOpcode() == AMDGPU::DS_GWS_SEMA_BR ||
1308         Inst.getOpcode() == AMDGPU::DS_GWS_SEMA_P ||
1309         Inst.getOpcode() == AMDGPU::DS_GWS_BARRIER) {
1310       // TODO: && context->target_info->GwsRequiresMemViolTest() ) {
1311       ScoreBrackets.applyWaitcnt(AMDGPU::Waitcnt::allZero());
1312     }
1313 
1314     // TODO: Remove this work-around after fixing the scheduler and enable the
1315     // assert above.
1316     if (VCCZBugWorkAround) {
      // Restore the vccz bit.  Any time a value is written to vcc, the vccz
1318       // bit is updated, so we can restore the bit by reading the value of
1319       // vcc and then writing it back to the register.
1320       BuildMI(Block, Inst, Inst.getDebugLoc(), TII->get(AMDGPU::S_MOV_B64),
1321               AMDGPU::VCC)
1322           .addReg(AMDGPU::VCC);
1323       VCCZBugHandledSet.insert(&Inst);
1324       Modified = true;
1325     }
1326 
1327     ++Iter;
1328   }
1329 
1330   return Modified;
1331 }
1332 
1333 bool SIInsertWaitcnts::runOnMachineFunction(MachineFunction &MF) {
1334   ST = &MF.getSubtarget<GCNSubtarget>();
1335   TII = ST->getInstrInfo();
1336   TRI = &TII->getRegisterInfo();
1337   MRI = &MF.getRegInfo();
1338   IV = AMDGPU::getIsaVersion(ST->getCPU());
1339   const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1340 
1341   ForceEmitZeroWaitcnts = ForceEmitZeroFlag;
1342   for (auto T : inst_counter_types())
1343     ForceEmitWaitcnt[T] = false;
1344 
1345   HardwareLimits.VmcntMax = AMDGPU::getVmcntBitMask(IV);
1346   HardwareLimits.ExpcntMax = AMDGPU::getExpcntBitMask(IV);
1347   HardwareLimits.LgkmcntMax = AMDGPU::getLgkmcntBitMask(IV);
1348 
1349   HardwareLimits.NumVGPRsMax = ST->getAddressableNumVGPRs();
1350   HardwareLimits.NumSGPRsMax = ST->getAddressableNumSGPRs();
1351   assert(HardwareLimits.NumVGPRsMax <= SQ_MAX_PGM_VGPRS);
1352   assert(HardwareLimits.NumSGPRsMax <= SQ_MAX_PGM_SGPRS);
1353 
1354   RegisterEncoding.VGPR0 = TRI->getEncodingValue(AMDGPU::VGPR0);
1355   RegisterEncoding.VGPRL =
1356       RegisterEncoding.VGPR0 + HardwareLimits.NumVGPRsMax - 1;
1357   RegisterEncoding.SGPR0 = TRI->getEncodingValue(AMDGPU::SGPR0);
1358   RegisterEncoding.SGPRL =
1359       RegisterEncoding.SGPR0 + HardwareLimits.NumSGPRsMax - 1;
1360 
1361   TrackedWaitcntSet.clear();
1362   VCCZBugHandledSet.clear();
1363   RpotIdxMap.clear();
1364   BlockInfos.clear();
1365 
1366   // Keep iterating over the blocks in reverse post order, inserting and
  // updating s_waitcnt where needed, until a fixed point is reached.
1368   for (MachineBasicBlock *MBB :
1369        ReversePostOrderTraversal<MachineFunction *>(&MF)) {
1370     RpotIdxMap[MBB] = BlockInfos.size();
1371     BlockInfos.emplace_back(MBB);
1372   }
1373 
1374   std::unique_ptr<WaitcntBrackets> Brackets;
1375   bool Modified = false;
1376   bool Repeat;
1377   do {
1378     Repeat = false;
1379 
1380     for (BlockInfo &BI : BlockInfos) {
1381       if (!BI.Dirty)
1382         continue;
1383 
1384       unsigned Idx = std::distance(&*BlockInfos.begin(), &BI);
1385 
1386       if (BI.Incoming) {
1387         if (!Brackets)
1388           Brackets = llvm::make_unique<WaitcntBrackets>(*BI.Incoming);
1389         else
1390           *Brackets = *BI.Incoming;
1391       } else {
1392         if (!Brackets)
1393           Brackets = llvm::make_unique<WaitcntBrackets>(ST);
1394         else
1395           Brackets->clear();
1396       }
1397 
1398       Modified |= insertWaitcntInBlock(MF, *BI.MBB, *Brackets);
1399       BI.Dirty = false;
1400 
1401       if (Brackets->hasPending()) {
1402         BlockInfo *MoveBracketsToSucc = nullptr;
1403         for (MachineBasicBlock *Succ : BI.MBB->successors()) {
1404           unsigned SuccIdx = RpotIdxMap[Succ];
1405           BlockInfo &SuccBI = BlockInfos[SuccIdx];
1406           if (!SuccBI.Incoming) {
1407             SuccBI.Dirty = true;
1408             if (SuccIdx <= Idx)
1409               Repeat = true;
1410             if (!MoveBracketsToSucc) {
1411               MoveBracketsToSucc = &SuccBI;
1412             } else {
1413               SuccBI.Incoming = llvm::make_unique<WaitcntBrackets>(*Brackets);
1414             }
1415           } else if (SuccBI.Incoming->merge(*Brackets)) {
1416             SuccBI.Dirty = true;
1417             if (SuccIdx <= Idx)
1418               Repeat = true;
1419           }
1420         }
1421         if (MoveBracketsToSucc)
1422           MoveBracketsToSucc->Incoming = std::move(Brackets);
1423       }
1424     }
1425   } while (Repeat);
1426 
1427   SmallVector<MachineBasicBlock *, 4> EndPgmBlocks;
1428 
1429   bool HaveScalarStores = false;
1430 
1431   for (MachineFunction::iterator BI = MF.begin(), BE = MF.end(); BI != BE;
1432        ++BI) {
1433     MachineBasicBlock &MBB = *BI;
1434 
1435     for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;
1436          ++I) {
1437       if (!HaveScalarStores && TII->isScalarStore(*I))
1438         HaveScalarStores = true;
1439 
1440       if (I->getOpcode() == AMDGPU::S_ENDPGM ||
1441           I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG)
1442         EndPgmBlocks.push_back(&MBB);
1443     }
1444   }
1445 
1446   if (HaveScalarStores) {
1447     // If scalar writes are used, the cache must be flushed or else the next
1448     // wave to reuse the same scratch memory can be clobbered.
1449     //
1450     // Insert s_dcache_wb at wave termination points if there were any scalar
1451     // stores, and only if the cache hasn't already been flushed. This could be
1452     // improved by looking across blocks for flushes in postdominating blocks
1453     // from the stores but an explicitly requested flush is probably very rare.
1454     for (MachineBasicBlock *MBB : EndPgmBlocks) {
1455       bool SeenDCacheWB = false;
1456 
1457       for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
1458            ++I) {
1459         if (I->getOpcode() == AMDGPU::S_DCACHE_WB)
1460           SeenDCacheWB = true;
1461         else if (TII->isScalarStore(*I))
1462           SeenDCacheWB = false;
1463 
1464         // FIXME: It would be better to insert this before a waitcnt if any.
1465         if ((I->getOpcode() == AMDGPU::S_ENDPGM ||
1466              I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG) &&
1467             !SeenDCacheWB) {
1468           Modified = true;
1469           BuildMI(*MBB, I, I->getDebugLoc(), TII->get(AMDGPU::S_DCACHE_WB));
1470         }
1471       }
1472     }
1473   }
1474 
1475   if (!MFI->isEntryFunction()) {
1476     // Wait for any outstanding memory operations that the input registers may
    // depend on. We can't track them, so it's better to do the wait after the
1478     // costly call sequence.
1479 
1480     // TODO: Could insert earlier and schedule more liberally with operations
1481     // that only use caller preserved registers.
1482     MachineBasicBlock &EntryBB = MF.front();
1483     BuildMI(EntryBB, EntryBB.getFirstNonPHI(), DebugLoc(), TII->get(AMDGPU::S_WAITCNT))
1484       .addImm(0);
1485 
1486     Modified = true;
1487   }
1488 
1489   return Modified;
1490 }
1491