//===- SIInsertWaitcnts.cpp - Insert Wait Instructions --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Insert wait instructions for memory reads and writes.
///
/// Memory reads and writes are issued asynchronously, so we need to insert
/// S_WAITCNT instructions when we want to access any of their results or
/// overwrite any register that's used asynchronously.
///
/// TODO: This pass currently keeps one timeline per hardware counter. A more
/// finely-grained approach that keeps one timeline per event type could
/// sometimes get away with generating weaker s_waitcnt instructions. For
/// example, when both SMEM and LDS are in flight and we need to wait for
/// the i-th-last LDS instruction, then an lgkmcnt(i) is actually sufficient,
/// but the pass will currently generate a conservative lgkmcnt(0) because
/// multiple event types are in flight.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachinePostDominators.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/InitializePasses.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <memory>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-insert-waitcnts"

DEBUG_COUNTER(ForceExpCounter, DEBUG_TYPE"-forceexp",
              "Force emit s_waitcnt expcnt(0) instrs");
DEBUG_COUNTER(ForceLgkmCounter, DEBUG_TYPE"-forcelgkm",
              "Force emit s_waitcnt lgkmcnt(0) instrs");
DEBUG_COUNTER(ForceVMCounter, DEBUG_TYPE"-forcevm",
              "Force emit s_waitcnt vmcnt(0) instrs");

static cl::opt<bool> ForceEmitZeroFlag(
    "amdgpu-waitcnt-forcezero",
    cl::desc("Force all waitcnt instrs to be emitted as "
             "s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)"),
    cl::init(false), cl::Hidden);

namespace {

template <typename EnumT>
class enum_iterator
    : public iterator_facade_base<enum_iterator<EnumT>,
                                  std::forward_iterator_tag, const EnumT> {
  EnumT Value;
public:
  enum_iterator() = default;
  enum_iterator(EnumT Value) : Value(Value) {}

  enum_iterator &operator++() {
    Value = static_cast<EnumT>(Value + 1);
    return *this;
  }

  bool operator==(const enum_iterator &RHS) const { return Value == RHS.Value; }

  EnumT operator*() const { return Value; }
};
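
// Illustrative note (not functional code): enum_iterator only exists so that
// inst_counter_types() below can expose the hardware counters as a range, and
// the pass can visit them with a range-based for loop, e.g.
//   for (auto T : inst_counter_types())
//     ForceEmitWaitcnt[T] = false;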

// Class of object that encapsulates latest instruction counter score
// associated with the operand. Used for determining whether
// an s_waitcnt instruction needs to be emitted.

#define CNT_MASK(t) (1u << (t))

enum InstCounterType { VM_CNT = 0, LGKM_CNT, EXP_CNT, VS_CNT, NUM_INST_CNTS };

iterator_range<enum_iterator<InstCounterType>> inst_counter_types() {
  return make_range(enum_iterator<InstCounterType>(VM_CNT),
                    enum_iterator<InstCounterType>(NUM_INST_CNTS));
}

using RegInterval = std::pair<int, int>;

struct {
  unsigned VmcntMax;
  unsigned ExpcntMax;
  unsigned LgkmcntMax;
  unsigned VscntMax;
} HardwareLimits;

struct {
  unsigned VGPR0;
  unsigned VGPRL;
  unsigned SGPR0;
  unsigned SGPRL;
} RegisterEncoding;

enum WaitEventType {
  VMEM_ACCESS,       // vector-memory read & write
  VMEM_READ_ACCESS,  // vector-memory read
  VMEM_WRITE_ACCESS, // vector-memory write
  LDS_ACCESS,        // lds read & write
  GDS_ACCESS,        // gds read & write
  SQ_MESSAGE,        // send message
  SMEM_ACCESS,       // scalar-memory read & write
  EXP_GPR_LOCK,      // export holding on its data src
  GDS_GPR_LOCK,      // GDS holding on its data and addr src
  EXP_POS_ACCESS,    // write to export position
  EXP_PARAM_ACCESS,  // write to export parameter
  VMW_GPR_LOCK,      // vector-memory write holding on its data src
  NUM_WAIT_EVENTS,
};

static const unsigned WaitEventMaskForInst[NUM_INST_CNTS] = {
    (1 << VMEM_ACCESS) | (1 << VMEM_READ_ACCESS),
    (1 << SMEM_ACCESS) | (1 << LDS_ACCESS) | (1 << GDS_ACCESS) |
        (1 << SQ_MESSAGE),
    (1 << EXP_GPR_LOCK) | (1 << GDS_GPR_LOCK) | (1 << VMW_GPR_LOCK) |
        (1 << EXP_PARAM_ACCESS) | (1 << EXP_POS_ACCESS),
    (1 << VMEM_WRITE_ACCESS)};

// The mapping is:
//  0                .. SQ_MAX_PGM_VGPRS-1               real VGPRs
//  SQ_MAX_PGM_VGPRS .. NUM_ALL_VGPRS-1                  extra VGPR-like slots
//  NUM_ALL_VGPRS    .. NUM_ALL_VGPRS+SQ_MAX_PGM_SGPRS-1 real SGPRs
// We reserve a fixed number of VGPR slots in the scoring tables for
// special tokens like SCMEM_LDS (needed for buffer load to LDS).
enum RegisterMapping {
  SQ_MAX_PGM_VGPRS = 256, // Maximum programmable VGPRs across all targets.
  SQ_MAX_PGM_SGPRS = 256, // Maximum programmable SGPRs across all targets.
  NUM_EXTRA_VGPRS = 1,    // A reserved slot for DS.
  EXTRA_VGPR_LDS = 0,     // This is a placeholder the Shader algorithm uses.
  NUM_ALL_VGPRS = SQ_MAX_PGM_VGPRS + NUM_EXTRA_VGPRS, // Where SGPR starts.
};

// Enumerate different types of result-returning VMEM operations. Although
// s_waitcnt orders them all with a single vmcnt counter, in the absence of
// s_waitcnt only instructions of the same VmemType are guaranteed to write
// their results in order -- so there is no need to insert an s_waitcnt between
// two instructions of the same type that write the same vgpr.
enum VmemType {
  // BUF instructions and MIMG instructions without a sampler.
  VMEM_NOSAMPLER,
  // MIMG instructions with a sampler.
  VMEM_SAMPLER,
};

VmemType getVmemType(const MachineInstr &Inst) {
  assert(SIInstrInfo::isVMEM(Inst));
  if (!SIInstrInfo::isMIMG(Inst))
    return VMEM_NOSAMPLER;
  const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Inst.getOpcode());
  return AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode)->Sampler
             ? VMEM_SAMPLER
             : VMEM_NOSAMPLER;
}

void addWait(AMDGPU::Waitcnt &Wait, InstCounterType T, unsigned Count) {
  switch (T) {
  case VM_CNT:
    Wait.VmCnt = std::min(Wait.VmCnt, Count);
    break;
  case EXP_CNT:
    Wait.ExpCnt = std::min(Wait.ExpCnt, Count);
    break;
  case LGKM_CNT:
    Wait.LgkmCnt = std::min(Wait.LgkmCnt, Count);
    break;
  case VS_CNT:
    Wait.VsCnt = std::min(Wait.VsCnt, Count);
    break;
  default:
    llvm_unreachable("bad InstCounterType");
  }
}

// This object maintains the current score brackets of each wait counter, and
// a per-register scoreboard for each wait counter.
//
// We also maintain the latest score for every event type that can change the
// waitcnt in order to know if there are multiple types of events within
// the brackets. When multiple types of events happen in the bracket, the
// wait count may get decremented out of order, therefore we need to put in
// an "s_waitcnt 0" before use.
class WaitcntBrackets {
public:
  WaitcntBrackets(const GCNSubtarget *SubTarget) : ST(SubTarget) {}

  static unsigned getWaitCountMax(InstCounterType T) {
    switch (T) {
    case VM_CNT:
      return HardwareLimits.VmcntMax;
    case LGKM_CNT:
      return HardwareLimits.LgkmcntMax;
    case EXP_CNT:
      return HardwareLimits.ExpcntMax;
    case VS_CNT:
      return HardwareLimits.VscntMax;
    default:
      break;
    }
    return 0;
  }

  unsigned getScoreLB(InstCounterType T) const {
    assert(T < NUM_INST_CNTS);
    return ScoreLBs[T];
  }

  unsigned getScoreUB(InstCounterType T) const {
    assert(T < NUM_INST_CNTS);
    return ScoreUBs[T];
  }

  // Mapping from event to counter.
  InstCounterType eventCounter(WaitEventType E) {
    if (WaitEventMaskForInst[VM_CNT] & (1 << E))
      return VM_CNT;
    if (WaitEventMaskForInst[LGKM_CNT] & (1 << E))
      return LGKM_CNT;
    if (WaitEventMaskForInst[VS_CNT] & (1 << E))
      return VS_CNT;
    assert(WaitEventMaskForInst[EXP_CNT] & (1 << E));
    return EXP_CNT;
  }

  unsigned getRegScore(int GprNo, InstCounterType T) {
    if (GprNo < NUM_ALL_VGPRS) {
      return VgprScores[T][GprNo];
    }
    assert(T == LGKM_CNT);
    return SgprScores[GprNo - NUM_ALL_VGPRS];
  }

  bool merge(const WaitcntBrackets &Other);

  RegInterval getRegInterval(const MachineInstr *MI, const SIInstrInfo *TII,
                             const MachineRegisterInfo *MRI,
                             const SIRegisterInfo *TRI, unsigned OpNo) const;

  bool counterOutOfOrder(InstCounterType T) const;
  bool simplifyWaitcnt(AMDGPU::Waitcnt &Wait) const;
  bool simplifyWaitcnt(InstCounterType T, unsigned &Count) const;
  void determineWait(InstCounterType T, unsigned ScoreToWait,
                     AMDGPU::Waitcnt &Wait) const;
  void applyWaitcnt(const AMDGPU::Waitcnt &Wait);
  void applyWaitcnt(InstCounterType T, unsigned Count);
  void updateByEvent(const SIInstrInfo *TII, const SIRegisterInfo *TRI,
                     const MachineRegisterInfo *MRI, WaitEventType E,
                     MachineInstr &MI);

  bool hasPending() const { return PendingEvents != 0; }
  bool hasPendingEvent(WaitEventType E) const {
    return PendingEvents & (1 << E);
  }

  bool hasMixedPendingEvents(InstCounterType T) const {
    unsigned Events = PendingEvents & WaitEventMaskForInst[T];
    // Return true if more than one bit is set in Events.
    return Events & (Events - 1);
  }

  bool hasPendingFlat() const {
    return ((LastFlat[LGKM_CNT] > ScoreLBs[LGKM_CNT] &&
             LastFlat[LGKM_CNT] <= ScoreUBs[LGKM_CNT]) ||
            (LastFlat[VM_CNT] > ScoreLBs[VM_CNT] &&
             LastFlat[VM_CNT] <= ScoreUBs[VM_CNT]));
  }

  void setPendingFlat() {
    LastFlat[VM_CNT] = ScoreUBs[VM_CNT];
    LastFlat[LGKM_CNT] = ScoreUBs[LGKM_CNT];
  }

  // Return true if there might be pending writes to the specified vgpr by VMEM
  // instructions with types different from V.
  bool hasOtherPendingVmemTypes(int GprNo, VmemType V) const {
    assert(GprNo < NUM_ALL_VGPRS);
    return VgprVmemTypes[GprNo] & ~(1 << V);
  }

  void clearVgprVmemTypes(int GprNo) {
    assert(GprNo < NUM_ALL_VGPRS);
    VgprVmemTypes[GprNo] = 0;
  }

  void print(raw_ostream &);
  void dump() { print(dbgs()); }

private:
  struct MergeInfo {
    unsigned OldLB;
    unsigned OtherLB;
    unsigned MyShift;
    unsigned OtherShift;
  };
  static bool mergeScore(const MergeInfo &M, unsigned &Score,
                         unsigned OtherScore);

  void setScoreLB(InstCounterType T, unsigned Val) {
    assert(T < NUM_INST_CNTS);
    ScoreLBs[T] = Val;
  }

  void setScoreUB(InstCounterType T, unsigned Val) {
    assert(T < NUM_INST_CNTS);
    ScoreUBs[T] = Val;
    if (T == EXP_CNT) {
      unsigned UB = ScoreUBs[T] - getWaitCountMax(EXP_CNT);
      if (ScoreLBs[T] < UB && UB < ScoreUBs[T])
        ScoreLBs[T] = UB;
    }
  }

  void setRegScore(int GprNo, InstCounterType T, unsigned Val) {
    if (GprNo < NUM_ALL_VGPRS) {
      VgprUB = std::max(VgprUB, GprNo);
      VgprScores[T][GprNo] = Val;
    } else {
      assert(T == LGKM_CNT);
      SgprUB = std::max(SgprUB, GprNo - NUM_ALL_VGPRS);
      SgprScores[GprNo - NUM_ALL_VGPRS] = Val;
    }
  }

  void setExpScore(const MachineInstr *MI, const SIInstrInfo *TII,
                   const SIRegisterInfo *TRI, const MachineRegisterInfo *MRI,
                   unsigned OpNo, unsigned Val);

  const GCNSubtarget *ST = nullptr;
  unsigned ScoreLBs[NUM_INST_CNTS] = {0};
  unsigned ScoreUBs[NUM_INST_CNTS] = {0};
  unsigned PendingEvents = 0;
  // Remember the last flat memory operation.
  unsigned LastFlat[NUM_INST_CNTS] = {0};
  // wait_cnt scores for every vgpr.
  // Keep track of the VgprUB and SgprUB to make merge at join efficient.
  int VgprUB = -1;
  int SgprUB = -1;
  unsigned VgprScores[NUM_INST_CNTS][NUM_ALL_VGPRS] = {{0}};
  // Wait cnt scores for every sgpr, only lgkmcnt is relevant.
  unsigned SgprScores[SQ_MAX_PGM_SGPRS] = {0};
  // Bitmask of the VmemTypes of VMEM instructions that might have a pending
  // write to each vgpr.
  unsigned char VgprVmemTypes[NUM_ALL_VGPRS] = {0};
};
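
// Illustrative note on the bracket bookkeeping above (an informal sketch, not
// functional code): for a given counter, ScoreUB - ScoreLB is the number of
// operations still considered in flight. If, say, VM_CNT has ScoreLB = 3 and
// ScoreUB = 7, a vgpr whose score is 5 still has UB - 5 = 2 younger VMEM
// operations issued after its producer, so determineWait() below would request
// vmcnt(2) for a use of that vgpr (or vmcnt(0) if the counter can complete out
// of order).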

class SIInsertWaitcnts : public MachineFunctionPass {
private:
  const GCNSubtarget *ST = nullptr;
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  const MachineRegisterInfo *MRI = nullptr;
  AMDGPU::IsaVersion IV;

  DenseSet<MachineInstr *> TrackedWaitcntSet;
  DenseMap<const Value *, MachineBasicBlock *> SLoadAddresses;
  MachinePostDominatorTree *PDT;

  struct BlockInfo {
    MachineBasicBlock *MBB;
    std::unique_ptr<WaitcntBrackets> Incoming;
    bool Dirty = true;

    explicit BlockInfo(MachineBasicBlock *MBB) : MBB(MBB) {}
  };

  MapVector<MachineBasicBlock *, BlockInfo> BlockInfos;

  // ForceEmitZeroWaitcnts: force all waitcnt instrs to be emitted as
  // s_waitcnt 0 because of the amdgpu-waitcnt-forcezero flag.
  bool ForceEmitZeroWaitcnts;
  bool ForceEmitWaitcnt[NUM_INST_CNTS];

public:
  static char ID;

  SIInsertWaitcnts() : MachineFunctionPass(ID) {
    (void)ForceExpCounter;
    (void)ForceLgkmCounter;
    (void)ForceVMCounter;
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI insert wait instructions";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<MachinePostDominatorTree>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  bool isForceEmitWaitcnt() const {
    for (auto T : inst_counter_types())
      if (ForceEmitWaitcnt[T])
        return true;
    return false;
  }

  void setForceEmitWaitcnt() {
    // For non-debug builds, ForceEmitWaitcnt has been initialized to false;
    // for debug builds, get the debug counter info and adjust if need be.
#ifndef NDEBUG
    if (DebugCounter::isCounterSet(ForceExpCounter) &&
        DebugCounter::shouldExecute(ForceExpCounter)) {
      ForceEmitWaitcnt[EXP_CNT] = true;
    } else {
      ForceEmitWaitcnt[EXP_CNT] = false;
    }

    if (DebugCounter::isCounterSet(ForceLgkmCounter) &&
        DebugCounter::shouldExecute(ForceLgkmCounter)) {
      ForceEmitWaitcnt[LGKM_CNT] = true;
    } else {
      ForceEmitWaitcnt[LGKM_CNT] = false;
    }

    if (DebugCounter::isCounterSet(ForceVMCounter) &&
        DebugCounter::shouldExecute(ForceVMCounter)) {
      ForceEmitWaitcnt[VM_CNT] = true;
    } else {
      ForceEmitWaitcnt[VM_CNT] = false;
    }
#endif // NDEBUG
  }

  bool mayAccessVMEMThroughFlat(const MachineInstr &MI) const;
  bool mayAccessLDSThroughFlat(const MachineInstr &MI) const;
  bool generateWaitcntInstBefore(MachineInstr &MI,
                                 WaitcntBrackets &ScoreBrackets,
                                 MachineInstr *OldWaitcntInstr);
  void updateEventWaitcntAfter(MachineInstr &Inst,
                               WaitcntBrackets *ScoreBrackets);
  bool insertWaitcntInBlock(MachineFunction &MF, MachineBasicBlock &Block,
                            WaitcntBrackets &ScoreBrackets);
};

} // end anonymous namespace

RegInterval WaitcntBrackets::getRegInterval(const MachineInstr *MI,
                                            const SIInstrInfo *TII,
                                            const MachineRegisterInfo *MRI,
                                            const SIRegisterInfo *TRI,
                                            unsigned OpNo) const {
  const MachineOperand &Op = MI->getOperand(OpNo);
  assert(Op.isReg());
  if (!TRI->isInAllocatableClass(Op.getReg()) || TRI->isAGPR(*MRI, Op.getReg()))
    return {-1, -1};

  // A use via a PW operand does not need a waitcnt.
  // A partial write is not a WAW.
  assert(!Op.getSubReg() || !Op.isUndef());

  RegInterval Result;

  unsigned Reg = TRI->getEncodingValue(AMDGPU::getMCReg(Op.getReg(), *ST));

  if (TRI->isVGPR(*MRI, Op.getReg())) {
    assert(Reg >= RegisterEncoding.VGPR0 && Reg <= RegisterEncoding.VGPRL);
    Result.first = Reg - RegisterEncoding.VGPR0;
    assert(Result.first >= 0 && Result.first < SQ_MAX_PGM_VGPRS);
  } else if (TRI->isSGPRReg(*MRI, Op.getReg())) {
    assert(Reg >= RegisterEncoding.SGPR0 && Reg < SQ_MAX_PGM_SGPRS);
    Result.first = Reg - RegisterEncoding.SGPR0 + NUM_ALL_VGPRS;
    assert(Result.first >= NUM_ALL_VGPRS &&
           Result.first < SQ_MAX_PGM_SGPRS + NUM_ALL_VGPRS);
  }
  // TODO: Handle TTMP
  // else if (TRI->isTTMP(*MRI, Reg.getReg())) ...
  else
    return {-1, -1};

  const TargetRegisterClass *RC = TII->getOpRegClass(*MI, OpNo);
  unsigned Size = TRI->getRegSizeInBits(*RC);
  Result.second = Result.first + ((Size + 16) / 32);

  return Result;
}

void WaitcntBrackets::setExpScore(const MachineInstr *MI,
                                  const SIInstrInfo *TII,
                                  const SIRegisterInfo *TRI,
                                  const MachineRegisterInfo *MRI, unsigned OpNo,
                                  unsigned Val) {
  RegInterval Interval = getRegInterval(MI, TII, MRI, TRI, OpNo);
  assert(TRI->isVGPR(*MRI, MI->getOperand(OpNo).getReg()));
  for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
    setRegScore(RegNo, EXP_CNT, Val);
  }
}

void WaitcntBrackets::updateByEvent(const SIInstrInfo *TII,
                                    const SIRegisterInfo *TRI,
                                    const MachineRegisterInfo *MRI,
                                    WaitEventType E, MachineInstr &Inst) {
  InstCounterType T = eventCounter(E);
  unsigned CurrScore = getScoreUB(T) + 1;
  if (CurrScore == 0)
    report_fatal_error("InsertWaitcnt score wraparound");
  // PendingEvents and ScoreUB need to be updated regardless of whether this
  // event changes the score of a register or not.
  // Examples include vm_cnt for a buffer store or lgkm_cnt for a send-message.
  PendingEvents |= 1 << E;
  setScoreUB(T, CurrScore);

  if (T == EXP_CNT) {
    // Put score on the source vgprs. If this is a store, just use those
    // specific register(s).
    if (TII->isDS(Inst) && (Inst.mayStore() || Inst.mayLoad())) {
      int AddrOpIdx =
          AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::addr);
      // All GDS operations must protect their address register (same as
      // export.)
      if (AddrOpIdx != -1) {
        setExpScore(&Inst, TII, TRI, MRI, AddrOpIdx, CurrScore);
      }

      if (Inst.mayStore()) {
        if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
                                       AMDGPU::OpName::data0) != -1) {
          setExpScore(
              &Inst, TII, TRI, MRI,
              AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data0),
              CurrScore);
        }
        if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
                                       AMDGPU::OpName::data1) != -1) {
          setExpScore(&Inst, TII, TRI, MRI,
                      AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
                                                 AMDGPU::OpName::data1),
                      CurrScore);
        }
      } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1 &&
                 Inst.getOpcode() != AMDGPU::DS_GWS_INIT &&
                 Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_V &&
                 Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_BR &&
                 Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_P &&
                 Inst.getOpcode() != AMDGPU::DS_GWS_BARRIER &&
                 Inst.getOpcode() != AMDGPU::DS_APPEND &&
                 Inst.getOpcode() != AMDGPU::DS_CONSUME &&
                 Inst.getOpcode() != AMDGPU::DS_ORDERED_COUNT) {
        for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
          const MachineOperand &Op = Inst.getOperand(I);
          if (Op.isReg() && !Op.isDef() && TRI->isVGPR(*MRI, Op.getReg())) {
            setExpScore(&Inst, TII, TRI, MRI, I, CurrScore);
          }
        }
      }
    } else if (TII->isFLAT(Inst)) {
      if (Inst.mayStore()) {
        setExpScore(
            &Inst, TII, TRI, MRI,
            AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
            CurrScore);
      } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1) {
        setExpScore(
            &Inst, TII, TRI, MRI,
            AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
            CurrScore);
      }
    } else if (TII->isMIMG(Inst)) {
      if (Inst.mayStore()) {
        setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
      } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1) {
        setExpScore(
            &Inst, TII, TRI, MRI,
            AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
            CurrScore);
      }
    } else if (TII->isMTBUF(Inst)) {
      if (Inst.mayStore()) {
        setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
      }
    } else if (TII->isMUBUF(Inst)) {
      if (Inst.mayStore()) {
        setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
      } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1) {
        setExpScore(
            &Inst, TII, TRI, MRI,
            AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
            CurrScore);
      }
    } else {
      if (TII->isEXP(Inst)) {
        // For export the destination registers are really temps that
        // can be used as the actual source after export patching, so
        // we need to treat them like sources and set the EXP_CNT
        // score.
        for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
          MachineOperand &DefMO = Inst.getOperand(I);
          if (DefMO.isReg() && DefMO.isDef() &&
              TRI->isVGPR(*MRI, DefMO.getReg())) {
            setRegScore(
                TRI->getEncodingValue(AMDGPU::getMCReg(DefMO.getReg(), *ST)),
                EXP_CNT, CurrScore);
          }
        }
      }
      for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
        MachineOperand &MO = Inst.getOperand(I);
        if (MO.isReg() && !MO.isDef() && TRI->isVGPR(*MRI, MO.getReg())) {
          setExpScore(&Inst, TII, TRI, MRI, I, CurrScore);
        }
      }
    }
#if 0 // TODO: check if this is handled by MUBUF code above.
  } else if (Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORD ||
             Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORDX2 ||
             Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORDX4) {
    MachineOperand *MO = TII->getNamedOperand(Inst, AMDGPU::OpName::data);
    unsigned OpNo;//TODO: find the OpNo for this operand;
    RegInterval Interval = getRegInterval(&Inst, TII, MRI, TRI, OpNo);
    for (int RegNo = Interval.first; RegNo < Interval.second;
         ++RegNo) {
      setRegScore(RegNo + NUM_ALL_VGPRS, t, CurrScore);
    }
#endif
  } else {
    // Match the score to the destination registers.
    for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
      auto &Op = Inst.getOperand(I);
      if (!Op.isReg() || !Op.isDef())
        continue;
      RegInterval Interval = getRegInterval(&Inst, TII, MRI, TRI, I);
      if (T == VM_CNT) {
        if (Interval.first >= NUM_ALL_VGPRS)
          continue;
        if (SIInstrInfo::isVMEM(Inst)) {
          VmemType V = getVmemType(Inst);
          for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo)
            VgprVmemTypes[RegNo] |= 1 << V;
        }
      }
      for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
        setRegScore(RegNo, T, CurrScore);
      }
    }
    if (TII->isDS(Inst) && Inst.mayStore()) {
      setRegScore(SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS, T, CurrScore);
    }
  }
}

void WaitcntBrackets::print(raw_ostream &OS) {
  OS << '\n';
  for (auto T : inst_counter_types()) {
    unsigned LB = getScoreLB(T);
    unsigned UB = getScoreUB(T);

    switch (T) {
    case VM_CNT:
      OS << " VM_CNT(" << UB - LB << "): ";
      break;
    case LGKM_CNT:
      OS << " LGKM_CNT(" << UB - LB << "): ";
      break;
    case EXP_CNT:
      OS << " EXP_CNT(" << UB - LB << "): ";
      break;
    case VS_CNT:
      OS << " VS_CNT(" << UB - LB << "): ";
      break;
    default:
      OS << " UNKNOWN(" << UB - LB << "): ";
      break;
    }

    if (LB < UB) {
      // Print vgpr scores.
      for (int J = 0; J <= VgprUB; J++) {
        unsigned RegScore = getRegScore(J, T);
        if (RegScore <= LB)
          continue;
        unsigned RelScore = RegScore - LB - 1;
        if (J < SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS) {
          OS << RelScore << ":v" << J << " ";
        } else {
          OS << RelScore << ":ds ";
        }
      }
      // Also need to print sgpr scores for lgkm_cnt.
      if (T == LGKM_CNT) {
        for (int J = 0; J <= SgprUB; J++) {
          unsigned RegScore = getRegScore(J + NUM_ALL_VGPRS, LGKM_CNT);
          if (RegScore <= LB)
            continue;
          unsigned RelScore = RegScore - LB - 1;
          OS << RelScore << ":s" << J << " ";
        }
      }
    }
    OS << '\n';
  }
  OS << '\n';
}

/// Simplify the waitcnt, in the sense of removing redundant counts, and return
/// whether a waitcnt instruction is needed at all.
bool WaitcntBrackets::simplifyWaitcnt(AMDGPU::Waitcnt &Wait) const {
  return simplifyWaitcnt(VM_CNT, Wait.VmCnt) |
         simplifyWaitcnt(EXP_CNT, Wait.ExpCnt) |
         simplifyWaitcnt(LGKM_CNT, Wait.LgkmCnt) |
         simplifyWaitcnt(VS_CNT, Wait.VsCnt);
}

bool WaitcntBrackets::simplifyWaitcnt(InstCounterType T,
                                      unsigned &Count) const {
  const unsigned LB = getScoreLB(T);
  const unsigned UB = getScoreUB(T);
  if (Count < UB && UB - Count > LB)
    return true;

  Count = ~0u;
  return false;
}
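
// Illustrative example for simplifyWaitcnt() above (informal, not functional
// code): with ScoreLB = 2 and ScoreUB = 5 for a counter, a requested count of
// 1 is kept because operations newer than the lower bound may still be
// outstanding (1 < 5 and 5 - 1 > 2), whereas a requested count of 4 is already
// guaranteed by the bracket and is reset to ~0u, meaning "no wait needed".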

void WaitcntBrackets::determineWait(InstCounterType T, unsigned ScoreToWait,
                                    AMDGPU::Waitcnt &Wait) const {
  // If the score of src_operand falls within the bracket, we need an
  // s_waitcnt instruction.
  const unsigned LB = getScoreLB(T);
  const unsigned UB = getScoreUB(T);
  if ((UB >= ScoreToWait) && (ScoreToWait > LB)) {
    if ((T == VM_CNT || T == LGKM_CNT) &&
        hasPendingFlat() &&
        !ST->hasFlatLgkmVMemCountInOrder()) {
      // If there is a pending FLAT operation, and this is a VMem or LGKM
      // waitcnt and the target can report early completion, then we need
      // to force a waitcnt 0.
      addWait(Wait, T, 0);
    } else if (counterOutOfOrder(T)) {
      // Counters can get decremented out-of-order when there are multiple
      // types of events in the bracket. Also emit an s_waitcnt with a
      // conservative value of 0 for the counter.
      addWait(Wait, T, 0);
    } else {
      // If a counter has been maxed out avoid overflow by waiting for
      // MAX(CounterType) - 1 instead.
      unsigned NeededWait = std::min(UB - ScoreToWait, getWaitCountMax(T) - 1);
      addWait(Wait, T, NeededWait);
    }
  }
}

void WaitcntBrackets::applyWaitcnt(const AMDGPU::Waitcnt &Wait) {
  applyWaitcnt(VM_CNT, Wait.VmCnt);
  applyWaitcnt(EXP_CNT, Wait.ExpCnt);
  applyWaitcnt(LGKM_CNT, Wait.LgkmCnt);
  applyWaitcnt(VS_CNT, Wait.VsCnt);
}

void WaitcntBrackets::applyWaitcnt(InstCounterType T, unsigned Count) {
  const unsigned UB = getScoreUB(T);
  if (Count >= UB)
    return;
  if (Count != 0) {
    if (counterOutOfOrder(T))
      return;
    setScoreLB(T, std::max(getScoreLB(T), UB - Count));
  } else {
    setScoreLB(T, UB);
    PendingEvents &= ~WaitEventMaskForInst[T];
  }
}

// Where there are multiple types of event in the bracket of a counter,
// the decrement may go out of order.
bool WaitcntBrackets::counterOutOfOrder(InstCounterType T) const {
  // Scalar memory reads can always go out of order.
  if (T == LGKM_CNT && hasPendingEvent(SMEM_ACCESS))
    return true;
  return hasMixedPendingEvents(T);
}

INITIALIZE_PASS_BEGIN(SIInsertWaitcnts, DEBUG_TYPE, "SI Insert Waitcnts", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTree)
INITIALIZE_PASS_END(SIInsertWaitcnts, DEBUG_TYPE, "SI Insert Waitcnts", false,
                    false)

char SIInsertWaitcnts::ID = 0;

char &llvm::SIInsertWaitcntsID = SIInsertWaitcnts::ID;

FunctionPass *llvm::createSIInsertWaitcntsPass() {
  return new SIInsertWaitcnts();
}

static bool readsVCCZ(const MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  return (Opc == AMDGPU::S_CBRANCH_VCCNZ || Opc == AMDGPU::S_CBRANCH_VCCZ) &&
         !MI.getOperand(1).isUndef();
}

/// \returns true if the callee inserts an s_waitcnt 0 on function entry.
static bool callWaitsOnFunctionEntry(const MachineInstr &MI) {
  // Currently all conventions wait, but this may not always be the case.
  //
  // TODO: If IPRA is enabled, and the callee is isSafeForNoCSROpt, it may make
  // sense to omit the wait and do it in the caller.
  return true;
}

/// \returns true if the callee is expected to wait for any outstanding waits
/// before returning.
static bool callWaitsOnFunctionReturn(const MachineInstr &MI) {
  return true;
}

/// Generate an s_waitcnt instruction to be placed before cur_Inst.
/// Instructions of a given type are returned in order,
/// but instructions of different types can complete out of order.
/// We rely on this in-order completion
/// and simply assign a score to the memory access instructions.
/// We keep track of the active "score bracket" to determine
/// if an access of a memory read requires an s_waitcnt
/// and if so what the value of each counter is.
/// The "score bracket" is bound by the lower bound and upper bound
/// scores (*_score_LB and *_score_ub respectively).
bool SIInsertWaitcnts::generateWaitcntInstBefore(
    MachineInstr &MI, WaitcntBrackets &ScoreBrackets,
    MachineInstr *OldWaitcntInstr) {
  setForceEmitWaitcnt();
  bool IsForceEmitWaitcnt = isForceEmitWaitcnt();

  if (MI.isMetaInstruction())
    return false;

  AMDGPU::Waitcnt Wait;

  // See if this instruction has a forced S_WAITCNT VM.
  // TODO: Handle other cases of NeedsWaitcntVmBefore()
  if (MI.getOpcode() == AMDGPU::BUFFER_WBINVL1 ||
      MI.getOpcode() == AMDGPU::BUFFER_WBINVL1_SC ||
      MI.getOpcode() == AMDGPU::BUFFER_WBINVL1_VOL ||
      MI.getOpcode() == AMDGPU::BUFFER_GL0_INV ||
      MI.getOpcode() == AMDGPU::BUFFER_GL1_INV) {
    Wait.VmCnt = 0;
  }

  // All waits must be resolved at call return.
  // NOTE: this could be improved with knowledge of all call sites or
  //   with knowledge of the called routines.
  if (MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG ||
      MI.getOpcode() == AMDGPU::S_SETPC_B64_return ||
      (MI.isReturn() && MI.isCall() && !callWaitsOnFunctionEntry(MI))) {
    Wait = Wait.combined(AMDGPU::Waitcnt::allZero(ST->hasVscnt()));
  }
  // Resolve vm waits before gs-done.
  else if ((MI.getOpcode() == AMDGPU::S_SENDMSG ||
            MI.getOpcode() == AMDGPU::S_SENDMSGHALT) &&
           ((MI.getOperand(0).getImm() & AMDGPU::SendMsg::ID_MASK_) ==
            AMDGPU::SendMsg::ID_GS_DONE)) {
    Wait.VmCnt = 0;
  }
#if 0 // TODO: the following blocks of logic when we have fence.
  else if (MI.getOpcode() == SC_FENCE) {
    const unsigned int group_size =
      context->shader_info->GetMaxThreadGroupSize();
    // group_size == 0 means thread group size is unknown at compile time
    const bool group_is_multi_wave =
      (group_size == 0 || group_size > target_info->GetWaveFrontSize());
    const bool fence_is_global = !((SCInstInternalMisc*)Inst)->IsGroupFence();

    for (unsigned int i = 0; i < Inst->NumSrcOperands(); i++) {
      SCRegType src_type = Inst->GetSrcType(i);
      switch (src_type) {
        case SCMEM_LDS:
          if (group_is_multi_wave ||
              context->OptFlagIsOn(OPT_R1100_LDSMEM_FENCE_CHICKEN_BIT)) {
            EmitWaitcnt |= ScoreBrackets->updateByWait(LGKM_CNT,
                               ScoreBrackets->getScoreUB(LGKM_CNT));
            // LDS may have to wait for VM_CNT after buffer load to LDS
            if (target_info->HasBufferLoadToLDS()) {
              EmitWaitcnt |= ScoreBrackets->updateByWait(VM_CNT,
                                 ScoreBrackets->getScoreUB(VM_CNT));
            }
          }
          break;

        case SCMEM_GDS:
          if (group_is_multi_wave || fence_is_global) {
            EmitWaitcnt |= ScoreBrackets->updateByWait(EXP_CNT,
              ScoreBrackets->getScoreUB(EXP_CNT));
            EmitWaitcnt |= ScoreBrackets->updateByWait(LGKM_CNT,
              ScoreBrackets->getScoreUB(LGKM_CNT));
          }
          break;

        case SCMEM_UAV:
        case SCMEM_TFBUF:
        case SCMEM_RING:
        case SCMEM_SCATTER:
          if (group_is_multi_wave || fence_is_global) {
            EmitWaitcnt |= ScoreBrackets->updateByWait(EXP_CNT,
              ScoreBrackets->getScoreUB(EXP_CNT));
            EmitWaitcnt |= ScoreBrackets->updateByWait(VM_CNT,
              ScoreBrackets->getScoreUB(VM_CNT));
          }
          break;

        case SCMEM_SCRATCH:
        default:
          break;
      }
    }
  }
#endif

  // Export & GDS instructions do not read the EXEC mask until after the export
  // is granted (which can occur well after the instruction is issued).
  // The shader program must flush all EXP operations on the export-count
  // before overwriting the EXEC mask.
  else {
    if (MI.modifiesRegister(AMDGPU::EXEC, TRI)) {
      // Export and GDS are tracked individually, either may trigger a waitcnt
      // for EXEC.
      if (ScoreBrackets.hasPendingEvent(EXP_GPR_LOCK) ||
          ScoreBrackets.hasPendingEvent(EXP_PARAM_ACCESS) ||
          ScoreBrackets.hasPendingEvent(EXP_POS_ACCESS) ||
          ScoreBrackets.hasPendingEvent(GDS_GPR_LOCK)) {
        Wait.ExpCnt = 0;
      }
    }

    if (MI.isCall() && callWaitsOnFunctionEntry(MI)) {
      // The function is going to insert a wait on everything in its prolog.
      // This still needs to be careful if the call target is a load (e.g. a GOT
      // load). We also need to check WAW dependency with the saved PC.
      Wait = AMDGPU::Waitcnt();

      int CallAddrOpIdx =
          AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);

      if (MI.getOperand(CallAddrOpIdx).isReg()) {
        RegInterval CallAddrOpInterval =
            ScoreBrackets.getRegInterval(&MI, TII, MRI, TRI, CallAddrOpIdx);

        for (int RegNo = CallAddrOpInterval.first;
             RegNo < CallAddrOpInterval.second; ++RegNo)
          ScoreBrackets.determineWait(
              LGKM_CNT, ScoreBrackets.getRegScore(RegNo, LGKM_CNT), Wait);

        int RtnAddrOpIdx =
            AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dst);
        if (RtnAddrOpIdx != -1) {
          RegInterval RtnAddrOpInterval =
              ScoreBrackets.getRegInterval(&MI, TII, MRI, TRI, RtnAddrOpIdx);

          for (int RegNo = RtnAddrOpInterval.first;
               RegNo < RtnAddrOpInterval.second; ++RegNo)
            ScoreBrackets.determineWait(
                LGKM_CNT, ScoreBrackets.getRegScore(RegNo, LGKM_CNT), Wait);
        }
      }
    } else {
      // FIXME: Should not be relying on memoperands.
      // Look at the source operands of every instruction to see if
      // any of them results from a previous memory operation that affects
      // its current usage. If so, an s_waitcnt instruction needs to be
      // emitted.
      // If the source operand was defined by a load, add the s_waitcnt
      // instruction.
      //
      // Two cases are handled for destination operands:
      // 1) If the destination operand was defined by a load, add the s_waitcnt
      // instruction to guarantee the right WAW order.
      // 2) If a destination operand was used by a recent export/store
      // instruction, add an s_waitcnt on exp_cnt to guarantee the WAR order.
      for (const MachineMemOperand *Memop : MI.memoperands()) {
        const Value *Ptr = Memop->getValue();
        if (Memop->isStore() && SLoadAddresses.count(Ptr)) {
          addWait(Wait, LGKM_CNT, 0);
          if (PDT->dominates(MI.getParent(), SLoadAddresses.find(Ptr)->second))
            SLoadAddresses.erase(Ptr);
        }
        unsigned AS = Memop->getAddrSpace();
        if (AS != AMDGPUAS::LOCAL_ADDRESS)
          continue;
        unsigned RegNo = SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS;
        // VM_CNT is only relevant to vgpr or LDS.
        ScoreBrackets.determineWait(
            VM_CNT, ScoreBrackets.getRegScore(RegNo, VM_CNT), Wait);
        if (Memop->isStore()) {
          ScoreBrackets.determineWait(
              EXP_CNT, ScoreBrackets.getRegScore(RegNo, EXP_CNT), Wait);
        }
      }

      // Loop over use and def operands.
      for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
        MachineOperand &Op = MI.getOperand(I);
        if (!Op.isReg())
          continue;
        RegInterval Interval =
            ScoreBrackets.getRegInterval(&MI, TII, MRI, TRI, I);

        const bool IsVGPR = TRI->isVGPR(*MRI, Op.getReg());
        for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
          if (IsVGPR) {
            // RAW always needs an s_waitcnt. WAW needs an s_waitcnt unless the
            // previous write and this write are the same type of VMEM
            // instruction, in which case they're guaranteed to write their
            // results in order anyway.
            if (Op.isUse() || !SIInstrInfo::isVMEM(MI) ||
                ScoreBrackets.hasOtherPendingVmemTypes(RegNo,
                                                       getVmemType(MI))) {
              ScoreBrackets.determineWait(
                  VM_CNT, ScoreBrackets.getRegScore(RegNo, VM_CNT), Wait);
              ScoreBrackets.clearVgprVmemTypes(RegNo);
            }
            if (Op.isDef()) {
              ScoreBrackets.determineWait(
                  EXP_CNT, ScoreBrackets.getRegScore(RegNo, EXP_CNT), Wait);
            }
          }
          ScoreBrackets.determineWait(
              LGKM_CNT, ScoreBrackets.getRegScore(RegNo, LGKM_CNT), Wait);
        }
      }
    }
  }

  // Check to see if this is an S_BARRIER, and if an implicit S_WAITCNT 0
  // occurs before the instruction. Doing it here prevents any additional
  // S_WAITCNTs from being emitted if the instruction was marked as
  // requiring a WAITCNT beforehand.
  if (MI.getOpcode() == AMDGPU::S_BARRIER &&
      !ST->hasAutoWaitcntBeforeBarrier()) {
    Wait = Wait.combined(AMDGPU::Waitcnt::allZero(ST->hasVscnt()));
  }

  // TODO: Remove this work-around, enable the assert for Bug 457939
  //       after fixing the scheduler. Also, the Shader Compiler code is
  //       independent of target.
  if (readsVCCZ(MI) && ST->hasReadVCCZBug()) {
    if (ScoreBrackets.getScoreLB(LGKM_CNT) <
            ScoreBrackets.getScoreUB(LGKM_CNT) &&
        ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {
      Wait.LgkmCnt = 0;
    }
  }

  // Early-out if no wait is indicated.
  if (!ScoreBrackets.simplifyWaitcnt(Wait) && !IsForceEmitWaitcnt) {
    bool Modified = false;
    if (OldWaitcntInstr) {
      for (auto II = OldWaitcntInstr->getIterator(), NextI = std::next(II);
           &*II != &MI; II = NextI, ++NextI) {
        if (II->isDebugInstr())
          continue;

        if (TrackedWaitcntSet.count(&*II)) {
          TrackedWaitcntSet.erase(&*II);
          II->eraseFromParent();
          Modified = true;
        } else if (II->getOpcode() == AMDGPU::S_WAITCNT) {
          int64_t Imm = II->getOperand(0).getImm();
          ScoreBrackets.applyWaitcnt(AMDGPU::decodeWaitcnt(IV, Imm));
        } else {
          assert(II->getOpcode() == AMDGPU::S_WAITCNT_VSCNT);
          assert(II->getOperand(0).getReg() == AMDGPU::SGPR_NULL);
          auto W = TII->getNamedOperand(*II, AMDGPU::OpName::simm16)->getImm();
          ScoreBrackets.applyWaitcnt(AMDGPU::Waitcnt(~0u, ~0u, ~0u, W));
        }
      }
    }
    return Modified;
  }

  if (ForceEmitZeroWaitcnts)
    Wait = AMDGPU::Waitcnt::allZero(ST->hasVscnt());

  if (ForceEmitWaitcnt[VM_CNT])
    Wait.VmCnt = 0;
  if (ForceEmitWaitcnt[EXP_CNT])
    Wait.ExpCnt = 0;
  if (ForceEmitWaitcnt[LGKM_CNT])
    Wait.LgkmCnt = 0;
  if (ForceEmitWaitcnt[VS_CNT])
    Wait.VsCnt = 0;

  ScoreBrackets.applyWaitcnt(Wait);

  AMDGPU::Waitcnt OldWait;
  bool Modified = false;

  if (OldWaitcntInstr) {
    for (auto II = OldWaitcntInstr->getIterator(), NextI = std::next(II);
         &*II != &MI; II = NextI, NextI++) {
      if (II->isDebugInstr())
        continue;

      if (II->getOpcode() == AMDGPU::S_WAITCNT) {
        unsigned IEnc = II->getOperand(0).getImm();
        AMDGPU::Waitcnt IWait = AMDGPU::decodeWaitcnt(IV, IEnc);
        OldWait = OldWait.combined(IWait);
        if (!TrackedWaitcntSet.count(&*II))
          Wait = Wait.combined(IWait);
        unsigned NewEnc = AMDGPU::encodeWaitcnt(IV, Wait);
        if (IEnc != NewEnc) {
          II->getOperand(0).setImm(NewEnc);
          Modified = true;
        }
        Wait.VmCnt = ~0u;
        Wait.LgkmCnt = ~0u;
        Wait.ExpCnt = ~0u;
      } else {
        assert(II->getOpcode() == AMDGPU::S_WAITCNT_VSCNT);
        assert(II->getOperand(0).getReg() == AMDGPU::SGPR_NULL);

        unsigned ICnt = TII->getNamedOperand(*II, AMDGPU::OpName::simm16)
                            ->getImm();
        OldWait.VsCnt = std::min(OldWait.VsCnt, ICnt);
        if (!TrackedWaitcntSet.count(&*II))
          Wait.VsCnt = std::min(Wait.VsCnt, ICnt);
        if (Wait.VsCnt != ICnt) {
          TII->getNamedOperand(*II, AMDGPU::OpName::simm16)->setImm(Wait.VsCnt);
          Modified = true;
        }
        Wait.VsCnt = ~0u;
      }

      LLVM_DEBUG(dbgs() << "generateWaitcntInstBefore\n"
                        << "Old Instr: " << MI
                        << "New Instr: " << *II << '\n');

      if (!Wait.hasWait())
        return Modified;
    }
  }

  if (Wait.VmCnt != ~0u || Wait.LgkmCnt != ~0u || Wait.ExpCnt != ~0u) {
    unsigned Enc = AMDGPU::encodeWaitcnt(IV, Wait);
    auto SWaitInst = BuildMI(*MI.getParent(), MI.getIterator(),
                             MI.getDebugLoc(), TII->get(AMDGPU::S_WAITCNT))
                         .addImm(Enc);
    TrackedWaitcntSet.insert(SWaitInst);
    Modified = true;

    LLVM_DEBUG(dbgs() << "generateWaitcntInstBefore\n"
                      << "Old Instr: " << MI
                      << "New Instr: " << *SWaitInst << '\n');
  }

  if (Wait.VsCnt != ~0u) {
    assert(ST->hasVscnt());

    auto SWaitInst =
        BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
                TII->get(AMDGPU::S_WAITCNT_VSCNT))
            .addReg(AMDGPU::SGPR_NULL, RegState::Undef)
            .addImm(Wait.VsCnt);
    TrackedWaitcntSet.insert(SWaitInst);
    Modified = true;

    LLVM_DEBUG(dbgs() << "generateWaitcntInstBefore\n"
                      << "Old Instr: " << MI
                      << "New Instr: " << *SWaitInst << '\n');
  }

  return Modified;
}

// This is a flat memory operation. Check to see if it has memory tokens other
// than LDS. Other address spaces supported by flat memory operations involve
// global memory.
bool SIInsertWaitcnts::mayAccessVMEMThroughFlat(const MachineInstr &MI) const {
  assert(TII->isFLAT(MI));

  // All flat instructions use the VMEM counter.
  assert(TII->usesVM_CNT(MI));

  // If there are no memory operands then conservatively assume the flat
  // operation may access VMEM.
  if (MI.memoperands_empty())
    return true;

  // See if any memory operand specifies an address space that involves VMEM.
  // Flat operations only support FLAT, LOCAL (LDS), or address spaces
  // involving VMEM such as GLOBAL, CONSTANT, PRIVATE (SCRATCH), etc. The REGION
  // (GDS) address space is not supported by flat operations. Therefore, simply
  // return true unless only the LDS address space is found.
  for (const MachineMemOperand *Memop : MI.memoperands()) {
    unsigned AS = Memop->getAddrSpace();
    assert(AS != AMDGPUAS::REGION_ADDRESS);
    if (AS != AMDGPUAS::LOCAL_ADDRESS)
      return true;
  }

  return false;
}

// This is a flat memory operation. Check to see if it has memory tokens for
// either LDS or FLAT.
bool SIInsertWaitcnts::mayAccessLDSThroughFlat(const MachineInstr &MI) const {
  assert(TII->isFLAT(MI));

  // Flat instructions such as SCRATCH and GLOBAL do not use the lgkm counter.
  if (!TII->usesLGKM_CNT(MI))
    return false;

  // If there are no memory operands then conservatively assume the flat
  // operation may access LDS.
  if (MI.memoperands_empty())
    return true;

  // See if any memory operand specifies an address space that involves LDS.
  for (const MachineMemOperand *Memop : MI.memoperands()) {
    unsigned AS = Memop->getAddrSpace();
    if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS)
      return true;
  }

  return false;
}

void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst,
                                               WaitcntBrackets *ScoreBrackets) {
  // Now look at the instruction opcode. If it is a memory access
  // instruction, update the upper-bound of the appropriate counter's
  // bracket and the destination operand scores.
  // TODO: Use the (TSFlags & SIInstrFlags::LGKM_CNT) property everywhere.
  if (TII->isDS(Inst) && TII->usesLGKM_CNT(Inst)) {
    if (TII->isAlwaysGDS(Inst.getOpcode()) ||
        TII->hasModifiersSet(Inst, AMDGPU::OpName::gds)) {
      ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_ACCESS, Inst);
      ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_GPR_LOCK, Inst);
    } else {
      ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);
    }
  } else if (TII->isFLAT(Inst)) {
    assert(Inst.mayLoadOrStore());

    int FlatASCount = 0;

    if (mayAccessVMEMThroughFlat(Inst)) {
      ++FlatASCount;
      if (!ST->hasVscnt())
        ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_ACCESS, Inst);
      else if (Inst.mayLoad() &&
               AMDGPU::getAtomicRetOp(Inst.getOpcode()) == -1)
        ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_READ_ACCESS, Inst);
      else
        ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_WRITE_ACCESS, Inst);
    }

    if (mayAccessLDSThroughFlat(Inst)) {
      ++FlatASCount;
      ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);
    }

    // A Flat memory operation must access at least one address space.
    assert(FlatASCount);

    // This is a flat memory operation that accesses both VMEM and LDS, so note
    // it - it will require that both the VM and LGKM be flushed to zero if it
    // is pending when a VM or LGKM dependency occurs.
    if (FlatASCount > 1)
      ScoreBrackets->setPendingFlat();
  } else if (SIInstrInfo::isVMEM(Inst) &&
             // TODO: get a better carve out.
             Inst.getOpcode() != AMDGPU::BUFFER_WBINVL1 &&
             Inst.getOpcode() != AMDGPU::BUFFER_WBINVL1_SC &&
             Inst.getOpcode() != AMDGPU::BUFFER_WBINVL1_VOL &&
             Inst.getOpcode() != AMDGPU::BUFFER_GL0_INV &&
             Inst.getOpcode() != AMDGPU::BUFFER_GL1_INV) {
    if (!ST->hasVscnt())
      ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_ACCESS, Inst);
    else if ((Inst.mayLoad() &&
              AMDGPU::getAtomicRetOp(Inst.getOpcode()) == -1) ||
             /* IMAGE_GET_RESINFO / IMAGE_GET_LOD */
             (TII->isMIMG(Inst) && !Inst.mayLoad() && !Inst.mayStore()))
      ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_READ_ACCESS, Inst);
    else if (Inst.mayStore())
      ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_WRITE_ACCESS, Inst);

    if (ST->vmemWriteNeedsExpWaitcnt() &&
        (Inst.mayStore() || AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1)) {
      ScoreBrackets->updateByEvent(TII, TRI, MRI, VMW_GPR_LOCK, Inst);
    }
  } else if (TII->isSMRD(Inst)) {
    ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst);
  } else if (Inst.isCall()) {
    if (callWaitsOnFunctionReturn(Inst)) {
      // Act as a wait on everything.
      ScoreBrackets->applyWaitcnt(AMDGPU::Waitcnt::allZero(ST->hasVscnt()));
    } else {
      // May need to wait for anything.
      ScoreBrackets->applyWaitcnt(AMDGPU::Waitcnt());
    }
  } else if (SIInstrInfo::isEXP(Inst)) {
    int Imm = TII->getNamedOperand(Inst, AMDGPU::OpName::tgt)->getImm();
    if (Imm >= 32 && Imm <= 63)
      ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_PARAM_ACCESS, Inst);
    else if (Imm >= 12 && Imm <= 15)
      ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_POS_ACCESS, Inst);
    else
      ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_GPR_LOCK, Inst);
  } else {
    switch (Inst.getOpcode()) {
    case AMDGPU::S_SENDMSG:
    case AMDGPU::S_SENDMSGHALT:
      ScoreBrackets->updateByEvent(TII, TRI, MRI, SQ_MESSAGE, Inst);
      break;
    case AMDGPU::S_MEMTIME:
    case AMDGPU::S_MEMREALTIME:
      ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst);
      break;
    }
  }
}

bool WaitcntBrackets::mergeScore(const MergeInfo &M, unsigned &Score,
                                 unsigned OtherScore) {
  unsigned MyShifted = Score <= M.OldLB ? 0 : Score + M.MyShift;
  unsigned OtherShifted =
      OtherScore <= M.OtherLB ? 0 : OtherScore + M.OtherShift;
  Score = std::max(MyShifted, OtherShifted);
  return OtherShifted > MyShifted;
}

/// Merge the pending events and associated score brackets of \p Other into
/// this brackets status.
///
/// Returns whether the merge resulted in a change that requires tighter waits
/// (i.e. the merged brackets strictly dominate the original brackets).
bool WaitcntBrackets::merge(const WaitcntBrackets &Other) {
  bool StrictDom = false;

  VgprUB = std::max(VgprUB, Other.VgprUB);
  SgprUB = std::max(SgprUB, Other.SgprUB);

  for (auto T : inst_counter_types()) {
    // Merge event flags for this counter
    const bool OldOutOfOrder = counterOutOfOrder(T);
    const unsigned OldEvents = PendingEvents & WaitEventMaskForInst[T];
    const unsigned OtherEvents = Other.PendingEvents & WaitEventMaskForInst[T];
    if (OtherEvents & ~OldEvents)
      StrictDom = true;
    PendingEvents |= OtherEvents;

    // Merge scores for this counter
    const unsigned MyPending = ScoreUBs[T] - ScoreLBs[T];
    const unsigned OtherPending = Other.ScoreUBs[T] - Other.ScoreLBs[T];
    const unsigned NewUB = ScoreLBs[T] + std::max(MyPending, OtherPending);
    if (NewUB < ScoreLBs[T])
      report_fatal_error("waitcnt score overflow");

    MergeInfo M;
    M.OldLB = ScoreLBs[T];
    M.OtherLB = Other.ScoreLBs[T];
    M.MyShift = NewUB - ScoreUBs[T];
    M.OtherShift = NewUB - Other.ScoreUBs[T];

    ScoreUBs[T] = NewUB;

    StrictDom |= mergeScore(M, LastFlat[T], Other.LastFlat[T]);

    bool RegStrictDom = false;
    for (int J = 0; J <= VgprUB; J++) {
      RegStrictDom |= mergeScore(M, VgprScores[T][J], Other.VgprScores[T][J]);
    }

    if (T == VM_CNT) {
      for (int J = 0; J <= VgprUB; J++) {
        unsigned char NewVmemTypes = VgprVmemTypes[J] | Other.VgprVmemTypes[J];
        RegStrictDom |= NewVmemTypes != VgprVmemTypes[J];
        VgprVmemTypes[J] = NewVmemTypes;
      }
    }

    if (T == LGKM_CNT) {
      for (int J = 0; J <= SgprUB; J++) {
        RegStrictDom |= mergeScore(M, SgprScores[J], Other.SgprScores[J]);
      }
    }

    if (RegStrictDom && !OldOutOfOrder)
      StrictDom = true;
  }

  return StrictDom;
}
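
// Worked example of the rebasing done by merge()/mergeScore() above (an
// informal sketch, not functional code): suppose this block has LB = 0 and
// UB = 3 for some counter while the other incoming block has LB = 0, UB = 5.
// Then NewUB = 0 + max(3, 5) = 5, MyShift = 5 - 3 = 2 and OtherShift = 0. A
// register score of 2 on this side is rebased to 4, a score of 2 on the other
// side stays 2, and the merged score is the maximum of the two, so the
// relative ages of pending operations are preserved across the join.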

// Generate s_waitcnt instructions where needed.
bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
                                            MachineBasicBlock &Block,
                                            WaitcntBrackets &ScoreBrackets) {
  bool Modified = false;

  LLVM_DEBUG({
    dbgs() << "*** Block" << Block.getNumber() << " ***";
    ScoreBrackets.dump();
  });

  // Assume VCCZ is correct at basic block boundaries, unless and until we need
  // to handle cases where that is not true.
  bool VCCZCorrect = true;

  // Walk over the instructions.
  MachineInstr *OldWaitcntInstr = nullptr;

  for (MachineBasicBlock::instr_iterator Iter = Block.instr_begin(),
                                         E = Block.instr_end();
       Iter != E;) {
    MachineInstr &Inst = *Iter;

    // Track pre-existing waitcnts from earlier iterations.
    if (Inst.getOpcode() == AMDGPU::S_WAITCNT ||
        (Inst.getOpcode() == AMDGPU::S_WAITCNT_VSCNT &&
         Inst.getOperand(0).isReg() &&
         Inst.getOperand(0).getReg() == AMDGPU::SGPR_NULL)) {
      if (!OldWaitcntInstr)
        OldWaitcntInstr = &Inst;
      ++Iter;
      continue;
    }

    // We might need to restore vccz to its correct value for either of two
    // different reasons; see ST->hasReadVCCZBug() and
    // ST->partialVCCWritesUpdateVCCZ().
    bool RestoreVCCZ = false;
    if (readsVCCZ(Inst)) {
      if (!VCCZCorrect)
        RestoreVCCZ = true;
      else if (ST->hasReadVCCZBug()) {
        // There is a hardware bug on CI/SI where SMRD instructions may corrupt
        // the vccz bit, so when we detect that an instruction may read from a
        // corrupt vccz bit, we need to:
        // 1. Insert s_waitcnt lgkm(0) to wait for all outstanding SMRD
        //    operations to complete.
        // 2. Restore the correct value of vccz by writing the current value
        //    of vcc back to vcc.
        if (ScoreBrackets.getScoreLB(LGKM_CNT) <
                ScoreBrackets.getScoreUB(LGKM_CNT) &&
            ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {
          RestoreVCCZ = true;
        }
      }
    }

    if (TII->isSMRD(Inst)) {
      for (const MachineMemOperand *Memop : Inst.memoperands()) {
        const Value *Ptr = Memop->getValue();
        SLoadAddresses.insert(std::make_pair(Ptr, Inst.getParent()));
      }
    }

    if (!ST->partialVCCWritesUpdateVCCZ()) {
      // Up to gfx9, writes to vcc_lo and vcc_hi don't update vccz.
      // Writes to vcc will fix it.
      if (Inst.definesRegister(AMDGPU::VCC_LO) ||
          Inst.definesRegister(AMDGPU::VCC_HI))
        VCCZCorrect = false;
      else if (Inst.definesRegister(AMDGPU::VCC))
        VCCZCorrect = true;
    }

    // Generate an s_waitcnt instruction to be placed before cur_Inst, if
    // needed.
    Modified |= generateWaitcntInstBefore(Inst, ScoreBrackets, OldWaitcntInstr);
    OldWaitcntInstr = nullptr;

    updateEventWaitcntAfter(Inst, &ScoreBrackets);

#if 0 // TODO: implement resource type check controlled by options with ub = LB.
    // If this instruction generates a S_SETVSKIP because it is an
    // indexed resource, and we are on Tahiti, then it will also force
    // an S_WAITCNT vmcnt(0)
    if (RequireCheckResourceType(Inst, context)) {
      // Force the score to as if an S_WAITCNT vmcnt(0) is emitted.
      ScoreBrackets->setScoreLB(VM_CNT,
                                ScoreBrackets->getScoreUB(VM_CNT));
    }
#endif

    LLVM_DEBUG({
      Inst.print(dbgs());
      ScoreBrackets.dump();
    });

    // TODO: Remove this work-around after fixing the scheduler and enable the
    // assert above.
    if (RestoreVCCZ) {
      // Restore the vccz bit.
      // Any time a value is written to vcc, the vccz
      // bit is updated, so we can restore the bit by reading the value of
      // vcc and then writing it back to the register.
      BuildMI(Block, Inst, Inst.getDebugLoc(),
              TII->get(ST->isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64),
              TRI->getVCC())
          .addReg(TRI->getVCC());
      VCCZCorrect = true;
      Modified = true;
    }

    ++Iter;
  }

  return Modified;
}

bool SIInsertWaitcnts::runOnMachineFunction(MachineFunction &MF) {
  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();
  MRI = &MF.getRegInfo();
  IV = AMDGPU::getIsaVersion(ST->getCPU());
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  PDT = &getAnalysis<MachinePostDominatorTree>();

  ForceEmitZeroWaitcnts = ForceEmitZeroFlag;
  for (auto T : inst_counter_types())
    ForceEmitWaitcnt[T] = false;

  HardwareLimits.VmcntMax = AMDGPU::getVmcntBitMask(IV);
  HardwareLimits.ExpcntMax = AMDGPU::getExpcntBitMask(IV);
  HardwareLimits.LgkmcntMax = AMDGPU::getLgkmcntBitMask(IV);
  HardwareLimits.VscntMax = ST->hasVscnt() ? 63 : 0;

  unsigned NumVGPRsMax = ST->getAddressableNumVGPRs();
  unsigned NumSGPRsMax = ST->getAddressableNumSGPRs();
  assert(NumVGPRsMax <= SQ_MAX_PGM_VGPRS);
  assert(NumSGPRsMax <= SQ_MAX_PGM_SGPRS);

  RegisterEncoding.VGPR0 = TRI->getEncodingValue(AMDGPU::VGPR0);
  RegisterEncoding.VGPRL = RegisterEncoding.VGPR0 + NumVGPRsMax - 1;
  RegisterEncoding.SGPR0 = TRI->getEncodingValue(AMDGPU::SGPR0);
  RegisterEncoding.SGPRL = RegisterEncoding.SGPR0 + NumSGPRsMax - 1;

  TrackedWaitcntSet.clear();
  BlockInfos.clear();

  // Keep iterating over the blocks in reverse post order, inserting and
  // updating s_waitcnt where needed, until a fix point is reached.
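  // (Illustrative note: if merging the outgoing bracket state into a successor
  //  that appears earlier in the traversal changes that successor's incoming
  //  state, the successor is marked dirty and another sweep over the blocks is
  //  made; see the Repeat flag below.)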
  for (auto *MBB : ReversePostOrderTraversal<MachineFunction *>(&MF))
    BlockInfos.insert({MBB, BlockInfo(MBB)});

  std::unique_ptr<WaitcntBrackets> Brackets;
  bool Modified = false;
  bool Repeat;
  do {
    Repeat = false;

    for (auto BII = BlockInfos.begin(), BIE = BlockInfos.end(); BII != BIE;
         ++BII) {
      BlockInfo &BI = BII->second;
      if (!BI.Dirty)
        continue;

      if (BI.Incoming) {
        if (!Brackets)
          Brackets = std::make_unique<WaitcntBrackets>(*BI.Incoming);
        else
          *Brackets = *BI.Incoming;
      } else {
        if (!Brackets)
          Brackets = std::make_unique<WaitcntBrackets>(ST);
        else
          *Brackets = WaitcntBrackets(ST);
      }

      Modified |= insertWaitcntInBlock(MF, *BI.MBB, *Brackets);
      BI.Dirty = false;

      if (Brackets->hasPending()) {
        BlockInfo *MoveBracketsToSucc = nullptr;
        for (MachineBasicBlock *Succ : BI.MBB->successors()) {
          auto SuccBII = BlockInfos.find(Succ);
          BlockInfo &SuccBI = SuccBII->second;
          if (!SuccBI.Incoming) {
            SuccBI.Dirty = true;
            if (SuccBII <= BII)
              Repeat = true;
            if (!MoveBracketsToSucc) {
              MoveBracketsToSucc = &SuccBI;
            } else {
              SuccBI.Incoming = std::make_unique<WaitcntBrackets>(*Brackets);
            }
          } else if (SuccBI.Incoming->merge(*Brackets)) {
            SuccBI.Dirty = true;
            if (SuccBII <= BII)
              Repeat = true;
          }
        }
        if (MoveBracketsToSucc)
          MoveBracketsToSucc->Incoming = std::move(Brackets);
      }
    }
  } while (Repeat);

  SmallVector<MachineBasicBlock *, 4> EndPgmBlocks;

  bool HaveScalarStores = false;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end(); BI != BE;
       ++BI) {
    MachineBasicBlock &MBB = *BI;

    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;
         ++I) {
      if (!HaveScalarStores && TII->isScalarStore(*I))
        HaveScalarStores = true;

      if (I->getOpcode() == AMDGPU::S_ENDPGM ||
          I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG)
        EndPgmBlocks.push_back(&MBB);
    }
  }

  if (HaveScalarStores) {
    // If scalar writes are used, the cache must be flushed or else the next
    // wave to reuse the same scratch memory can be clobbered.
    //
    // Insert s_dcache_wb at wave termination points if there were any scalar
    // stores, and only if the cache hasn't already been flushed. This could be
    // improved by looking across blocks for flushes in postdominating blocks
    // from the stores but an explicitly requested flush is probably very rare.
    for (MachineBasicBlock *MBB : EndPgmBlocks) {
      bool SeenDCacheWB = false;

      for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
           ++I) {
        if (I->getOpcode() == AMDGPU::S_DCACHE_WB)
          SeenDCacheWB = true;
        else if (TII->isScalarStore(*I))
          SeenDCacheWB = false;

        // FIXME: It would be better to insert this before a waitcnt if any.
        if ((I->getOpcode() == AMDGPU::S_ENDPGM ||
             I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG) &&
            !SeenDCacheWB) {
          Modified = true;
          BuildMI(*MBB, I, I->getDebugLoc(), TII->get(AMDGPU::S_DCACHE_WB));
        }
      }
    }
  }

  if (!MFI->isEntryFunction()) {
    // Wait for any outstanding memory operations that the input registers may
    // depend on. We can't track them and it's better to do the wait after the
    // costly call sequence.

    // TODO: Could insert earlier and schedule more liberally with operations
    // that only use caller preserved registers.
    MachineBasicBlock &EntryBB = MF.front();
    MachineBasicBlock::iterator I = EntryBB.begin();
    for (MachineBasicBlock::iterator E = EntryBB.end();
         I != E && (I->isPHI() || I->isMetaInstruction()); ++I)
      ;
    BuildMI(EntryBB, I, DebugLoc(), TII->get(AMDGPU::S_WAITCNT)).addImm(0);
    if (ST->hasVscnt())
      BuildMI(EntryBB, I, DebugLoc(), TII->get(AMDGPU::S_WAITCNT_VSCNT))
          .addReg(AMDGPU::SGPR_NULL, RegState::Undef)
          .addImm(0);

    Modified = true;
  }

  return Modified;
}