1 //===- SIInsertWaitcnts.cpp - Insert Wait Instructions --------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 /// \file 10 /// Insert wait instructions for memory reads and writes. 11 /// 12 /// Memory reads and writes are issued asynchronously, so we need to insert 13 /// S_WAITCNT instructions when we want to access any of their results or 14 /// overwrite any register that's used asynchronously. 15 /// 16 /// TODO: This pass currently keeps one timeline per hardware counter. A more 17 /// finely-grained approach that keeps one timeline per event type could 18 /// sometimes get away with generating weaker s_waitcnt instructions. For 19 /// example, when both SMEM and LDS are in flight and we need to wait for 20 /// the i-th-last LDS instruction, then an lgkmcnt(i) is actually sufficient, 21 /// but the pass will currently generate a conservative lgkmcnt(0) because 22 /// multiple event types are in flight. 23 // 24 //===----------------------------------------------------------------------===// 25 26 #include "AMDGPU.h" 27 #include "GCNSubtarget.h" 28 #include "MCTargetDesc/AMDGPUMCTargetDesc.h" 29 #include "SIMachineFunctionInfo.h" 30 #include "Utils/AMDGPUBaseInfo.h" 31 #include "llvm/ADT/MapVector.h" 32 #include "llvm/ADT/PostOrderIterator.h" 33 #include "llvm/ADT/Sequence.h" 34 #include "llvm/CodeGen/MachinePostDominators.h" 35 #include "llvm/InitializePasses.h" 36 #include "llvm/Support/DebugCounter.h" 37 #include "llvm/Support/TargetParser.h" 38 using namespace llvm; 39 40 #define DEBUG_TYPE "si-insert-waitcnts" 41 42 DEBUG_COUNTER(ForceExpCounter, DEBUG_TYPE"-forceexp", 43 "Force emit s_waitcnt expcnt(0) instrs"); 44 DEBUG_COUNTER(ForceLgkmCounter, DEBUG_TYPE"-forcelgkm", 45 "Force emit s_waitcnt lgkmcnt(0) instrs"); 46 DEBUG_COUNTER(ForceVMCounter, DEBUG_TYPE"-forcevm", 47 "Force emit s_waitcnt vmcnt(0) instrs"); 48 49 static cl::opt<bool> ForceEmitZeroFlag( 50 "amdgpu-waitcnt-forcezero", 51 cl::desc("Force all waitcnt instrs to be emitted as s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)"), 52 cl::init(false), cl::Hidden); 53 54 namespace { 55 // Class of object that encapsulates latest instruction counter score 56 // associated with the operand. Used for determining whether 57 // s_waitcnt instruction needs to be emitted. 
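// Illustrative example of the problem being tracked (assembly mnemonics are
// for illustration only, not emitted by this file): for a sequence such as
//   global_load_dword v0, ...   ; outstanding result counted by vmcnt
//   ds_read_b32      v1, ...    ; outstanding result counted by lgkmcnt
//   v_add_u32        v2, v0, v1 ; consumes both results
// the pass inserts "s_waitcnt vmcnt(0) lgkmcnt(0)" before the add, assuming
// no other operations of those types are still in flight.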
58 59 #define CNT_MASK(t) (1u << (t)) 60 61 enum InstCounterType { VM_CNT = 0, LGKM_CNT, EXP_CNT, VS_CNT, NUM_INST_CNTS }; 62 } // namespace 63 64 namespace llvm { 65 template <> struct enum_iteration_traits<InstCounterType> { 66 static constexpr bool is_iterable = true; 67 }; 68 } // namespace llvm 69 70 namespace { 71 auto inst_counter_types() { return enum_seq(VM_CNT, NUM_INST_CNTS); } 72 73 using RegInterval = std::pair<int, int>; 74 75 struct HardwareLimits { 76 unsigned VmcntMax; 77 unsigned ExpcntMax; 78 unsigned LgkmcntMax; 79 unsigned VscntMax; 80 }; 81 82 struct RegisterEncoding { 83 unsigned VGPR0; 84 unsigned VGPRL; 85 unsigned SGPR0; 86 unsigned SGPRL; 87 }; 88 89 enum WaitEventType { 90 VMEM_ACCESS, // vector-memory read & write 91 VMEM_READ_ACCESS, // vector-memory read 92 VMEM_WRITE_ACCESS, // vector-memory write 93 LDS_ACCESS, // lds read & write 94 GDS_ACCESS, // gds read & write 95 SQ_MESSAGE, // send message 96 SMEM_ACCESS, // scalar-memory read & write 97 EXP_GPR_LOCK, // export holding on its data src 98 GDS_GPR_LOCK, // GDS holding on its data and addr src 99 EXP_POS_ACCESS, // write to export position 100 EXP_PARAM_ACCESS, // write to export parameter 101 VMW_GPR_LOCK, // vector-memory write holding on its data src 102 EXP_LDS_ACCESS, // read by ldsdir counting as export 103 NUM_WAIT_EVENTS, 104 }; 105 106 static const unsigned WaitEventMaskForInst[NUM_INST_CNTS] = { 107 (1 << VMEM_ACCESS) | (1 << VMEM_READ_ACCESS), 108 (1 << SMEM_ACCESS) | (1 << LDS_ACCESS) | (1 << GDS_ACCESS) | 109 (1 << SQ_MESSAGE), 110 (1 << EXP_GPR_LOCK) | (1 << GDS_GPR_LOCK) | (1 << VMW_GPR_LOCK) | 111 (1 << EXP_PARAM_ACCESS) | (1 << EXP_POS_ACCESS) | (1 << EXP_LDS_ACCESS), 112 (1 << VMEM_WRITE_ACCESS)}; 113 114 // The mapping is: 115 // 0 .. SQ_MAX_PGM_VGPRS-1 real VGPRs 116 // SQ_MAX_PGM_VGPRS .. NUM_ALL_VGPRS-1 extra VGPR-like slots 117 // NUM_ALL_VGPRS .. NUM_ALL_VGPRS+SQ_MAX_PGM_SGPRS-1 real SGPRs 118 // We reserve a fixed number of VGPR slots in the scoring tables for 119 // special tokens like SCMEM_LDS (needed for buffer load to LDS). 120 enum RegisterMapping { 121 SQ_MAX_PGM_VGPRS = 512, // Maximum programmable VGPRs across all targets. 122 AGPR_OFFSET = 256, // Maximum programmable ArchVGPRs across all targets. 123 SQ_MAX_PGM_SGPRS = 256, // Maximum programmable SGPRs across all targets. 124 NUM_EXTRA_VGPRS = 1, // A reserved slot for DS. 125 EXTRA_VGPR_LDS = 0, // An artificial register to track LDS writes. 126 NUM_ALL_VGPRS = SQ_MAX_PGM_VGPRS + NUM_EXTRA_VGPRS, // Where SGPR starts. 127 }; 128 129 // Enumerate different types of result-returning VMEM operations. Although 130 // s_waitcnt orders them all with a single vmcnt counter, in the absence of 131 // s_waitcnt only instructions of the same VmemType are guaranteed to write 132 // their results in order -- so there is no need to insert an s_waitcnt between 133 // two instructions of the same type that write the same vgpr. 134 enum VmemType { 135 // BUF instructions and MIMG instructions without a sampler. 136 VMEM_NOSAMPLER, 137 // MIMG instructions with a sampler. 138 VMEM_SAMPLER, 139 // BVH instructions 140 VMEM_BVH 141 }; 142 143 VmemType getVmemType(const MachineInstr &Inst) { 144 assert(SIInstrInfo::isVMEM(Inst)); 145 if (!SIInstrInfo::isMIMG(Inst)) 146 return VMEM_NOSAMPLER; 147 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Inst.getOpcode()); 148 const AMDGPU::MIMGBaseOpcodeInfo *BaseInfo = 149 AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode); 150 return BaseInfo->BVH ? VMEM_BVH 151 : BaseInfo->Sampler ? 
VMEM_SAMPLER : VMEM_NOSAMPLER; 152 } 153 154 void addWait(AMDGPU::Waitcnt &Wait, InstCounterType T, unsigned Count) { 155 switch (T) { 156 case VM_CNT: 157 Wait.VmCnt = std::min(Wait.VmCnt, Count); 158 break; 159 case EXP_CNT: 160 Wait.ExpCnt = std::min(Wait.ExpCnt, Count); 161 break; 162 case LGKM_CNT: 163 Wait.LgkmCnt = std::min(Wait.LgkmCnt, Count); 164 break; 165 case VS_CNT: 166 Wait.VsCnt = std::min(Wait.VsCnt, Count); 167 break; 168 default: 169 llvm_unreachable("bad InstCounterType"); 170 } 171 } 172 173 // This objects maintains the current score brackets of each wait counter, and 174 // a per-register scoreboard for each wait counter. 175 // 176 // We also maintain the latest score for every event type that can change the 177 // waitcnt in order to know if there are multiple types of events within 178 // the brackets. When multiple types of event happen in the bracket, 179 // wait count may get decreased out of order, therefore we need to put in 180 // "s_waitcnt 0" before use. 181 class WaitcntBrackets { 182 public: 183 WaitcntBrackets(const GCNSubtarget *SubTarget, HardwareLimits Limits, 184 RegisterEncoding Encoding) 185 : ST(SubTarget), Limits(Limits), Encoding(Encoding) {} 186 187 unsigned getWaitCountMax(InstCounterType T) const { 188 switch (T) { 189 case VM_CNT: 190 return Limits.VmcntMax; 191 case LGKM_CNT: 192 return Limits.LgkmcntMax; 193 case EXP_CNT: 194 return Limits.ExpcntMax; 195 case VS_CNT: 196 return Limits.VscntMax; 197 default: 198 break; 199 } 200 return 0; 201 } 202 203 unsigned getScoreLB(InstCounterType T) const { 204 assert(T < NUM_INST_CNTS); 205 return ScoreLBs[T]; 206 } 207 208 unsigned getScoreUB(InstCounterType T) const { 209 assert(T < NUM_INST_CNTS); 210 return ScoreUBs[T]; 211 } 212 213 // Mapping from event to counter. 214 InstCounterType eventCounter(WaitEventType E) { 215 if (WaitEventMaskForInst[VM_CNT] & (1 << E)) 216 return VM_CNT; 217 if (WaitEventMaskForInst[LGKM_CNT] & (1 << E)) 218 return LGKM_CNT; 219 if (WaitEventMaskForInst[VS_CNT] & (1 << E)) 220 return VS_CNT; 221 assert(WaitEventMaskForInst[EXP_CNT] & (1 << E)); 222 return EXP_CNT; 223 } 224 225 unsigned getRegScore(int GprNo, InstCounterType T) { 226 if (GprNo < NUM_ALL_VGPRS) { 227 return VgprScores[T][GprNo]; 228 } 229 assert(T == LGKM_CNT); 230 return SgprScores[GprNo - NUM_ALL_VGPRS]; 231 } 232 233 bool merge(const WaitcntBrackets &Other); 234 235 RegInterval getRegInterval(const MachineInstr *MI, const SIInstrInfo *TII, 236 const MachineRegisterInfo *MRI, 237 const SIRegisterInfo *TRI, unsigned OpNo) const; 238 239 bool counterOutOfOrder(InstCounterType T) const; 240 void simplifyWaitcnt(AMDGPU::Waitcnt &Wait) const; 241 void simplifyWaitcnt(InstCounterType T, unsigned &Count) const; 242 void determineWait(InstCounterType T, unsigned ScoreToWait, 243 AMDGPU::Waitcnt &Wait) const; 244 void applyWaitcnt(const AMDGPU::Waitcnt &Wait); 245 void applyWaitcnt(InstCounterType T, unsigned Count); 246 void updateByEvent(const SIInstrInfo *TII, const SIRegisterInfo *TRI, 247 const MachineRegisterInfo *MRI, WaitEventType E, 248 MachineInstr &MI); 249 250 bool hasPending() const { return PendingEvents != 0; } 251 bool hasPendingEvent(WaitEventType E) const { 252 return PendingEvents & (1 << E); 253 } 254 255 bool hasMixedPendingEvents(InstCounterType T) const { 256 unsigned Events = PendingEvents & WaitEventMaskForInst[T]; 257 // Return true if more than one bit is set in Events. 
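    // For example, Events == 0b0110 yields 0b0110 & 0b0101 == 0b0100 (more
    // than one event type pending), while Events == 0b0100 yields 0 (a
    // single event type pending).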
258 return Events & (Events - 1); 259 } 260 261 bool hasPendingFlat() const { 262 return ((LastFlat[LGKM_CNT] > ScoreLBs[LGKM_CNT] && 263 LastFlat[LGKM_CNT] <= ScoreUBs[LGKM_CNT]) || 264 (LastFlat[VM_CNT] > ScoreLBs[VM_CNT] && 265 LastFlat[VM_CNT] <= ScoreUBs[VM_CNT])); 266 } 267 268 void setPendingFlat() { 269 LastFlat[VM_CNT] = ScoreUBs[VM_CNT]; 270 LastFlat[LGKM_CNT] = ScoreUBs[LGKM_CNT]; 271 } 272 273 // Return true if there might be pending writes to the specified vgpr by VMEM 274 // instructions with types different from V. 275 bool hasOtherPendingVmemTypes(int GprNo, VmemType V) const { 276 assert(GprNo < NUM_ALL_VGPRS); 277 return VgprVmemTypes[GprNo] & ~(1 << V); 278 } 279 280 void clearVgprVmemTypes(int GprNo) { 281 assert(GprNo < NUM_ALL_VGPRS); 282 VgprVmemTypes[GprNo] = 0; 283 } 284 285 void print(raw_ostream &); 286 void dump() { print(dbgs()); } 287 288 private: 289 struct MergeInfo { 290 unsigned OldLB; 291 unsigned OtherLB; 292 unsigned MyShift; 293 unsigned OtherShift; 294 }; 295 static bool mergeScore(const MergeInfo &M, unsigned &Score, 296 unsigned OtherScore); 297 298 void setScoreLB(InstCounterType T, unsigned Val) { 299 assert(T < NUM_INST_CNTS); 300 ScoreLBs[T] = Val; 301 } 302 303 void setScoreUB(InstCounterType T, unsigned Val) { 304 assert(T < NUM_INST_CNTS); 305 ScoreUBs[T] = Val; 306 if (T == EXP_CNT) { 307 unsigned UB = ScoreUBs[T] - getWaitCountMax(EXP_CNT); 308 if (ScoreLBs[T] < UB && UB < ScoreUBs[T]) 309 ScoreLBs[T] = UB; 310 } 311 } 312 313 void setRegScore(int GprNo, InstCounterType T, unsigned Val) { 314 if (GprNo < NUM_ALL_VGPRS) { 315 VgprUB = std::max(VgprUB, GprNo); 316 VgprScores[T][GprNo] = Val; 317 } else { 318 assert(T == LGKM_CNT); 319 SgprUB = std::max(SgprUB, GprNo - NUM_ALL_VGPRS); 320 SgprScores[GprNo - NUM_ALL_VGPRS] = Val; 321 } 322 } 323 324 void setExpScore(const MachineInstr *MI, const SIInstrInfo *TII, 325 const SIRegisterInfo *TRI, const MachineRegisterInfo *MRI, 326 unsigned OpNo, unsigned Val); 327 328 const GCNSubtarget *ST = nullptr; 329 HardwareLimits Limits = {}; 330 RegisterEncoding Encoding = {}; 331 unsigned ScoreLBs[NUM_INST_CNTS] = {0}; 332 unsigned ScoreUBs[NUM_INST_CNTS] = {0}; 333 unsigned PendingEvents = 0; 334 // Remember the last flat memory operation. 335 unsigned LastFlat[NUM_INST_CNTS] = {0}; 336 // wait_cnt scores for every vgpr. 337 // Keep track of the VgprUB and SgprUB to make merge at join efficient. 338 int VgprUB = -1; 339 int SgprUB = -1; 340 unsigned VgprScores[NUM_INST_CNTS][NUM_ALL_VGPRS] = {{0}}; 341 // Wait cnt scores for every sgpr, only lgkmcnt is relevant. 342 unsigned SgprScores[SQ_MAX_PGM_SGPRS] = {0}; 343 // Bitmask of the VmemTypes of VMEM instructions that might have a pending 344 // write to each vgpr. 
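  // For example, a vgpr with pending writes from both a sampler MIMG load and
  // a plain buffer load has (1 << VMEM_SAMPLER) | (1 << VMEM_NOSAMPLER) set
  // here, so a later VMEM write to it cannot rely on in-order completion.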
345 unsigned char VgprVmemTypes[NUM_ALL_VGPRS] = {0}; 346 }; 347 348 class SIInsertWaitcnts : public MachineFunctionPass { 349 private: 350 const GCNSubtarget *ST = nullptr; 351 const SIInstrInfo *TII = nullptr; 352 const SIRegisterInfo *TRI = nullptr; 353 const MachineRegisterInfo *MRI = nullptr; 354 AMDGPU::IsaVersion IV; 355 356 DenseSet<MachineInstr *> TrackedWaitcntSet; 357 DenseMap<const Value *, MachineBasicBlock *> SLoadAddresses; 358 MachinePostDominatorTree *PDT; 359 360 struct BlockInfo { 361 MachineBasicBlock *MBB; 362 std::unique_ptr<WaitcntBrackets> Incoming; 363 bool Dirty = true; 364 365 explicit BlockInfo(MachineBasicBlock *MBB) : MBB(MBB) {} 366 }; 367 368 MapVector<MachineBasicBlock *, BlockInfo> BlockInfos; 369 370 // ForceEmitZeroWaitcnts: force all waitcnts insts to be s_waitcnt 0 371 // because of amdgpu-waitcnt-forcezero flag 372 bool ForceEmitZeroWaitcnts; 373 bool ForceEmitWaitcnt[NUM_INST_CNTS]; 374 375 public: 376 static char ID; 377 378 SIInsertWaitcnts() : MachineFunctionPass(ID) { 379 (void)ForceExpCounter; 380 (void)ForceLgkmCounter; 381 (void)ForceVMCounter; 382 } 383 384 bool runOnMachineFunction(MachineFunction &MF) override; 385 386 StringRef getPassName() const override { 387 return "SI insert wait instructions"; 388 } 389 390 void getAnalysisUsage(AnalysisUsage &AU) const override { 391 AU.setPreservesCFG(); 392 AU.addRequired<MachinePostDominatorTree>(); 393 MachineFunctionPass::getAnalysisUsage(AU); 394 } 395 396 bool isForceEmitWaitcnt() const { 397 for (auto T : inst_counter_types()) 398 if (ForceEmitWaitcnt[T]) 399 return true; 400 return false; 401 } 402 403 void setForceEmitWaitcnt() { 404 // For non-debug builds, ForceEmitWaitcnt has been initialized to false; 405 // For debug builds, get the debug counter info and adjust if need be 406 #ifndef NDEBUG 407 if (DebugCounter::isCounterSet(ForceExpCounter) && 408 DebugCounter::shouldExecute(ForceExpCounter)) { 409 ForceEmitWaitcnt[EXP_CNT] = true; 410 } else { 411 ForceEmitWaitcnt[EXP_CNT] = false; 412 } 413 414 if (DebugCounter::isCounterSet(ForceLgkmCounter) && 415 DebugCounter::shouldExecute(ForceLgkmCounter)) { 416 ForceEmitWaitcnt[LGKM_CNT] = true; 417 } else { 418 ForceEmitWaitcnt[LGKM_CNT] = false; 419 } 420 421 if (DebugCounter::isCounterSet(ForceVMCounter) && 422 DebugCounter::shouldExecute(ForceVMCounter)) { 423 ForceEmitWaitcnt[VM_CNT] = true; 424 } else { 425 ForceEmitWaitcnt[VM_CNT] = false; 426 } 427 #endif // NDEBUG 428 } 429 430 bool mayAccessVMEMThroughFlat(const MachineInstr &MI) const; 431 bool mayAccessLDSThroughFlat(const MachineInstr &MI) const; 432 bool generateWaitcntInstBefore(MachineInstr &MI, 433 WaitcntBrackets &ScoreBrackets, 434 MachineInstr *OldWaitcntInstr); 435 void updateEventWaitcntAfter(MachineInstr &Inst, 436 WaitcntBrackets *ScoreBrackets); 437 bool insertWaitcntInBlock(MachineFunction &MF, MachineBasicBlock &Block, 438 WaitcntBrackets &ScoreBrackets); 439 bool applyPreexistingWaitcnt(WaitcntBrackets &ScoreBrackets, 440 MachineInstr &OldWaitcntInstr, 441 AMDGPU::Waitcnt &Wait, const MachineInstr *MI); 442 }; 443 444 } // end anonymous namespace 445 446 RegInterval WaitcntBrackets::getRegInterval(const MachineInstr *MI, 447 const SIInstrInfo *TII, 448 const MachineRegisterInfo *MRI, 449 const SIRegisterInfo *TRI, 450 unsigned OpNo) const { 451 const MachineOperand &Op = MI->getOperand(OpNo); 452 if (!TRI->isInAllocatableClass(Op.getReg())) 453 return {-1, -1}; 454 455 // A use via a PW operand does not need a waitcnt. 456 // A partial write is not a WAW. 
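  // The returned interval is half-open over score-table slots: for example, a
  // 64-bit register pair whose encoded index relative to VGPR0 is 4 maps to
  // Result.first = 4 and Result.second = 4 + (64 + 16) / 32 = 6, i.e. [4, 6).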
457 assert(!Op.getSubReg() || !Op.isUndef()); 458 459 RegInterval Result; 460 461 unsigned Reg = TRI->getEncodingValue(AMDGPU::getMCReg(Op.getReg(), *ST)); 462 463 if (TRI->isVectorRegister(*MRI, Op.getReg())) { 464 assert(Reg >= Encoding.VGPR0 && Reg <= Encoding.VGPRL); 465 Result.first = Reg - Encoding.VGPR0; 466 if (TRI->isAGPR(*MRI, Op.getReg())) 467 Result.first += AGPR_OFFSET; 468 assert(Result.first >= 0 && Result.first < SQ_MAX_PGM_VGPRS); 469 } else if (TRI->isSGPRReg(*MRI, Op.getReg())) { 470 assert(Reg >= Encoding.SGPR0 && Reg < SQ_MAX_PGM_SGPRS); 471 Result.first = Reg - Encoding.SGPR0 + NUM_ALL_VGPRS; 472 assert(Result.first >= NUM_ALL_VGPRS && 473 Result.first < SQ_MAX_PGM_SGPRS + NUM_ALL_VGPRS); 474 } 475 // TODO: Handle TTMP 476 // else if (TRI->isTTMP(*MRI, Reg.getReg())) ... 477 else 478 return {-1, -1}; 479 480 const TargetRegisterClass *RC = TII->getOpRegClass(*MI, OpNo); 481 unsigned Size = TRI->getRegSizeInBits(*RC); 482 Result.second = Result.first + ((Size + 16) / 32); 483 484 return Result; 485 } 486 487 void WaitcntBrackets::setExpScore(const MachineInstr *MI, 488 const SIInstrInfo *TII, 489 const SIRegisterInfo *TRI, 490 const MachineRegisterInfo *MRI, unsigned OpNo, 491 unsigned Val) { 492 RegInterval Interval = getRegInterval(MI, TII, MRI, TRI, OpNo); 493 assert(TRI->isVectorRegister(*MRI, MI->getOperand(OpNo).getReg())); 494 for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) { 495 setRegScore(RegNo, EXP_CNT, Val); 496 } 497 } 498 499 // MUBUF and FLAT LDS DMA operations need a wait on vmcnt before LDS written 500 // can be accessed. A load from LDS to VMEM does not need a wait. 501 static bool mayWriteLDSThroughDMA(const MachineInstr &MI) { 502 return SIInstrInfo::isVALU(MI) && 503 (SIInstrInfo::isMUBUF(MI) || SIInstrInfo::isFLAT(MI)) && 504 MI.getOpcode() != AMDGPU::BUFFER_STORE_LDS_DWORD; 505 } 506 507 void WaitcntBrackets::updateByEvent(const SIInstrInfo *TII, 508 const SIRegisterInfo *TRI, 509 const MachineRegisterInfo *MRI, 510 WaitEventType E, MachineInstr &Inst) { 511 InstCounterType T = eventCounter(E); 512 unsigned CurrScore = getScoreUB(T) + 1; 513 if (CurrScore == 0) 514 report_fatal_error("InsertWaitcnt score wraparound"); 515 // PendingEvents and ScoreUB need to be update regardless if this event 516 // changes the score of a register or not. 517 // Examples including vm_cnt when buffer-store or lgkm_cnt when send-message. 518 PendingEvents |= 1 << E; 519 setScoreUB(T, CurrScore); 520 521 if (T == EXP_CNT) { 522 // Put score on the source vgprs. If this is a store, just use those 523 // specific register(s). 524 if (TII->isDS(Inst) && (Inst.mayStore() || Inst.mayLoad())) { 525 int AddrOpIdx = 526 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::addr); 527 // All GDS operations must protect their address register (same as 528 // export.) 
529 if (AddrOpIdx != -1) { 530 setExpScore(&Inst, TII, TRI, MRI, AddrOpIdx, CurrScore); 531 } 532 533 if (Inst.mayStore()) { 534 if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), 535 AMDGPU::OpName::data0) != -1) { 536 setExpScore( 537 &Inst, TII, TRI, MRI, 538 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data0), 539 CurrScore); 540 } 541 if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), 542 AMDGPU::OpName::data1) != -1) { 543 setExpScore(&Inst, TII, TRI, MRI, 544 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), 545 AMDGPU::OpName::data1), 546 CurrScore); 547 } 548 } else if (SIInstrInfo::isAtomicRet(Inst) && 549 Inst.getOpcode() != AMDGPU::DS_GWS_INIT && 550 Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_V && 551 Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_BR && 552 Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_P && 553 Inst.getOpcode() != AMDGPU::DS_GWS_BARRIER && 554 Inst.getOpcode() != AMDGPU::DS_APPEND && 555 Inst.getOpcode() != AMDGPU::DS_CONSUME && 556 Inst.getOpcode() != AMDGPU::DS_ORDERED_COUNT) { 557 for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) { 558 const MachineOperand &Op = Inst.getOperand(I); 559 if (Op.isReg() && !Op.isDef() && 560 TRI->isVectorRegister(*MRI, Op.getReg())) { 561 setExpScore(&Inst, TII, TRI, MRI, I, CurrScore); 562 } 563 } 564 } 565 } else if (TII->isFLAT(Inst)) { 566 if (Inst.mayStore()) { 567 setExpScore( 568 &Inst, TII, TRI, MRI, 569 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data), 570 CurrScore); 571 } else if (SIInstrInfo::isAtomicRet(Inst)) { 572 setExpScore( 573 &Inst, TII, TRI, MRI, 574 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data), 575 CurrScore); 576 } 577 } else if (TII->isMIMG(Inst)) { 578 if (Inst.mayStore()) { 579 setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore); 580 } else if (SIInstrInfo::isAtomicRet(Inst)) { 581 setExpScore( 582 &Inst, TII, TRI, MRI, 583 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data), 584 CurrScore); 585 } 586 } else if (TII->isMTBUF(Inst)) { 587 if (Inst.mayStore()) { 588 setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore); 589 } 590 } else if (TII->isMUBUF(Inst)) { 591 if (Inst.mayStore()) { 592 setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore); 593 } else if (SIInstrInfo::isAtomicRet(Inst)) { 594 setExpScore( 595 &Inst, TII, TRI, MRI, 596 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data), 597 CurrScore); 598 } 599 } else if (TII->isLDSDIR(Inst)) { 600 // LDSDIR instructions attach the score to the destination. 601 setExpScore( 602 &Inst, TII, TRI, MRI, 603 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::vdst), 604 CurrScore); 605 } else { 606 if (TII->isEXP(Inst)) { 607 // For export the destination registers are really temps that 608 // can be used as the actual source after export patching, so 609 // we need to treat them like sources and set the EXP_CNT 610 // score. 611 for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) { 612 MachineOperand &DefMO = Inst.getOperand(I); 613 if (DefMO.isReg() && DefMO.isDef() && 614 TRI->isVGPR(*MRI, DefMO.getReg())) { 615 setRegScore( 616 TRI->getEncodingValue(AMDGPU::getMCReg(DefMO.getReg(), *ST)), 617 EXP_CNT, CurrScore); 618 } 619 } 620 } 621 for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) { 622 MachineOperand &MO = Inst.getOperand(I); 623 if (MO.isReg() && !MO.isDef() && 624 TRI->isVectorRegister(*MRI, MO.getReg())) { 625 setExpScore(&Inst, TII, TRI, MRI, I, CurrScore); 626 } 627 } 628 } 629 #if 0 // TODO: check if this is handled by MUBUF code above. 
630 } else if (Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORD || 631 Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORDX2 || 632 Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORDX4) { 633 MachineOperand *MO = TII->getNamedOperand(Inst, AMDGPU::OpName::data); 634 unsigned OpNo;//TODO: find the OpNo for this operand; 635 RegInterval Interval = getRegInterval(&Inst, TII, MRI, TRI, OpNo); 636 for (int RegNo = Interval.first; RegNo < Interval.second; 637 ++RegNo) { 638 setRegScore(RegNo + NUM_ALL_VGPRS, t, CurrScore); 639 } 640 #endif 641 } else { 642 // Match the score to the destination registers. 643 for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) { 644 auto &Op = Inst.getOperand(I); 645 if (!Op.isReg() || !Op.isDef()) 646 continue; 647 RegInterval Interval = getRegInterval(&Inst, TII, MRI, TRI, I); 648 if (T == VM_CNT) { 649 if (Interval.first >= NUM_ALL_VGPRS) 650 continue; 651 if (SIInstrInfo::isVMEM(Inst)) { 652 VmemType V = getVmemType(Inst); 653 for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) 654 VgprVmemTypes[RegNo] |= 1 << V; 655 } 656 } 657 for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) { 658 setRegScore(RegNo, T, CurrScore); 659 } 660 } 661 if (Inst.mayStore() && (TII->isDS(Inst) || mayWriteLDSThroughDMA(Inst))) { 662 setRegScore(SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS, T, CurrScore); 663 } 664 } 665 } 666 667 void WaitcntBrackets::print(raw_ostream &OS) { 668 OS << '\n'; 669 for (auto T : inst_counter_types()) { 670 unsigned LB = getScoreLB(T); 671 unsigned UB = getScoreUB(T); 672 673 switch (T) { 674 case VM_CNT: 675 OS << " VM_CNT(" << UB - LB << "): "; 676 break; 677 case LGKM_CNT: 678 OS << " LGKM_CNT(" << UB - LB << "): "; 679 break; 680 case EXP_CNT: 681 OS << " EXP_CNT(" << UB - LB << "): "; 682 break; 683 case VS_CNT: 684 OS << " VS_CNT(" << UB - LB << "): "; 685 break; 686 default: 687 OS << " UNKNOWN(" << UB - LB << "): "; 688 break; 689 } 690 691 if (LB < UB) { 692 // Print vgpr scores. 693 for (int J = 0; J <= VgprUB; J++) { 694 unsigned RegScore = getRegScore(J, T); 695 if (RegScore <= LB) 696 continue; 697 unsigned RelScore = RegScore - LB - 1; 698 if (J < SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS) { 699 OS << RelScore << ":v" << J << " "; 700 } else { 701 OS << RelScore << ":ds "; 702 } 703 } 704 // Also need to print sgpr scores for lgkm_cnt. 705 if (T == LGKM_CNT) { 706 for (int J = 0; J <= SgprUB; J++) { 707 unsigned RegScore = getRegScore(J + NUM_ALL_VGPRS, LGKM_CNT); 708 if (RegScore <= LB) 709 continue; 710 unsigned RelScore = RegScore - LB - 1; 711 OS << RelScore << ":s" << J << " "; 712 } 713 } 714 } 715 OS << '\n'; 716 } 717 OS << '\n'; 718 } 719 720 /// Simplify the waitcnt, in the sense of removing redundant counts, and return 721 /// whether a waitcnt instruction is needed at all. 722 void WaitcntBrackets::simplifyWaitcnt(AMDGPU::Waitcnt &Wait) const { 723 simplifyWaitcnt(VM_CNT, Wait.VmCnt); 724 simplifyWaitcnt(EXP_CNT, Wait.ExpCnt); 725 simplifyWaitcnt(LGKM_CNT, Wait.LgkmCnt); 726 simplifyWaitcnt(VS_CNT, Wait.VsCnt); 727 } 728 729 void WaitcntBrackets::simplifyWaitcnt(InstCounterType T, 730 unsigned &Count) const { 731 const unsigned LB = getScoreLB(T); 732 const unsigned UB = getScoreUB(T); 733 734 // The number of outstanding events for this type, T, can be calculated 735 // as (UB - LB). If the current Count is greater than or equal to the number 736 // of outstanding events, then the wait for this counter is redundant. 
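  // For example, with LB == 4 and UB == 7 there are three outstanding events,
  // so a requested wait of 3 or more is already satisfied and is dropped by
  // resetting Count to ~0u.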
737 if (Count >= UB - LB) 738 Count = ~0u; 739 } 740 741 void WaitcntBrackets::determineWait(InstCounterType T, unsigned ScoreToWait, 742 AMDGPU::Waitcnt &Wait) const { 743 // If the score of src_operand falls within the bracket, we need an 744 // s_waitcnt instruction. 745 const unsigned LB = getScoreLB(T); 746 const unsigned UB = getScoreUB(T); 747 if ((UB >= ScoreToWait) && (ScoreToWait > LB)) { 748 if ((T == VM_CNT || T == LGKM_CNT) && 749 hasPendingFlat() && 750 !ST->hasFlatLgkmVMemCountInOrder()) { 751 // If there is a pending FLAT operation, and this is a VMem or LGKM 752 // waitcnt and the target can report early completion, then we need 753 // to force a waitcnt 0. 754 addWait(Wait, T, 0); 755 } else if (counterOutOfOrder(T)) { 756 // Counter can get decremented out-of-order when there 757 // are multiple types event in the bracket. Also emit an s_wait counter 758 // with a conservative value of 0 for the counter. 759 addWait(Wait, T, 0); 760 } else { 761 // If a counter has been maxed out avoid overflow by waiting for 762 // MAX(CounterType) - 1 instead. 763 unsigned NeededWait = std::min(UB - ScoreToWait, getWaitCountMax(T) - 1); 764 addWait(Wait, T, NeededWait); 765 } 766 } 767 } 768 769 void WaitcntBrackets::applyWaitcnt(const AMDGPU::Waitcnt &Wait) { 770 applyWaitcnt(VM_CNT, Wait.VmCnt); 771 applyWaitcnt(EXP_CNT, Wait.ExpCnt); 772 applyWaitcnt(LGKM_CNT, Wait.LgkmCnt); 773 applyWaitcnt(VS_CNT, Wait.VsCnt); 774 } 775 776 void WaitcntBrackets::applyWaitcnt(InstCounterType T, unsigned Count) { 777 const unsigned UB = getScoreUB(T); 778 if (Count >= UB) 779 return; 780 if (Count != 0) { 781 if (counterOutOfOrder(T)) 782 return; 783 setScoreLB(T, std::max(getScoreLB(T), UB - Count)); 784 } else { 785 setScoreLB(T, UB); 786 PendingEvents &= ~WaitEventMaskForInst[T]; 787 } 788 } 789 790 // Where there are multiple types of event in the bracket of a counter, 791 // the decrement may go out of order. 792 bool WaitcntBrackets::counterOutOfOrder(InstCounterType T) const { 793 // Scalar memory read always can go out of order. 794 if (T == LGKM_CNT && hasPendingEvent(SMEM_ACCESS)) 795 return true; 796 return hasMixedPendingEvents(T); 797 } 798 799 INITIALIZE_PASS_BEGIN(SIInsertWaitcnts, DEBUG_TYPE, "SI Insert Waitcnts", false, 800 false) 801 INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTree) 802 INITIALIZE_PASS_END(SIInsertWaitcnts, DEBUG_TYPE, "SI Insert Waitcnts", false, 803 false) 804 805 char SIInsertWaitcnts::ID = 0; 806 807 char &llvm::SIInsertWaitcntsID = SIInsertWaitcnts::ID; 808 809 FunctionPass *llvm::createSIInsertWaitcntsPass() { 810 return new SIInsertWaitcnts(); 811 } 812 813 /// Combine consecutive waitcnt instructions that precede \p MI and follow 814 /// \p OldWaitcntInstr and apply any extra wait from waitcnt that were added 815 /// by previous passes. Currently this pass conservatively assumes that these 816 /// preexisting waitcnt are required for correctness. 817 bool SIInsertWaitcnts::applyPreexistingWaitcnt(WaitcntBrackets &ScoreBrackets, 818 MachineInstr &OldWaitcntInstr, 819 AMDGPU::Waitcnt &Wait, 820 const MachineInstr *MI) { 821 bool Modified = false; 822 MachineInstr *WaitcntInstr = nullptr; 823 MachineInstr *WaitcntVsCntInstr = nullptr; 824 for (auto II = OldWaitcntInstr.getIterator(), NextI = std::next(II); 825 &*II != MI; II = NextI, ++NextI) { 826 if (II->isMetaInstruction()) 827 continue; 828 829 if (II->getOpcode() == AMDGPU::S_WAITCNT) { 830 // Conservatively update required wait if this waitcnt was added in an 831 // earlier pass. 
In this case it will not exist in the tracked waitcnt 832 // set. 833 if (!TrackedWaitcntSet.count(&*II)) { 834 unsigned IEnc = II->getOperand(0).getImm(); 835 AMDGPU::Waitcnt OldWait = AMDGPU::decodeWaitcnt(IV, IEnc); 836 Wait = Wait.combined(OldWait); 837 } 838 839 // Merge consecutive waitcnt of the same type by erasing multiples. 840 if (!WaitcntInstr) { 841 WaitcntInstr = &*II; 842 } else { 843 II->eraseFromParent(); 844 Modified = true; 845 } 846 847 } else { 848 assert(II->getOpcode() == AMDGPU::S_WAITCNT_VSCNT); 849 assert(II->getOperand(0).getReg() == AMDGPU::SGPR_NULL); 850 if (!TrackedWaitcntSet.count(&*II)) { 851 unsigned OldVSCnt = 852 TII->getNamedOperand(*II, AMDGPU::OpName::simm16)->getImm(); 853 Wait.VsCnt = std::min(Wait.VsCnt, OldVSCnt); 854 } 855 856 if (!WaitcntVsCntInstr) { 857 WaitcntVsCntInstr = &*II; 858 } else { 859 II->eraseFromParent(); 860 Modified = true; 861 } 862 } 863 } 864 865 // Updated encoding of merged waitcnt with the required wait. 866 if (WaitcntInstr) { 867 if (Wait.hasWaitExceptVsCnt()) { 868 unsigned NewEnc = AMDGPU::encodeWaitcnt(IV, Wait); 869 unsigned OldEnc = WaitcntInstr->getOperand(0).getImm(); 870 if (OldEnc != NewEnc) { 871 WaitcntInstr->getOperand(0).setImm(NewEnc); 872 Modified = true; 873 } 874 ScoreBrackets.applyWaitcnt(Wait); 875 Wait.VmCnt = ~0u; 876 Wait.LgkmCnt = ~0u; 877 Wait.ExpCnt = ~0u; 878 879 LLVM_DEBUG(dbgs() << "generateWaitcntInstBefore\n" 880 << "Old Instr: " << *MI << "New Instr: " << *WaitcntInstr 881 << '\n'); 882 } else { 883 WaitcntInstr->eraseFromParent(); 884 Modified = true; 885 } 886 } 887 888 if (WaitcntVsCntInstr) { 889 if (Wait.hasWaitVsCnt()) { 890 assert(ST->hasVscnt()); 891 unsigned OldVSCnt = 892 TII->getNamedOperand(*WaitcntVsCntInstr, AMDGPU::OpName::simm16) 893 ->getImm(); 894 if (Wait.VsCnt != OldVSCnt) { 895 TII->getNamedOperand(*WaitcntVsCntInstr, AMDGPU::OpName::simm16) 896 ->setImm(Wait.VsCnt); 897 Modified = true; 898 } 899 ScoreBrackets.applyWaitcnt(Wait); 900 Wait.VsCnt = ~0u; 901 902 LLVM_DEBUG(dbgs() << "generateWaitcntInstBefore\n" 903 << "Old Instr: " << *MI 904 << "New Instr: " << *WaitcntVsCntInstr << '\n'); 905 } else { 906 WaitcntVsCntInstr->eraseFromParent(); 907 Modified = true; 908 } 909 } 910 911 return Modified; 912 } 913 914 static bool readsVCCZ(const MachineInstr &MI) { 915 unsigned Opc = MI.getOpcode(); 916 return (Opc == AMDGPU::S_CBRANCH_VCCNZ || Opc == AMDGPU::S_CBRANCH_VCCZ) && 917 !MI.getOperand(1).isUndef(); 918 } 919 920 /// \returns true if the callee inserts an s_waitcnt 0 on function entry. 921 static bool callWaitsOnFunctionEntry(const MachineInstr &MI) { 922 // Currently all conventions wait, but this may not always be the case. 923 // 924 // TODO: If IPRA is enabled, and the callee is isSafeForNoCSROpt, it may make 925 // senses to omit the wait and do it in the caller. 926 return true; 927 } 928 929 /// \returns true if the callee is expected to wait for any outstanding waits 930 /// before returning. 931 static bool callWaitsOnFunctionReturn(const MachineInstr &MI) { 932 return true; 933 } 934 935 /// Generate s_waitcnt instruction to be placed before cur_Inst. 936 /// Instructions of a given type are returned in order, 937 /// but instructions of different types can complete out of order. 938 /// We rely on this in-order completion 939 /// and simply assign a score to the memory access instructions. 
940 /// We keep track of the active "score bracket" to determine 941 /// if an access of a memory read requires an s_waitcnt 942 /// and if so what the value of each counter is. 943 /// The "score bracket" is bound by the lower bound and upper bound 944 /// scores (*_score_LB and *_score_ub respectively). 945 bool SIInsertWaitcnts::generateWaitcntInstBefore( 946 MachineInstr &MI, WaitcntBrackets &ScoreBrackets, 947 MachineInstr *OldWaitcntInstr) { 948 setForceEmitWaitcnt(); 949 950 if (MI.isMetaInstruction()) 951 return false; 952 953 AMDGPU::Waitcnt Wait; 954 bool Modified = false; 955 956 // FIXME: This should have already been handled by the memory legalizer. 957 // Removing this currently doesn't affect any lit tests, but we need to 958 // verify that nothing was relying on this. The number of buffer invalidates 959 // being handled here should not be expanded. 960 if (MI.getOpcode() == AMDGPU::BUFFER_WBINVL1 || 961 MI.getOpcode() == AMDGPU::BUFFER_WBINVL1_SC || 962 MI.getOpcode() == AMDGPU::BUFFER_WBINVL1_VOL || 963 MI.getOpcode() == AMDGPU::BUFFER_GL0_INV || 964 MI.getOpcode() == AMDGPU::BUFFER_GL1_INV) { 965 Wait.VmCnt = 0; 966 } 967 968 // All waits must be resolved at call return. 969 // NOTE: this could be improved with knowledge of all call sites or 970 // with knowledge of the called routines. 971 if (MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG || 972 MI.getOpcode() == AMDGPU::SI_RETURN || 973 MI.getOpcode() == AMDGPU::S_SETPC_B64_return || 974 (MI.isReturn() && MI.isCall() && !callWaitsOnFunctionEntry(MI))) { 975 Wait = Wait.combined(AMDGPU::Waitcnt::allZero(ST->hasVscnt())); 976 } 977 // Resolve vm waits before gs-done. 978 else if ((MI.getOpcode() == AMDGPU::S_SENDMSG || 979 MI.getOpcode() == AMDGPU::S_SENDMSGHALT) && 980 ST->hasLegacyGeometry() && 981 ((MI.getOperand(0).getImm() & AMDGPU::SendMsg::ID_MASK_PreGFX11_) == 982 AMDGPU::SendMsg::ID_GS_DONE_PreGFX11)) { 983 Wait.VmCnt = 0; 984 } 985 #if 0 // TODO: the following blocks of logic when we have fence. 
986 else if (MI.getOpcode() == SC_FENCE) { 987 const unsigned int group_size = 988 context->shader_info->GetMaxThreadGroupSize(); 989 // group_size == 0 means thread group size is unknown at compile time 990 const bool group_is_multi_wave = 991 (group_size == 0 || group_size > target_info->GetWaveFrontSize()); 992 const bool fence_is_global = !((SCInstInternalMisc*)Inst)->IsGroupFence(); 993 994 for (unsigned int i = 0; i < Inst->NumSrcOperands(); i++) { 995 SCRegType src_type = Inst->GetSrcType(i); 996 switch (src_type) { 997 case SCMEM_LDS: 998 if (group_is_multi_wave || 999 context->OptFlagIsOn(OPT_R1100_LDSMEM_FENCE_CHICKEN_BIT)) { 1000 EmitWaitcnt |= ScoreBrackets->updateByWait(LGKM_CNT, 1001 ScoreBrackets->getScoreUB(LGKM_CNT)); 1002 // LDS may have to wait for VM_CNT after buffer load to LDS 1003 if (target_info->HasBufferLoadToLDS()) { 1004 EmitWaitcnt |= ScoreBrackets->updateByWait(VM_CNT, 1005 ScoreBrackets->getScoreUB(VM_CNT)); 1006 } 1007 } 1008 break; 1009 1010 case SCMEM_GDS: 1011 if (group_is_multi_wave || fence_is_global) { 1012 EmitWaitcnt |= ScoreBrackets->updateByWait(EXP_CNT, 1013 ScoreBrackets->getScoreUB(EXP_CNT)); 1014 EmitWaitcnt |= ScoreBrackets->updateByWait(LGKM_CNT, 1015 ScoreBrackets->getScoreUB(LGKM_CNT)); 1016 } 1017 break; 1018 1019 case SCMEM_UAV: 1020 case SCMEM_TFBUF: 1021 case SCMEM_RING: 1022 case SCMEM_SCATTER: 1023 if (group_is_multi_wave || fence_is_global) { 1024 EmitWaitcnt |= ScoreBrackets->updateByWait(EXP_CNT, 1025 ScoreBrackets->getScoreUB(EXP_CNT)); 1026 EmitWaitcnt |= ScoreBrackets->updateByWait(VM_CNT, 1027 ScoreBrackets->getScoreUB(VM_CNT)); 1028 } 1029 break; 1030 1031 case SCMEM_SCRATCH: 1032 default: 1033 break; 1034 } 1035 } 1036 } 1037 #endif 1038 1039 // Export & GDS instructions do not read the EXEC mask until after the export 1040 // is granted (which can occur well after the instruction is issued). 1041 // The shader program must flush all EXP operations on the export-count 1042 // before overwriting the EXEC mask. 1043 else { 1044 if (MI.modifiesRegister(AMDGPU::EXEC, TRI)) { 1045 // Export and GDS are tracked individually, either may trigger a waitcnt 1046 // for EXEC. 1047 if (ScoreBrackets.hasPendingEvent(EXP_GPR_LOCK) || 1048 ScoreBrackets.hasPendingEvent(EXP_PARAM_ACCESS) || 1049 ScoreBrackets.hasPendingEvent(EXP_POS_ACCESS) || 1050 ScoreBrackets.hasPendingEvent(GDS_GPR_LOCK)) { 1051 Wait.ExpCnt = 0; 1052 } 1053 } 1054 1055 if (MI.isCall() && callWaitsOnFunctionEntry(MI)) { 1056 // The function is going to insert a wait on everything in its prolog. 1057 // This still needs to be careful if the call target is a load (e.g. a GOT 1058 // load). We also need to check WAW dependency with saved PC. 
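      // For example, if the call address in src0 was produced by an s_load
      // that is still outstanding, the loop below adds an lgkmcnt wait for
      // those registers before the call instruction.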
1059 Wait = AMDGPU::Waitcnt(); 1060 1061 int CallAddrOpIdx = 1062 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0); 1063 1064 if (MI.getOperand(CallAddrOpIdx).isReg()) { 1065 RegInterval CallAddrOpInterval = 1066 ScoreBrackets.getRegInterval(&MI, TII, MRI, TRI, CallAddrOpIdx); 1067 1068 for (int RegNo = CallAddrOpInterval.first; 1069 RegNo < CallAddrOpInterval.second; ++RegNo) 1070 ScoreBrackets.determineWait( 1071 LGKM_CNT, ScoreBrackets.getRegScore(RegNo, LGKM_CNT), Wait); 1072 1073 int RtnAddrOpIdx = 1074 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dst); 1075 if (RtnAddrOpIdx != -1) { 1076 RegInterval RtnAddrOpInterval = 1077 ScoreBrackets.getRegInterval(&MI, TII, MRI, TRI, RtnAddrOpIdx); 1078 1079 for (int RegNo = RtnAddrOpInterval.first; 1080 RegNo < RtnAddrOpInterval.second; ++RegNo) 1081 ScoreBrackets.determineWait( 1082 LGKM_CNT, ScoreBrackets.getRegScore(RegNo, LGKM_CNT), Wait); 1083 } 1084 } 1085 } else { 1086 // FIXME: Should not be relying on memoperands. 1087 // Look at the source operands of every instruction to see if 1088 // any of them results from a previous memory operation that affects 1089 // its current usage. If so, an s_waitcnt instruction needs to be 1090 // emitted. 1091 // If the source operand was defined by a load, add the s_waitcnt 1092 // instruction. 1093 // 1094 // Two cases are handled for destination operands: 1095 // 1) If the destination operand was defined by a load, add the s_waitcnt 1096 // instruction to guarantee the right WAW order. 1097 // 2) If a destination operand that was used by a recent export/store ins, 1098 // add s_waitcnt on exp_cnt to guarantee the WAR order. 1099 for (const MachineMemOperand *Memop : MI.memoperands()) { 1100 const Value *Ptr = Memop->getValue(); 1101 if (Memop->isStore() && SLoadAddresses.count(Ptr)) { 1102 addWait(Wait, LGKM_CNT, 0); 1103 if (PDT->dominates(MI.getParent(), SLoadAddresses.find(Ptr)->second)) 1104 SLoadAddresses.erase(Ptr); 1105 } 1106 unsigned AS = Memop->getAddrSpace(); 1107 if (AS != AMDGPUAS::LOCAL_ADDRESS && AS != AMDGPUAS::FLAT_ADDRESS) 1108 continue; 1109 // No need to wait before load from VMEM to LDS. 1110 if (mayWriteLDSThroughDMA(MI)) 1111 continue; 1112 unsigned RegNo = SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS; 1113 // VM_CNT is only relevant to vgpr or LDS. 1114 ScoreBrackets.determineWait( 1115 VM_CNT, ScoreBrackets.getRegScore(RegNo, VM_CNT), Wait); 1116 if (Memop->isStore()) { 1117 ScoreBrackets.determineWait( 1118 EXP_CNT, ScoreBrackets.getRegScore(RegNo, EXP_CNT), Wait); 1119 } 1120 } 1121 1122 // Loop over use and def operands. 1123 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) { 1124 MachineOperand &Op = MI.getOperand(I); 1125 if (!Op.isReg()) 1126 continue; 1127 RegInterval Interval = 1128 ScoreBrackets.getRegInterval(&MI, TII, MRI, TRI, I); 1129 1130 const bool IsVGPR = TRI->isVectorRegister(*MRI, Op.getReg()); 1131 for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) { 1132 if (IsVGPR) { 1133 // RAW always needs an s_waitcnt. WAW needs an s_waitcnt unless the 1134 // previous write and this write are the same type of VMEM 1135 // instruction, in which case they're guaranteed to write their 1136 // results in order anyway. 
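            // For example, two image_sample writes to the same vgpr need no
            // wait between them, but an image_sample write followed by a
            // buffer_load write to that vgpr does, since the two VMEM types
            // may complete out of order.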
1137 if (Op.isUse() || !SIInstrInfo::isVMEM(MI) || 1138 ScoreBrackets.hasOtherPendingVmemTypes(RegNo, 1139 getVmemType(MI))) { 1140 ScoreBrackets.determineWait( 1141 VM_CNT, ScoreBrackets.getRegScore(RegNo, VM_CNT), Wait); 1142 ScoreBrackets.clearVgprVmemTypes(RegNo); 1143 } 1144 if (Op.isDef() || ScoreBrackets.hasPendingEvent(EXP_LDS_ACCESS)) { 1145 ScoreBrackets.determineWait( 1146 EXP_CNT, ScoreBrackets.getRegScore(RegNo, EXP_CNT), Wait); 1147 } 1148 } 1149 ScoreBrackets.determineWait( 1150 LGKM_CNT, ScoreBrackets.getRegScore(RegNo, LGKM_CNT), Wait); 1151 } 1152 } 1153 } 1154 } 1155 1156 // Check to see if this is an S_BARRIER, and if an implicit S_WAITCNT 0 1157 // occurs before the instruction. Doing it here prevents any additional 1158 // S_WAITCNTs from being emitted if the instruction was marked as 1159 // requiring a WAITCNT beforehand. 1160 if (MI.getOpcode() == AMDGPU::S_BARRIER && 1161 !ST->hasAutoWaitcntBeforeBarrier()) { 1162 Wait = Wait.combined(AMDGPU::Waitcnt::allZero(ST->hasVscnt())); 1163 } 1164 1165 // TODO: Remove this work-around, enable the assert for Bug 457939 1166 // after fixing the scheduler. Also, the Shader Compiler code is 1167 // independent of target. 1168 if (readsVCCZ(MI) && ST->hasReadVCCZBug()) { 1169 if (ScoreBrackets.getScoreLB(LGKM_CNT) < 1170 ScoreBrackets.getScoreUB(LGKM_CNT) && 1171 ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) { 1172 Wait.LgkmCnt = 0; 1173 } 1174 } 1175 1176 // Verify that the wait is actually needed. 1177 ScoreBrackets.simplifyWaitcnt(Wait); 1178 1179 if (ForceEmitZeroWaitcnts) 1180 Wait = AMDGPU::Waitcnt::allZero(ST->hasVscnt()); 1181 1182 if (ForceEmitWaitcnt[VM_CNT]) 1183 Wait.VmCnt = 0; 1184 if (ForceEmitWaitcnt[EXP_CNT]) 1185 Wait.ExpCnt = 0; 1186 if (ForceEmitWaitcnt[LGKM_CNT]) 1187 Wait.LgkmCnt = 0; 1188 if (ForceEmitWaitcnt[VS_CNT]) 1189 Wait.VsCnt = 0; 1190 1191 if (OldWaitcntInstr) { 1192 // Try to merge the required wait with preexisting waitcnt instructions. 1193 // Also erase redundant waitcnt. 1194 Modified = 1195 applyPreexistingWaitcnt(ScoreBrackets, *OldWaitcntInstr, Wait, &MI); 1196 } else { 1197 // Update waitcnt brackets after determining the required wait. 1198 ScoreBrackets.applyWaitcnt(Wait); 1199 } 1200 1201 // ExpCnt can be merged into VINTERP. 1202 if (Wait.ExpCnt != ~0u && SIInstrInfo::isVINTERP(MI)) { 1203 MachineOperand *WaitExp = TII->getNamedOperand(MI, AMDGPU::OpName::waitexp); 1204 if (Wait.ExpCnt < WaitExp->getImm()) { 1205 WaitExp->setImm(Wait.ExpCnt); 1206 Modified = true; 1207 } 1208 Wait.ExpCnt = ~0u; 1209 1210 LLVM_DEBUG(dbgs() << "generateWaitcntInstBefore\n" 1211 << "Update Instr: " << MI); 1212 } 1213 1214 // Build new waitcnt instructions unless no wait is needed or the old waitcnt 1215 // instruction was modified to handle the required wait. 
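  // Sketch of the result (field widths vary by target): a remaining
  // requirement of {VmCnt=0, LgkmCnt=0, ExpCnt=~0u} is packed by
  // AMDGPU::encodeWaitcnt into one immediate and emitted as
  // "s_waitcnt vmcnt(0) lgkmcnt(0)"; a required vscnt is emitted separately
  // below as an S_WAITCNT_VSCNT instruction.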
1216 if (Wait.hasWaitExceptVsCnt()) { 1217 unsigned Enc = AMDGPU::encodeWaitcnt(IV, Wait); 1218 auto SWaitInst = BuildMI(*MI.getParent(), MI.getIterator(), 1219 MI.getDebugLoc(), TII->get(AMDGPU::S_WAITCNT)) 1220 .addImm(Enc); 1221 TrackedWaitcntSet.insert(SWaitInst); 1222 Modified = true; 1223 1224 LLVM_DEBUG(dbgs() << "generateWaitcntInstBefore\n" 1225 << "Old Instr: " << MI 1226 << "New Instr: " << *SWaitInst << '\n'); 1227 } 1228 1229 if (Wait.hasWaitVsCnt()) { 1230 assert(ST->hasVscnt()); 1231 1232 auto SWaitInst = 1233 BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(), 1234 TII->get(AMDGPU::S_WAITCNT_VSCNT)) 1235 .addReg(AMDGPU::SGPR_NULL, RegState::Undef) 1236 .addImm(Wait.VsCnt); 1237 TrackedWaitcntSet.insert(SWaitInst); 1238 Modified = true; 1239 1240 LLVM_DEBUG(dbgs() << "generateWaitcntInstBefore\n" 1241 << "Old Instr: " << MI 1242 << "New Instr: " << *SWaitInst << '\n'); 1243 } 1244 1245 return Modified; 1246 } 1247 1248 // This is a flat memory operation. Check to see if it has memory tokens other 1249 // than LDS. Other address spaces supported by flat memory operations involve 1250 // global memory. 1251 bool SIInsertWaitcnts::mayAccessVMEMThroughFlat(const MachineInstr &MI) const { 1252 assert(TII->isFLAT(MI)); 1253 1254 // All flat instructions use the VMEM counter. 1255 assert(TII->usesVM_CNT(MI)); 1256 1257 // If there are no memory operands then conservatively assume the flat 1258 // operation may access VMEM. 1259 if (MI.memoperands_empty()) 1260 return true; 1261 1262 // See if any memory operand specifies an address space that involves VMEM. 1263 // Flat operations only supported FLAT, LOCAL (LDS), or address spaces 1264 // involving VMEM such as GLOBAL, CONSTANT, PRIVATE (SCRATCH), etc. The REGION 1265 // (GDS) address space is not supported by flat operations. Therefore, simply 1266 // return true unless only the LDS address space is found. 1267 for (const MachineMemOperand *Memop : MI.memoperands()) { 1268 unsigned AS = Memop->getAddrSpace(); 1269 assert(AS != AMDGPUAS::REGION_ADDRESS); 1270 if (AS != AMDGPUAS::LOCAL_ADDRESS) 1271 return true; 1272 } 1273 1274 return false; 1275 } 1276 1277 // This is a flat memory operation. Check to see if it has memory tokens for 1278 // either LDS or FLAT. 1279 bool SIInsertWaitcnts::mayAccessLDSThroughFlat(const MachineInstr &MI) const { 1280 assert(TII->isFLAT(MI)); 1281 1282 // Flat instruction such as SCRATCH and GLOBAL do not use the lgkm counter. 1283 if (!TII->usesLGKM_CNT(MI)) 1284 return false; 1285 1286 // If in tgsplit mode then there can be no use of LDS. 1287 if (ST->isTgSplitEnabled()) 1288 return false; 1289 1290 // If there are no memory operands then conservatively assume the flat 1291 // operation may access LDS. 1292 if (MI.memoperands_empty()) 1293 return true; 1294 1295 // See if any memory operand specifies an address space that involves LDS. 1296 for (const MachineMemOperand *Memop : MI.memoperands()) { 1297 unsigned AS = Memop->getAddrSpace(); 1298 if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) 1299 return true; 1300 } 1301 1302 return false; 1303 } 1304 1305 void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst, 1306 WaitcntBrackets *ScoreBrackets) { 1307 // Now look at the instruction opcode. If it is a memory access 1308 // instruction, update the upper-bound of the appropriate counter's 1309 // bracket and the destination operand scores. 1310 // TODO: Use the (TSFlags & SIInstrFlags::LGKM_CNT) property everywhere. 
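  // For example, an s_load_dword below is scored as an SMEM_ACCESS event on
  // lgkmcnt, while a buffer_store becomes a VMEM_WRITE_ACCESS event when the
  // target has a separate vscnt counter and a VMEM_ACCESS event otherwise.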
  if (TII->isDS(Inst) && TII->usesLGKM_CNT(Inst)) {
    if (TII->isAlwaysGDS(Inst.getOpcode()) ||
        TII->hasModifiersSet(Inst, AMDGPU::OpName::gds)) {
      ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_ACCESS, Inst);
      ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_GPR_LOCK, Inst);
    } else {
      ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);
    }
  } else if (TII->isFLAT(Inst)) {
    assert(Inst.mayLoadOrStore());

    int FlatASCount = 0;

    if (mayAccessVMEMThroughFlat(Inst)) {
      ++FlatASCount;
      if (!ST->hasVscnt())
        ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_ACCESS, Inst);
      else if (Inst.mayLoad() && !SIInstrInfo::isAtomicNoRet(Inst))
        ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_READ_ACCESS, Inst);
      else
        ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_WRITE_ACCESS, Inst);
    }

    if (mayAccessLDSThroughFlat(Inst)) {
      ++FlatASCount;
      ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);
    }

    // A Flat memory operation must access at least one address space.
    assert(FlatASCount);

    // This is a flat memory operation that accesses both VMEM and LDS, so note
    // it - it will require that both the VM and LGKM be flushed to zero if it
    // is pending when a VM or LGKM dependency occurs.
    if (FlatASCount > 1)
      ScoreBrackets->setPendingFlat();
  } else if (SIInstrInfo::isVMEM(Inst) &&
             !llvm::AMDGPU::getMUBUFIsBufferInv(Inst.getOpcode())) {
    if (!ST->hasVscnt())
      ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_ACCESS, Inst);
    else if ((Inst.mayLoad() && !SIInstrInfo::isAtomicNoRet(Inst)) ||
             /* IMAGE_GET_RESINFO / IMAGE_GET_LOD */
             (TII->isMIMG(Inst) && !Inst.mayLoad() && !Inst.mayStore()))
      ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_READ_ACCESS, Inst);
    else if (Inst.mayStore())
      ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_WRITE_ACCESS, Inst);

    if (ST->vmemWriteNeedsExpWaitcnt() &&
        (Inst.mayStore() || SIInstrInfo::isAtomicRet(Inst))) {
      ScoreBrackets->updateByEvent(TII, TRI, MRI, VMW_GPR_LOCK, Inst);
    }
  } else if (TII->isSMRD(Inst)) {
    ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst);
  } else if (Inst.isCall()) {
    if (callWaitsOnFunctionReturn(Inst)) {
      // Act as a wait on everything
      ScoreBrackets->applyWaitcnt(AMDGPU::Waitcnt::allZero(ST->hasVscnt()));
    } else {
      // May need to wait for anything.
      ScoreBrackets->applyWaitcnt(AMDGPU::Waitcnt());
    }
  } else if (SIInstrInfo::isLDSDIR(Inst)) {
    ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_LDS_ACCESS, Inst);
  } else if (TII->isVINTERP(Inst)) {
    int64_t Imm = TII->getNamedOperand(Inst, AMDGPU::OpName::waitexp)->getImm();
    ScoreBrackets->applyWaitcnt(EXP_CNT, Imm);
  } else if (SIInstrInfo::isEXP(Inst)) {
    unsigned Imm = TII->getNamedOperand(Inst, AMDGPU::OpName::tgt)->getImm();
    if (Imm >= AMDGPU::Exp::ET_PARAM0 && Imm <= AMDGPU::Exp::ET_PARAM31)
      ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_PARAM_ACCESS, Inst);
    else if (Imm >= AMDGPU::Exp::ET_POS0 && Imm <= AMDGPU::Exp::ET_POS_LAST)
      ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_POS_ACCESS, Inst);
    else
      ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_GPR_LOCK, Inst);
  } else {
    switch (Inst.getOpcode()) {
    case AMDGPU::S_SENDMSG:
    case AMDGPU::S_SENDMSG_RTN_B32:
    case AMDGPU::S_SENDMSG_RTN_B64:
    case AMDGPU::S_SENDMSGHALT:
      ScoreBrackets->updateByEvent(TII, TRI, MRI, SQ_MESSAGE, Inst);
      break;
    case AMDGPU::S_MEMTIME:
    case AMDGPU::S_MEMREALTIME:
      ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst);
      break;
    }
  }
}

bool WaitcntBrackets::mergeScore(const MergeInfo &M, unsigned &Score,
                                 unsigned OtherScore) {
  unsigned MyShifted = Score <= M.OldLB ? 0 : Score + M.MyShift;
  unsigned OtherShifted =
      OtherScore <= M.OtherLB ? 0 : OtherScore + M.OtherShift;
  Score = std::max(MyShifted, OtherShifted);
  return OtherShifted > MyShifted;
}

/// Merge the pending events and associated score brackets of \p Other into
/// this object's bracket status.
///
/// Returns whether the merge resulted in a change that requires tighter waits
/// (i.e. the merged brackets strictly dominate the original brackets).
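/// Worked example of the rebasing done in mergeScore(): if this block has
/// [LB=2, UB=5] pending and \p Other has [LB=0, UB=4], the merged upper bound
/// is 2 + max(5-2, 4-0) = 6; scores above each side's old lower bound are
/// shifted by (NewUB - old UB) and the per-register maximum is kept.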
1415 bool WaitcntBrackets::merge(const WaitcntBrackets &Other) { 1416 bool StrictDom = false; 1417 1418 VgprUB = std::max(VgprUB, Other.VgprUB); 1419 SgprUB = std::max(SgprUB, Other.SgprUB); 1420 1421 for (auto T : inst_counter_types()) { 1422 // Merge event flags for this counter 1423 const unsigned OldEvents = PendingEvents & WaitEventMaskForInst[T]; 1424 const unsigned OtherEvents = Other.PendingEvents & WaitEventMaskForInst[T]; 1425 if (OtherEvents & ~OldEvents) 1426 StrictDom = true; 1427 PendingEvents |= OtherEvents; 1428 1429 // Merge scores for this counter 1430 const unsigned MyPending = ScoreUBs[T] - ScoreLBs[T]; 1431 const unsigned OtherPending = Other.ScoreUBs[T] - Other.ScoreLBs[T]; 1432 const unsigned NewUB = ScoreLBs[T] + std::max(MyPending, OtherPending); 1433 if (NewUB < ScoreLBs[T]) 1434 report_fatal_error("waitcnt score overflow"); 1435 1436 MergeInfo M; 1437 M.OldLB = ScoreLBs[T]; 1438 M.OtherLB = Other.ScoreLBs[T]; 1439 M.MyShift = NewUB - ScoreUBs[T]; 1440 M.OtherShift = NewUB - Other.ScoreUBs[T]; 1441 1442 ScoreUBs[T] = NewUB; 1443 1444 StrictDom |= mergeScore(M, LastFlat[T], Other.LastFlat[T]); 1445 1446 bool RegStrictDom = false; 1447 for (int J = 0; J <= VgprUB; J++) { 1448 RegStrictDom |= mergeScore(M, VgprScores[T][J], Other.VgprScores[T][J]); 1449 } 1450 1451 if (T == VM_CNT) { 1452 for (int J = 0; J <= VgprUB; J++) { 1453 unsigned char NewVmemTypes = VgprVmemTypes[J] | Other.VgprVmemTypes[J]; 1454 RegStrictDom |= NewVmemTypes != VgprVmemTypes[J]; 1455 VgprVmemTypes[J] = NewVmemTypes; 1456 } 1457 } 1458 1459 if (T == LGKM_CNT) { 1460 for (int J = 0; J <= SgprUB; J++) { 1461 RegStrictDom |= mergeScore(M, SgprScores[J], Other.SgprScores[J]); 1462 } 1463 } 1464 1465 if (RegStrictDom) 1466 StrictDom = true; 1467 } 1468 1469 return StrictDom; 1470 } 1471 1472 // Generate s_waitcnt instructions where needed. 1473 bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF, 1474 MachineBasicBlock &Block, 1475 WaitcntBrackets &ScoreBrackets) { 1476 bool Modified = false; 1477 1478 LLVM_DEBUG({ 1479 dbgs() << "*** Block" << Block.getNumber() << " ***"; 1480 ScoreBrackets.dump(); 1481 }); 1482 1483 // Track the correctness of vccz through this basic block. There are two 1484 // reasons why it might be incorrect; see ST->hasReadVCCZBug() and 1485 // ST->partialVCCWritesUpdateVCCZ(). 1486 bool VCCZCorrect = true; 1487 if (ST->hasReadVCCZBug()) { 1488 // vccz could be incorrect at a basic block boundary if a predecessor wrote 1489 // to vcc and then issued an smem load. 1490 VCCZCorrect = false; 1491 } else if (!ST->partialVCCWritesUpdateVCCZ()) { 1492 // vccz could be incorrect at a basic block boundary if a predecessor wrote 1493 // to vcc_lo or vcc_hi. 1494 VCCZCorrect = false; 1495 } 1496 1497 // Walk over the instructions. 1498 MachineInstr *OldWaitcntInstr = nullptr; 1499 1500 for (MachineBasicBlock::instr_iterator Iter = Block.instr_begin(), 1501 E = Block.instr_end(); 1502 Iter != E;) { 1503 MachineInstr &Inst = *Iter; 1504 1505 // Track pre-existing waitcnts that were added in earlier iterations or by 1506 // the memory legalizer. 1507 if (Inst.getOpcode() == AMDGPU::S_WAITCNT || 1508 (Inst.getOpcode() == AMDGPU::S_WAITCNT_VSCNT && 1509 Inst.getOperand(0).isReg() && 1510 Inst.getOperand(0).getReg() == AMDGPU::SGPR_NULL)) { 1511 if (!OldWaitcntInstr) 1512 OldWaitcntInstr = &Inst; 1513 ++Iter; 1514 continue; 1515 } 1516 1517 // Generate an s_waitcnt instruction to be placed before Inst, if needed. 
1518 Modified |= generateWaitcntInstBefore(Inst, ScoreBrackets, OldWaitcntInstr); 1519 OldWaitcntInstr = nullptr; 1520 1521 // Restore vccz if it's not known to be correct already. 1522 bool RestoreVCCZ = !VCCZCorrect && readsVCCZ(Inst); 1523 1524 // Don't examine operands unless we need to track vccz correctness. 1525 if (ST->hasReadVCCZBug() || !ST->partialVCCWritesUpdateVCCZ()) { 1526 if (Inst.definesRegister(AMDGPU::VCC_LO) || 1527 Inst.definesRegister(AMDGPU::VCC_HI)) { 1528 // Up to gfx9, writes to vcc_lo and vcc_hi don't update vccz. 1529 if (!ST->partialVCCWritesUpdateVCCZ()) 1530 VCCZCorrect = false; 1531 } else if (Inst.definesRegister(AMDGPU::VCC)) { 1532 // There is a hardware bug on CI/SI where SMRD instruction may corrupt 1533 // vccz bit, so when we detect that an instruction may read from a 1534 // corrupt vccz bit, we need to: 1535 // 1. Insert s_waitcnt lgkm(0) to wait for all outstanding SMRD 1536 // operations to complete. 1537 // 2. Restore the correct value of vccz by writing the current value 1538 // of vcc back to vcc. 1539 if (ST->hasReadVCCZBug() && 1540 ScoreBrackets.getScoreLB(LGKM_CNT) < 1541 ScoreBrackets.getScoreUB(LGKM_CNT) && 1542 ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) { 1543 // Writes to vcc while there's an outstanding smem read may get 1544 // clobbered as soon as any read completes. 1545 VCCZCorrect = false; 1546 } else { 1547 // Writes to vcc will fix any incorrect value in vccz. 1548 VCCZCorrect = true; 1549 } 1550 } 1551 } 1552 1553 if (TII->isSMRD(Inst)) { 1554 for (const MachineMemOperand *Memop : Inst.memoperands()) { 1555 // No need to handle invariant loads when avoiding WAR conflicts, as 1556 // there cannot be a vector store to the same memory location. 1557 if (!Memop->isInvariant()) { 1558 const Value *Ptr = Memop->getValue(); 1559 SLoadAddresses.insert(std::make_pair(Ptr, Inst.getParent())); 1560 } 1561 } 1562 if (ST->hasReadVCCZBug()) { 1563 // This smem read could complete and clobber vccz at any time. 1564 VCCZCorrect = false; 1565 } 1566 } 1567 1568 updateEventWaitcntAfter(Inst, &ScoreBrackets); 1569 1570 #if 0 // TODO: implement resource type check controlled by options with ub = LB. 1571 // If this instruction generates a S_SETVSKIP because it is an 1572 // indexed resource, and we are on Tahiti, then it will also force 1573 // an S_WAITCNT vmcnt(0) 1574 if (RequireCheckResourceType(Inst, context)) { 1575 // Force the score to as if an S_WAITCNT vmcnt(0) is emitted. 1576 ScoreBrackets->setScoreLB(VM_CNT, 1577 ScoreBrackets->getScoreUB(VM_CNT)); 1578 } 1579 #endif 1580 1581 LLVM_DEBUG({ 1582 Inst.print(dbgs()); 1583 ScoreBrackets.dump(); 1584 }); 1585 1586 // TODO: Remove this work-around after fixing the scheduler and enable the 1587 // assert above. 1588 if (RestoreVCCZ) { 1589 // Restore the vccz bit. Any time a value is written to vcc, the vcc 1590 // bit is updated, so we can restore the bit by reading the value of 1591 // vcc and then writing it back to the register. 1592 BuildMI(Block, Inst, Inst.getDebugLoc(), 1593 TII->get(ST->isWave32() ? 
AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64), 1594 TRI->getVCC()) 1595 .addReg(TRI->getVCC()); 1596 VCCZCorrect = true; 1597 Modified = true; 1598 } 1599 1600 ++Iter; 1601 } 1602 1603 return Modified; 1604 } 1605 1606 bool SIInsertWaitcnts::runOnMachineFunction(MachineFunction &MF) { 1607 ST = &MF.getSubtarget<GCNSubtarget>(); 1608 TII = ST->getInstrInfo(); 1609 TRI = &TII->getRegisterInfo(); 1610 MRI = &MF.getRegInfo(); 1611 IV = AMDGPU::getIsaVersion(ST->getCPU()); 1612 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 1613 PDT = &getAnalysis<MachinePostDominatorTree>(); 1614 1615 ForceEmitZeroWaitcnts = ForceEmitZeroFlag; 1616 for (auto T : inst_counter_types()) 1617 ForceEmitWaitcnt[T] = false; 1618 1619 HardwareLimits Limits = {}; 1620 Limits.VmcntMax = AMDGPU::getVmcntBitMask(IV); 1621 Limits.ExpcntMax = AMDGPU::getExpcntBitMask(IV); 1622 Limits.LgkmcntMax = AMDGPU::getLgkmcntBitMask(IV); 1623 Limits.VscntMax = ST->hasVscnt() ? 63 : 0; 1624 1625 unsigned NumVGPRsMax = ST->getAddressableNumVGPRs(); 1626 unsigned NumSGPRsMax = ST->getAddressableNumSGPRs(); 1627 assert(NumVGPRsMax <= SQ_MAX_PGM_VGPRS); 1628 assert(NumSGPRsMax <= SQ_MAX_PGM_SGPRS); 1629 1630 RegisterEncoding Encoding = {}; 1631 Encoding.VGPR0 = TRI->getEncodingValue(AMDGPU::VGPR0); 1632 Encoding.VGPRL = Encoding.VGPR0 + NumVGPRsMax - 1; 1633 Encoding.SGPR0 = TRI->getEncodingValue(AMDGPU::SGPR0); 1634 Encoding.SGPRL = Encoding.SGPR0 + NumSGPRsMax - 1; 1635 1636 TrackedWaitcntSet.clear(); 1637 BlockInfos.clear(); 1638 bool Modified = false; 1639 1640 if (!MFI->isEntryFunction()) { 1641 // Wait for any outstanding memory operations that the input registers may 1642 // depend on. We can't track them and it's better to do the wait after the 1643 // costly call sequence. 1644 1645 // TODO: Could insert earlier and schedule more liberally with operations 1646 // that only use caller preserved registers. 1647 MachineBasicBlock &EntryBB = MF.front(); 1648 MachineBasicBlock::iterator I = EntryBB.begin(); 1649 for (MachineBasicBlock::iterator E = EntryBB.end(); 1650 I != E && (I->isPHI() || I->isMetaInstruction()); ++I) 1651 ; 1652 BuildMI(EntryBB, I, DebugLoc(), TII->get(AMDGPU::S_WAITCNT)).addImm(0); 1653 if (ST->hasVscnt()) 1654 BuildMI(EntryBB, I, DebugLoc(), TII->get(AMDGPU::S_WAITCNT_VSCNT)) 1655 .addReg(AMDGPU::SGPR_NULL, RegState::Undef) 1656 .addImm(0); 1657 1658 Modified = true; 1659 } 1660 1661 // Keep iterating over the blocks in reverse post order, inserting and 1662 // updating s_waitcnt where needed, until a fix point is reached. 
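  // For example, a loop backedge can change the brackets that reach the loop
  // header after the header has already been visited; the header is then
  // marked Dirty and, since it precedes the current block in BlockInfos,
  // Repeat forces another sweep.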
1663 for (auto *MBB : ReversePostOrderTraversal<MachineFunction *>(&MF)) 1664 BlockInfos.insert({MBB, BlockInfo(MBB)}); 1665 1666 std::unique_ptr<WaitcntBrackets> Brackets; 1667 bool Repeat; 1668 do { 1669 Repeat = false; 1670 1671 for (auto BII = BlockInfos.begin(), BIE = BlockInfos.end(); BII != BIE; 1672 ++BII) { 1673 BlockInfo &BI = BII->second; 1674 if (!BI.Dirty) 1675 continue; 1676 1677 if (BI.Incoming) { 1678 if (!Brackets) 1679 Brackets = std::make_unique<WaitcntBrackets>(*BI.Incoming); 1680 else 1681 *Brackets = *BI.Incoming; 1682 } else { 1683 if (!Brackets) 1684 Brackets = std::make_unique<WaitcntBrackets>(ST, Limits, Encoding); 1685 else 1686 *Brackets = WaitcntBrackets(ST, Limits, Encoding); 1687 } 1688 1689 Modified |= insertWaitcntInBlock(MF, *BI.MBB, *Brackets); 1690 BI.Dirty = false; 1691 1692 if (Brackets->hasPending()) { 1693 BlockInfo *MoveBracketsToSucc = nullptr; 1694 for (MachineBasicBlock *Succ : BI.MBB->successors()) { 1695 auto SuccBII = BlockInfos.find(Succ); 1696 BlockInfo &SuccBI = SuccBII->second; 1697 if (!SuccBI.Incoming) { 1698 SuccBI.Dirty = true; 1699 if (SuccBII <= BII) 1700 Repeat = true; 1701 if (!MoveBracketsToSucc) { 1702 MoveBracketsToSucc = &SuccBI; 1703 } else { 1704 SuccBI.Incoming = std::make_unique<WaitcntBrackets>(*Brackets); 1705 } 1706 } else if (SuccBI.Incoming->merge(*Brackets)) { 1707 SuccBI.Dirty = true; 1708 if (SuccBII <= BII) 1709 Repeat = true; 1710 } 1711 } 1712 if (MoveBracketsToSucc) 1713 MoveBracketsToSucc->Incoming = std::move(Brackets); 1714 } 1715 } 1716 } while (Repeat); 1717 1718 if (ST->hasScalarStores()) { 1719 SmallVector<MachineBasicBlock *, 4> EndPgmBlocks; 1720 bool HaveScalarStores = false; 1721 1722 for (MachineBasicBlock &MBB : MF) { 1723 for (MachineInstr &MI : MBB) { 1724 if (!HaveScalarStores && TII->isScalarStore(MI)) 1725 HaveScalarStores = true; 1726 1727 if (MI.getOpcode() == AMDGPU::S_ENDPGM || 1728 MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG) 1729 EndPgmBlocks.push_back(&MBB); 1730 } 1731 } 1732 1733 if (HaveScalarStores) { 1734 // If scalar writes are used, the cache must be flushed or else the next 1735 // wave to reuse the same scratch memory can be clobbered. 1736 // 1737 // Insert s_dcache_wb at wave termination points if there were any scalar 1738 // stores, and only if the cache hasn't already been flushed. This could 1739 // be improved by looking across blocks for flushes in postdominating 1740 // blocks from the stores but an explicitly requested flush is probably 1741 // very rare. 1742 for (MachineBasicBlock *MBB : EndPgmBlocks) { 1743 bool SeenDCacheWB = false; 1744 1745 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); 1746 I != E; ++I) { 1747 if (I->getOpcode() == AMDGPU::S_DCACHE_WB) 1748 SeenDCacheWB = true; 1749 else if (TII->isScalarStore(*I)) 1750 SeenDCacheWB = false; 1751 1752 // FIXME: It would be better to insert this before a waitcnt if any. 1753 if ((I->getOpcode() == AMDGPU::S_ENDPGM || 1754 I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG) && 1755 !SeenDCacheWB) { 1756 Modified = true; 1757 BuildMI(*MBB, I, I->getDebugLoc(), TII->get(AMDGPU::S_DCACHE_WB)); 1758 } 1759 } 1760 } 1761 } 1762 } 1763 1764 return Modified; 1765 } 1766