//===- SIInsertWaitcnts.cpp - Insert Wait Instructions --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Insert wait instructions for memory reads and writes.
///
/// Memory reads and writes are issued asynchronously, so we need to insert
/// S_WAITCNT instructions when we want to access any of their results or
/// overwrite any register that's used asynchronously.
///
/// TODO: This pass currently keeps one timeline per hardware counter. A more
/// finely-grained approach that keeps one timeline per event type could
/// sometimes get away with generating weaker s_waitcnt instructions. For
/// example, when both SMEM and LDS are in flight and we need to wait for
/// the i-th-last LDS instruction, then an lgkmcnt(i) is actually sufficient,
/// but the pass will currently generate a conservative lgkmcnt(0) because
/// multiple event types are in flight.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIMachineFunctionInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/CodeGen/MachinePostDominators.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/TargetParser.h"

using namespace llvm;

#define DEBUG_TYPE "si-insert-waitcnts"

DEBUG_COUNTER(ForceExpCounter, DEBUG_TYPE "-forceexp",
              "Force emit s_waitcnt expcnt(0) instrs");
DEBUG_COUNTER(ForceLgkmCounter, DEBUG_TYPE "-forcelgkm",
              "Force emit s_waitcnt lgkmcnt(0) instrs");
DEBUG_COUNTER(ForceVMCounter, DEBUG_TYPE "-forcevm",
              "Force emit s_waitcnt vmcnt(0) instrs");

static cl::opt<bool> ForceEmitZeroFlag(
    "amdgpu-waitcnt-forcezero",
    cl::desc("Force all waitcnt instrs to be emitted as "
             "s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)"),
    cl::init(false), cl::Hidden);

namespace {

template <typename EnumT>
class enum_iterator
    : public iterator_facade_base<enum_iterator<EnumT>,
                                  std::forward_iterator_tag, const EnumT> {
  EnumT Value;

public:
  enum_iterator() = default;
  enum_iterator(EnumT Value) : Value(Value) {}

  enum_iterator &operator++() {
    Value = static_cast<EnumT>(Value + 1);
    return *this;
  }

  bool operator==(const enum_iterator &RHS) const { return Value == RHS.Value; }

  EnumT operator*() const { return Value; }
};

// Class of object that encapsulates the latest instruction counter score
// associated with the operand. Used for determining whether an s_waitcnt
// instruction needs to be emitted.
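
// The hardware maintains four independent wait counters (see
// WaitEventMaskForInst below for the exact event-to-counter mapping):
//  * VM_CNT   - vector memory reads (and writes on targets without a
//               separate VS_CNT)
//  * LGKM_CNT - LDS, GDS, scalar (constant) memory and message operations
//  * EXP_CNT  - exports, and GDS/VMEM writes while they still hold their
//               source GPRs
//  * VS_CNT   - vector memory writes (gfx10+ only)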

#define CNT_MASK(t) (1u << (t))

enum InstCounterType { VM_CNT = 0, LGKM_CNT, EXP_CNT, VS_CNT, NUM_INST_CNTS };

iterator_range<enum_iterator<InstCounterType>> inst_counter_types() {
  return make_range(enum_iterator<InstCounterType>(VM_CNT),
                    enum_iterator<InstCounterType>(NUM_INST_CNTS));
}

using RegInterval = std::pair<int, int>;

struct {
  unsigned VmcntMax;
  unsigned ExpcntMax;
  unsigned LgkmcntMax;
  unsigned VscntMax;
} HardwareLimits;

struct {
  unsigned VGPR0;
  unsigned VGPRL;
  unsigned SGPR0;
  unsigned SGPRL;
} RegisterEncoding;

enum WaitEventType {
  VMEM_ACCESS,       // vector-memory read & write
  VMEM_READ_ACCESS,  // vector-memory read
  VMEM_WRITE_ACCESS, // vector-memory write
  LDS_ACCESS,        // lds read & write
  GDS_ACCESS,        // gds read & write
  SQ_MESSAGE,        // send message
  SMEM_ACCESS,       // scalar-memory read & write
  EXP_GPR_LOCK,      // export holding on its data src
  GDS_GPR_LOCK,      // GDS holding on its data and addr src
  EXP_POS_ACCESS,    // write to export position
  EXP_PARAM_ACCESS,  // write to export parameter
  VMW_GPR_LOCK,      // vector-memory write holding on its data src
  NUM_WAIT_EVENTS,
};

static const unsigned WaitEventMaskForInst[NUM_INST_CNTS] = {
    (1 << VMEM_ACCESS) | (1 << VMEM_READ_ACCESS),
    (1 << SMEM_ACCESS) | (1 << LDS_ACCESS) | (1 << GDS_ACCESS) |
        (1 << SQ_MESSAGE),
    (1 << EXP_GPR_LOCK) | (1 << GDS_GPR_LOCK) | (1 << VMW_GPR_LOCK) |
        (1 << EXP_PARAM_ACCESS) | (1 << EXP_POS_ACCESS),
    (1 << VMEM_WRITE_ACCESS)
};

// The mapping is:
//  0                .. SQ_MAX_PGM_VGPRS-1               real VGPRs
//  SQ_MAX_PGM_VGPRS .. NUM_ALL_VGPRS-1                  extra VGPR-like slots
//  NUM_ALL_VGPRS    .. NUM_ALL_VGPRS+SQ_MAX_PGM_SGPRS-1 real SGPRs
// We reserve a fixed number of VGPR slots in the scoring tables for
// special tokens like SCMEM_LDS (needed for buffer load to LDS).
enum RegisterMapping {
  SQ_MAX_PGM_VGPRS = 512, // Maximum programmable VGPRs across all targets.
  AGPR_OFFSET = 226,      // Maximum programmable ArchVGPRs across all targets.
  SQ_MAX_PGM_SGPRS = 256, // Maximum programmable SGPRs across all targets.
  NUM_EXTRA_VGPRS = 1,    // A reserved slot for DS.
  EXTRA_VGPR_LDS = 0,     // This is a placeholder the Shader algorithm uses.
  NUM_ALL_VGPRS = SQ_MAX_PGM_VGPRS + NUM_EXTRA_VGPRS, // Where SGPR starts.
};

// Enumerate different types of result-returning VMEM operations. Although
// s_waitcnt orders them all with a single vmcnt counter, in the absence of
// s_waitcnt only instructions of the same VmemType are guaranteed to write
// their results in order -- so there is no need to insert an s_waitcnt between
// two instructions of the same type that write the same vgpr.
enum VmemType {
  // BUF instructions and MIMG instructions without a sampler.
  VMEM_NOSAMPLER,
  // MIMG instructions with a sampler.
  VMEM_SAMPLER,
  // BVH instructions
  VMEM_BVH
};

VmemType getVmemType(const MachineInstr &Inst) {
  assert(SIInstrInfo::isVMEM(Inst));
  if (!SIInstrInfo::isMIMG(Inst))
    return VMEM_NOSAMPLER;
  const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Inst.getOpcode());
  const AMDGPU::MIMGBaseOpcodeInfo *BaseInfo =
      AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
  return BaseInfo->BVH ? VMEM_BVH
         : BaseInfo->Sampler ? VMEM_SAMPLER
                              : VMEM_NOSAMPLER;
}

void addWait(AMDGPU::Waitcnt &Wait, InstCounterType T, unsigned Count) {
  switch (T) {
  case VM_CNT:
    Wait.VmCnt = std::min(Wait.VmCnt, Count);
    break;
  case EXP_CNT:
    Wait.ExpCnt = std::min(Wait.ExpCnt, Count);
    break;
  case LGKM_CNT:
    Wait.LgkmCnt = std::min(Wait.LgkmCnt, Count);
    break;
  case VS_CNT:
    Wait.VsCnt = std::min(Wait.VsCnt, Count);
    break;
  default:
    llvm_unreachable("bad InstCounterType");
  }
}

// This object maintains the current score brackets of each wait counter, and
// a per-register scoreboard for each wait counter.
//
// We also maintain the latest score for every event type that can change the
// waitcnt in order to know if there are multiple types of events within
// the brackets. When multiple event types are pending within a bracket, the
// wait count may get decremented out of order, therefore we need to put in
// "s_waitcnt 0" before use.
class WaitcntBrackets {
public:
  WaitcntBrackets(const GCNSubtarget *SubTarget) : ST(SubTarget) {}

  static unsigned getWaitCountMax(InstCounterType T) {
    switch (T) {
    case VM_CNT:
      return HardwareLimits.VmcntMax;
    case LGKM_CNT:
      return HardwareLimits.LgkmcntMax;
    case EXP_CNT:
      return HardwareLimits.ExpcntMax;
    case VS_CNT:
      return HardwareLimits.VscntMax;
    default:
      break;
    }
    return 0;
  }

  unsigned getScoreLB(InstCounterType T) const {
    assert(T < NUM_INST_CNTS);
    return ScoreLBs[T];
  }

  unsigned getScoreUB(InstCounterType T) const {
    assert(T < NUM_INST_CNTS);
    return ScoreUBs[T];
  }

  // Mapping from event to counter.
  InstCounterType eventCounter(WaitEventType E) {
    if (WaitEventMaskForInst[VM_CNT] & (1 << E))
      return VM_CNT;
    if (WaitEventMaskForInst[LGKM_CNT] & (1 << E))
      return LGKM_CNT;
    if (WaitEventMaskForInst[VS_CNT] & (1 << E))
      return VS_CNT;
    assert(WaitEventMaskForInst[EXP_CNT] & (1 << E));
    return EXP_CNT;
  }

  unsigned getRegScore(int GprNo, InstCounterType T) {
    if (GprNo < NUM_ALL_VGPRS) {
      return VgprScores[T][GprNo];
    }
    assert(T == LGKM_CNT);
    return SgprScores[GprNo - NUM_ALL_VGPRS];
  }

  bool merge(const WaitcntBrackets &Other);

  RegInterval getRegInterval(const MachineInstr *MI, const SIInstrInfo *TII,
                             const MachineRegisterInfo *MRI,
                             const SIRegisterInfo *TRI, unsigned OpNo) const;

  bool counterOutOfOrder(InstCounterType T) const;
  void simplifyWaitcnt(AMDGPU::Waitcnt &Wait) const;
  void simplifyWaitcnt(InstCounterType T, unsigned &Count) const;
  void determineWait(InstCounterType T, unsigned ScoreToWait,
                     AMDGPU::Waitcnt &Wait) const;
  void applyWaitcnt(const AMDGPU::Waitcnt &Wait);
  void applyWaitcnt(InstCounterType T, unsigned Count);
  void updateByEvent(const SIInstrInfo *TII, const SIRegisterInfo *TRI,
                     const MachineRegisterInfo *MRI, WaitEventType E,
                     MachineInstr &MI);

  bool hasPending() const { return PendingEvents != 0; }
  bool hasPendingEvent(WaitEventType E) const {
    return PendingEvents & (1 << E);
  }

  bool hasMixedPendingEvents(InstCounterType T) const {
    unsigned Events = PendingEvents & WaitEventMaskForInst[T];
    // Return true if more than one bit is set in Events.
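    // (Events & (Events - 1)) clears the lowest set bit, so the expression
    // below is nonzero exactly when at least two event types are pending.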
    return Events & (Events - 1);
  }

  bool hasPendingFlat() const {
    return ((LastFlat[LGKM_CNT] > ScoreLBs[LGKM_CNT] &&
             LastFlat[LGKM_CNT] <= ScoreUBs[LGKM_CNT]) ||
            (LastFlat[VM_CNT] > ScoreLBs[VM_CNT] &&
             LastFlat[VM_CNT] <= ScoreUBs[VM_CNT]));
  }

  void setPendingFlat() {
    LastFlat[VM_CNT] = ScoreUBs[VM_CNT];
    LastFlat[LGKM_CNT] = ScoreUBs[LGKM_CNT];
  }

  // Return true if there might be pending writes to the specified vgpr by VMEM
  // instructions with types different from V.
  bool hasOtherPendingVmemTypes(int GprNo, VmemType V) const {
    assert(GprNo < NUM_ALL_VGPRS);
    return VgprVmemTypes[GprNo] & ~(1 << V);
  }

  void clearVgprVmemTypes(int GprNo) {
    assert(GprNo < NUM_ALL_VGPRS);
    VgprVmemTypes[GprNo] = 0;
  }

  void print(raw_ostream &);
  void dump() { print(dbgs()); }

private:
  struct MergeInfo {
    unsigned OldLB;
    unsigned OtherLB;
    unsigned MyShift;
    unsigned OtherShift;
  };
  static bool mergeScore(const MergeInfo &M, unsigned &Score,
                         unsigned OtherScore);

  void setScoreLB(InstCounterType T, unsigned Val) {
    assert(T < NUM_INST_CNTS);
    ScoreLBs[T] = Val;
  }

  void setScoreUB(InstCounterType T, unsigned Val) {
    assert(T < NUM_INST_CNTS);
    ScoreUBs[T] = Val;
    if (T == EXP_CNT) {
      unsigned UB = ScoreUBs[T] - getWaitCountMax(EXP_CNT);
      if (ScoreLBs[T] < UB && UB < ScoreUBs[T])
        ScoreLBs[T] = UB;
    }
  }

  void setRegScore(int GprNo, InstCounterType T, unsigned Val) {
    if (GprNo < NUM_ALL_VGPRS) {
      VgprUB = std::max(VgprUB, GprNo);
      VgprScores[T][GprNo] = Val;
    } else {
      assert(T == LGKM_CNT);
      SgprUB = std::max(SgprUB, GprNo - NUM_ALL_VGPRS);
      SgprScores[GprNo - NUM_ALL_VGPRS] = Val;
    }
  }

  void setExpScore(const MachineInstr *MI, const SIInstrInfo *TII,
                   const SIRegisterInfo *TRI, const MachineRegisterInfo *MRI,
                   unsigned OpNo, unsigned Val);

  const GCNSubtarget *ST = nullptr;
  unsigned ScoreLBs[NUM_INST_CNTS] = {0};
  unsigned ScoreUBs[NUM_INST_CNTS] = {0};
  unsigned PendingEvents = 0;
  // Remember the last flat memory operation.
  unsigned LastFlat[NUM_INST_CNTS] = {0};
  // wait_cnt scores for every vgpr.
  // Keep track of the VgprUB and SgprUB to make merge at join efficient.
  int VgprUB = -1;
  int SgprUB = -1;
  unsigned VgprScores[NUM_INST_CNTS][NUM_ALL_VGPRS] = {{0}};
  // Wait cnt scores for every sgpr, only lgkmcnt is relevant.
  unsigned SgprScores[SQ_MAX_PGM_SGPRS] = {0};
  // Bitmask of the VmemTypes of VMEM instructions that might have a pending
  // write to each vgpr.
  unsigned char VgprVmemTypes[NUM_ALL_VGPRS] = {0};
};

class SIInsertWaitcnts : public MachineFunctionPass {
private:
  const GCNSubtarget *ST = nullptr;
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  const MachineRegisterInfo *MRI = nullptr;
  AMDGPU::IsaVersion IV;

  DenseSet<MachineInstr *> TrackedWaitcntSet;
  DenseMap<const Value *, MachineBasicBlock *> SLoadAddresses;
  MachinePostDominatorTree *PDT;

  struct BlockInfo {
    MachineBasicBlock *MBB;
    std::unique_ptr<WaitcntBrackets> Incoming;
    bool Dirty = true;

    explicit BlockInfo(MachineBasicBlock *MBB) : MBB(MBB) {}
  };

  MapVector<MachineBasicBlock *, BlockInfo> BlockInfos;

  // ForceEmitZeroWaitcnts: force all waitcnt instrs to be s_waitcnt 0
  // because of the amdgpu-waitcnt-forcezero flag.
  bool ForceEmitZeroWaitcnts;
  bool ForceEmitWaitcnt[NUM_INST_CNTS];

public:
  static char ID;

  SIInsertWaitcnts() : MachineFunctionPass(ID) {
    (void)ForceExpCounter;
    (void)ForceLgkmCounter;
    (void)ForceVMCounter;
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI insert wait instructions";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<MachinePostDominatorTree>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  bool isForceEmitWaitcnt() const {
    for (auto T : inst_counter_types())
      if (ForceEmitWaitcnt[T])
        return true;
    return false;
  }

  void setForceEmitWaitcnt() {
    // For non-debug builds, ForceEmitWaitcnt has been initialized to false;
    // for debug builds, get the debug counter info and adjust if need be.
#ifndef NDEBUG
    if (DebugCounter::isCounterSet(ForceExpCounter) &&
        DebugCounter::shouldExecute(ForceExpCounter)) {
      ForceEmitWaitcnt[EXP_CNT] = true;
    } else {
      ForceEmitWaitcnt[EXP_CNT] = false;
    }

    if (DebugCounter::isCounterSet(ForceLgkmCounter) &&
        DebugCounter::shouldExecute(ForceLgkmCounter)) {
      ForceEmitWaitcnt[LGKM_CNT] = true;
    } else {
      ForceEmitWaitcnt[LGKM_CNT] = false;
    }

    if (DebugCounter::isCounterSet(ForceVMCounter) &&
        DebugCounter::shouldExecute(ForceVMCounter)) {
      ForceEmitWaitcnt[VM_CNT] = true;
    } else {
      ForceEmitWaitcnt[VM_CNT] = false;
    }
#endif // NDEBUG
  }

  bool mayAccessVMEMThroughFlat(const MachineInstr &MI) const;
  bool mayAccessLDSThroughFlat(const MachineInstr &MI) const;
  bool generateWaitcntInstBefore(MachineInstr &MI,
                                 WaitcntBrackets &ScoreBrackets,
                                 MachineInstr *OldWaitcntInstr);
  void updateEventWaitcntAfter(MachineInstr &Inst,
                               WaitcntBrackets *ScoreBrackets);
  bool insertWaitcntInBlock(MachineFunction &MF, MachineBasicBlock &Block,
                            WaitcntBrackets &ScoreBrackets);
  bool applyPreexistingWaitcnt(WaitcntBrackets &ScoreBrackets,
                               MachineInstr &OldWaitcntInstr,
                               AMDGPU::Waitcnt &Wait, const MachineInstr *MI);
};

} // end anonymous namespace

RegInterval WaitcntBrackets::getRegInterval(const MachineInstr *MI,
                                            const SIInstrInfo *TII,
                                            const MachineRegisterInfo *MRI,
                                            const SIRegisterInfo *TRI,
                                            unsigned OpNo) const {
  const MachineOperand &Op = MI->getOperand(OpNo);
  if (!TRI->isInAllocatableClass(Op.getReg()))
    return {-1, -1};

  // A use via a PW operand does not need a waitcnt.
  // A partial write is not a WAW.
  assert(!Op.getSubReg() || !Op.isUndef());

  RegInterval Result;

  unsigned Reg = TRI->getEncodingValue(AMDGPU::getMCReg(Op.getReg(), *ST));

  if (TRI->isVectorRegister(*MRI, Op.getReg())) {
    assert(Reg >= RegisterEncoding.VGPR0 && Reg <= RegisterEncoding.VGPRL);
    Result.first = Reg - RegisterEncoding.VGPR0;
    if (TRI->isAGPR(*MRI, Op.getReg()))
      Result.first += AGPR_OFFSET;
    assert(Result.first >= 0 && Result.first < SQ_MAX_PGM_VGPRS);
  } else if (TRI->isSGPRReg(*MRI, Op.getReg())) {
    assert(Reg >= RegisterEncoding.SGPR0 && Reg < SQ_MAX_PGM_SGPRS);
    Result.first = Reg - RegisterEncoding.SGPR0 + NUM_ALL_VGPRS;
    assert(Result.first >= NUM_ALL_VGPRS &&
           Result.first < SQ_MAX_PGM_SGPRS + NUM_ALL_VGPRS);
  }
  // TODO: Handle TTMP
  // else if (TRI->isTTMP(*MRI, Reg.getReg())) ...
  else
    return {-1, -1};

  const TargetRegisterClass *RC = TII->getOpRegClass(*MI, OpNo);
  unsigned Size = TRI->getRegSizeInBits(*RC);
  Result.second = Result.first + ((Size + 16) / 32);

  return Result;
}

void WaitcntBrackets::setExpScore(const MachineInstr *MI,
                                  const SIInstrInfo *TII,
                                  const SIRegisterInfo *TRI,
                                  const MachineRegisterInfo *MRI, unsigned OpNo,
                                  unsigned Val) {
  RegInterval Interval = getRegInterval(MI, TII, MRI, TRI, OpNo);
  assert(TRI->isVectorRegister(*MRI, MI->getOperand(OpNo).getReg()));
  for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
    setRegScore(RegNo, EXP_CNT, Val);
  }
}

void WaitcntBrackets::updateByEvent(const SIInstrInfo *TII,
                                    const SIRegisterInfo *TRI,
                                    const MachineRegisterInfo *MRI,
                                    WaitEventType E, MachineInstr &Inst) {
  InstCounterType T = eventCounter(E);
  unsigned CurrScore = getScoreUB(T) + 1;
  if (CurrScore == 0)
    report_fatal_error("InsertWaitcnt score wraparound");
  // PendingEvents and ScoreUB need to be updated regardless of whether this
  // event changes the score of a register or not, for example vm_cnt for a
  // buffer-store or lgkm_cnt for a send-message.
  PendingEvents |= 1 << E;
  setScoreUB(T, CurrScore);

  if (T == EXP_CNT) {
    // Put score on the source vgprs. If this is a store, just use those
    // specific register(s).
    if (TII->isDS(Inst) && (Inst.mayStore() || Inst.mayLoad())) {
      int AddrOpIdx =
          AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::addr);
      // All GDS operations must protect their address register (same as
      // export.)
      if (AddrOpIdx != -1) {
        setExpScore(&Inst, TII, TRI, MRI, AddrOpIdx, CurrScore);
      }

      if (Inst.mayStore()) {
        if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
                                       AMDGPU::OpName::data0) != -1) {
          setExpScore(&Inst, TII, TRI, MRI,
                      AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
                                                 AMDGPU::OpName::data0),
                      CurrScore);
        }
        if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
                                       AMDGPU::OpName::data1) != -1) {
          setExpScore(&Inst, TII, TRI, MRI,
                      AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
                                                 AMDGPU::OpName::data1),
                      CurrScore);
        }
      } else if (SIInstrInfo::isAtomicRet(Inst) &&
                 Inst.getOpcode() != AMDGPU::DS_GWS_INIT &&
                 Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_V &&
                 Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_BR &&
                 Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_P &&
                 Inst.getOpcode() != AMDGPU::DS_GWS_BARRIER &&
                 Inst.getOpcode() != AMDGPU::DS_APPEND &&
                 Inst.getOpcode() != AMDGPU::DS_CONSUME &&
                 Inst.getOpcode() != AMDGPU::DS_ORDERED_COUNT) {
        for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
          const MachineOperand &Op = Inst.getOperand(I);
          if (Op.isReg() && !Op.isDef() &&
              TRI->isVectorRegister(*MRI, Op.getReg())) {
            setExpScore(&Inst, TII, TRI, MRI, I, CurrScore);
          }
        }
      }
    } else if (TII->isFLAT(Inst)) {
      if (Inst.mayStore()) {
        setExpScore(
            &Inst, TII, TRI, MRI,
            AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
            CurrScore);
      } else if (SIInstrInfo::isAtomicRet(Inst)) {
        setExpScore(
            &Inst, TII, TRI, MRI,
            AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
            CurrScore);
      }
    } else if (TII->isMIMG(Inst)) {
      if (Inst.mayStore()) {
        setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
      } else if (SIInstrInfo::isAtomicRet(Inst)) {
        setExpScore(
            &Inst, TII, TRI, MRI,
            AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
            CurrScore);
      }
    } else if (TII->isMTBUF(Inst)) {
      if (Inst.mayStore()) {
        setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
      }
    } else if (TII->isMUBUF(Inst)) {
      if (Inst.mayStore()) {
        setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
      } else if (SIInstrInfo::isAtomicRet(Inst)) {
        setExpScore(
            &Inst, TII, TRI, MRI,
            AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
            CurrScore);
      }
    } else {
      if (TII->isEXP(Inst)) {
        // For export the destination registers are really temps that
        // can be used as the actual source after export patching, so
        // we need to treat them like sources and set the EXP_CNT
        // score.
        for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
          MachineOperand &DefMO = Inst.getOperand(I);
          if (DefMO.isReg() && DefMO.isDef() &&
              TRI->isVGPR(*MRI, DefMO.getReg())) {
            setRegScore(
                TRI->getEncodingValue(AMDGPU::getMCReg(DefMO.getReg(), *ST)),
                EXP_CNT, CurrScore);
          }
        }
      }
      for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
        MachineOperand &MO = Inst.getOperand(I);
        if (MO.isReg() && !MO.isDef() &&
            TRI->isVectorRegister(*MRI, MO.getReg())) {
          setExpScore(&Inst, TII, TRI, MRI, I, CurrScore);
        }
      }
    }
#if 0 // TODO: check if this is handled by MUBUF code above.
  } else if (Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORD ||
             Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORDX2 ||
             Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORDX4) {
    MachineOperand *MO = TII->getNamedOperand(Inst, AMDGPU::OpName::data);
    unsigned OpNo; // TODO: find the OpNo for this operand;
    RegInterval Interval = getRegInterval(&Inst, TII, MRI, TRI, OpNo);
    for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
      setRegScore(RegNo + NUM_ALL_VGPRS, t, CurrScore);
    }
#endif
  } else {
    // Match the score to the destination registers.
    for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
      auto &Op = Inst.getOperand(I);
      if (!Op.isReg() || !Op.isDef())
        continue;
      RegInterval Interval = getRegInterval(&Inst, TII, MRI, TRI, I);
      if (T == VM_CNT) {
        if (Interval.first >= NUM_ALL_VGPRS)
          continue;
        if (SIInstrInfo::isVMEM(Inst)) {
          VmemType V = getVmemType(Inst);
          for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo)
            VgprVmemTypes[RegNo] |= 1 << V;
        }
      }
      for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
        setRegScore(RegNo, T, CurrScore);
      }
    }
    if (TII->isDS(Inst) && Inst.mayStore()) {
      setRegScore(SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS, T, CurrScore);
    }
  }
}

void WaitcntBrackets::print(raw_ostream &OS) {
  OS << '\n';
  for (auto T : inst_counter_types()) {
    unsigned LB = getScoreLB(T);
    unsigned UB = getScoreUB(T);

    switch (T) {
    case VM_CNT:
      OS << " VM_CNT(" << UB - LB << "): ";
      break;
    case LGKM_CNT:
      OS << " LGKM_CNT(" << UB - LB << "): ";
      break;
    case EXP_CNT:
      OS << " EXP_CNT(" << UB - LB << "): ";
      break;
    case VS_CNT:
      OS << " VS_CNT(" << UB - LB << "): ";
      break;
    default:
      OS << " UNKNOWN(" << UB - LB << "): ";
      break;
    }

    if (LB < UB) {
      // Print vgpr scores.
      for (int J = 0; J <= VgprUB; J++) {
        unsigned RegScore = getRegScore(J, T);
        if (RegScore <= LB)
          continue;
        unsigned RelScore = RegScore - LB - 1;
        if (J < SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS) {
          OS << RelScore << ":v" << J << " ";
        } else {
          OS << RelScore << ":ds ";
        }
      }
      // Also need to print sgpr scores for lgkm_cnt.
      if (T == LGKM_CNT) {
        for (int J = 0; J <= SgprUB; J++) {
          unsigned RegScore = getRegScore(J + NUM_ALL_VGPRS, LGKM_CNT);
          if (RegScore <= LB)
            continue;
          unsigned RelScore = RegScore - LB - 1;
          OS << RelScore << ":s" << J << " ";
        }
      }
    }
    OS << '\n';
  }
  OS << '\n';
}

/// Simplify the waitcnt, in the sense of removing redundant counts, and return
/// whether a waitcnt instruction is needed at all.
void WaitcntBrackets::simplifyWaitcnt(AMDGPU::Waitcnt &Wait) const {
  simplifyWaitcnt(VM_CNT, Wait.VmCnt);
  simplifyWaitcnt(EXP_CNT, Wait.ExpCnt);
  simplifyWaitcnt(LGKM_CNT, Wait.LgkmCnt);
  simplifyWaitcnt(VS_CNT, Wait.VsCnt);
}

void WaitcntBrackets::simplifyWaitcnt(InstCounterType T,
                                      unsigned &Count) const {
  const unsigned LB = getScoreLB(T);
  const unsigned UB = getScoreUB(T);

  // The number of outstanding events for this type, T, can be calculated
  // as (UB - LB). If the current Count is greater than or equal to the number
  // of outstanding events, then the wait for this counter is redundant.
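  // A count of ~0u in an AMDGPU::Waitcnt means "no wait required" for that
  // counter.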
  if (Count >= UB - LB)
    Count = ~0u;
}

void WaitcntBrackets::determineWait(InstCounterType T, unsigned ScoreToWait,
                                    AMDGPU::Waitcnt &Wait) const {
  // If the score of src_operand falls within the bracket, we need an
  // s_waitcnt instruction.
  const unsigned LB = getScoreLB(T);
  const unsigned UB = getScoreUB(T);
  if ((UB >= ScoreToWait) && (ScoreToWait > LB)) {
    if ((T == VM_CNT || T == LGKM_CNT) && hasPendingFlat() &&
        !ST->hasFlatLgkmVMemCountInOrder()) {
      // If there is a pending FLAT operation, and this is a VMem or LGKM
      // waitcnt and the target can report early completion, then we need
      // to force a waitcnt 0.
      addWait(Wait, T, 0);
    } else if (counterOutOfOrder(T)) {
      // The counter can get decremented out-of-order when there are multiple
      // event types in the bracket, so emit an s_waitcnt with a conservative
      // value of 0 for the counter.
      addWait(Wait, T, 0);
    } else {
      // If a counter has been maxed out avoid overflow by waiting for
      // MAX(CounterType) - 1 instead.
      unsigned NeededWait = std::min(UB - ScoreToWait, getWaitCountMax(T) - 1);
      addWait(Wait, T, NeededWait);
    }
  }
}

void WaitcntBrackets::applyWaitcnt(const AMDGPU::Waitcnt &Wait) {
  applyWaitcnt(VM_CNT, Wait.VmCnt);
  applyWaitcnt(EXP_CNT, Wait.ExpCnt);
  applyWaitcnt(LGKM_CNT, Wait.LgkmCnt);
  applyWaitcnt(VS_CNT, Wait.VsCnt);
}

void WaitcntBrackets::applyWaitcnt(InstCounterType T, unsigned Count) {
  const unsigned UB = getScoreUB(T);
  if (Count >= UB)
    return;
  if (Count != 0) {
    if (counterOutOfOrder(T))
      return;
    setScoreLB(T, std::max(getScoreLB(T), UB - Count));
  } else {
    setScoreLB(T, UB);
    PendingEvents &= ~WaitEventMaskForInst[T];
  }
}

// Where there are multiple types of event in the bracket of a counter,
// the decrement may go out of order.
bool WaitcntBrackets::counterOutOfOrder(InstCounterType T) const {
  // Scalar memory read always can go out of order.
  if (T == LGKM_CNT && hasPendingEvent(SMEM_ACCESS))
    return true;
  return hasMixedPendingEvents(T);
}

INITIALIZE_PASS_BEGIN(SIInsertWaitcnts, DEBUG_TYPE, "SI Insert Waitcnts", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTree)
INITIALIZE_PASS_END(SIInsertWaitcnts, DEBUG_TYPE, "SI Insert Waitcnts", false,
                    false)

char SIInsertWaitcnts::ID = 0;

char &llvm::SIInsertWaitcntsID = SIInsertWaitcnts::ID;

FunctionPass *llvm::createSIInsertWaitcntsPass() {
  return new SIInsertWaitcnts();
}

/// Combine consecutive waitcnt instructions that precede \p MI and follow
/// \p OldWaitcntInstr and apply any extra waits from waitcnts that were added
/// by previous passes. Currently this pass conservatively assumes that these
/// preexisting waitcnts are required for correctness.
bool SIInsertWaitcnts::applyPreexistingWaitcnt(WaitcntBrackets &ScoreBrackets,
                                               MachineInstr &OldWaitcntInstr,
                                               AMDGPU::Waitcnt &Wait,
                                               const MachineInstr *MI) {
  bool Modified = false;
  MachineInstr *WaitcntInstr = nullptr;
  MachineInstr *WaitcntVsCntInstr = nullptr;
  for (auto II = OldWaitcntInstr.getIterator(), NextI = std::next(II);
       &*II != MI; II = NextI, ++NextI) {
    if (II->isMetaInstruction())
      continue;

    if (II->getOpcode() == AMDGPU::S_WAITCNT) {
      // Conservatively update the required wait if this waitcnt was added in
      // an earlier pass.
      // In this case it will not exist in the tracked waitcnt set.
      if (!TrackedWaitcntSet.count(&*II)) {
        unsigned IEnc = II->getOperand(0).getImm();
        AMDGPU::Waitcnt OldWait = AMDGPU::decodeWaitcnt(IV, IEnc);
        Wait = Wait.combined(OldWait);
      }

      // Merge consecutive waitcnt of the same type by erasing multiples.
      if (!WaitcntInstr) {
        WaitcntInstr = &*II;
      } else {
        II->eraseFromParent();
        Modified = true;
      }

    } else {
      assert(II->getOpcode() == AMDGPU::S_WAITCNT_VSCNT);
      assert(II->getOperand(0).getReg() == AMDGPU::SGPR_NULL);
      if (!TrackedWaitcntSet.count(&*II)) {
        unsigned OldVSCnt =
            TII->getNamedOperand(*II, AMDGPU::OpName::simm16)->getImm();
        Wait.VsCnt = std::min(Wait.VsCnt, OldVSCnt);
      }

      if (!WaitcntVsCntInstr) {
        WaitcntVsCntInstr = &*II;
      } else {
        II->eraseFromParent();
        Modified = true;
      }
    }
  }

  // Update the encoding of the merged waitcnt with the required wait.
  if (WaitcntInstr) {
    if (Wait.hasWaitExceptVsCnt()) {
      unsigned NewEnc = AMDGPU::encodeWaitcnt(IV, Wait);
      unsigned OldEnc = WaitcntInstr->getOperand(0).getImm();
      if (OldEnc != NewEnc) {
        WaitcntInstr->getOperand(0).setImm(NewEnc);
        Modified = true;
      }
      ScoreBrackets.applyWaitcnt(Wait);
      Wait.VmCnt = ~0u;
      Wait.LgkmCnt = ~0u;
      Wait.ExpCnt = ~0u;

      LLVM_DEBUG(dbgs() << "generateWaitcntInstBefore\n"
                        << "Old Instr: " << MI << "New Instr: " << *WaitcntInstr
                        << '\n');
    } else {
      WaitcntInstr->eraseFromParent();
      Modified = true;
    }
  }

  if (WaitcntVsCntInstr) {
    if (Wait.hasWaitVsCnt()) {
      assert(ST->hasVscnt());
      unsigned OldVSCnt =
          TII->getNamedOperand(*WaitcntVsCntInstr, AMDGPU::OpName::simm16)
              ->getImm();
      if (Wait.VsCnt != OldVSCnt) {
        TII->getNamedOperand(*WaitcntVsCntInstr, AMDGPU::OpName::simm16)
            ->setImm(Wait.VsCnt);
        Modified = true;
      }
      ScoreBrackets.applyWaitcnt(Wait);
      Wait.VsCnt = ~0u;

      LLVM_DEBUG(dbgs() << "generateWaitcntInstBefore\n"
                        << "Old Instr: " << MI
                        << "New Instr: " << *WaitcntVsCntInstr << '\n');
    } else {
      WaitcntVsCntInstr->eraseFromParent();
      Modified = true;
    }
  }

  return Modified;
}

static bool readsVCCZ(const MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  return (Opc == AMDGPU::S_CBRANCH_VCCNZ || Opc == AMDGPU::S_CBRANCH_VCCZ) &&
         !MI.getOperand(1).isUndef();
}

/// \returns true if the callee inserts an s_waitcnt 0 on function entry.
static bool callWaitsOnFunctionEntry(const MachineInstr &MI) {
  // Currently all conventions wait, but this may not always be the case.
  //
  // TODO: If IPRA is enabled, and the callee is isSafeForNoCSROpt, it may make
  // sense to omit the wait and do it in the caller.
  return true;
}

/// \returns true if the callee is expected to wait for any outstanding waits
/// before returning.
static bool callWaitsOnFunctionReturn(const MachineInstr &MI) {
  return true;
}

/// Generate s_waitcnt instructions to be placed before \p MI.
/// Instructions of a given type are returned in order,
/// but instructions of different types can complete out of order.
/// We rely on this in-order completion
/// and simply assign a score to the memory access instructions.
/// We keep track of the active "score bracket" to determine
/// if an access of a memory read requires an s_waitcnt
/// and if so what the value of each counter is.
/// The "score bracket" is bound by the lower bound and upper bound
/// scores (*_score_LB and *_score_ub respectively).
bool SIInsertWaitcnts::generateWaitcntInstBefore(
    MachineInstr &MI, WaitcntBrackets &ScoreBrackets,
    MachineInstr *OldWaitcntInstr) {
  setForceEmitWaitcnt();

  if (MI.isMetaInstruction())
    return false;

  AMDGPU::Waitcnt Wait;
  bool Modified = false;

  // FIXME: This should have already been handled by the memory legalizer.
  // Removing this currently doesn't affect any lit tests, but we need to
  // verify that nothing was relying on this. The number of buffer invalidates
  // being handled here should not be expanded.
  if (MI.getOpcode() == AMDGPU::BUFFER_WBINVL1 ||
      MI.getOpcode() == AMDGPU::BUFFER_WBINVL1_SC ||
      MI.getOpcode() == AMDGPU::BUFFER_WBINVL1_VOL ||
      MI.getOpcode() == AMDGPU::BUFFER_GL0_INV ||
      MI.getOpcode() == AMDGPU::BUFFER_GL1_INV) {
    Wait.VmCnt = 0;
  }

  // All waits must be resolved at call return.
  // NOTE: this could be improved with knowledge of all call sites or
  // with knowledge of the called routines.
  if (MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG ||
      MI.getOpcode() == AMDGPU::S_SETPC_B64_return ||
      MI.getOpcode() == AMDGPU::S_SETPC_B64_return_gfx ||
      (MI.isReturn() && MI.isCall() && !callWaitsOnFunctionEntry(MI))) {
    Wait = Wait.combined(AMDGPU::Waitcnt::allZero(ST->hasVscnt()));
  }
  // Resolve vm waits before gs-done.
  else if ((MI.getOpcode() == AMDGPU::S_SENDMSG ||
            MI.getOpcode() == AMDGPU::S_SENDMSGHALT) &&
           ((MI.getOperand(0).getImm() & AMDGPU::SendMsg::ID_MASK_) ==
            AMDGPU::SendMsg::ID_GS_DONE)) {
    Wait.VmCnt = 0;
  }
#if 0 // TODO: the following blocks of logic when we have fence.
  else if (MI.getOpcode() == SC_FENCE) {
    const unsigned int group_size =
        context->shader_info->GetMaxThreadGroupSize();
    // group_size == 0 means thread group size is unknown at compile time
    const bool group_is_multi_wave =
        (group_size == 0 || group_size > target_info->GetWaveFrontSize());
    const bool fence_is_global = !((SCInstInternalMisc*)Inst)->IsGroupFence();

    for (unsigned int i = 0; i < Inst->NumSrcOperands(); i++) {
      SCRegType src_type = Inst->GetSrcType(i);
      switch (src_type) {
      case SCMEM_LDS:
        if (group_is_multi_wave ||
            context->OptFlagIsOn(OPT_R1100_LDSMEM_FENCE_CHICKEN_BIT)) {
          EmitWaitcnt |= ScoreBrackets->updateByWait(LGKM_CNT,
                             ScoreBrackets->getScoreUB(LGKM_CNT));
          // LDS may have to wait for VM_CNT after buffer load to LDS
          if (target_info->HasBufferLoadToLDS()) {
            EmitWaitcnt |= ScoreBrackets->updateByWait(VM_CNT,
                               ScoreBrackets->getScoreUB(VM_CNT));
          }
        }
        break;

      case SCMEM_GDS:
        if (group_is_multi_wave || fence_is_global) {
          EmitWaitcnt |= ScoreBrackets->updateByWait(EXP_CNT,
                             ScoreBrackets->getScoreUB(EXP_CNT));
          EmitWaitcnt |= ScoreBrackets->updateByWait(LGKM_CNT,
                             ScoreBrackets->getScoreUB(LGKM_CNT));
        }
        break;

      case SCMEM_UAV:
      case SCMEM_TFBUF:
      case SCMEM_RING:
      case SCMEM_SCATTER:
        if (group_is_multi_wave || fence_is_global) {
          EmitWaitcnt |= ScoreBrackets->updateByWait(EXP_CNT,
                             ScoreBrackets->getScoreUB(EXP_CNT));
          EmitWaitcnt |= ScoreBrackets->updateByWait(VM_CNT,
                             ScoreBrackets->getScoreUB(VM_CNT));
        }
        break;

      case SCMEM_SCRATCH:
      default:
        break;
      }
    }
  }
#endif

  // Export & GDS instructions do not read the EXEC mask until after the export
  // is granted (which can occur well after the instruction is issued).
  // The shader program must flush all EXP operations on the export-count
  // before overwriting the EXEC mask.
  else {
    if (MI.modifiesRegister(AMDGPU::EXEC, TRI)) {
      // Export and GDS are tracked individually, either may trigger a waitcnt
      // for EXEC.
      if (ScoreBrackets.hasPendingEvent(EXP_GPR_LOCK) ||
          ScoreBrackets.hasPendingEvent(EXP_PARAM_ACCESS) ||
          ScoreBrackets.hasPendingEvent(EXP_POS_ACCESS) ||
          ScoreBrackets.hasPendingEvent(GDS_GPR_LOCK)) {
        Wait.ExpCnt = 0;
      }
    }

    if (MI.isCall() && callWaitsOnFunctionEntry(MI)) {
      // The function is going to insert a wait on everything in its prolog.
      // This still needs to be careful if the call target is a load (e.g. a
      // GOT load). We also need to check the WAW dependency with the saved PC.
      Wait = AMDGPU::Waitcnt();

      int CallAddrOpIdx =
          AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);

      if (MI.getOperand(CallAddrOpIdx).isReg()) {
        RegInterval CallAddrOpInterval =
            ScoreBrackets.getRegInterval(&MI, TII, MRI, TRI, CallAddrOpIdx);

        for (int RegNo = CallAddrOpInterval.first;
             RegNo < CallAddrOpInterval.second; ++RegNo)
          ScoreBrackets.determineWait(
              LGKM_CNT, ScoreBrackets.getRegScore(RegNo, LGKM_CNT), Wait);

        int RtnAddrOpIdx =
            AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dst);
        if (RtnAddrOpIdx != -1) {
          RegInterval RtnAddrOpInterval =
              ScoreBrackets.getRegInterval(&MI, TII, MRI, TRI, RtnAddrOpIdx);

          for (int RegNo = RtnAddrOpInterval.first;
               RegNo < RtnAddrOpInterval.second; ++RegNo)
            ScoreBrackets.determineWait(
                LGKM_CNT, ScoreBrackets.getRegScore(RegNo, LGKM_CNT), Wait);
        }
      }
    } else {
      // FIXME: Should not be relying on memoperands.
      // Look at the source operands of every instruction to see if
      // any of them results from a previous memory operation that affects
      // its current usage. If so, an s_waitcnt instruction needs to be
      // emitted.
      // If the source operand was defined by a load, add the s_waitcnt
      // instruction.
      //
      // Two cases are handled for destination operands:
      // 1) If the destination operand was defined by a load, add the s_waitcnt
      //    instruction to guarantee the right WAW order.
      // 2) If a destination operand was used by a recent export/store
      //    instruction, add an s_waitcnt on exp_cnt to guarantee the WAR order.
      for (const MachineMemOperand *Memop : MI.memoperands()) {
        const Value *Ptr = Memop->getValue();
        if (Memop->isStore() && SLoadAddresses.count(Ptr)) {
          addWait(Wait, LGKM_CNT, 0);
          if (PDT->dominates(MI.getParent(), SLoadAddresses.find(Ptr)->second))
            SLoadAddresses.erase(Ptr);
        }
        unsigned AS = Memop->getAddrSpace();
        if (AS != AMDGPUAS::LOCAL_ADDRESS)
          continue;
        unsigned RegNo = SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS;
        // VM_CNT is only relevant to vgpr or LDS.
        ScoreBrackets.determineWait(
            VM_CNT, ScoreBrackets.getRegScore(RegNo, VM_CNT), Wait);
        if (Memop->isStore()) {
          ScoreBrackets.determineWait(
              EXP_CNT, ScoreBrackets.getRegScore(RegNo, EXP_CNT), Wait);
        }
      }

      // Loop over use and def operands.
      for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
        MachineOperand &Op = MI.getOperand(I);
        if (!Op.isReg())
          continue;
        RegInterval Interval =
            ScoreBrackets.getRegInterval(&MI, TII, MRI, TRI, I);

        const bool IsVGPR = TRI->isVectorRegister(*MRI, Op.getReg());
        for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
          if (IsVGPR) {
            // RAW always needs an s_waitcnt. WAW needs an s_waitcnt unless the
            // previous write and this write are the same type of VMEM
            // instruction, in which case they're guaranteed to write their
            // results in order anyway.
            if (Op.isUse() || !SIInstrInfo::isVMEM(MI) ||
                ScoreBrackets.hasOtherPendingVmemTypes(RegNo,
                                                       getVmemType(MI))) {
              ScoreBrackets.determineWait(
                  VM_CNT, ScoreBrackets.getRegScore(RegNo, VM_CNT), Wait);
              ScoreBrackets.clearVgprVmemTypes(RegNo);
            }
            if (Op.isDef()) {
              ScoreBrackets.determineWait(
                  EXP_CNT, ScoreBrackets.getRegScore(RegNo, EXP_CNT), Wait);
            }
          }
          ScoreBrackets.determineWait(
              LGKM_CNT, ScoreBrackets.getRegScore(RegNo, LGKM_CNT), Wait);
        }
      }
    }
  }

  // Check to see if this is an S_BARRIER, and if an implicit S_WAITCNT 0
  // occurs before the instruction. Doing it here prevents any additional
  // S_WAITCNTs from being emitted if the instruction was marked as
  // requiring a WAITCNT beforehand.
  if (MI.getOpcode() == AMDGPU::S_BARRIER &&
      !ST->hasAutoWaitcntBeforeBarrier()) {
    Wait = Wait.combined(AMDGPU::Waitcnt::allZero(ST->hasVscnt()));
  }

  // TODO: Remove this work-around, enable the assert for Bug 457939
  //       after fixing the scheduler. Also, the Shader Compiler code is
  //       independent of target.
  if (readsVCCZ(MI) && ST->hasReadVCCZBug()) {
    if (ScoreBrackets.getScoreLB(LGKM_CNT) <
            ScoreBrackets.getScoreUB(LGKM_CNT) &&
        ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {
      Wait.LgkmCnt = 0;
    }
  }

  // Verify that the wait is actually needed.
  ScoreBrackets.simplifyWaitcnt(Wait);

  if (ForceEmitZeroWaitcnts)
    Wait = AMDGPU::Waitcnt::allZero(ST->hasVscnt());

  if (ForceEmitWaitcnt[VM_CNT])
    Wait.VmCnt = 0;
  if (ForceEmitWaitcnt[EXP_CNT])
    Wait.ExpCnt = 0;
  if (ForceEmitWaitcnt[LGKM_CNT])
    Wait.LgkmCnt = 0;
  if (ForceEmitWaitcnt[VS_CNT])
    Wait.VsCnt = 0;

  if (OldWaitcntInstr) {
    // Try to merge the required wait with preexisting waitcnt instructions.
    // Also erase redundant waitcnt.
    Modified =
        applyPreexistingWaitcnt(ScoreBrackets, *OldWaitcntInstr, Wait, &MI);
  } else {
    // Update waitcnt brackets after determining the required wait.
    ScoreBrackets.applyWaitcnt(Wait);
  }

  // Build new waitcnt instructions unless no wait is needed or the old waitcnt
  // instruction was modified to handle the required wait.
  if (Wait.hasWaitExceptVsCnt()) {
    unsigned Enc = AMDGPU::encodeWaitcnt(IV, Wait);
    auto SWaitInst = BuildMI(*MI.getParent(), MI.getIterator(),
                             MI.getDebugLoc(), TII->get(AMDGPU::S_WAITCNT))
                         .addImm(Enc);
    TrackedWaitcntSet.insert(SWaitInst);
    Modified = true;

    LLVM_DEBUG(dbgs() << "generateWaitcntInstBefore\n"
                      << "Old Instr: " << MI
                      << "New Instr: " << *SWaitInst << '\n');
  }

  if (Wait.hasWaitVsCnt()) {
    assert(ST->hasVscnt());

    auto SWaitInst =
        BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
                TII->get(AMDGPU::S_WAITCNT_VSCNT))
            .addReg(AMDGPU::SGPR_NULL, RegState::Undef)
            .addImm(Wait.VsCnt);
    TrackedWaitcntSet.insert(SWaitInst);
    Modified = true;

    LLVM_DEBUG(dbgs() << "generateWaitcntInstBefore\n"
                      << "Old Instr: " << MI
                      << "New Instr: " << *SWaitInst << '\n');
  }

  return Modified;
}

// This is a flat memory operation. Check to see if it has memory tokens other
// than LDS. Other address spaces supported by flat memory operations involve
// global memory.
bool SIInsertWaitcnts::mayAccessVMEMThroughFlat(const MachineInstr &MI) const {
  assert(TII->isFLAT(MI));

  // All flat instructions use the VMEM counter.
  assert(TII->usesVM_CNT(MI));

  // If there are no memory operands then conservatively assume the flat
  // operation may access VMEM.
  if (MI.memoperands_empty())
    return true;

  // See if any memory operand specifies an address space that involves VMEM.
  // Flat operations only support FLAT, LOCAL (LDS), or address spaces
  // involving VMEM such as GLOBAL, CONSTANT, PRIVATE (SCRATCH), etc. The
  // REGION (GDS) address space is not supported by flat operations. Therefore,
  // simply return true unless only the LDS address space is found.
  for (const MachineMemOperand *Memop : MI.memoperands()) {
    unsigned AS = Memop->getAddrSpace();
    assert(AS != AMDGPUAS::REGION_ADDRESS);
    if (AS != AMDGPUAS::LOCAL_ADDRESS)
      return true;
  }

  return false;
}

// This is a flat memory operation. Check to see if it has memory tokens for
// either LDS or FLAT.
bool SIInsertWaitcnts::mayAccessLDSThroughFlat(const MachineInstr &MI) const {
  assert(TII->isFLAT(MI));

  // Flat instructions such as SCRATCH and GLOBAL do not use the lgkm counter.
  if (!TII->usesLGKM_CNT(MI))
    return false;

  // If in tgsplit mode then there can be no use of LDS.
  if (ST->isTgSplitEnabled())
    return false;

  // If there are no memory operands then conservatively assume the flat
  // operation may access LDS.
  if (MI.memoperands_empty())
    return true;

  // See if any memory operand specifies an address space that involves LDS.
  for (const MachineMemOperand *Memop : MI.memoperands()) {
    unsigned AS = Memop->getAddrSpace();
    if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS)
      return true;
  }

  return false;
}

void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst,
                                               WaitcntBrackets *ScoreBrackets) {
  // Now look at the instruction opcode. If it is a memory access
  // instruction, update the upper-bound of the appropriate counter's
  // bracket and the destination operand scores.
  // TODO: Use the (TSFlags & SIInstrFlags::LGKM_CNT) property everywhere.
  if (TII->isDS(Inst) && TII->usesLGKM_CNT(Inst)) {
    if (TII->isAlwaysGDS(Inst.getOpcode()) ||
        TII->hasModifiersSet(Inst, AMDGPU::OpName::gds)) {
      ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_ACCESS, Inst);
      ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_GPR_LOCK, Inst);
    } else {
      ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);
    }
  } else if (TII->isFLAT(Inst)) {
    assert(Inst.mayLoadOrStore());

    int FlatASCount = 0;

    if (mayAccessVMEMThroughFlat(Inst)) {
      ++FlatASCount;
      if (!ST->hasVscnt())
        ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_ACCESS, Inst);
      else if (Inst.mayLoad() && !SIInstrInfo::isAtomicNoRet(Inst))
        ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_READ_ACCESS, Inst);
      else
        ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_WRITE_ACCESS, Inst);
    }

    if (mayAccessLDSThroughFlat(Inst)) {
      ++FlatASCount;
      ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);
    }

    // A Flat memory operation must access at least one address space.
    assert(FlatASCount);

    // This is a flat memory operation that accesses both VMEM and LDS, so note
    // it - it will require that both the VM and LGKM counters be flushed to
    // zero if it is pending when a VM or LGKM dependency occurs.
    if (FlatASCount > 1)
      ScoreBrackets->setPendingFlat();
  } else if (SIInstrInfo::isVMEM(Inst) &&
             !llvm::AMDGPU::getMUBUFIsBufferInv(Inst.getOpcode())) {
    if (!ST->hasVscnt())
      ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_ACCESS, Inst);
    else if ((Inst.mayLoad() && !SIInstrInfo::isAtomicNoRet(Inst)) ||
             /* IMAGE_GET_RESINFO / IMAGE_GET_LOD */
             (TII->isMIMG(Inst) && !Inst.mayLoad() && !Inst.mayStore()))
      ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_READ_ACCESS, Inst);
    else if (Inst.mayStore())
      ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_WRITE_ACCESS, Inst);

    if (ST->vmemWriteNeedsExpWaitcnt() &&
        (Inst.mayStore() || SIInstrInfo::isAtomicRet(Inst))) {
      ScoreBrackets->updateByEvent(TII, TRI, MRI, VMW_GPR_LOCK, Inst);
    }
  } else if (TII->isSMRD(Inst)) {
    ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst);
  } else if (Inst.isCall()) {
    if (callWaitsOnFunctionReturn(Inst)) {
      // Act as a wait on everything.
      ScoreBrackets->applyWaitcnt(AMDGPU::Waitcnt::allZero(ST->hasVscnt()));
    } else {
      // May need to wait for anything.
      ScoreBrackets->applyWaitcnt(AMDGPU::Waitcnt());
    }
  } else if (SIInstrInfo::isEXP(Inst)) {
    unsigned Imm = TII->getNamedOperand(Inst, AMDGPU::OpName::tgt)->getImm();
    if (Imm >= AMDGPU::Exp::ET_PARAM0 && Imm <= AMDGPU::Exp::ET_PARAM31)
      ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_PARAM_ACCESS, Inst);
    else if (Imm >= AMDGPU::Exp::ET_POS0 && Imm <= AMDGPU::Exp::ET_POS_LAST)
      ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_POS_ACCESS, Inst);
    else
      ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_GPR_LOCK, Inst);
  } else {
    switch (Inst.getOpcode()) {
    case AMDGPU::S_SENDMSG:
    case AMDGPU::S_SENDMSGHALT:
      ScoreBrackets->updateByEvent(TII, TRI, MRI, SQ_MESSAGE, Inst);
      break;
    case AMDGPU::S_MEMTIME:
    case AMDGPU::S_MEMREALTIME:
      ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst);
      break;
    }
  }
}

bool WaitcntBrackets::mergeScore(const MergeInfo &M, unsigned &Score,
                                 unsigned OtherScore) {
  unsigned MyShifted = Score <= M.OldLB ? 0 : Score + M.MyShift;
  unsigned OtherShifted =
      OtherScore <= M.OtherLB ? 0 : OtherScore + M.OtherShift;
  Score = std::max(MyShifted, OtherShifted);
  return OtherShifted > MyShifted;
}

/// Merge the pending events and associated score brackets of \p Other into
/// this bracket's status.
///
/// Returns whether the merge resulted in a change that requires tighter waits
/// (i.e. the merged brackets strictly dominate the original brackets).
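///
/// Scores from the two predecessors are rebased onto a common bracket by
/// shifting each pending score so that the upper bounds coincide; a score at
/// or below its old lower bound collapses to zero, i.e. "no pending event"
/// (see mergeScore).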
bool WaitcntBrackets::merge(const WaitcntBrackets &Other) {
  bool StrictDom = false;

  VgprUB = std::max(VgprUB, Other.VgprUB);
  SgprUB = std::max(SgprUB, Other.SgprUB);

  for (auto T : inst_counter_types()) {
    // Merge event flags for this counter.
    const bool OldOutOfOrder = counterOutOfOrder(T);
    const unsigned OldEvents = PendingEvents & WaitEventMaskForInst[T];
    const unsigned OtherEvents = Other.PendingEvents & WaitEventMaskForInst[T];
    if (OtherEvents & ~OldEvents)
      StrictDom = true;
    PendingEvents |= OtherEvents;

    // Merge scores for this counter.
    const unsigned MyPending = ScoreUBs[T] - ScoreLBs[T];
    const unsigned OtherPending = Other.ScoreUBs[T] - Other.ScoreLBs[T];
    const unsigned NewUB = ScoreLBs[T] + std::max(MyPending, OtherPending);
    if (NewUB < ScoreLBs[T])
      report_fatal_error("waitcnt score overflow");

    MergeInfo M;
    M.OldLB = ScoreLBs[T];
    M.OtherLB = Other.ScoreLBs[T];
    M.MyShift = NewUB - ScoreUBs[T];
    M.OtherShift = NewUB - Other.ScoreUBs[T];

    ScoreUBs[T] = NewUB;

    StrictDom |= mergeScore(M, LastFlat[T], Other.LastFlat[T]);

    bool RegStrictDom = false;
    for (int J = 0; J <= VgprUB; J++) {
      RegStrictDom |= mergeScore(M, VgprScores[T][J], Other.VgprScores[T][J]);
    }

    if (T == VM_CNT) {
      for (int J = 0; J <= VgprUB; J++) {
        unsigned char NewVmemTypes = VgprVmemTypes[J] | Other.VgprVmemTypes[J];
        RegStrictDom |= NewVmemTypes != VgprVmemTypes[J];
        VgprVmemTypes[J] = NewVmemTypes;
      }
    }

    if (T == LGKM_CNT) {
      for (int J = 0; J <= SgprUB; J++) {
        RegStrictDom |= mergeScore(M, SgprScores[J], Other.SgprScores[J]);
      }
    }

    if (RegStrictDom && !OldOutOfOrder)
      StrictDom = true;
  }

  return StrictDom;
}

// Generate s_waitcnt instructions where needed.
bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
                                            MachineBasicBlock &Block,
                                            WaitcntBrackets &ScoreBrackets) {
  bool Modified = false;

  LLVM_DEBUG({
    dbgs() << "*** Block" << Block.getNumber() << " ***";
    ScoreBrackets.dump();
  });

  // Track the correctness of vccz through this basic block. There are two
  // reasons why it might be incorrect; see ST->hasReadVCCZBug() and
  // ST->partialVCCWritesUpdateVCCZ().
  bool VCCZCorrect = true;
  if (ST->hasReadVCCZBug()) {
    // vccz could be incorrect at a basic block boundary if a predecessor wrote
    // to vcc and then issued an smem load.
    VCCZCorrect = false;
  } else if (!ST->partialVCCWritesUpdateVCCZ()) {
    // vccz could be incorrect at a basic block boundary if a predecessor wrote
    // to vcc_lo or vcc_hi.
    VCCZCorrect = false;
  }

  // Walk over the instructions.
  MachineInstr *OldWaitcntInstr = nullptr;

  for (MachineBasicBlock::instr_iterator Iter = Block.instr_begin(),
                                         E = Block.instr_end();
       Iter != E;) {
    MachineInstr &Inst = *Iter;

    // Track pre-existing waitcnts that were added in earlier iterations or by
    // the memory legalizer.
    if (Inst.getOpcode() == AMDGPU::S_WAITCNT ||
        (Inst.getOpcode() == AMDGPU::S_WAITCNT_VSCNT &&
         Inst.getOperand(0).isReg() &&
         Inst.getOperand(0).getReg() == AMDGPU::SGPR_NULL)) {
      if (!OldWaitcntInstr)
        OldWaitcntInstr = &Inst;
      ++Iter;
      continue;
    }

    // Generate an s_waitcnt instruction to be placed before Inst, if needed.
    Modified |= generateWaitcntInstBefore(Inst, ScoreBrackets, OldWaitcntInstr);
    OldWaitcntInstr = nullptr;

    // Restore vccz if it's not known to be correct already.
    bool RestoreVCCZ = !VCCZCorrect && readsVCCZ(Inst);

    // Don't examine operands unless we need to track vccz correctness.
    if (ST->hasReadVCCZBug() || !ST->partialVCCWritesUpdateVCCZ()) {
      if (Inst.definesRegister(AMDGPU::VCC_LO) ||
          Inst.definesRegister(AMDGPU::VCC_HI)) {
        // Up to gfx9, writes to vcc_lo and vcc_hi don't update vccz.
        if (!ST->partialVCCWritesUpdateVCCZ())
          VCCZCorrect = false;
      } else if (Inst.definesRegister(AMDGPU::VCC)) {
        // There is a hardware bug on CI/SI where SMRD instructions may corrupt
        // the vccz bit, so when we detect that an instruction may read from a
        // corrupt vccz bit, we need to:
        // 1. Insert s_waitcnt lgkm(0) to wait for all outstanding SMRD
        //    operations to complete.
        // 2. Restore the correct value of vccz by writing the current value
        //    of vcc back to vcc.
        if (ST->hasReadVCCZBug() &&
            ScoreBrackets.getScoreLB(LGKM_CNT) <
                ScoreBrackets.getScoreUB(LGKM_CNT) &&
            ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {
          // Writes to vcc while there's an outstanding smem read may get
          // clobbered as soon as any read completes.
          VCCZCorrect = false;
        } else {
          // Writes to vcc will fix any incorrect value in vccz.
          VCCZCorrect = true;
        }
      }
    }

    if (TII->isSMRD(Inst)) {
      for (const MachineMemOperand *Memop : Inst.memoperands()) {
        // No need to handle invariant loads when avoiding WAR conflicts, as
        // there cannot be a vector store to the same memory location.
        if (!Memop->isInvariant()) {
          const Value *Ptr = Memop->getValue();
          SLoadAddresses.insert(std::make_pair(Ptr, Inst.getParent()));
        }
      }
      if (ST->hasReadVCCZBug()) {
        // This smem read could complete and clobber vccz at any time.
        VCCZCorrect = false;
      }
    }

    updateEventWaitcntAfter(Inst, &ScoreBrackets);

#if 0 // TODO: implement resource type check controlled by options with ub = LB.
    // If this instruction generates a S_SETVSKIP because it is an
    // indexed resource, and we are on Tahiti, then it will also force
    // an S_WAITCNT vmcnt(0)
    if (RequireCheckResourceType(Inst, context)) {
      // Force the score to as if an S_WAITCNT vmcnt(0) is emitted.
      ScoreBrackets->setScoreLB(VM_CNT,
                                ScoreBrackets->getScoreUB(VM_CNT));
    }
#endif

    LLVM_DEBUG({
      Inst.print(dbgs());
      ScoreBrackets.dump();
    });

    // TODO: Remove this work-around after fixing the scheduler and enable the
    // assert above.
    if (RestoreVCCZ) {
      // Restore the vccz bit. Any time a value is written to vcc, the vcc
      // bit is updated, so we can restore the bit by reading the value of
      // vcc and then writing it back to the register.
      BuildMI(Block, Inst, Inst.getDebugLoc(),
              TII->get(ST->isWave32() ?
                                        AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64),
              TRI->getVCC())
          .addReg(TRI->getVCC());
      VCCZCorrect = true;
      Modified = true;
    }

    ++Iter;
  }

  return Modified;
}

bool SIInsertWaitcnts::runOnMachineFunction(MachineFunction &MF) {
  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();
  MRI = &MF.getRegInfo();
  IV = AMDGPU::getIsaVersion(ST->getCPU());
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  PDT = &getAnalysis<MachinePostDominatorTree>();

  ForceEmitZeroWaitcnts = ForceEmitZeroFlag;
  for (auto T : inst_counter_types())
    ForceEmitWaitcnt[T] = false;

  HardwareLimits.VmcntMax = AMDGPU::getVmcntBitMask(IV);
  HardwareLimits.ExpcntMax = AMDGPU::getExpcntBitMask(IV);
  HardwareLimits.LgkmcntMax = AMDGPU::getLgkmcntBitMask(IV);
  HardwareLimits.VscntMax = ST->hasVscnt() ? 63 : 0;

  unsigned NumVGPRsMax = ST->getAddressableNumVGPRs();
  unsigned NumSGPRsMax = ST->getAddressableNumSGPRs();
  assert(NumVGPRsMax <= SQ_MAX_PGM_VGPRS);
  assert(NumSGPRsMax <= SQ_MAX_PGM_SGPRS);

  RegisterEncoding.VGPR0 = TRI->getEncodingValue(AMDGPU::VGPR0);
  RegisterEncoding.VGPRL = RegisterEncoding.VGPR0 + NumVGPRsMax - 1;
  RegisterEncoding.SGPR0 = TRI->getEncodingValue(AMDGPU::SGPR0);
  RegisterEncoding.SGPRL = RegisterEncoding.SGPR0 + NumSGPRsMax - 1;

  TrackedWaitcntSet.clear();
  BlockInfos.clear();
  bool Modified = false;

  if (!MFI->isEntryFunction()) {
    // Wait for any outstanding memory operations that the input registers may
    // depend on. We can't track them and it's better to do the wait after the
    // costly call sequence.

    // TODO: Could insert earlier and schedule more liberally with operations
    // that only use caller preserved registers.
    MachineBasicBlock &EntryBB = MF.front();
    MachineBasicBlock::iterator I = EntryBB.begin();
    for (MachineBasicBlock::iterator E = EntryBB.end();
         I != E && (I->isPHI() || I->isMetaInstruction()); ++I)
      ;
    BuildMI(EntryBB, I, DebugLoc(), TII->get(AMDGPU::S_WAITCNT)).addImm(0);
    if (ST->hasVscnt())
      BuildMI(EntryBB, I, DebugLoc(), TII->get(AMDGPU::S_WAITCNT_VSCNT))
          .addReg(AMDGPU::SGPR_NULL, RegState::Undef)
          .addImm(0);

    Modified = true;
  }

  // Keep iterating over the blocks in reverse post order, inserting and
  // updating s_waitcnt where needed, until a fix point is reached.
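  // A block must be revisited whenever propagating our outgoing bracket state
  // changes the incoming state of a successor; if that successor appears
  // earlier in the traversal order (a back edge), another sweep is required.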
  for (auto *MBB : ReversePostOrderTraversal<MachineFunction *>(&MF))
    BlockInfos.insert({MBB, BlockInfo(MBB)});

  std::unique_ptr<WaitcntBrackets> Brackets;
  bool Repeat;
  do {
    Repeat = false;

    for (auto BII = BlockInfos.begin(), BIE = BlockInfos.end(); BII != BIE;
         ++BII) {
      BlockInfo &BI = BII->second;
      if (!BI.Dirty)
        continue;

      if (BI.Incoming) {
        if (!Brackets)
          Brackets = std::make_unique<WaitcntBrackets>(*BI.Incoming);
        else
          *Brackets = *BI.Incoming;
      } else {
        if (!Brackets)
          Brackets = std::make_unique<WaitcntBrackets>(ST);
        else
          *Brackets = WaitcntBrackets(ST);
      }

      Modified |= insertWaitcntInBlock(MF, *BI.MBB, *Brackets);
      BI.Dirty = false;

      if (Brackets->hasPending()) {
        BlockInfo *MoveBracketsToSucc = nullptr;
        for (MachineBasicBlock *Succ : BI.MBB->successors()) {
          auto SuccBII = BlockInfos.find(Succ);
          BlockInfo &SuccBI = SuccBII->second;
          if (!SuccBI.Incoming) {
            SuccBI.Dirty = true;
            if (SuccBII <= BII)
              Repeat = true;
            if (!MoveBracketsToSucc) {
              MoveBracketsToSucc = &SuccBI;
            } else {
              SuccBI.Incoming = std::make_unique<WaitcntBrackets>(*Brackets);
            }
          } else if (SuccBI.Incoming->merge(*Brackets)) {
            SuccBI.Dirty = true;
            if (SuccBII <= BII)
              Repeat = true;
          }
        }
        if (MoveBracketsToSucc)
          MoveBracketsToSucc->Incoming = std::move(Brackets);
      }
    }
  } while (Repeat);

  SmallVector<MachineBasicBlock *, 4> EndPgmBlocks;

  bool HaveScalarStores = false;

  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (!HaveScalarStores && TII->isScalarStore(MI))
        HaveScalarStores = true;

      if (MI.getOpcode() == AMDGPU::S_ENDPGM ||
          MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG)
        EndPgmBlocks.push_back(&MBB);
    }
  }

  if (HaveScalarStores) {
    // If scalar writes are used, the cache must be flushed or else the next
    // wave to reuse the same scratch memory can be clobbered.
    //
    // Insert s_dcache_wb at wave termination points if there were any scalar
    // stores, and only if the cache hasn't already been flushed. This could be
    // improved by looking across blocks for flushes in postdominating blocks
    // from the stores, but an explicitly requested flush is probably very rare.
    for (MachineBasicBlock *MBB : EndPgmBlocks) {
      bool SeenDCacheWB = false;

      for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
           ++I) {
        if (I->getOpcode() == AMDGPU::S_DCACHE_WB)
          SeenDCacheWB = true;
        else if (TII->isScalarStore(*I))
          SeenDCacheWB = false;

        // FIXME: It would be better to insert this before a waitcnt if any.
        if ((I->getOpcode() == AMDGPU::S_ENDPGM ||
             I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG) &&
            !SeenDCacheWB) {
          Modified = true;
          BuildMI(*MBB, I, I->getDebugLoc(), TII->get(AMDGPU::S_DCACHE_WB));
        }
      }
    }
  }

  return Modified;
}