//===- SILoadStoreOptimizer.cpp -------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass tries to fuse DS instructions with close by immediate offsets.
// This will fuse operations such as
//   ds_read_b32 v0, v2 offset:16
//   ds_read_b32 v1, v2 offset:32
// ==>
//   ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
//
// The same is done for certain SMEM opcodes, e.g.:
//   s_buffer_load_dword s4, s[0:3], 4
//   s_buffer_load_dword s5, s[0:3], 8
// ==>
//   s_buffer_load_dwordx2 s[4:5], s[0:3], 4
//
//
// Future improvements:
//
// - This currently relies on the scheduler to place loads and stores next to
//   each other, and then only merges adjacent pairs of instructions. It would
//   be good to be more flexible with interleaved instructions, and possibly run
//   before scheduling. It currently misses stores of constants because loading
//   the constant into the data register is placed between the stores, although
//   this is arguably a scheduling problem.
//
// - Live interval recomputing seems inefficient. This currently only matches
//   one pair, recomputes live intervals, and moves on to the next pair. It
//   would be better to compute a list of all merges that need to occur.
//
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads has offsets that are too large to fit in the 8-bit offset
//   fields, but the offsets are close enough to each other, we can add to the
//   base pointer and use the new reduced offsets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-load-store-opt"

namespace {

class SILoadStoreOptimizer : public MachineFunctionPass {
  enum InstClassEnum {
    DS_READ_WRITE,
    S_BUFFER_LOAD_IMM,
    BUFFER_LOAD_OFFEN,
    BUFFER_LOAD_OFFSET,
    BUFFER_STORE_OFFEN,
    BUFFER_STORE_OFFSET,
  };

  struct CombineInfo {
    MachineBasicBlock::iterator I;
    MachineBasicBlock::iterator Paired;
    unsigned EltSize;
    unsigned Offset0;
    unsigned Offset1;
    unsigned BaseOff;
    InstClassEnum InstClass;
    bool GLC0;
    bool GLC1;
    bool SLC0;
    bool SLC1;
    bool UseST64;
    bool IsX2;
    SmallVector<MachineInstr*, 8> InstsToMove;
  };

private:
  const SISubtarget *STM = nullptr;
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  AliasAnalysis *AA = nullptr;
  unsigned CreatedX2;

  static bool offsetsCanBeCombined(CombineInfo &CI);

  bool findMatchingInst(CombineInfo &CI);
  MachineBasicBlock::iterator mergeRead2Pair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeWrite2Pair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeSBufferLoadImmPair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeBufferLoadPair(CombineInfo &CI);
  unsigned promoteBufferStoreOpcode(const MachineInstr &I, bool &IsX2,
                                    bool &IsOffen) const;
  MachineBasicBlock::iterator mergeBufferStorePair(CombineInfo &CI);

public:
  static char ID;

  SILoadStoreOptimizer() : MachineFunctionPass(ID) {
    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
  }

  bool optimizeBlock(MachineBasicBlock &MBB);

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Load / Store Optimizer"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();

    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace.

INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
                      "SI Load / Store Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE,
                    "SI Load / Store Optimizer", false, false)

char SILoadStoreOptimizer::ID = 0;

char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;

FunctionPass *llvm::createSILoadStoreOptimizerPass() {
  return new SILoadStoreOptimizer();
}

static void moveInstsAfter(MachineBasicBlock::iterator I,
                           ArrayRef<MachineInstr*> InstsToMove) {
  MachineBasicBlock *MBB = I->getParent();
  ++I;
  for (MachineInstr *MI : InstsToMove) {
    MI->removeFromParent();
    MBB->insert(I, MI);
  }
}

static void addDefsToList(const MachineInstr &MI, DenseSet<unsigned> &Defs) {
  // XXX: Should this be looking for implicit defs?
  for (const MachineOperand &Def : MI.defs())
    Defs.insert(Def.getReg());
}

static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
                                      MachineBasicBlock::iterator B,
                                      const SIInstrInfo *TII,
                                      AliasAnalysis *AA) {
  // RAW or WAR - cannot reorder
  // WAW - cannot reorder
  // RAR - safe to reorder
  return !(A->mayStore() || B->mayStore()) ||
         TII->areMemAccessesTriviallyDisjoint(*A, *B, AA);
}

// Add MI and its defs to the lists if MI reads one of the defs that are
// already in the list. Returns true in that case.
static bool
addToListsIfDependent(MachineInstr &MI,
                      DenseSet<unsigned> &Defs,
                      SmallVectorImpl<MachineInstr*> &Insts) {
  for (MachineOperand &Use : MI.operands()) {
    // If one of the defs is read, then there is a use of Def between I and the
    // instruction that I will potentially be merged with. We will need to move
    // this instruction after the merged instructions.

    if (Use.isReg() && Use.readsReg() && Defs.count(Use.getReg())) {
      Insts.push_back(&MI);
      addDefsToList(MI, Defs);
      return true;
    }
  }

  return false;
}

static bool
canMoveInstsAcrossMemOp(MachineInstr &MemOp,
                        ArrayRef<MachineInstr*> InstsToMove,
                        const SIInstrInfo *TII,
                        AliasAnalysis *AA) {
  assert(MemOp.mayLoadOrStore());

  for (MachineInstr *InstToMove : InstsToMove) {
    if (!InstToMove->mayLoadOrStore())
      continue;
    if (!memAccessesCanBeReordered(MemOp, *InstToMove, TII, AA))
      return false;
  }
  return true;
}

bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI) {
  // XXX - Would the same offset be OK? Is there any reason this would happen or
  // be useful?
  if (CI.Offset0 == CI.Offset1)
    return false;

  // This won't be valid if the offset isn't aligned.
  if ((CI.Offset0 % CI.EltSize != 0) || (CI.Offset1 % CI.EltSize != 0))
    return false;

  unsigned EltOffset0 = CI.Offset0 / CI.EltSize;
  unsigned EltOffset1 = CI.Offset1 / CI.EltSize;
  CI.UseST64 = false;
  CI.BaseOff = 0;

  // Handle SMEM and VMEM instructions.
  if (CI.InstClass != DS_READ_WRITE) {
    unsigned Diff = CI.IsX2 ? 2 : 1;
    return (EltOffset0 + Diff == EltOffset1 ||
            EltOffset1 + Diff == EltOffset0) &&
           CI.GLC0 == CI.GLC1 &&
           (CI.InstClass == S_BUFFER_LOAD_IMM || CI.SLC0 == CI.SLC1);
  }

  // If the offset in elements doesn't fit in 8-bits, we might be able to use
  // the stride 64 versions.
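  // For example, with EltSize == 4 and byte offsets 0 and 1024, the element
  // offsets are 0 and 256; 256 does not fit in 8 bits, but both element
  // offsets are multiples of 64, so the ST64 forms handled below can encode
  // them as 0 and 4. (Illustrative values only.)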
  if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64) == 0 &&
      isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
    CI.Offset0 = EltOffset0 / 64;
    CI.Offset1 = EltOffset1 / 64;
    CI.UseST64 = true;
    return true;
  }

  // Check if the new offsets fit in the reduced 8-bit range.
  if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
    CI.Offset0 = EltOffset0;
    CI.Offset1 = EltOffset1;
    return true;
  }

  // Try to shift base address to decrease offsets.
  unsigned OffsetDiff = std::abs((int)EltOffset1 - (int)EltOffset0);
  CI.BaseOff = std::min(CI.Offset0, CI.Offset1);

  if ((OffsetDiff % 64 == 0) && isUInt<8>(OffsetDiff / 64)) {
    CI.Offset0 = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64;
    CI.Offset1 = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64;
    CI.UseST64 = true;
    return true;
  }

  if (isUInt<8>(OffsetDiff)) {
    CI.Offset0 = EltOffset0 - CI.BaseOff / CI.EltSize;
    CI.Offset1 = EltOffset1 - CI.BaseOff / CI.EltSize;
    return true;
  }

  return false;
}

bool SILoadStoreOptimizer::findMatchingInst(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  MachineBasicBlock::iterator E = MBB->end();
  MachineBasicBlock::iterator MBBI = CI.I;

  unsigned AddrOpName[3] = {0};
  int AddrIdx[3];
  const MachineOperand *AddrReg[3];
  unsigned NumAddresses = 0;

  switch (CI.InstClass) {
  case DS_READ_WRITE:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::addr;
    break;
  case S_BUFFER_LOAD_IMM:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::sbase;
    break;
  case BUFFER_LOAD_OFFEN:
  case BUFFER_STORE_OFFEN:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::srsrc;
    AddrOpName[NumAddresses++] = AMDGPU::OpName::vaddr;
    AddrOpName[NumAddresses++] = AMDGPU::OpName::soffset;
    break;
  case BUFFER_LOAD_OFFSET:
  case BUFFER_STORE_OFFSET:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::srsrc;
    AddrOpName[NumAddresses++] = AMDGPU::OpName::soffset;
    break;
  }

  for (unsigned i = 0; i < NumAddresses; i++) {
    AddrIdx[i] = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AddrOpName[i]);
    AddrReg[i] = &CI.I->getOperand(AddrIdx[i]);

    // We only ever merge operations with the same base address register, so
    // don't bother scanning forward if there are no other uses.
    if (AddrReg[i]->isReg() &&
        (TargetRegisterInfo::isPhysicalRegister(AddrReg[i]->getReg()) ||
         MRI->hasOneNonDBGUse(AddrReg[i]->getReg())))
      return false;
  }

  ++MBBI;

  DenseSet<unsigned> DefsToMove;
  addDefsToList(*CI.I, DefsToMove);

  for ( ; MBBI != E; ++MBBI) {
    if (MBBI->getOpcode() != CI.I->getOpcode()) {
      // This is not a matching instruction, but we can keep looking as
      // long as one of these conditions is met:
      // 1. It is safe to move I down past MBBI.
      // 2. It is safe to move MBBI down past the instruction that I will
      //    be merged into.

      if (MBBI->hasUnmodeledSideEffects()) {
        // We can't re-order this instruction with respect to other memory
        // operations, so we fail both conditions mentioned above.
        return false;
      }

      if (MBBI->mayLoadOrStore() &&
          (!memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA) ||
           !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))) {
        // We fail condition #1, but we may still be able to satisfy condition
        // #2. Add this instruction to the move list; we will then check
        // whether condition #2 holds once we have selected the matching
        // instruction.
        CI.InstsToMove.push_back(&*MBBI);
        addDefsToList(*MBBI, DefsToMove);
        continue;
      }

      // When we match I with another memory instruction we will be moving I
      // down to the location of the matched instruction; any uses of I will
      // need to be moved down as well.
      addToListsIfDependent(*MBBI, DefsToMove, CI.InstsToMove);
      continue;
    }

    // Don't merge volatiles.
    if (MBBI->hasOrderedMemoryRef())
      return false;

    // Handle a case like
    //   DS_WRITE_B32 addr, v, idx0
    //   w = DS_READ_B32 addr, idx0
    //   DS_WRITE_B32 addr, f(w), idx1
    // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
    // merging of the two writes.
    if (addToListsIfDependent(*MBBI, DefsToMove, CI.InstsToMove))
      continue;

    bool Match = true;
    for (unsigned i = 0; i < NumAddresses; i++) {
      const MachineOperand &AddrRegNext = MBBI->getOperand(AddrIdx[i]);

      if (AddrReg[i]->isImm() || AddrRegNext.isImm()) {
        if (AddrReg[i]->isImm() != AddrRegNext.isImm() ||
            AddrReg[i]->getImm() != AddrRegNext.getImm()) {
          Match = false;
          break;
        }
        continue;
      }

      // Check same base pointer. Be careful of subregisters, which can occur
      // with vectors of pointers.
      if (AddrReg[i]->getReg() != AddrRegNext.getReg() ||
          AddrReg[i]->getSubReg() != AddrRegNext.getSubReg()) {
        Match = false;
        break;
      }
    }

    if (Match) {
      int OffsetIdx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(),
                                                 AMDGPU::OpName::offset);
      CI.Offset0 = CI.I->getOperand(OffsetIdx).getImm();
      CI.Offset1 = MBBI->getOperand(OffsetIdx).getImm();
      CI.Paired = MBBI;

      if (CI.InstClass == DS_READ_WRITE) {
        CI.Offset0 &= 0xffff;
        CI.Offset1 &= 0xffff;
      } else {
        CI.GLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::glc)->getImm();
        CI.GLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::glc)->getImm();
        if (CI.InstClass != S_BUFFER_LOAD_IMM) {
          CI.SLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::slc)->getImm();
          CI.SLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::slc)->getImm();
        }
      }

      // Check that both offsets fit in the reduced range.
      // We also need to go through the list of instructions that we plan to
      // move and make sure they are all safe to move down past the merged
      // instruction.
      if (offsetsCanBeCombined(CI))
        if (canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
          return true;
    }

    // We've found a load/store that we couldn't merge for some reason.
    // We could potentially keep looking, but we'd need to make sure that
    // it was safe to move I and also all the instructions in InstsToMove
    // down past this instruction.
    // Check if we can move I across MBBI and if we can move all I's users.
    if (!memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA) ||
        !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
      break;
  }
  return false;
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
    CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be careful, since the addresses could be subregisters themselves in weird
  // cases, like vectors of pointers.
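  //
  // The merged sequence built below is, roughly:
  //   [v_add_i32 newBase, BaseOff, addr]   (only when a base offset is needed)
  //   ds_read2_b32 newDst, addr offset0:<off0> offset1:<off1>
  //   COPY origDst0, newDst.sub0
  //   COPY origDst1, newDst.sub1
  // (Sketch only; the 64-bit and ST64 variants use the corresponding opcodes
  // and subregister pairs.)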
  const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);

  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdst);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = (CI.EltSize == 4) ? AMDGPU::DS_READ2_B32
                                   : AMDGPU::DS_READ2_B64;

  if (CI.UseST64)
    Opc = (CI.EltSize == 4) ? AMDGPU::DS_READ2ST64_B32
                            : AMDGPU::DS_READ2ST64_B64;

  unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(SubRegIdx0, SubRegIdx1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Read2Desc = TII->get(Opc);

  const TargetRegisterClass *SuperRC
    = (CI.EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);

  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = AddrReg->getReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::V_ADD_I32_e32), BaseReg)
      .addImm(CI.BaseOff)
      .addReg(AddrReg->getReg());
  }

  MachineInstrBuilder Read2 =
    BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
      .addReg(BaseReg, BaseRegFlags) // addr
      .addImm(NewOffset0)            // offset0
      .addImm(NewOffset1)            // offset1
      .addImm(0)                     // gds
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  (void)Read2;

  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);

  // Copy to the old destination registers.
  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  return Next;
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
    CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be sure to use .add() and not .addReg() with these operands; we want to
  // preserve the subregister index and any register flags set on them.
  const MachineOperand *Addr = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
  const MachineOperand *Data0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
  const MachineOperand *Data1
    = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::data0);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = (CI.EltSize == 4) ? AMDGPU::DS_WRITE2_B32
                                   : AMDGPU::DS_WRITE2_B64;

  if (CI.UseST64)
    Opc = (CI.EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32
                            : AMDGPU::DS_WRITE2ST64_B64;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(Data0, Data1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Write2Desc = TII->get(Opc);
  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = Addr->getReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::V_ADD_I32_e32), BaseReg)
      .addImm(CI.BaseOff)
      .addReg(Addr->getReg());
  }

  MachineInstrBuilder Write2 =
    BuildMI(*MBB, CI.Paired, DL, Write2Desc)
      .addReg(BaseReg, BaseRegFlags) // addr
      .add(*Data0)                   // data0
      .add(*Data1)                   // data1
      .addImm(NewOffset0)            // offset0
      .addImm(NewOffset1)            // offset1
      .addImm(0)                     // gds
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  moveInstsAfter(Write2, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  return Next;
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeSBufferLoadImmPair(
    CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  unsigned Opcode = CI.IsX2 ? AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM
                            : AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;

  const TargetRegisterClass *SuperRC =
    CI.IsX2 ? &AMDGPU::SReg_128RegClass : &AMDGPU::SReg_64_XEXECRegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);

  BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg)
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::sbase))
      .addImm(MergedOffset) // offset
      .addImm(CI.GLC0)      // glc
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
  unsigned SubRegIdx1 = CI.IsX2 ? AMDGPU::sub2_sub3 : AMDGPU::sub1;

  // Handle descending offsets
  if (CI.Offset0 > CI.Offset1)
    std::swap(SubRegIdx0, SubRegIdx1);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::sdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::sdst);

  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeBufferLoadPair(
    CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  unsigned Opcode;

  if (CI.InstClass == BUFFER_LOAD_OFFEN) {
    Opcode = CI.IsX2 ? AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN
                     : AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN;
  } else {
    Opcode = CI.IsX2 ? AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET
                     : AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
  }

  const TargetRegisterClass *SuperRC =
    CI.IsX2 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);

  auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg);

  if (CI.InstClass == BUFFER_LOAD_OFFEN)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
      .addImm(MergedOffset) // offset
      .addImm(CI.GLC0)      // glc
      .addImm(CI.SLC0)      // slc
      .addImm(0)            // tfe
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
  unsigned SubRegIdx1 = CI.IsX2 ? AMDGPU::sub2_sub3 : AMDGPU::sub1;

  // Handle descending offsets
  if (CI.Offset0 > CI.Offset1)
    std::swap(SubRegIdx0, SubRegIdx1);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);

  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

unsigned SILoadStoreOptimizer::promoteBufferStoreOpcode(
    const MachineInstr &I, bool &IsX2, bool &IsOffen) const {
  IsX2 = false;
  IsOffen = false;

  switch (I.getOpcode()) {
  case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFEN;
  case AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact:
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFEN_exact;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN:
    IsX2 = true;
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFEN;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN_exact:
    IsX2 = true;
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFEN_exact;
  case AMDGPU::BUFFER_STORE_DWORD_OFFSET:
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact:
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET_exact;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFSET:
    IsX2 = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFSET_exact:
    IsX2 = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET_exact;
  }
  return 0;
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeBufferStorePair(
    CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  bool Unused1, Unused2;
  unsigned Opcode = promoteBufferStoreOpcode(*CI.I, Unused1, Unused2);

  unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
  unsigned SubRegIdx1 = CI.IsX2 ? AMDGPU::sub2_sub3 : AMDGPU::sub1;

  // Handle descending offsets
  if (CI.Offset0 > CI.Offset1)
    std::swap(SubRegIdx0, SubRegIdx1);

  // Copy to the new source register.
  const TargetRegisterClass *SuperRC =
    CI.IsX2 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass;
  unsigned SrcReg = MRI->createVirtualRegister(SuperRC);

  const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Src1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);

  BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::REG_SEQUENCE), SrcReg)
      .add(*Src0)
      .addImm(SubRegIdx0)
      .add(*Src1)
      .addImm(SubRegIdx1);

  auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode))
                 .addReg(SrcReg, RegState::Kill);

  if (CI.InstClass == BUFFER_STORE_OFFEN)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
      .addImm(std::min(CI.Offset0, CI.Offset1)) // offset
      .addImm(CI.GLC0)                          // glc
      .addImm(CI.SLC0)                          // slc
      .addImm(0)                                // tfe
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  moveInstsAfter(MIB, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

// Scan through looking for adjacent LDS operations with constant offsets from
// the same base register. We rely on the scheduler to do the hard work of
// clustering nearby loads, and assume these are all adjacent.
bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
  bool Modified = false;

  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
    MachineInstr &MI = *I;

    // Don't combine if volatile.
    if (MI.hasOrderedMemoryRef()) {
      ++I;
      continue;
    }

    CombineInfo CI;
    CI.I = I;
    unsigned Opc = MI.getOpcode();
    if (Opc == AMDGPU::DS_READ_B32 || Opc == AMDGPU::DS_READ_B64) {
      CI.InstClass = DS_READ_WRITE;
      CI.EltSize = (Opc == AMDGPU::DS_READ_B64) ? 8 : 4;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeRead2Pair(CI);
      } else {
        ++I;
      }

      continue;
    }
    if (Opc == AMDGPU::DS_WRITE_B32 || Opc == AMDGPU::DS_WRITE_B64) {
      CI.InstClass = DS_READ_WRITE;
      CI.EltSize = (Opc == AMDGPU::DS_WRITE_B64) ? 8 : 4;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeWrite2Pair(CI);
      } else {
        ++I;
      }

      continue;
    }
    if (STM->hasSBufferLoadStoreAtomicDwordxN() &&
        (Opc == AMDGPU::S_BUFFER_LOAD_DWORD_IMM ||
         Opc == AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM)) {
      // EltSize is in units of the offset encoding.
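      // getSMRDEncodedOffset(*STM, 4) converts the 4-byte element stride into
      // whatever units the subtarget encodes SMEM immediate offsets in, so the
      // adjacency check in offsetsCanBeCombined() is encoding-agnostic.
      // (Explanatory note; see Utils/AMDGPUBaseInfo for the exact mapping.)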
      CI.InstClass = S_BUFFER_LOAD_IMM;
      CI.EltSize = AMDGPU::getSMRDEncodedOffset(*STM, 4);
      CI.IsX2 = Opc == AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeSBufferLoadImmPair(CI);
        if (!CI.IsX2)
          CreatedX2++;
      } else {
        ++I;
      }
      continue;
    }
    if (Opc == AMDGPU::BUFFER_LOAD_DWORD_OFFEN ||
        Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN ||
        Opc == AMDGPU::BUFFER_LOAD_DWORD_OFFSET ||
        Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET) {
      if (Opc == AMDGPU::BUFFER_LOAD_DWORD_OFFEN ||
          Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN)
        CI.InstClass = BUFFER_LOAD_OFFEN;
      else
        CI.InstClass = BUFFER_LOAD_OFFSET;

      CI.EltSize = 4;
      CI.IsX2 = Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN ||
                Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeBufferLoadPair(CI);
        if (!CI.IsX2)
          CreatedX2++;
      } else {
        ++I;
      }
      continue;
    }

    bool StoreIsX2, IsOffen;
    if (promoteBufferStoreOpcode(*I, StoreIsX2, IsOffen)) {
      CI.InstClass = IsOffen ? BUFFER_STORE_OFFEN : BUFFER_STORE_OFFSET;
      CI.EltSize = 4;
      CI.IsX2 = StoreIsX2;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeBufferStorePair(CI);
        if (!CI.IsX2)
          CreatedX2++;
      } else {
        ++I;
      }
      continue;
    }

    ++I;
  }

  return Modified;
}

bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  STM = &MF.getSubtarget<SISubtarget>();
  if (!STM->loadStoreOptEnabled())
    return false;

  TII = STM->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  assert(MRI->isSSA() && "Must be run on SSA");

  DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");

  bool Modified = false;
  CreatedX2 = 0;

  for (MachineBasicBlock &MBB : MF)
    Modified |= optimizeBlock(MBB);

  // Run again to convert x2 to x4.
  if (CreatedX2 >= 1) {
    for (MachineBasicBlock &MBB : MF)
      Modified |= optimizeBlock(MBB);
  }

  return Modified;
}