1 //===- SILoadStoreOptimizer.cpp -------------------------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This pass tries to fuse DS instructions with close by immediate offsets. 10 // This will fuse operations such as 11 // ds_read_b32 v0, v2 offset:16 12 // ds_read_b32 v1, v2 offset:32 13 // ==> 14 // ds_read2_b32 v[0:1], v2, offset0:4 offset1:8 15 // 16 // The same is done for certain SMEM and VMEM opcodes, e.g.: 17 // s_buffer_load_dword s4, s[0:3], 4 18 // s_buffer_load_dword s5, s[0:3], 8 19 // ==> 20 // s_buffer_load_dwordx2 s[4:5], s[0:3], 4 21 // 22 // This pass also tries to promote constant offset to the immediate by 23 // adjusting the base. It tries to use a base from the nearby instructions that 24 // allows it to have a 13bit constant offset and then promotes the 13bit offset 25 // to the immediate. 26 // E.g. 27 // s_movk_i32 s0, 0x1800 28 // v_add_co_u32_e32 v0, vcc, s0, v2 29 // v_addc_co_u32_e32 v1, vcc, 0, v6, vcc 30 // 31 // s_movk_i32 s0, 0x1000 32 // v_add_co_u32_e32 v5, vcc, s0, v2 33 // v_addc_co_u32_e32 v6, vcc, 0, v6, vcc 34 // global_load_dwordx2 v[5:6], v[5:6], off 35 // global_load_dwordx2 v[0:1], v[0:1], off 36 // => 37 // s_movk_i32 s0, 0x1000 38 // v_add_co_u32_e32 v5, vcc, s0, v2 39 // v_addc_co_u32_e32 v6, vcc, 0, v6, vcc 40 // global_load_dwordx2 v[5:6], v[5:6], off 41 // global_load_dwordx2 v[0:1], v[5:6], off offset:2048 42 // 43 // Future improvements: 44 // 45 // - This is currently missing stores of constants because loading 46 // the constant into the data register is placed between the stores, although 47 // this is arguably a scheduling problem. 48 // 49 // - Live interval recomputing seems inefficient. This currently only matches 50 // one pair, and recomputes live intervals and moves on to the next pair. It 51 // would be better to compute a list of all merges that need to occur. 52 // 53 // - With a list of instructions to process, we can also merge more. If a 54 // cluster of loads have offsets that are too large to fit in the 8-bit 55 // offsets, but are close enough to fit in the 8 bits, we can add to the base 56 // pointer and use the new reduced offsets. 57 // 58 //===----------------------------------------------------------------------===// 59 60 #include "AMDGPU.h" 61 #include "GCNSubtarget.h" 62 #include "MCTargetDesc/AMDGPUMCTargetDesc.h" 63 #include "llvm/Analysis/AliasAnalysis.h" 64 #include "llvm/CodeGen/MachineFunctionPass.h" 65 #include "llvm/InitializePasses.h" 66 67 using namespace llvm; 68 69 #define DEBUG_TYPE "si-load-store-opt" 70 71 namespace { 72 enum InstClassEnum { 73 UNKNOWN, 74 DS_READ, 75 DS_WRITE, 76 S_BUFFER_LOAD_IMM, 77 BUFFER_LOAD, 78 BUFFER_STORE, 79 MIMG, 80 TBUFFER_LOAD, 81 TBUFFER_STORE, 82 }; 83 84 struct AddressRegs { 85 unsigned char NumVAddrs = 0; 86 bool SBase = false; 87 bool SRsrc = false; 88 bool SOffset = false; 89 bool VAddr = false; 90 bool Addr = false; 91 bool SSamp = false; 92 }; 93 94 // GFX10 image_sample instructions can have 12 vaddrs + srsrc + ssamp. 95 const unsigned MaxAddressRegs = 12 + 1 + 1; 96 97 class SILoadStoreOptimizer : public MachineFunctionPass { 98 struct CombineInfo { 99 MachineBasicBlock::iterator I; 100 unsigned EltSize; 101 unsigned Offset; 102 unsigned Width; 103 unsigned Format; 104 unsigned BaseOff; 105 unsigned DMask; 106 InstClassEnum InstClass; 107 unsigned CPol = 0; 108 bool IsAGPR; 109 bool UseST64; 110 int AddrIdx[MaxAddressRegs]; 111 const MachineOperand *AddrReg[MaxAddressRegs]; 112 unsigned NumAddresses; 113 unsigned Order; 114 115 bool hasSameBaseAddress(const MachineInstr &MI) { 116 for (unsigned i = 0; i < NumAddresses; i++) { 117 const MachineOperand &AddrRegNext = MI.getOperand(AddrIdx[i]); 118 119 if (AddrReg[i]->isImm() || AddrRegNext.isImm()) { 120 if (AddrReg[i]->isImm() != AddrRegNext.isImm() || 121 AddrReg[i]->getImm() != AddrRegNext.getImm()) { 122 return false; 123 } 124 continue; 125 } 126 127 // Check same base pointer. Be careful of subregisters, which can occur 128 // with vectors of pointers. 129 if (AddrReg[i]->getReg() != AddrRegNext.getReg() || 130 AddrReg[i]->getSubReg() != AddrRegNext.getSubReg()) { 131 return false; 132 } 133 } 134 return true; 135 } 136 137 bool hasMergeableAddress(const MachineRegisterInfo &MRI) { 138 for (unsigned i = 0; i < NumAddresses; ++i) { 139 const MachineOperand *AddrOp = AddrReg[i]; 140 // Immediates are always OK. 141 if (AddrOp->isImm()) 142 continue; 143 144 // Don't try to merge addresses that aren't either immediates or registers. 145 // TODO: Should be possible to merge FrameIndexes and maybe some other 146 // non-register 147 if (!AddrOp->isReg()) 148 return false; 149 150 // TODO: We should be able to merge physical reg addresses. 151 if (AddrOp->getReg().isPhysical()) 152 return false; 153 154 // If an address has only one use then there will be on other 155 // instructions with the same address, so we can't merge this one. 156 if (MRI.hasOneNonDBGUse(AddrOp->getReg())) 157 return false; 158 } 159 return true; 160 } 161 162 void setMI(MachineBasicBlock::iterator MI, const SILoadStoreOptimizer &LSO); 163 }; 164 165 struct BaseRegisters { 166 Register LoReg; 167 Register HiReg; 168 169 unsigned LoSubReg = 0; 170 unsigned HiSubReg = 0; 171 }; 172 173 struct MemAddress { 174 BaseRegisters Base; 175 int64_t Offset = 0; 176 }; 177 178 using MemInfoMap = DenseMap<MachineInstr *, MemAddress>; 179 180 private: 181 const GCNSubtarget *STM = nullptr; 182 const SIInstrInfo *TII = nullptr; 183 const SIRegisterInfo *TRI = nullptr; 184 MachineRegisterInfo *MRI = nullptr; 185 AliasAnalysis *AA = nullptr; 186 bool OptimizeAgain; 187 188 static bool dmasksCanBeCombined(const CombineInfo &CI, 189 const SIInstrInfo &TII, 190 const CombineInfo &Paired); 191 static bool offsetsCanBeCombined(CombineInfo &CI, const GCNSubtarget &STI, 192 CombineInfo &Paired, bool Modify = false); 193 static bool widthsFit(const GCNSubtarget &STI, const CombineInfo &CI, 194 const CombineInfo &Paired); 195 static unsigned getNewOpcode(const CombineInfo &CI, const CombineInfo &Paired); 196 static std::pair<unsigned, unsigned> getSubRegIdxs(const CombineInfo &CI, 197 const CombineInfo &Paired); 198 const TargetRegisterClass *getTargetRegisterClass(const CombineInfo &CI, 199 const CombineInfo &Paired); 200 const TargetRegisterClass *getDataRegClass(const MachineInstr &MI) const; 201 202 bool checkAndPrepareMerge(CombineInfo &CI, CombineInfo &Paired, 203 SmallVectorImpl<MachineInstr *> &InstsToMove); 204 205 unsigned read2Opcode(unsigned EltSize) const; 206 unsigned read2ST64Opcode(unsigned EltSize) const; 207 MachineBasicBlock::iterator mergeRead2Pair(CombineInfo &CI, 208 CombineInfo &Paired, 209 const SmallVectorImpl<MachineInstr *> &InstsToMove); 210 211 unsigned write2Opcode(unsigned EltSize) const; 212 unsigned write2ST64Opcode(unsigned EltSize) const; 213 MachineBasicBlock::iterator 214 mergeWrite2Pair(CombineInfo &CI, CombineInfo &Paired, 215 const SmallVectorImpl<MachineInstr *> &InstsToMove); 216 MachineBasicBlock::iterator 217 mergeImagePair(CombineInfo &CI, CombineInfo &Paired, 218 const SmallVectorImpl<MachineInstr *> &InstsToMove); 219 MachineBasicBlock::iterator 220 mergeSBufferLoadImmPair(CombineInfo &CI, CombineInfo &Paired, 221 const SmallVectorImpl<MachineInstr *> &InstsToMove); 222 MachineBasicBlock::iterator 223 mergeBufferLoadPair(CombineInfo &CI, CombineInfo &Paired, 224 const SmallVectorImpl<MachineInstr *> &InstsToMove); 225 MachineBasicBlock::iterator 226 mergeBufferStorePair(CombineInfo &CI, CombineInfo &Paired, 227 const SmallVectorImpl<MachineInstr *> &InstsToMove); 228 MachineBasicBlock::iterator 229 mergeTBufferLoadPair(CombineInfo &CI, CombineInfo &Paired, 230 const SmallVectorImpl<MachineInstr *> &InstsToMove); 231 MachineBasicBlock::iterator 232 mergeTBufferStorePair(CombineInfo &CI, CombineInfo &Paired, 233 const SmallVectorImpl<MachineInstr *> &InstsToMove); 234 235 void updateBaseAndOffset(MachineInstr &I, Register NewBase, 236 int32_t NewOffset) const; 237 Register computeBase(MachineInstr &MI, const MemAddress &Addr) const; 238 MachineOperand createRegOrImm(int32_t Val, MachineInstr &MI) const; 239 Optional<int32_t> extractConstOffset(const MachineOperand &Op) const; 240 void processBaseWithConstOffset(const MachineOperand &Base, MemAddress &Addr) const; 241 /// Promotes constant offset to the immediate by adjusting the base. It 242 /// tries to use a base from the nearby instructions that allows it to have 243 /// a 13bit constant offset which gets promoted to the immediate. 244 bool promoteConstantOffsetToImm(MachineInstr &CI, 245 MemInfoMap &Visited, 246 SmallPtrSet<MachineInstr *, 4> &Promoted) const; 247 void addInstToMergeableList(const CombineInfo &CI, 248 std::list<std::list<CombineInfo> > &MergeableInsts) const; 249 250 std::pair<MachineBasicBlock::iterator, bool> collectMergeableInsts( 251 MachineBasicBlock::iterator Begin, MachineBasicBlock::iterator End, 252 MemInfoMap &Visited, SmallPtrSet<MachineInstr *, 4> &AnchorList, 253 std::list<std::list<CombineInfo>> &MergeableInsts) const; 254 255 public: 256 static char ID; 257 258 SILoadStoreOptimizer() : MachineFunctionPass(ID) { 259 initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry()); 260 } 261 262 bool optimizeInstsWithSameBaseAddr(std::list<CombineInfo> &MergeList, 263 bool &OptimizeListAgain); 264 bool optimizeBlock(std::list<std::list<CombineInfo> > &MergeableInsts); 265 266 bool runOnMachineFunction(MachineFunction &MF) override; 267 268 StringRef getPassName() const override { return "SI Load Store Optimizer"; } 269 270 void getAnalysisUsage(AnalysisUsage &AU) const override { 271 AU.setPreservesCFG(); 272 AU.addRequired<AAResultsWrapperPass>(); 273 274 MachineFunctionPass::getAnalysisUsage(AU); 275 } 276 277 MachineFunctionProperties getRequiredProperties() const override { 278 return MachineFunctionProperties() 279 .set(MachineFunctionProperties::Property::IsSSA); 280 } 281 }; 282 283 static unsigned getOpcodeWidth(const MachineInstr &MI, const SIInstrInfo &TII) { 284 const unsigned Opc = MI.getOpcode(); 285 286 if (TII.isMUBUF(Opc)) { 287 // FIXME: Handle d16 correctly 288 return AMDGPU::getMUBUFElements(Opc); 289 } 290 if (TII.isMIMG(MI)) { 291 uint64_t DMaskImm = 292 TII.getNamedOperand(MI, AMDGPU::OpName::dmask)->getImm(); 293 return countPopulation(DMaskImm); 294 } 295 if (TII.isMTBUF(Opc)) { 296 return AMDGPU::getMTBUFElements(Opc); 297 } 298 299 switch (Opc) { 300 case AMDGPU::S_BUFFER_LOAD_DWORD_IMM: 301 return 1; 302 case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM: 303 return 2; 304 case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM: 305 return 4; 306 case AMDGPU::S_BUFFER_LOAD_DWORDX8_IMM: 307 return 8; 308 case AMDGPU::DS_READ_B32: LLVM_FALLTHROUGH; 309 case AMDGPU::DS_READ_B32_gfx9: LLVM_FALLTHROUGH; 310 case AMDGPU::DS_WRITE_B32: LLVM_FALLTHROUGH; 311 case AMDGPU::DS_WRITE_B32_gfx9: 312 return 1; 313 case AMDGPU::DS_READ_B64: LLVM_FALLTHROUGH; 314 case AMDGPU::DS_READ_B64_gfx9: LLVM_FALLTHROUGH; 315 case AMDGPU::DS_WRITE_B64: LLVM_FALLTHROUGH; 316 case AMDGPU::DS_WRITE_B64_gfx9: 317 return 2; 318 default: 319 return 0; 320 } 321 } 322 323 /// Maps instruction opcode to enum InstClassEnum. 324 static InstClassEnum getInstClass(unsigned Opc, const SIInstrInfo &TII) { 325 switch (Opc) { 326 default: 327 if (TII.isMUBUF(Opc)) { 328 switch (AMDGPU::getMUBUFBaseOpcode(Opc)) { 329 default: 330 return UNKNOWN; 331 case AMDGPU::BUFFER_LOAD_DWORD_OFFEN: 332 case AMDGPU::BUFFER_LOAD_DWORD_OFFEN_exact: 333 case AMDGPU::BUFFER_LOAD_DWORD_OFFSET: 334 case AMDGPU::BUFFER_LOAD_DWORD_OFFSET_exact: 335 return BUFFER_LOAD; 336 case AMDGPU::BUFFER_STORE_DWORD_OFFEN: 337 case AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact: 338 case AMDGPU::BUFFER_STORE_DWORD_OFFSET: 339 case AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact: 340 return BUFFER_STORE; 341 } 342 } 343 if (TII.isMIMG(Opc)) { 344 // Ignore instructions encoded without vaddr. 345 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr) == -1 && 346 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0) == -1) 347 return UNKNOWN; 348 // Ignore BVH instructions 349 if (AMDGPU::getMIMGBaseOpcode(Opc)->BVH) 350 return UNKNOWN; 351 // TODO: Support IMAGE_GET_RESINFO and IMAGE_GET_LOD. 352 if (TII.get(Opc).mayStore() || !TII.get(Opc).mayLoad() || 353 TII.isGather4(Opc)) 354 return UNKNOWN; 355 return MIMG; 356 } 357 if (TII.isMTBUF(Opc)) { 358 switch (AMDGPU::getMTBUFBaseOpcode(Opc)) { 359 default: 360 return UNKNOWN; 361 case AMDGPU::TBUFFER_LOAD_FORMAT_X_OFFEN: 362 case AMDGPU::TBUFFER_LOAD_FORMAT_X_OFFEN_exact: 363 case AMDGPU::TBUFFER_LOAD_FORMAT_X_OFFSET: 364 case AMDGPU::TBUFFER_LOAD_FORMAT_X_OFFSET_exact: 365 return TBUFFER_LOAD; 366 case AMDGPU::TBUFFER_STORE_FORMAT_X_OFFEN: 367 case AMDGPU::TBUFFER_STORE_FORMAT_X_OFFEN_exact: 368 case AMDGPU::TBUFFER_STORE_FORMAT_X_OFFSET: 369 case AMDGPU::TBUFFER_STORE_FORMAT_X_OFFSET_exact: 370 return TBUFFER_STORE; 371 } 372 } 373 return UNKNOWN; 374 case AMDGPU::S_BUFFER_LOAD_DWORD_IMM: 375 case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM: 376 case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM: 377 case AMDGPU::S_BUFFER_LOAD_DWORDX8_IMM: 378 return S_BUFFER_LOAD_IMM; 379 case AMDGPU::DS_READ_B32: 380 case AMDGPU::DS_READ_B32_gfx9: 381 case AMDGPU::DS_READ_B64: 382 case AMDGPU::DS_READ_B64_gfx9: 383 return DS_READ; 384 case AMDGPU::DS_WRITE_B32: 385 case AMDGPU::DS_WRITE_B32_gfx9: 386 case AMDGPU::DS_WRITE_B64: 387 case AMDGPU::DS_WRITE_B64_gfx9: 388 return DS_WRITE; 389 } 390 } 391 392 /// Determines instruction subclass from opcode. Only instructions 393 /// of the same subclass can be merged together. 394 static unsigned getInstSubclass(unsigned Opc, const SIInstrInfo &TII) { 395 switch (Opc) { 396 default: 397 if (TII.isMUBUF(Opc)) 398 return AMDGPU::getMUBUFBaseOpcode(Opc); 399 if (TII.isMIMG(Opc)) { 400 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opc); 401 assert(Info); 402 return Info->BaseOpcode; 403 } 404 if (TII.isMTBUF(Opc)) 405 return AMDGPU::getMTBUFBaseOpcode(Opc); 406 return -1; 407 case AMDGPU::DS_READ_B32: 408 case AMDGPU::DS_READ_B32_gfx9: 409 case AMDGPU::DS_READ_B64: 410 case AMDGPU::DS_READ_B64_gfx9: 411 case AMDGPU::DS_WRITE_B32: 412 case AMDGPU::DS_WRITE_B32_gfx9: 413 case AMDGPU::DS_WRITE_B64: 414 case AMDGPU::DS_WRITE_B64_gfx9: 415 return Opc; 416 case AMDGPU::S_BUFFER_LOAD_DWORD_IMM: 417 case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM: 418 case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM: 419 case AMDGPU::S_BUFFER_LOAD_DWORDX8_IMM: 420 return AMDGPU::S_BUFFER_LOAD_DWORD_IMM; 421 } 422 } 423 424 static AddressRegs getRegs(unsigned Opc, const SIInstrInfo &TII) { 425 AddressRegs Result; 426 427 if (TII.isMUBUF(Opc)) { 428 if (AMDGPU::getMUBUFHasVAddr(Opc)) 429 Result.VAddr = true; 430 if (AMDGPU::getMUBUFHasSrsrc(Opc)) 431 Result.SRsrc = true; 432 if (AMDGPU::getMUBUFHasSoffset(Opc)) 433 Result.SOffset = true; 434 435 return Result; 436 } 437 438 if (TII.isMIMG(Opc)) { 439 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0); 440 if (VAddr0Idx >= 0) { 441 int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc); 442 Result.NumVAddrs = SRsrcIdx - VAddr0Idx; 443 } else { 444 Result.VAddr = true; 445 } 446 Result.SRsrc = true; 447 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opc); 448 if (Info && AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode)->Sampler) 449 Result.SSamp = true; 450 451 return Result; 452 } 453 if (TII.isMTBUF(Opc)) { 454 if (AMDGPU::getMTBUFHasVAddr(Opc)) 455 Result.VAddr = true; 456 if (AMDGPU::getMTBUFHasSrsrc(Opc)) 457 Result.SRsrc = true; 458 if (AMDGPU::getMTBUFHasSoffset(Opc)) 459 Result.SOffset = true; 460 461 return Result; 462 } 463 464 switch (Opc) { 465 default: 466 return Result; 467 case AMDGPU::S_BUFFER_LOAD_DWORD_IMM: 468 case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM: 469 case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM: 470 case AMDGPU::S_BUFFER_LOAD_DWORDX8_IMM: 471 Result.SBase = true; 472 return Result; 473 case AMDGPU::DS_READ_B32: 474 case AMDGPU::DS_READ_B64: 475 case AMDGPU::DS_READ_B32_gfx9: 476 case AMDGPU::DS_READ_B64_gfx9: 477 case AMDGPU::DS_WRITE_B32: 478 case AMDGPU::DS_WRITE_B64: 479 case AMDGPU::DS_WRITE_B32_gfx9: 480 case AMDGPU::DS_WRITE_B64_gfx9: 481 Result.Addr = true; 482 return Result; 483 } 484 } 485 486 void SILoadStoreOptimizer::CombineInfo::setMI(MachineBasicBlock::iterator MI, 487 const SILoadStoreOptimizer &LSO) { 488 I = MI; 489 unsigned Opc = MI->getOpcode(); 490 InstClass = getInstClass(Opc, *LSO.TII); 491 492 if (InstClass == UNKNOWN) 493 return; 494 495 IsAGPR = LSO.TRI->hasAGPRs(LSO.getDataRegClass(*MI)); 496 497 switch (InstClass) { 498 case DS_READ: 499 EltSize = 500 (Opc == AMDGPU::DS_READ_B64 || Opc == AMDGPU::DS_READ_B64_gfx9) ? 8 501 : 4; 502 break; 503 case DS_WRITE: 504 EltSize = 505 (Opc == AMDGPU::DS_WRITE_B64 || Opc == AMDGPU::DS_WRITE_B64_gfx9) ? 8 506 : 4; 507 break; 508 case S_BUFFER_LOAD_IMM: 509 EltSize = AMDGPU::convertSMRDOffsetUnits(*LSO.STM, 4); 510 break; 511 default: 512 EltSize = 4; 513 break; 514 } 515 516 if (InstClass == MIMG) { 517 DMask = LSO.TII->getNamedOperand(*I, AMDGPU::OpName::dmask)->getImm(); 518 // Offset is not considered for MIMG instructions. 519 Offset = 0; 520 } else { 521 int OffsetIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::offset); 522 Offset = I->getOperand(OffsetIdx).getImm(); 523 } 524 525 if (InstClass == TBUFFER_LOAD || InstClass == TBUFFER_STORE) 526 Format = LSO.TII->getNamedOperand(*I, AMDGPU::OpName::format)->getImm(); 527 528 Width = getOpcodeWidth(*I, *LSO.TII); 529 530 if ((InstClass == DS_READ) || (InstClass == DS_WRITE)) { 531 Offset &= 0xffff; 532 } else if (InstClass != MIMG) { 533 CPol = LSO.TII->getNamedOperand(*I, AMDGPU::OpName::cpol)->getImm(); 534 } 535 536 AddressRegs Regs = getRegs(Opc, *LSO.TII); 537 538 NumAddresses = 0; 539 for (unsigned J = 0; J < Regs.NumVAddrs; J++) 540 AddrIdx[NumAddresses++] = 541 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0) + J; 542 if (Regs.Addr) 543 AddrIdx[NumAddresses++] = 544 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::addr); 545 if (Regs.SBase) 546 AddrIdx[NumAddresses++] = 547 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::sbase); 548 if (Regs.SRsrc) 549 AddrIdx[NumAddresses++] = 550 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc); 551 if (Regs.SOffset) 552 AddrIdx[NumAddresses++] = 553 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::soffset); 554 if (Regs.VAddr) 555 AddrIdx[NumAddresses++] = 556 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr); 557 if (Regs.SSamp) 558 AddrIdx[NumAddresses++] = 559 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::ssamp); 560 assert(NumAddresses <= MaxAddressRegs); 561 562 for (unsigned J = 0; J < NumAddresses; J++) 563 AddrReg[J] = &I->getOperand(AddrIdx[J]); 564 } 565 566 } // end anonymous namespace. 567 568 INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE, 569 "SI Load Store Optimizer", false, false) 570 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 571 INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE, "SI Load Store Optimizer", 572 false, false) 573 574 char SILoadStoreOptimizer::ID = 0; 575 576 char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID; 577 578 FunctionPass *llvm::createSILoadStoreOptimizerPass() { 579 return new SILoadStoreOptimizer(); 580 } 581 582 static void moveInstsAfter(MachineBasicBlock::iterator I, 583 ArrayRef<MachineInstr *> InstsToMove) { 584 MachineBasicBlock *MBB = I->getParent(); 585 ++I; 586 for (MachineInstr *MI : InstsToMove) { 587 MI->removeFromParent(); 588 MBB->insert(I, MI); 589 } 590 } 591 592 static void addDefsUsesToList(const MachineInstr &MI, 593 DenseSet<Register> &RegDefs, 594 DenseSet<Register> &PhysRegUses) { 595 for (const MachineOperand &Op : MI.operands()) { 596 if (Op.isReg()) { 597 if (Op.isDef()) 598 RegDefs.insert(Op.getReg()); 599 else if (Op.readsReg() && Op.getReg().isPhysical()) 600 PhysRegUses.insert(Op.getReg()); 601 } 602 } 603 } 604 605 static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A, 606 MachineBasicBlock::iterator B, 607 AliasAnalysis *AA) { 608 // RAW or WAR - cannot reorder 609 // WAW - cannot reorder 610 // RAR - safe to reorder 611 return !(A->mayStore() || B->mayStore()) || !A->mayAlias(AA, *B, true); 612 } 613 614 // Add MI and its defs to the lists if MI reads one of the defs that are 615 // already in the list. Returns true in that case. 616 static bool addToListsIfDependent(MachineInstr &MI, DenseSet<Register> &RegDefs, 617 DenseSet<Register> &PhysRegUses, 618 SmallVectorImpl<MachineInstr *> &Insts) { 619 for (MachineOperand &Use : MI.operands()) { 620 // If one of the defs is read, then there is a use of Def between I and the 621 // instruction that I will potentially be merged with. We will need to move 622 // this instruction after the merged instructions. 623 // 624 // Similarly, if there is a def which is read by an instruction that is to 625 // be moved for merging, then we need to move the def-instruction as well. 626 // This can only happen for physical registers such as M0; virtual 627 // registers are in SSA form. 628 if (Use.isReg() && ((Use.readsReg() && RegDefs.count(Use.getReg())) || 629 (Use.isDef() && RegDefs.count(Use.getReg())) || 630 (Use.isDef() && Use.getReg().isPhysical() && 631 PhysRegUses.count(Use.getReg())))) { 632 Insts.push_back(&MI); 633 addDefsUsesToList(MI, RegDefs, PhysRegUses); 634 return true; 635 } 636 } 637 638 return false; 639 } 640 641 static bool canMoveInstsAcrossMemOp(MachineInstr &MemOp, 642 ArrayRef<MachineInstr *> InstsToMove, 643 AliasAnalysis *AA) { 644 assert(MemOp.mayLoadOrStore()); 645 646 for (MachineInstr *InstToMove : InstsToMove) { 647 if (!InstToMove->mayLoadOrStore()) 648 continue; 649 if (!memAccessesCanBeReordered(MemOp, *InstToMove, AA)) 650 return false; 651 } 652 return true; 653 } 654 655 // This function assumes that \p A and \p B have are identical except for 656 // size and offset, and they reference adjacent memory. 657 static MachineMemOperand *combineKnownAdjacentMMOs(MachineFunction &MF, 658 const MachineMemOperand *A, 659 const MachineMemOperand *B) { 660 unsigned MinOffset = std::min(A->getOffset(), B->getOffset()); 661 unsigned Size = A->getSize() + B->getSize(); 662 // This function adds the offset parameter to the existing offset for A, 663 // so we pass 0 here as the offset and then manually set it to the correct 664 // value after the call. 665 MachineMemOperand *MMO = MF.getMachineMemOperand(A, 0, Size); 666 MMO->setOffset(MinOffset); 667 return MMO; 668 } 669 670 bool SILoadStoreOptimizer::dmasksCanBeCombined(const CombineInfo &CI, 671 const SIInstrInfo &TII, 672 const CombineInfo &Paired) { 673 assert(CI.InstClass == MIMG); 674 675 // Ignore instructions with tfe/lwe set. 676 const auto *TFEOp = TII.getNamedOperand(*CI.I, AMDGPU::OpName::tfe); 677 const auto *LWEOp = TII.getNamedOperand(*CI.I, AMDGPU::OpName::lwe); 678 679 if ((TFEOp && TFEOp->getImm()) || (LWEOp && LWEOp->getImm())) 680 return false; 681 682 // Check other optional immediate operands for equality. 683 unsigned OperandsToMatch[] = {AMDGPU::OpName::cpol, AMDGPU::OpName::d16, 684 AMDGPU::OpName::unorm, AMDGPU::OpName::da, 685 AMDGPU::OpName::r128, AMDGPU::OpName::a16}; 686 687 for (auto op : OperandsToMatch) { 688 int Idx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), op); 689 if (AMDGPU::getNamedOperandIdx(Paired.I->getOpcode(), op) != Idx) 690 return false; 691 if (Idx != -1 && 692 CI.I->getOperand(Idx).getImm() != Paired.I->getOperand(Idx).getImm()) 693 return false; 694 } 695 696 // Check DMask for overlaps. 697 unsigned MaxMask = std::max(CI.DMask, Paired.DMask); 698 unsigned MinMask = std::min(CI.DMask, Paired.DMask); 699 700 unsigned AllowedBitsForMin = llvm::countTrailingZeros(MaxMask); 701 if ((1u << AllowedBitsForMin) <= MinMask) 702 return false; 703 704 return true; 705 } 706 707 static unsigned getBufferFormatWithCompCount(unsigned OldFormat, 708 unsigned ComponentCount, 709 const GCNSubtarget &STI) { 710 if (ComponentCount > 4) 711 return 0; 712 713 const llvm::AMDGPU::GcnBufferFormatInfo *OldFormatInfo = 714 llvm::AMDGPU::getGcnBufferFormatInfo(OldFormat, STI); 715 if (!OldFormatInfo) 716 return 0; 717 718 const llvm::AMDGPU::GcnBufferFormatInfo *NewFormatInfo = 719 llvm::AMDGPU::getGcnBufferFormatInfo(OldFormatInfo->BitsPerComp, 720 ComponentCount, 721 OldFormatInfo->NumFormat, STI); 722 723 if (!NewFormatInfo) 724 return 0; 725 726 assert(NewFormatInfo->NumFormat == OldFormatInfo->NumFormat && 727 NewFormatInfo->BitsPerComp == OldFormatInfo->BitsPerComp); 728 729 return NewFormatInfo->Format; 730 } 731 732 // Return the value in the inclusive range [Lo,Hi] that is aligned to the 733 // highest power of two. Note that the result is well defined for all inputs 734 // including corner cases like: 735 // - if Lo == Hi, return that value 736 // - if Lo == 0, return 0 (even though the "- 1" below underflows 737 // - if Lo > Hi, return 0 (as if the range wrapped around) 738 static uint32_t mostAlignedValueInRange(uint32_t Lo, uint32_t Hi) { 739 return Hi & maskLeadingOnes<uint32_t>(countLeadingZeros((Lo - 1) ^ Hi) + 1); 740 } 741 742 bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI, 743 const GCNSubtarget &STI, 744 CombineInfo &Paired, 745 bool Modify) { 746 assert(CI.InstClass != MIMG); 747 748 // XXX - Would the same offset be OK? Is there any reason this would happen or 749 // be useful? 750 if (CI.Offset == Paired.Offset) 751 return false; 752 753 // This won't be valid if the offset isn't aligned. 754 if ((CI.Offset % CI.EltSize != 0) || (Paired.Offset % CI.EltSize != 0)) 755 return false; 756 757 if (CI.InstClass == TBUFFER_LOAD || CI.InstClass == TBUFFER_STORE) { 758 759 const llvm::AMDGPU::GcnBufferFormatInfo *Info0 = 760 llvm::AMDGPU::getGcnBufferFormatInfo(CI.Format, STI); 761 if (!Info0) 762 return false; 763 const llvm::AMDGPU::GcnBufferFormatInfo *Info1 = 764 llvm::AMDGPU::getGcnBufferFormatInfo(Paired.Format, STI); 765 if (!Info1) 766 return false; 767 768 if (Info0->BitsPerComp != Info1->BitsPerComp || 769 Info0->NumFormat != Info1->NumFormat) 770 return false; 771 772 // TODO: Should be possible to support more formats, but if format loads 773 // are not dword-aligned, the merged load might not be valid. 774 if (Info0->BitsPerComp != 32) 775 return false; 776 777 if (getBufferFormatWithCompCount(CI.Format, CI.Width + Paired.Width, STI) == 0) 778 return false; 779 } 780 781 uint32_t EltOffset0 = CI.Offset / CI.EltSize; 782 uint32_t EltOffset1 = Paired.Offset / CI.EltSize; 783 CI.UseST64 = false; 784 CI.BaseOff = 0; 785 786 // Handle all non-DS instructions. 787 if ((CI.InstClass != DS_READ) && (CI.InstClass != DS_WRITE)) { 788 return (EltOffset0 + CI.Width == EltOffset1 || 789 EltOffset1 + Paired.Width == EltOffset0) && 790 CI.CPol == Paired.CPol && 791 (CI.InstClass == S_BUFFER_LOAD_IMM || CI.CPol == Paired.CPol); 792 } 793 794 // If the offset in elements doesn't fit in 8-bits, we might be able to use 795 // the stride 64 versions. 796 if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64) == 0 && 797 isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) { 798 if (Modify) { 799 CI.Offset = EltOffset0 / 64; 800 Paired.Offset = EltOffset1 / 64; 801 CI.UseST64 = true; 802 } 803 return true; 804 } 805 806 // Check if the new offsets fit in the reduced 8-bit range. 807 if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) { 808 if (Modify) { 809 CI.Offset = EltOffset0; 810 Paired.Offset = EltOffset1; 811 } 812 return true; 813 } 814 815 // Try to shift base address to decrease offsets. 816 uint32_t Min = std::min(EltOffset0, EltOffset1); 817 uint32_t Max = std::max(EltOffset0, EltOffset1); 818 819 const uint32_t Mask = maskTrailingOnes<uint32_t>(8) * 64; 820 if (((Max - Min) & ~Mask) == 0) { 821 if (Modify) { 822 // From the range of values we could use for BaseOff, choose the one that 823 // is aligned to the highest power of two, to maximise the chance that 824 // the same offset can be reused for other load/store pairs. 825 uint32_t BaseOff = mostAlignedValueInRange(Max - 0xff * 64, Min); 826 // Copy the low bits of the offsets, so that when we adjust them by 827 // subtracting BaseOff they will be multiples of 64. 828 BaseOff |= Min & maskTrailingOnes<uint32_t>(6); 829 CI.BaseOff = BaseOff * CI.EltSize; 830 CI.Offset = (EltOffset0 - BaseOff) / 64; 831 Paired.Offset = (EltOffset1 - BaseOff) / 64; 832 CI.UseST64 = true; 833 } 834 return true; 835 } 836 837 if (isUInt<8>(Max - Min)) { 838 if (Modify) { 839 // From the range of values we could use for BaseOff, choose the one that 840 // is aligned to the highest power of two, to maximise the chance that 841 // the same offset can be reused for other load/store pairs. 842 uint32_t BaseOff = mostAlignedValueInRange(Max - 0xff, Min); 843 CI.BaseOff = BaseOff * CI.EltSize; 844 CI.Offset = EltOffset0 - BaseOff; 845 Paired.Offset = EltOffset1 - BaseOff; 846 } 847 return true; 848 } 849 850 return false; 851 } 852 853 bool SILoadStoreOptimizer::widthsFit(const GCNSubtarget &STM, 854 const CombineInfo &CI, 855 const CombineInfo &Paired) { 856 const unsigned Width = (CI.Width + Paired.Width); 857 switch (CI.InstClass) { 858 default: 859 return (Width <= 4) && (STM.hasDwordx3LoadStores() || (Width != 3)); 860 case S_BUFFER_LOAD_IMM: 861 switch (Width) { 862 default: 863 return false; 864 case 2: 865 case 4: 866 case 8: 867 return true; 868 } 869 } 870 } 871 872 const TargetRegisterClass * 873 SILoadStoreOptimizer::getDataRegClass(const MachineInstr &MI) const { 874 if (const auto *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst)) { 875 return TRI->getRegClassForReg(*MRI, Dst->getReg()); 876 } 877 if (const auto *Src = TII->getNamedOperand(MI, AMDGPU::OpName::vdata)) { 878 return TRI->getRegClassForReg(*MRI, Src->getReg()); 879 } 880 if (const auto *Src = TII->getNamedOperand(MI, AMDGPU::OpName::data0)) { 881 return TRI->getRegClassForReg(*MRI, Src->getReg()); 882 } 883 if (const auto *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst)) { 884 return TRI->getRegClassForReg(*MRI, Dst->getReg()); 885 } 886 if (const auto *Src = TII->getNamedOperand(MI, AMDGPU::OpName::sdata)) { 887 return TRI->getRegClassForReg(*MRI, Src->getReg()); 888 } 889 return nullptr; 890 } 891 892 /// This function assumes that CI comes before Paired in a basic block. 893 bool SILoadStoreOptimizer::checkAndPrepareMerge( 894 CombineInfo &CI, CombineInfo &Paired, 895 SmallVectorImpl<MachineInstr *> &InstsToMove) { 896 897 // Check both offsets (or masks for MIMG) can be combined and fit in the 898 // reduced range. 899 if (CI.InstClass == MIMG && !dmasksCanBeCombined(CI, *TII, Paired)) 900 return false; 901 902 if (CI.InstClass != MIMG && 903 (!widthsFit(*STM, CI, Paired) || !offsetsCanBeCombined(CI, *STM, Paired))) 904 return false; 905 906 const unsigned Opc = CI.I->getOpcode(); 907 const InstClassEnum InstClass = getInstClass(Opc, *TII); 908 909 if (InstClass == UNKNOWN) { 910 return false; 911 } 912 const unsigned InstSubclass = getInstSubclass(Opc, *TII); 913 914 DenseSet<Register> RegDefsToMove; 915 DenseSet<Register> PhysRegUsesToMove; 916 addDefsUsesToList(*CI.I, RegDefsToMove, PhysRegUsesToMove); 917 918 MachineBasicBlock::iterator E = std::next(Paired.I); 919 MachineBasicBlock::iterator MBBI = std::next(CI.I); 920 MachineBasicBlock::iterator MBBE = CI.I->getParent()->end(); 921 for (; MBBI != E; ++MBBI) { 922 923 if (MBBI == MBBE) { 924 // CombineInfo::Order is a hint on the instruction ordering within the 925 // basic block. This hint suggests that CI precedes Paired, which is 926 // true most of the time. However, moveInstsAfter() processing a 927 // previous list may have changed this order in a situation when it 928 // moves an instruction which exists in some other merge list. 929 // In this case it must be dependent. 930 return false; 931 } 932 933 if ((getInstClass(MBBI->getOpcode(), *TII) != InstClass) || 934 (getInstSubclass(MBBI->getOpcode(), *TII) != InstSubclass)) { 935 // This is not a matching instruction, but we can keep looking as 936 // long as one of these conditions are met: 937 // 1. It is safe to move I down past MBBI. 938 // 2. It is safe to move MBBI down past the instruction that I will 939 // be merged into. 940 941 if (MBBI->hasUnmodeledSideEffects()) { 942 // We can't re-order this instruction with respect to other memory 943 // operations, so we fail both conditions mentioned above. 944 return false; 945 } 946 947 if (MBBI->mayLoadOrStore() && 948 (!memAccessesCanBeReordered(*CI.I, *MBBI, AA) || 949 !canMoveInstsAcrossMemOp(*MBBI, InstsToMove, AA))) { 950 // We fail condition #1, but we may still be able to satisfy condition 951 // #2. Add this instruction to the move list and then we will check 952 // if condition #2 holds once we have selected the matching instruction. 953 InstsToMove.push_back(&*MBBI); 954 addDefsUsesToList(*MBBI, RegDefsToMove, PhysRegUsesToMove); 955 continue; 956 } 957 958 // When we match I with another DS instruction we will be moving I down 959 // to the location of the matched instruction any uses of I will need to 960 // be moved down as well. 961 addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove, 962 InstsToMove); 963 continue; 964 } 965 966 // Handle a case like 967 // DS_WRITE_B32 addr, v, idx0 968 // w = DS_READ_B32 addr, idx0 969 // DS_WRITE_B32 addr, f(w), idx1 970 // where the DS_READ_B32 ends up in InstsToMove and therefore prevents 971 // merging of the two writes. 972 if (addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove, 973 InstsToMove)) 974 continue; 975 976 if (&*MBBI == &*Paired.I) { 977 // We need to go through the list of instructions that we plan to 978 // move and make sure they are all safe to move down past the merged 979 // instruction. 980 if (canMoveInstsAcrossMemOp(*MBBI, InstsToMove, AA)) { 981 982 // Call offsetsCanBeCombined with modify = true so that the offsets are 983 // correct for the new instruction. This should return true, because 984 // this function should only be called on CombineInfo objects that 985 // have already been confirmed to be mergeable. 986 if (CI.InstClass != MIMG) 987 offsetsCanBeCombined(CI, *STM, Paired, true); 988 return true; 989 } 990 return false; 991 } 992 993 // We've found a load/store that we couldn't merge for some reason. 994 // We could potentially keep looking, but we'd need to make sure that 995 // it was safe to move I and also all the instruction in InstsToMove 996 // down past this instruction. 997 // check if we can move I across MBBI and if we can move all I's users 998 if (!memAccessesCanBeReordered(*CI.I, *MBBI, AA) || 999 !canMoveInstsAcrossMemOp(*MBBI, InstsToMove, AA)) 1000 break; 1001 } 1002 return false; 1003 } 1004 1005 unsigned SILoadStoreOptimizer::read2Opcode(unsigned EltSize) const { 1006 if (STM->ldsRequiresM0Init()) 1007 return (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64; 1008 return (EltSize == 4) ? AMDGPU::DS_READ2_B32_gfx9 : AMDGPU::DS_READ2_B64_gfx9; 1009 } 1010 1011 unsigned SILoadStoreOptimizer::read2ST64Opcode(unsigned EltSize) const { 1012 if (STM->ldsRequiresM0Init()) 1013 return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32 : AMDGPU::DS_READ2ST64_B64; 1014 1015 return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32_gfx9 1016 : AMDGPU::DS_READ2ST64_B64_gfx9; 1017 } 1018 1019 MachineBasicBlock::iterator 1020 SILoadStoreOptimizer::mergeRead2Pair(CombineInfo &CI, CombineInfo &Paired, 1021 const SmallVectorImpl<MachineInstr *> &InstsToMove) { 1022 MachineBasicBlock *MBB = CI.I->getParent(); 1023 1024 // Be careful, since the addresses could be subregisters themselves in weird 1025 // cases, like vectors of pointers. 1026 const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr); 1027 1028 const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst); 1029 const auto *Dest1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdst); 1030 1031 unsigned NewOffset0 = CI.Offset; 1032 unsigned NewOffset1 = Paired.Offset; 1033 unsigned Opc = 1034 CI.UseST64 ? read2ST64Opcode(CI.EltSize) : read2Opcode(CI.EltSize); 1035 1036 unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1; 1037 unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3; 1038 1039 if (NewOffset0 > NewOffset1) { 1040 // Canonicalize the merged instruction so the smaller offset comes first. 1041 std::swap(NewOffset0, NewOffset1); 1042 std::swap(SubRegIdx0, SubRegIdx1); 1043 } 1044 1045 assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) && 1046 (NewOffset0 != NewOffset1) && "Computed offset doesn't fit"); 1047 1048 const MCInstrDesc &Read2Desc = TII->get(Opc); 1049 1050 const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired); 1051 Register DestReg = MRI->createVirtualRegister(SuperRC); 1052 1053 DebugLoc DL = CI.I->getDebugLoc(); 1054 1055 Register BaseReg = AddrReg->getReg(); 1056 unsigned BaseSubReg = AddrReg->getSubReg(); 1057 unsigned BaseRegFlags = 0; 1058 if (CI.BaseOff) { 1059 Register ImmReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 1060 BuildMI(*MBB, Paired.I, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg) 1061 .addImm(CI.BaseOff); 1062 1063 BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); 1064 BaseRegFlags = RegState::Kill; 1065 1066 TII->getAddNoCarry(*MBB, Paired.I, DL, BaseReg) 1067 .addReg(ImmReg) 1068 .addReg(AddrReg->getReg(), 0, BaseSubReg) 1069 .addImm(0); // clamp bit 1070 BaseSubReg = 0; 1071 } 1072 1073 MachineInstrBuilder Read2 = 1074 BuildMI(*MBB, Paired.I, DL, Read2Desc, DestReg) 1075 .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr 1076 .addImm(NewOffset0) // offset0 1077 .addImm(NewOffset1) // offset1 1078 .addImm(0) // gds 1079 .cloneMergedMemRefs({&*CI.I, &*Paired.I}); 1080 1081 (void)Read2; 1082 1083 const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY); 1084 1085 // Copy to the old destination registers. 1086 BuildMI(*MBB, Paired.I, DL, CopyDesc) 1087 .add(*Dest0) // Copy to same destination including flags and sub reg. 1088 .addReg(DestReg, 0, SubRegIdx0); 1089 MachineInstr *Copy1 = BuildMI(*MBB, Paired.I, DL, CopyDesc) 1090 .add(*Dest1) 1091 .addReg(DestReg, RegState::Kill, SubRegIdx1); 1092 1093 moveInstsAfter(Copy1, InstsToMove); 1094 1095 CI.I->eraseFromParent(); 1096 Paired.I->eraseFromParent(); 1097 1098 LLVM_DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n'); 1099 return Read2; 1100 } 1101 1102 unsigned SILoadStoreOptimizer::write2Opcode(unsigned EltSize) const { 1103 if (STM->ldsRequiresM0Init()) 1104 return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32 : AMDGPU::DS_WRITE2_B64; 1105 return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32_gfx9 1106 : AMDGPU::DS_WRITE2_B64_gfx9; 1107 } 1108 1109 unsigned SILoadStoreOptimizer::write2ST64Opcode(unsigned EltSize) const { 1110 if (STM->ldsRequiresM0Init()) 1111 return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32 1112 : AMDGPU::DS_WRITE2ST64_B64; 1113 1114 return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32_gfx9 1115 : AMDGPU::DS_WRITE2ST64_B64_gfx9; 1116 } 1117 1118 MachineBasicBlock::iterator 1119 SILoadStoreOptimizer::mergeWrite2Pair(CombineInfo &CI, CombineInfo &Paired, 1120 const SmallVectorImpl<MachineInstr *> &InstsToMove) { 1121 MachineBasicBlock *MBB = CI.I->getParent(); 1122 1123 // Be sure to use .addOperand(), and not .addReg() with these. We want to be 1124 // sure we preserve the subregister index and any register flags set on them. 1125 const MachineOperand *AddrReg = 1126 TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr); 1127 const MachineOperand *Data0 = 1128 TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0); 1129 const MachineOperand *Data1 = 1130 TII->getNamedOperand(*Paired.I, AMDGPU::OpName::data0); 1131 1132 unsigned NewOffset0 = CI.Offset; 1133 unsigned NewOffset1 = Paired.Offset; 1134 unsigned Opc = 1135 CI.UseST64 ? write2ST64Opcode(CI.EltSize) : write2Opcode(CI.EltSize); 1136 1137 if (NewOffset0 > NewOffset1) { 1138 // Canonicalize the merged instruction so the smaller offset comes first. 1139 std::swap(NewOffset0, NewOffset1); 1140 std::swap(Data0, Data1); 1141 } 1142 1143 assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) && 1144 (NewOffset0 != NewOffset1) && "Computed offset doesn't fit"); 1145 1146 const MCInstrDesc &Write2Desc = TII->get(Opc); 1147 DebugLoc DL = CI.I->getDebugLoc(); 1148 1149 Register BaseReg = AddrReg->getReg(); 1150 unsigned BaseSubReg = AddrReg->getSubReg(); 1151 unsigned BaseRegFlags = 0; 1152 if (CI.BaseOff) { 1153 Register ImmReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 1154 BuildMI(*MBB, Paired.I, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg) 1155 .addImm(CI.BaseOff); 1156 1157 BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); 1158 BaseRegFlags = RegState::Kill; 1159 1160 TII->getAddNoCarry(*MBB, Paired.I, DL, BaseReg) 1161 .addReg(ImmReg) 1162 .addReg(AddrReg->getReg(), 0, BaseSubReg) 1163 .addImm(0); // clamp bit 1164 BaseSubReg = 0; 1165 } 1166 1167 MachineInstrBuilder Write2 = 1168 BuildMI(*MBB, Paired.I, DL, Write2Desc) 1169 .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr 1170 .add(*Data0) // data0 1171 .add(*Data1) // data1 1172 .addImm(NewOffset0) // offset0 1173 .addImm(NewOffset1) // offset1 1174 .addImm(0) // gds 1175 .cloneMergedMemRefs({&*CI.I, &*Paired.I}); 1176 1177 moveInstsAfter(Write2, InstsToMove); 1178 1179 CI.I->eraseFromParent(); 1180 Paired.I->eraseFromParent(); 1181 1182 LLVM_DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n'); 1183 return Write2; 1184 } 1185 1186 MachineBasicBlock::iterator 1187 SILoadStoreOptimizer::mergeImagePair(CombineInfo &CI, CombineInfo &Paired, 1188 const SmallVectorImpl<MachineInstr *> &InstsToMove) { 1189 MachineBasicBlock *MBB = CI.I->getParent(); 1190 DebugLoc DL = CI.I->getDebugLoc(); 1191 const unsigned Opcode = getNewOpcode(CI, Paired); 1192 1193 const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired); 1194 1195 Register DestReg = MRI->createVirtualRegister(SuperRC); 1196 unsigned MergedDMask = CI.DMask | Paired.DMask; 1197 unsigned DMaskIdx = 1198 AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AMDGPU::OpName::dmask); 1199 1200 auto MIB = BuildMI(*MBB, Paired.I, DL, TII->get(Opcode), DestReg); 1201 for (unsigned I = 1, E = (*CI.I).getNumOperands(); I != E; ++I) { 1202 if (I == DMaskIdx) 1203 MIB.addImm(MergedDMask); 1204 else 1205 MIB.add((*CI.I).getOperand(I)); 1206 } 1207 1208 // It shouldn't be possible to get this far if the two instructions 1209 // don't have a single memoperand, because MachineInstr::mayAlias() 1210 // will return true if this is the case. 1211 assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand()); 1212 1213 const MachineMemOperand *MMOa = *CI.I->memoperands_begin(); 1214 const MachineMemOperand *MMOb = *Paired.I->memoperands_begin(); 1215 1216 MachineInstr *New = MIB.addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb)); 1217 1218 unsigned SubRegIdx0, SubRegIdx1; 1219 std::tie(SubRegIdx0, SubRegIdx1) = getSubRegIdxs(CI, Paired); 1220 1221 // Copy to the old destination registers. 1222 const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY); 1223 const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata); 1224 const auto *Dest1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdata); 1225 1226 BuildMI(*MBB, Paired.I, DL, CopyDesc) 1227 .add(*Dest0) // Copy to same destination including flags and sub reg. 1228 .addReg(DestReg, 0, SubRegIdx0); 1229 MachineInstr *Copy1 = BuildMI(*MBB, Paired.I, DL, CopyDesc) 1230 .add(*Dest1) 1231 .addReg(DestReg, RegState::Kill, SubRegIdx1); 1232 1233 moveInstsAfter(Copy1, InstsToMove); 1234 1235 CI.I->eraseFromParent(); 1236 Paired.I->eraseFromParent(); 1237 return New; 1238 } 1239 1240 MachineBasicBlock::iterator SILoadStoreOptimizer::mergeSBufferLoadImmPair( 1241 CombineInfo &CI, CombineInfo &Paired, 1242 const SmallVectorImpl<MachineInstr *> &InstsToMove) { 1243 MachineBasicBlock *MBB = CI.I->getParent(); 1244 DebugLoc DL = CI.I->getDebugLoc(); 1245 const unsigned Opcode = getNewOpcode(CI, Paired); 1246 1247 const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired); 1248 1249 Register DestReg = MRI->createVirtualRegister(SuperRC); 1250 unsigned MergedOffset = std::min(CI.Offset, Paired.Offset); 1251 1252 // It shouldn't be possible to get this far if the two instructions 1253 // don't have a single memoperand, because MachineInstr::mayAlias() 1254 // will return true if this is the case. 1255 assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand()); 1256 1257 const MachineMemOperand *MMOa = *CI.I->memoperands_begin(); 1258 const MachineMemOperand *MMOb = *Paired.I->memoperands_begin(); 1259 1260 MachineInstr *New = 1261 BuildMI(*MBB, Paired.I, DL, TII->get(Opcode), DestReg) 1262 .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::sbase)) 1263 .addImm(MergedOffset) // offset 1264 .addImm(CI.CPol) // cpol 1265 .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb)); 1266 1267 std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI, Paired); 1268 const unsigned SubRegIdx0 = std::get<0>(SubRegIdx); 1269 const unsigned SubRegIdx1 = std::get<1>(SubRegIdx); 1270 1271 // Copy to the old destination registers. 1272 const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY); 1273 const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::sdst); 1274 const auto *Dest1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::sdst); 1275 1276 BuildMI(*MBB, Paired.I, DL, CopyDesc) 1277 .add(*Dest0) // Copy to same destination including flags and sub reg. 1278 .addReg(DestReg, 0, SubRegIdx0); 1279 MachineInstr *Copy1 = BuildMI(*MBB, Paired.I, DL, CopyDesc) 1280 .add(*Dest1) 1281 .addReg(DestReg, RegState::Kill, SubRegIdx1); 1282 1283 moveInstsAfter(Copy1, InstsToMove); 1284 1285 CI.I->eraseFromParent(); 1286 Paired.I->eraseFromParent(); 1287 return New; 1288 } 1289 1290 MachineBasicBlock::iterator SILoadStoreOptimizer::mergeBufferLoadPair( 1291 CombineInfo &CI, CombineInfo &Paired, 1292 const SmallVectorImpl<MachineInstr *> &InstsToMove) { 1293 MachineBasicBlock *MBB = CI.I->getParent(); 1294 DebugLoc DL = CI.I->getDebugLoc(); 1295 1296 const unsigned Opcode = getNewOpcode(CI, Paired); 1297 1298 const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired); 1299 1300 // Copy to the new source register. 1301 Register DestReg = MRI->createVirtualRegister(SuperRC); 1302 unsigned MergedOffset = std::min(CI.Offset, Paired.Offset); 1303 1304 auto MIB = BuildMI(*MBB, Paired.I, DL, TII->get(Opcode), DestReg); 1305 1306 AddressRegs Regs = getRegs(Opcode, *TII); 1307 1308 if (Regs.VAddr) 1309 MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr)); 1310 1311 // It shouldn't be possible to get this far if the two instructions 1312 // don't have a single memoperand, because MachineInstr::mayAlias() 1313 // will return true if this is the case. 1314 assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand()); 1315 1316 const MachineMemOperand *MMOa = *CI.I->memoperands_begin(); 1317 const MachineMemOperand *MMOb = *Paired.I->memoperands_begin(); 1318 1319 MachineInstr *New = 1320 MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc)) 1321 .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset)) 1322 .addImm(MergedOffset) // offset 1323 .addImm(CI.CPol) // cpol 1324 .addImm(0) // tfe 1325 .addImm(0) // swz 1326 .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb)); 1327 1328 std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI, Paired); 1329 const unsigned SubRegIdx0 = std::get<0>(SubRegIdx); 1330 const unsigned SubRegIdx1 = std::get<1>(SubRegIdx); 1331 1332 // Copy to the old destination registers. 1333 const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY); 1334 const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata); 1335 const auto *Dest1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdata); 1336 1337 BuildMI(*MBB, Paired.I, DL, CopyDesc) 1338 .add(*Dest0) // Copy to same destination including flags and sub reg. 1339 .addReg(DestReg, 0, SubRegIdx0); 1340 MachineInstr *Copy1 = BuildMI(*MBB, Paired.I, DL, CopyDesc) 1341 .add(*Dest1) 1342 .addReg(DestReg, RegState::Kill, SubRegIdx1); 1343 1344 moveInstsAfter(Copy1, InstsToMove); 1345 1346 CI.I->eraseFromParent(); 1347 Paired.I->eraseFromParent(); 1348 return New; 1349 } 1350 1351 MachineBasicBlock::iterator SILoadStoreOptimizer::mergeTBufferLoadPair( 1352 CombineInfo &CI, CombineInfo &Paired, 1353 const SmallVectorImpl<MachineInstr *> &InstsToMove) { 1354 MachineBasicBlock *MBB = CI.I->getParent(); 1355 DebugLoc DL = CI.I->getDebugLoc(); 1356 1357 const unsigned Opcode = getNewOpcode(CI, Paired); 1358 1359 const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired); 1360 1361 // Copy to the new source register. 1362 Register DestReg = MRI->createVirtualRegister(SuperRC); 1363 unsigned MergedOffset = std::min(CI.Offset, Paired.Offset); 1364 1365 auto MIB = BuildMI(*MBB, Paired.I, DL, TII->get(Opcode), DestReg); 1366 1367 AddressRegs Regs = getRegs(Opcode, *TII); 1368 1369 if (Regs.VAddr) 1370 MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr)); 1371 1372 unsigned JoinedFormat = 1373 getBufferFormatWithCompCount(CI.Format, CI.Width + Paired.Width, *STM); 1374 1375 // It shouldn't be possible to get this far if the two instructions 1376 // don't have a single memoperand, because MachineInstr::mayAlias() 1377 // will return true if this is the case. 1378 assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand()); 1379 1380 const MachineMemOperand *MMOa = *CI.I->memoperands_begin(); 1381 const MachineMemOperand *MMOb = *Paired.I->memoperands_begin(); 1382 1383 MachineInstr *New = 1384 MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc)) 1385 .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset)) 1386 .addImm(MergedOffset) // offset 1387 .addImm(JoinedFormat) // format 1388 .addImm(CI.CPol) // cpol 1389 .addImm(0) // tfe 1390 .addImm(0) // swz 1391 .addMemOperand( 1392 combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb)); 1393 1394 std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI, Paired); 1395 const unsigned SubRegIdx0 = std::get<0>(SubRegIdx); 1396 const unsigned SubRegIdx1 = std::get<1>(SubRegIdx); 1397 1398 // Copy to the old destination registers. 1399 const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY); 1400 const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata); 1401 const auto *Dest1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdata); 1402 1403 BuildMI(*MBB, Paired.I, DL, CopyDesc) 1404 .add(*Dest0) // Copy to same destination including flags and sub reg. 1405 .addReg(DestReg, 0, SubRegIdx0); 1406 MachineInstr *Copy1 = BuildMI(*MBB, Paired.I, DL, CopyDesc) 1407 .add(*Dest1) 1408 .addReg(DestReg, RegState::Kill, SubRegIdx1); 1409 1410 moveInstsAfter(Copy1, InstsToMove); 1411 1412 CI.I->eraseFromParent(); 1413 Paired.I->eraseFromParent(); 1414 return New; 1415 } 1416 1417 MachineBasicBlock::iterator SILoadStoreOptimizer::mergeTBufferStorePair( 1418 CombineInfo &CI, CombineInfo &Paired, 1419 const SmallVectorImpl<MachineInstr *> &InstsToMove) { 1420 MachineBasicBlock *MBB = CI.I->getParent(); 1421 DebugLoc DL = CI.I->getDebugLoc(); 1422 1423 const unsigned Opcode = getNewOpcode(CI, Paired); 1424 1425 std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI, Paired); 1426 const unsigned SubRegIdx0 = std::get<0>(SubRegIdx); 1427 const unsigned SubRegIdx1 = std::get<1>(SubRegIdx); 1428 1429 // Copy to the new source register. 1430 const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired); 1431 Register SrcReg = MRI->createVirtualRegister(SuperRC); 1432 1433 const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata); 1434 const auto *Src1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdata); 1435 1436 BuildMI(*MBB, Paired.I, DL, TII->get(AMDGPU::REG_SEQUENCE), SrcReg) 1437 .add(*Src0) 1438 .addImm(SubRegIdx0) 1439 .add(*Src1) 1440 .addImm(SubRegIdx1); 1441 1442 auto MIB = BuildMI(*MBB, Paired.I, DL, TII->get(Opcode)) 1443 .addReg(SrcReg, RegState::Kill); 1444 1445 AddressRegs Regs = getRegs(Opcode, *TII); 1446 1447 if (Regs.VAddr) 1448 MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr)); 1449 1450 unsigned JoinedFormat = 1451 getBufferFormatWithCompCount(CI.Format, CI.Width + Paired.Width, *STM); 1452 1453 // It shouldn't be possible to get this far if the two instructions 1454 // don't have a single memoperand, because MachineInstr::mayAlias() 1455 // will return true if this is the case. 1456 assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand()); 1457 1458 const MachineMemOperand *MMOa = *CI.I->memoperands_begin(); 1459 const MachineMemOperand *MMOb = *Paired.I->memoperands_begin(); 1460 1461 MachineInstr *New = 1462 MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc)) 1463 .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset)) 1464 .addImm(std::min(CI.Offset, Paired.Offset)) // offset 1465 .addImm(JoinedFormat) // format 1466 .addImm(CI.CPol) // cpol 1467 .addImm(0) // tfe 1468 .addImm(0) // swz 1469 .addMemOperand( 1470 combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb)); 1471 1472 moveInstsAfter(MIB, InstsToMove); 1473 1474 CI.I->eraseFromParent(); 1475 Paired.I->eraseFromParent(); 1476 return New; 1477 } 1478 1479 unsigned SILoadStoreOptimizer::getNewOpcode(const CombineInfo &CI, 1480 const CombineInfo &Paired) { 1481 const unsigned Width = CI.Width + Paired.Width; 1482 1483 switch (CI.InstClass) { 1484 default: 1485 assert(CI.InstClass == BUFFER_LOAD || CI.InstClass == BUFFER_STORE); 1486 // FIXME: Handle d16 correctly 1487 return AMDGPU::getMUBUFOpcode(AMDGPU::getMUBUFBaseOpcode(CI.I->getOpcode()), 1488 Width); 1489 case TBUFFER_LOAD: 1490 case TBUFFER_STORE: 1491 return AMDGPU::getMTBUFOpcode(AMDGPU::getMTBUFBaseOpcode(CI.I->getOpcode()), 1492 Width); 1493 1494 case UNKNOWN: 1495 llvm_unreachable("Unknown instruction class"); 1496 case S_BUFFER_LOAD_IMM: 1497 switch (Width) { 1498 default: 1499 return 0; 1500 case 2: 1501 return AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM; 1502 case 4: 1503 return AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM; 1504 case 8: 1505 return AMDGPU::S_BUFFER_LOAD_DWORDX8_IMM; 1506 } 1507 case MIMG: 1508 assert((countPopulation(CI.DMask | Paired.DMask) == Width) && 1509 "No overlaps"); 1510 return AMDGPU::getMaskedMIMGOp(CI.I->getOpcode(), Width); 1511 } 1512 } 1513 1514 std::pair<unsigned, unsigned> 1515 SILoadStoreOptimizer::getSubRegIdxs(const CombineInfo &CI, 1516 const CombineInfo &Paired) { 1517 bool ReverseOrder; 1518 if (CI.InstClass == MIMG) { 1519 assert( 1520 (countPopulation(CI.DMask | Paired.DMask) == CI.Width + Paired.Width) && 1521 "No overlaps"); 1522 ReverseOrder = CI.DMask > Paired.DMask; 1523 } else { 1524 ReverseOrder = CI.Offset > Paired.Offset; 1525 } 1526 1527 unsigned Idx0; 1528 unsigned Idx1; 1529 1530 static const unsigned Idxs[5][4] = { 1531 {AMDGPU::sub0, AMDGPU::sub0_sub1, AMDGPU::sub0_sub1_sub2, AMDGPU::sub0_sub1_sub2_sub3}, 1532 {AMDGPU::sub1, AMDGPU::sub1_sub2, AMDGPU::sub1_sub2_sub3, AMDGPU::sub1_sub2_sub3_sub4}, 1533 {AMDGPU::sub2, AMDGPU::sub2_sub3, AMDGPU::sub2_sub3_sub4, AMDGPU::sub2_sub3_sub4_sub5}, 1534 {AMDGPU::sub3, AMDGPU::sub3_sub4, AMDGPU::sub3_sub4_sub5, AMDGPU::sub3_sub4_sub5_sub6}, 1535 {AMDGPU::sub4, AMDGPU::sub4_sub5, AMDGPU::sub4_sub5_sub6, AMDGPU::sub4_sub5_sub6_sub7}, 1536 }; 1537 1538 assert(CI.Width >= 1 && CI.Width <= 4); 1539 assert(Paired.Width >= 1 && Paired.Width <= 4); 1540 1541 if (ReverseOrder) { 1542 Idx1 = Idxs[0][Paired.Width - 1]; 1543 Idx0 = Idxs[Paired.Width][CI.Width - 1]; 1544 } else { 1545 Idx0 = Idxs[0][CI.Width - 1]; 1546 Idx1 = Idxs[CI.Width][Paired.Width - 1]; 1547 } 1548 1549 return std::make_pair(Idx0, Idx1); 1550 } 1551 1552 const TargetRegisterClass * 1553 SILoadStoreOptimizer::getTargetRegisterClass(const CombineInfo &CI, 1554 const CombineInfo &Paired) { 1555 if (CI.InstClass == S_BUFFER_LOAD_IMM) { 1556 switch (CI.Width + Paired.Width) { 1557 default: 1558 return nullptr; 1559 case 2: 1560 return &AMDGPU::SReg_64_XEXECRegClass; 1561 case 4: 1562 return &AMDGPU::SGPR_128RegClass; 1563 case 8: 1564 return &AMDGPU::SGPR_256RegClass; 1565 case 16: 1566 return &AMDGPU::SGPR_512RegClass; 1567 } 1568 } 1569 1570 unsigned BitWidth = 32 * (CI.Width + Paired.Width); 1571 return TRI->isAGPRClass(getDataRegClass(*CI.I)) 1572 ? TRI->getAGPRClassForBitWidth(BitWidth) 1573 : TRI->getVGPRClassForBitWidth(BitWidth); 1574 } 1575 1576 MachineBasicBlock::iterator SILoadStoreOptimizer::mergeBufferStorePair( 1577 CombineInfo &CI, CombineInfo &Paired, 1578 const SmallVectorImpl<MachineInstr *> &InstsToMove) { 1579 MachineBasicBlock *MBB = CI.I->getParent(); 1580 DebugLoc DL = CI.I->getDebugLoc(); 1581 1582 const unsigned Opcode = getNewOpcode(CI, Paired); 1583 1584 std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI, Paired); 1585 const unsigned SubRegIdx0 = std::get<0>(SubRegIdx); 1586 const unsigned SubRegIdx1 = std::get<1>(SubRegIdx); 1587 1588 // Copy to the new source register. 1589 const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired); 1590 Register SrcReg = MRI->createVirtualRegister(SuperRC); 1591 1592 const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata); 1593 const auto *Src1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdata); 1594 1595 BuildMI(*MBB, Paired.I, DL, TII->get(AMDGPU::REG_SEQUENCE), SrcReg) 1596 .add(*Src0) 1597 .addImm(SubRegIdx0) 1598 .add(*Src1) 1599 .addImm(SubRegIdx1); 1600 1601 auto MIB = BuildMI(*MBB, Paired.I, DL, TII->get(Opcode)) 1602 .addReg(SrcReg, RegState::Kill); 1603 1604 AddressRegs Regs = getRegs(Opcode, *TII); 1605 1606 if (Regs.VAddr) 1607 MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr)); 1608 1609 1610 // It shouldn't be possible to get this far if the two instructions 1611 // don't have a single memoperand, because MachineInstr::mayAlias() 1612 // will return true if this is the case. 1613 assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand()); 1614 1615 const MachineMemOperand *MMOa = *CI.I->memoperands_begin(); 1616 const MachineMemOperand *MMOb = *Paired.I->memoperands_begin(); 1617 1618 MachineInstr *New = 1619 MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc)) 1620 .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset)) 1621 .addImm(std::min(CI.Offset, Paired.Offset)) // offset 1622 .addImm(CI.CPol) // cpol 1623 .addImm(0) // tfe 1624 .addImm(0) // swz 1625 .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb)); 1626 1627 moveInstsAfter(MIB, InstsToMove); 1628 1629 CI.I->eraseFromParent(); 1630 Paired.I->eraseFromParent(); 1631 return New; 1632 } 1633 1634 MachineOperand 1635 SILoadStoreOptimizer::createRegOrImm(int32_t Val, MachineInstr &MI) const { 1636 APInt V(32, Val, true); 1637 if (TII->isInlineConstant(V)) 1638 return MachineOperand::CreateImm(Val); 1639 1640 Register Reg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 1641 MachineInstr *Mov = 1642 BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(), 1643 TII->get(AMDGPU::S_MOV_B32), Reg) 1644 .addImm(Val); 1645 (void)Mov; 1646 LLVM_DEBUG(dbgs() << " "; Mov->dump()); 1647 return MachineOperand::CreateReg(Reg, false); 1648 } 1649 1650 // Compute base address using Addr and return the final register. 1651 Register SILoadStoreOptimizer::computeBase(MachineInstr &MI, 1652 const MemAddress &Addr) const { 1653 MachineBasicBlock *MBB = MI.getParent(); 1654 MachineBasicBlock::iterator MBBI = MI.getIterator(); 1655 DebugLoc DL = MI.getDebugLoc(); 1656 1657 assert((TRI->getRegSizeInBits(Addr.Base.LoReg, *MRI) == 32 || 1658 Addr.Base.LoSubReg) && 1659 "Expected 32-bit Base-Register-Low!!"); 1660 1661 assert((TRI->getRegSizeInBits(Addr.Base.HiReg, *MRI) == 32 || 1662 Addr.Base.HiSubReg) && 1663 "Expected 32-bit Base-Register-Hi!!"); 1664 1665 LLVM_DEBUG(dbgs() << " Re-Computed Anchor-Base:\n"); 1666 MachineOperand OffsetLo = createRegOrImm(static_cast<int32_t>(Addr.Offset), MI); 1667 MachineOperand OffsetHi = 1668 createRegOrImm(static_cast<int32_t>(Addr.Offset >> 32), MI); 1669 1670 const auto *CarryRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 1671 Register CarryReg = MRI->createVirtualRegister(CarryRC); 1672 Register DeadCarryReg = MRI->createVirtualRegister(CarryRC); 1673 1674 Register DestSub0 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); 1675 Register DestSub1 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); 1676 MachineInstr *LoHalf = 1677 BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADD_CO_U32_e64), DestSub0) 1678 .addReg(CarryReg, RegState::Define) 1679 .addReg(Addr.Base.LoReg, 0, Addr.Base.LoSubReg) 1680 .add(OffsetLo) 1681 .addImm(0); // clamp bit 1682 (void)LoHalf; 1683 LLVM_DEBUG(dbgs() << " "; LoHalf->dump();); 1684 1685 MachineInstr *HiHalf = 1686 BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADDC_U32_e64), DestSub1) 1687 .addReg(DeadCarryReg, RegState::Define | RegState::Dead) 1688 .addReg(Addr.Base.HiReg, 0, Addr.Base.HiSubReg) 1689 .add(OffsetHi) 1690 .addReg(CarryReg, RegState::Kill) 1691 .addImm(0); // clamp bit 1692 (void)HiHalf; 1693 LLVM_DEBUG(dbgs() << " "; HiHalf->dump();); 1694 1695 Register FullDestReg = MRI->createVirtualRegister(TRI->getVGPR64Class()); 1696 MachineInstr *FullBase = 1697 BuildMI(*MBB, MBBI, DL, TII->get(TargetOpcode::REG_SEQUENCE), FullDestReg) 1698 .addReg(DestSub0) 1699 .addImm(AMDGPU::sub0) 1700 .addReg(DestSub1) 1701 .addImm(AMDGPU::sub1); 1702 (void)FullBase; 1703 LLVM_DEBUG(dbgs() << " "; FullBase->dump(); dbgs() << "\n";); 1704 1705 return FullDestReg; 1706 } 1707 1708 // Update base and offset with the NewBase and NewOffset in MI. 1709 void SILoadStoreOptimizer::updateBaseAndOffset(MachineInstr &MI, 1710 Register NewBase, 1711 int32_t NewOffset) const { 1712 auto Base = TII->getNamedOperand(MI, AMDGPU::OpName::vaddr); 1713 Base->setReg(NewBase); 1714 Base->setIsKill(false); 1715 TII->getNamedOperand(MI, AMDGPU::OpName::offset)->setImm(NewOffset); 1716 } 1717 1718 Optional<int32_t> 1719 SILoadStoreOptimizer::extractConstOffset(const MachineOperand &Op) const { 1720 if (Op.isImm()) 1721 return Op.getImm(); 1722 1723 if (!Op.isReg()) 1724 return None; 1725 1726 MachineInstr *Def = MRI->getUniqueVRegDef(Op.getReg()); 1727 if (!Def || Def->getOpcode() != AMDGPU::S_MOV_B32 || 1728 !Def->getOperand(1).isImm()) 1729 return None; 1730 1731 return Def->getOperand(1).getImm(); 1732 } 1733 1734 // Analyze Base and extracts: 1735 // - 32bit base registers, subregisters 1736 // - 64bit constant offset 1737 // Expecting base computation as: 1738 // %OFFSET0:sgpr_32 = S_MOV_B32 8000 1739 // %LO:vgpr_32, %c:sreg_64_xexec = 1740 // V_ADD_CO_U32_e64 %BASE_LO:vgpr_32, %103:sgpr_32, 1741 // %HI:vgpr_32, = V_ADDC_U32_e64 %BASE_HI:vgpr_32, 0, killed %c:sreg_64_xexec 1742 // %Base:vreg_64 = 1743 // REG_SEQUENCE %LO:vgpr_32, %subreg.sub0, %HI:vgpr_32, %subreg.sub1 1744 void SILoadStoreOptimizer::processBaseWithConstOffset(const MachineOperand &Base, 1745 MemAddress &Addr) const { 1746 if (!Base.isReg()) 1747 return; 1748 1749 MachineInstr *Def = MRI->getUniqueVRegDef(Base.getReg()); 1750 if (!Def || Def->getOpcode() != AMDGPU::REG_SEQUENCE 1751 || Def->getNumOperands() != 5) 1752 return; 1753 1754 MachineOperand BaseLo = Def->getOperand(1); 1755 MachineOperand BaseHi = Def->getOperand(3); 1756 if (!BaseLo.isReg() || !BaseHi.isReg()) 1757 return; 1758 1759 MachineInstr *BaseLoDef = MRI->getUniqueVRegDef(BaseLo.getReg()); 1760 MachineInstr *BaseHiDef = MRI->getUniqueVRegDef(BaseHi.getReg()); 1761 1762 if (!BaseLoDef || BaseLoDef->getOpcode() != AMDGPU::V_ADD_CO_U32_e64 || 1763 !BaseHiDef || BaseHiDef->getOpcode() != AMDGPU::V_ADDC_U32_e64) 1764 return; 1765 1766 const auto *Src0 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src0); 1767 const auto *Src1 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src1); 1768 1769 auto Offset0P = extractConstOffset(*Src0); 1770 if (Offset0P) 1771 BaseLo = *Src1; 1772 else { 1773 if (!(Offset0P = extractConstOffset(*Src1))) 1774 return; 1775 BaseLo = *Src0; 1776 } 1777 1778 Src0 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src0); 1779 Src1 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src1); 1780 1781 if (Src0->isImm()) 1782 std::swap(Src0, Src1); 1783 1784 if (!Src1->isImm()) 1785 return; 1786 1787 uint64_t Offset1 = Src1->getImm(); 1788 BaseHi = *Src0; 1789 1790 Addr.Base.LoReg = BaseLo.getReg(); 1791 Addr.Base.HiReg = BaseHi.getReg(); 1792 Addr.Base.LoSubReg = BaseLo.getSubReg(); 1793 Addr.Base.HiSubReg = BaseHi.getSubReg(); 1794 Addr.Offset = (*Offset0P & 0x00000000ffffffff) | (Offset1 << 32); 1795 } 1796 1797 bool SILoadStoreOptimizer::promoteConstantOffsetToImm( 1798 MachineInstr &MI, 1799 MemInfoMap &Visited, 1800 SmallPtrSet<MachineInstr *, 4> &AnchorList) const { 1801 1802 if (!(MI.mayLoad() ^ MI.mayStore())) 1803 return false; 1804 1805 // TODO: Support flat and scratch. 1806 if (AMDGPU::getGlobalSaddrOp(MI.getOpcode()) < 0) 1807 return false; 1808 1809 if (MI.mayLoad() && 1810 TII->getNamedOperand(MI, AMDGPU::OpName::vdata) != nullptr) 1811 return false; 1812 1813 if (AnchorList.count(&MI)) 1814 return false; 1815 1816 LLVM_DEBUG(dbgs() << "\nTryToPromoteConstantOffsetToImmFor "; MI.dump()); 1817 1818 if (TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm()) { 1819 LLVM_DEBUG(dbgs() << " Const-offset is already promoted.\n";); 1820 return false; 1821 } 1822 1823 // Step1: Find the base-registers and a 64bit constant offset. 1824 MachineOperand &Base = *TII->getNamedOperand(MI, AMDGPU::OpName::vaddr); 1825 MemAddress MAddr; 1826 if (Visited.find(&MI) == Visited.end()) { 1827 processBaseWithConstOffset(Base, MAddr); 1828 Visited[&MI] = MAddr; 1829 } else 1830 MAddr = Visited[&MI]; 1831 1832 if (MAddr.Offset == 0) { 1833 LLVM_DEBUG(dbgs() << " Failed to extract constant-offset or there are no" 1834 " constant offsets that can be promoted.\n";); 1835 return false; 1836 } 1837 1838 LLVM_DEBUG(dbgs() << " BASE: {" << MAddr.Base.HiReg << ", " 1839 << MAddr.Base.LoReg << "} Offset: " << MAddr.Offset << "\n\n";); 1840 1841 // Step2: Traverse through MI's basic block and find an anchor(that has the 1842 // same base-registers) with the highest 13bit distance from MI's offset. 1843 // E.g. (64bit loads) 1844 // bb: 1845 // addr1 = &a + 4096; load1 = load(addr1, 0) 1846 // addr2 = &a + 6144; load2 = load(addr2, 0) 1847 // addr3 = &a + 8192; load3 = load(addr3, 0) 1848 // addr4 = &a + 10240; load4 = load(addr4, 0) 1849 // addr5 = &a + 12288; load5 = load(addr5, 0) 1850 // 1851 // Starting from the first load, the optimization will try to find a new base 1852 // from which (&a + 4096) has 13 bit distance. Both &a + 6144 and &a + 8192 1853 // has 13bit distance from &a + 4096. The heuristic considers &a + 8192 1854 // as the new-base(anchor) because of the maximum distance which can 1855 // accomodate more intermediate bases presumeably. 1856 // 1857 // Step3: move (&a + 8192) above load1. Compute and promote offsets from 1858 // (&a + 8192) for load1, load2, load4. 1859 // addr = &a + 8192 1860 // load1 = load(addr, -4096) 1861 // load2 = load(addr, -2048) 1862 // load3 = load(addr, 0) 1863 // load4 = load(addr, 2048) 1864 // addr5 = &a + 12288; load5 = load(addr5, 0) 1865 // 1866 MachineInstr *AnchorInst = nullptr; 1867 MemAddress AnchorAddr; 1868 uint32_t MaxDist = std::numeric_limits<uint32_t>::min(); 1869 SmallVector<std::pair<MachineInstr *, int64_t>, 4> InstsWCommonBase; 1870 1871 MachineBasicBlock *MBB = MI.getParent(); 1872 MachineBasicBlock::iterator E = MBB->end(); 1873 MachineBasicBlock::iterator MBBI = MI.getIterator(); 1874 ++MBBI; 1875 const SITargetLowering *TLI = 1876 static_cast<const SITargetLowering *>(STM->getTargetLowering()); 1877 1878 for ( ; MBBI != E; ++MBBI) { 1879 MachineInstr &MINext = *MBBI; 1880 // TODO: Support finding an anchor(with same base) from store addresses or 1881 // any other load addresses where the opcodes are different. 1882 if (MINext.getOpcode() != MI.getOpcode() || 1883 TII->getNamedOperand(MINext, AMDGPU::OpName::offset)->getImm()) 1884 continue; 1885 1886 const MachineOperand &BaseNext = 1887 *TII->getNamedOperand(MINext, AMDGPU::OpName::vaddr); 1888 MemAddress MAddrNext; 1889 if (Visited.find(&MINext) == Visited.end()) { 1890 processBaseWithConstOffset(BaseNext, MAddrNext); 1891 Visited[&MINext] = MAddrNext; 1892 } else 1893 MAddrNext = Visited[&MINext]; 1894 1895 if (MAddrNext.Base.LoReg != MAddr.Base.LoReg || 1896 MAddrNext.Base.HiReg != MAddr.Base.HiReg || 1897 MAddrNext.Base.LoSubReg != MAddr.Base.LoSubReg || 1898 MAddrNext.Base.HiSubReg != MAddr.Base.HiSubReg) 1899 continue; 1900 1901 InstsWCommonBase.push_back(std::make_pair(&MINext, MAddrNext.Offset)); 1902 1903 int64_t Dist = MAddr.Offset - MAddrNext.Offset; 1904 TargetLoweringBase::AddrMode AM; 1905 AM.HasBaseReg = true; 1906 AM.BaseOffs = Dist; 1907 if (TLI->isLegalGlobalAddressingMode(AM) && 1908 (uint32_t)std::abs(Dist) > MaxDist) { 1909 MaxDist = std::abs(Dist); 1910 1911 AnchorAddr = MAddrNext; 1912 AnchorInst = &MINext; 1913 } 1914 } 1915 1916 if (AnchorInst) { 1917 LLVM_DEBUG(dbgs() << " Anchor-Inst(with max-distance from Offset): "; 1918 AnchorInst->dump()); 1919 LLVM_DEBUG(dbgs() << " Anchor-Offset from BASE: " 1920 << AnchorAddr.Offset << "\n\n"); 1921 1922 // Instead of moving up, just re-compute anchor-instruction's base address. 1923 Register Base = computeBase(MI, AnchorAddr); 1924 1925 updateBaseAndOffset(MI, Base, MAddr.Offset - AnchorAddr.Offset); 1926 LLVM_DEBUG(dbgs() << " After promotion: "; MI.dump();); 1927 1928 for (auto P : InstsWCommonBase) { 1929 TargetLoweringBase::AddrMode AM; 1930 AM.HasBaseReg = true; 1931 AM.BaseOffs = P.second - AnchorAddr.Offset; 1932 1933 if (TLI->isLegalGlobalAddressingMode(AM)) { 1934 LLVM_DEBUG(dbgs() << " Promote Offset(" << P.second; 1935 dbgs() << ")"; P.first->dump()); 1936 updateBaseAndOffset(*P.first, Base, P.second - AnchorAddr.Offset); 1937 LLVM_DEBUG(dbgs() << " After promotion: "; P.first->dump()); 1938 } 1939 } 1940 AnchorList.insert(AnchorInst); 1941 return true; 1942 } 1943 1944 return false; 1945 } 1946 1947 void SILoadStoreOptimizer::addInstToMergeableList(const CombineInfo &CI, 1948 std::list<std::list<CombineInfo> > &MergeableInsts) const { 1949 for (std::list<CombineInfo> &AddrList : MergeableInsts) { 1950 if (AddrList.front().InstClass == CI.InstClass && 1951 AddrList.front().IsAGPR == CI.IsAGPR && 1952 AddrList.front().hasSameBaseAddress(*CI.I)) { 1953 AddrList.emplace_back(CI); 1954 return; 1955 } 1956 } 1957 1958 // Base address not found, so add a new list. 1959 MergeableInsts.emplace_back(1, CI); 1960 } 1961 1962 std::pair<MachineBasicBlock::iterator, bool> 1963 SILoadStoreOptimizer::collectMergeableInsts( 1964 MachineBasicBlock::iterator Begin, MachineBasicBlock::iterator End, 1965 MemInfoMap &Visited, SmallPtrSet<MachineInstr *, 4> &AnchorList, 1966 std::list<std::list<CombineInfo>> &MergeableInsts) const { 1967 bool Modified = false; 1968 1969 // Sort potential mergeable instructions into lists. One list per base address. 1970 unsigned Order = 0; 1971 MachineBasicBlock::iterator BlockI = Begin; 1972 for (; BlockI != End; ++BlockI) { 1973 MachineInstr &MI = *BlockI; 1974 1975 // We run this before checking if an address is mergeable, because it can produce 1976 // better code even if the instructions aren't mergeable. 1977 if (promoteConstantOffsetToImm(MI, Visited, AnchorList)) 1978 Modified = true; 1979 1980 // Don't combine if volatile. We also won't be able to merge across this, so 1981 // break the search. We can look after this barrier for separate merges. 1982 if (MI.hasOrderedMemoryRef()) { 1983 LLVM_DEBUG(dbgs() << "Breaking search on memory fence: " << MI); 1984 1985 // Search will resume after this instruction in a separate merge list. 1986 ++BlockI; 1987 break; 1988 } 1989 1990 const InstClassEnum InstClass = getInstClass(MI.getOpcode(), *TII); 1991 if (InstClass == UNKNOWN) 1992 continue; 1993 1994 // Do not merge VMEM buffer instructions with "swizzled" bit set. 1995 int Swizzled = 1996 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::swz); 1997 if (Swizzled != -1 && MI.getOperand(Swizzled).getImm()) 1998 continue; 1999 2000 CombineInfo CI; 2001 CI.setMI(MI, *this); 2002 CI.Order = Order++; 2003 2004 if (!CI.hasMergeableAddress(*MRI)) 2005 continue; 2006 2007 if (CI.InstClass == DS_WRITE && CI.IsAGPR) { 2008 // FIXME: nothing is illegal in a ds_write2 opcode with two AGPR data 2009 // operands. However we are reporting that ds_write2 shall have 2010 // only VGPR data so that machine copy propagation does not 2011 // create an illegal instruction with a VGPR and AGPR sources. 2012 // Consequenctially if we create such instruction the verifier 2013 // will complain. 2014 continue; 2015 } 2016 2017 LLVM_DEBUG(dbgs() << "Mergeable: " << MI); 2018 2019 addInstToMergeableList(CI, MergeableInsts); 2020 } 2021 2022 // At this point we have lists of Mergeable instructions. 2023 // 2024 // Part 2: Sort lists by offset and then for each CombineInfo object in the 2025 // list try to find an instruction that can be merged with I. If an instruction 2026 // is found, it is stored in the Paired field. If no instructions are found, then 2027 // the CombineInfo object is deleted from the list. 2028 2029 for (std::list<std::list<CombineInfo>>::iterator I = MergeableInsts.begin(), 2030 E = MergeableInsts.end(); I != E;) { 2031 2032 std::list<CombineInfo> &MergeList = *I; 2033 if (MergeList.size() <= 1) { 2034 // This means we have found only one instruction with a given address 2035 // that can be merged, and we need at least 2 instructions to do a merge, 2036 // so this list can be discarded. 2037 I = MergeableInsts.erase(I); 2038 continue; 2039 } 2040 2041 // Sort the lists by offsets, this way mergeable instructions will be 2042 // adjacent to each other in the list, which will make it easier to find 2043 // matches. 2044 MergeList.sort( 2045 [] (const CombineInfo &A, const CombineInfo &B) { 2046 return A.Offset < B.Offset; 2047 }); 2048 ++I; 2049 } 2050 2051 return std::make_pair(BlockI, Modified); 2052 } 2053 2054 // Scan through looking for adjacent LDS operations with constant offsets from 2055 // the same base register. We rely on the scheduler to do the hard work of 2056 // clustering nearby loads, and assume these are all adjacent. 2057 bool SILoadStoreOptimizer::optimizeBlock( 2058 std::list<std::list<CombineInfo> > &MergeableInsts) { 2059 bool Modified = false; 2060 2061 for (std::list<std::list<CombineInfo>>::iterator I = MergeableInsts.begin(), 2062 E = MergeableInsts.end(); I != E;) { 2063 std::list<CombineInfo> &MergeList = *I; 2064 2065 bool OptimizeListAgain = false; 2066 if (!optimizeInstsWithSameBaseAddr(MergeList, OptimizeListAgain)) { 2067 // We weren't able to make any changes, so delete the list so we don't 2068 // process the same instructions the next time we try to optimize this 2069 // block. 2070 I = MergeableInsts.erase(I); 2071 continue; 2072 } 2073 2074 Modified = true; 2075 2076 // We made changes, but also determined that there were no more optimization 2077 // opportunities, so we don't need to reprocess the list 2078 if (!OptimizeListAgain) { 2079 I = MergeableInsts.erase(I); 2080 continue; 2081 } 2082 OptimizeAgain = true; 2083 } 2084 return Modified; 2085 } 2086 2087 bool 2088 SILoadStoreOptimizer::optimizeInstsWithSameBaseAddr( 2089 std::list<CombineInfo> &MergeList, 2090 bool &OptimizeListAgain) { 2091 if (MergeList.empty()) 2092 return false; 2093 2094 bool Modified = false; 2095 2096 for (auto I = MergeList.begin(), Next = std::next(I); Next != MergeList.end(); 2097 Next = std::next(I)) { 2098 2099 auto First = I; 2100 auto Second = Next; 2101 2102 if ((*First).Order > (*Second).Order) 2103 std::swap(First, Second); 2104 CombineInfo &CI = *First; 2105 CombineInfo &Paired = *Second; 2106 2107 SmallVector<MachineInstr *, 8> InstsToMove; 2108 if (!checkAndPrepareMerge(CI, Paired, InstsToMove)) { 2109 ++I; 2110 continue; 2111 } 2112 2113 Modified = true; 2114 2115 LLVM_DEBUG(dbgs() << "Merging: " << *CI.I << " with: " << *Paired.I); 2116 2117 switch (CI.InstClass) { 2118 default: 2119 llvm_unreachable("unknown InstClass"); 2120 break; 2121 case DS_READ: { 2122 MachineBasicBlock::iterator NewMI = 2123 mergeRead2Pair(CI, Paired, InstsToMove); 2124 CI.setMI(NewMI, *this); 2125 break; 2126 } 2127 case DS_WRITE: { 2128 MachineBasicBlock::iterator NewMI = 2129 mergeWrite2Pair(CI, Paired, InstsToMove); 2130 CI.setMI(NewMI, *this); 2131 break; 2132 } 2133 case S_BUFFER_LOAD_IMM: { 2134 MachineBasicBlock::iterator NewMI = 2135 mergeSBufferLoadImmPair(CI, Paired, InstsToMove); 2136 CI.setMI(NewMI, *this); 2137 OptimizeListAgain |= (CI.Width + Paired.Width) < 8; 2138 break; 2139 } 2140 case BUFFER_LOAD: { 2141 MachineBasicBlock::iterator NewMI = 2142 mergeBufferLoadPair(CI, Paired, InstsToMove); 2143 CI.setMI(NewMI, *this); 2144 OptimizeListAgain |= (CI.Width + Paired.Width) < 4; 2145 break; 2146 } 2147 case BUFFER_STORE: { 2148 MachineBasicBlock::iterator NewMI = 2149 mergeBufferStorePair(CI, Paired, InstsToMove); 2150 CI.setMI(NewMI, *this); 2151 OptimizeListAgain |= (CI.Width + Paired.Width) < 4; 2152 break; 2153 } 2154 case MIMG: { 2155 MachineBasicBlock::iterator NewMI = 2156 mergeImagePair(CI, Paired, InstsToMove); 2157 CI.setMI(NewMI, *this); 2158 OptimizeListAgain |= (CI.Width + Paired.Width) < 4; 2159 break; 2160 } 2161 case TBUFFER_LOAD: { 2162 MachineBasicBlock::iterator NewMI = 2163 mergeTBufferLoadPair(CI, Paired, InstsToMove); 2164 CI.setMI(NewMI, *this); 2165 OptimizeListAgain |= (CI.Width + Paired.Width) < 4; 2166 break; 2167 } 2168 case TBUFFER_STORE: { 2169 MachineBasicBlock::iterator NewMI = 2170 mergeTBufferStorePair(CI, Paired, InstsToMove); 2171 CI.setMI(NewMI, *this); 2172 OptimizeListAgain |= (CI.Width + Paired.Width) < 4; 2173 break; 2174 } 2175 } 2176 CI.Order = Paired.Order; 2177 if (I == Second) 2178 I = Next; 2179 2180 MergeList.erase(Second); 2181 } 2182 2183 return Modified; 2184 } 2185 2186 bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) { 2187 if (skipFunction(MF.getFunction())) 2188 return false; 2189 2190 STM = &MF.getSubtarget<GCNSubtarget>(); 2191 if (!STM->loadStoreOptEnabled()) 2192 return false; 2193 2194 TII = STM->getInstrInfo(); 2195 TRI = &TII->getRegisterInfo(); 2196 2197 MRI = &MF.getRegInfo(); 2198 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 2199 2200 LLVM_DEBUG(dbgs() << "Running SILoadStoreOptimizer\n"); 2201 2202 bool Modified = false; 2203 2204 // Contains the list of instructions for which constant offsets are being 2205 // promoted to the IMM. This is tracked for an entire block at time. 2206 SmallPtrSet<MachineInstr *, 4> AnchorList; 2207 MemInfoMap Visited; 2208 2209 for (MachineBasicBlock &MBB : MF) { 2210 MachineBasicBlock::iterator SectionEnd; 2211 for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E; 2212 I = SectionEnd) { 2213 bool CollectModified; 2214 std::list<std::list<CombineInfo>> MergeableInsts; 2215 2216 // First pass: Collect list of all instructions we know how to merge in a 2217 // subset of the block. 2218 std::tie(SectionEnd, CollectModified) = 2219 collectMergeableInsts(I, E, Visited, AnchorList, MergeableInsts); 2220 2221 Modified |= CollectModified; 2222 2223 do { 2224 OptimizeAgain = false; 2225 Modified |= optimizeBlock(MergeableInsts); 2226 } while (OptimizeAgain); 2227 } 2228 2229 Visited.clear(); 2230 AnchorList.clear(); 2231 } 2232 2233 return Modified; 2234 } 2235