//===- SILoadStoreOptimizer.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass tries to fuse DS instructions with nearby immediate offsets.
// This will fuse operations such as
//  ds_read_b32 v0, v2 offset:16
//  ds_read_b32 v1, v2 offset:32
// ==>
//  ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
//
// The same is done for certain SMEM and VMEM opcodes, e.g.:
//  s_buffer_load_dword s4, s[0:3], 4
//  s_buffer_load_dword s5, s[0:3], 8
// ==>
//  s_buffer_load_dwordx2 s[4:5], s[0:3], 4
//
// This pass also tries to promote a constant offset to the immediate by
// adjusting the base. It tries to use a base from the nearby instructions that
// allows it to have a 13-bit constant offset and then promotes that 13-bit
// offset to the immediate.
// E.g.
//  s_movk_i32 s0, 0x1800
//  v_add_co_u32_e32 v0, vcc, s0, v2
//  v_addc_co_u32_e32 v1, vcc, 0, v6, vcc
//
//  s_movk_i32 s0, 0x1000
//  v_add_co_u32_e32 v5, vcc, s0, v2
//  v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
//  global_load_dwordx2 v[5:6], v[5:6], off
//  global_load_dwordx2 v[0:1], v[0:1], off
// =>
//  s_movk_i32 s0, 0x1000
//  v_add_co_u32_e32 v5, vcc, s0, v2
//  v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
//  global_load_dwordx2 v[5:6], v[5:6], off
//  global_load_dwordx2 v[0:1], v[5:6], off offset:2048
//
// Future improvements:
//
// - This is currently missing stores of constants because loading
//   the constant into the data register is placed between the stores, although
//   this is arguably a scheduling problem.
//
// - Live interval recomputing seems inefficient. This currently only matches
//   one pair, then recomputes live intervals and moves on to the next pair. It
//   would be better to compute a list of all merges that need to occur.
//
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads has offsets that are too large to fit in the 8-bit
//   offset fields, but are close enough together that their differences fit in
//   8 bits, we can add to the base pointer and use the new, reduced offsets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/InitializePasses.h"

using namespace llvm;

#define DEBUG_TYPE "si-load-store-opt"

namespace {
enum InstClassEnum {
  UNKNOWN,
  DS_READ,
  DS_WRITE,
  S_BUFFER_LOAD_IMM,
  BUFFER_LOAD,
  BUFFER_STORE,
  MIMG,
  TBUFFER_LOAD,
  TBUFFER_STORE,
};

struct AddressRegs {
  unsigned char NumVAddrs = 0;
  bool SBase = false;
  bool SRsrc = false;
  bool SOffset = false;
  bool VAddr = false;
  bool Addr = false;
  bool SSamp = false;
};

// GFX10 image_sample instructions can have 12 vaddrs + srsrc + ssamp.
const unsigned MaxAddressRegs = 12 + 1 + 1;

class SILoadStoreOptimizer : public MachineFunctionPass {
  struct CombineInfo {
    MachineBasicBlock::iterator I;
    unsigned EltSize;
    unsigned Offset;
    unsigned Width;
    unsigned Format;
    unsigned BaseOff;
    unsigned DMask;
    InstClassEnum InstClass;
    unsigned CPol = 0;
    bool IsAGPR;
    bool UseST64;
    int AddrIdx[MaxAddressRegs];
    const MachineOperand *AddrReg[MaxAddressRegs];
    unsigned NumAddresses;
    unsigned Order;

    bool hasSameBaseAddress(const MachineInstr &MI) {
      for (unsigned i = 0; i < NumAddresses; i++) {
        const MachineOperand &AddrRegNext = MI.getOperand(AddrIdx[i]);

        if (AddrReg[i]->isImm() || AddrRegNext.isImm()) {
          if (AddrReg[i]->isImm() != AddrRegNext.isImm() ||
              AddrReg[i]->getImm() != AddrRegNext.getImm()) {
            return false;
          }
          continue;
        }

        // Check same base pointer. Be careful of subregisters, which can occur
        // with vectors of pointers.
        if (AddrReg[i]->getReg() != AddrRegNext.getReg() ||
            AddrReg[i]->getSubReg() != AddrRegNext.getSubReg()) {
          return false;
        }
      }
      return true;
    }

    bool hasMergeableAddress(const MachineRegisterInfo &MRI) {
      for (unsigned i = 0; i < NumAddresses; ++i) {
        const MachineOperand *AddrOp = AddrReg[i];
        // Immediates are always OK.
        if (AddrOp->isImm())
          continue;

        // Don't try to merge addresses that aren't either immediates or
        // registers.
        // TODO: Should be possible to merge FrameIndexes and maybe some other
        // non-register operands.
        if (!AddrOp->isReg())
          return false;

        // TODO: We should be able to merge physical reg addresses.
        if (AddrOp->getReg().isPhysical())
          return false;

        // If an address has only one use then there will be no other
        // instructions with the same address, so we can't merge this one.
        if (MRI.hasOneNonDBGUse(AddrOp->getReg()))
          return false;
      }
      return true;
    }

    void setMI(MachineBasicBlock::iterator MI, const SILoadStoreOptimizer &LSO);
  };

  struct BaseRegisters {
    Register LoReg;
    Register HiReg;

    unsigned LoSubReg = 0;
    unsigned HiSubReg = 0;
  };

  struct MemAddress {
    BaseRegisters Base;
    int64_t Offset = 0;
  };

  using MemInfoMap = DenseMap<MachineInstr *, MemAddress>;

private:
  const GCNSubtarget *STM = nullptr;
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  AliasAnalysis *AA = nullptr;
  bool OptimizeAgain;

  static bool dmasksCanBeCombined(const CombineInfo &CI,
                                  const SIInstrInfo &TII,
                                  const CombineInfo &Paired);
  static bool offsetsCanBeCombined(CombineInfo &CI, const GCNSubtarget &STI,
                                   CombineInfo &Paired, bool Modify = false);
  static bool widthsFit(const GCNSubtarget &STI, const CombineInfo &CI,
                        const CombineInfo &Paired);
  static unsigned getNewOpcode(const CombineInfo &CI, const CombineInfo &Paired);
  static std::pair<unsigned, unsigned> getSubRegIdxs(const CombineInfo &CI,
                                                     const CombineInfo &Paired);
  const TargetRegisterClass *getTargetRegisterClass(const CombineInfo &CI,
                                                    const CombineInfo &Paired);
  const TargetRegisterClass *getDataRegClass(const MachineInstr &MI) const;

  bool checkAndPrepareMerge(CombineInfo &CI, CombineInfo &Paired,
                            SmallVectorImpl<MachineInstr *> &InstsToMove);

  unsigned read2Opcode(unsigned EltSize) const;
  unsigned read2ST64Opcode(unsigned EltSize) const;
  MachineBasicBlock::iterator
  mergeRead2Pair(CombineInfo &CI, CombineInfo &Paired,
                 const SmallVectorImpl<MachineInstr *> &InstsToMove);

  unsigned write2Opcode(unsigned EltSize) const;
  unsigned write2ST64Opcode(unsigned EltSize) const;
  MachineBasicBlock::iterator
  mergeWrite2Pair(CombineInfo &CI, CombineInfo &Paired,
                  const SmallVectorImpl<MachineInstr *> &InstsToMove);
  MachineBasicBlock::iterator
  mergeImagePair(CombineInfo &CI, CombineInfo &Paired,
                 const SmallVectorImpl<MachineInstr *> &InstsToMove);
  MachineBasicBlock::iterator
  mergeSBufferLoadImmPair(CombineInfo &CI, CombineInfo &Paired,
                          const SmallVectorImpl<MachineInstr *> &InstsToMove);
  MachineBasicBlock::iterator
  mergeBufferLoadPair(CombineInfo &CI, CombineInfo &Paired,
                      const SmallVectorImpl<MachineInstr *> &InstsToMove);
  MachineBasicBlock::iterator
  mergeBufferStorePair(CombineInfo &CI, CombineInfo &Paired,
                       const SmallVectorImpl<MachineInstr *> &InstsToMove);
  MachineBasicBlock::iterator
  mergeTBufferLoadPair(CombineInfo &CI, CombineInfo &Paired,
                       const SmallVectorImpl<MachineInstr *> &InstsToMove);
  MachineBasicBlock::iterator
  mergeTBufferStorePair(CombineInfo &CI, CombineInfo &Paired,
                        const SmallVectorImpl<MachineInstr *> &InstsToMove);

  void updateBaseAndOffset(MachineInstr &I, Register NewBase,
                           int32_t NewOffset) const;
  Register computeBase(MachineInstr &MI, const MemAddress &Addr) const;
  MachineOperand createRegOrImm(int32_t Val, MachineInstr &MI) const;
  Optional<int32_t> extractConstOffset(const MachineOperand &Op) const;
  void processBaseWithConstOffset(const MachineOperand &Base,
                                  MemAddress &Addr) const;
  /// Promotes a constant offset to the immediate by adjusting the base. It
  /// tries to use a base from the nearby instructions that allows it to have
  /// a 13-bit constant offset which gets promoted to the immediate.
  bool promoteConstantOffsetToImm(MachineInstr &CI,
                                  MemInfoMap &Visited,
                                  SmallPtrSet<MachineInstr *, 4> &Promoted) const;
  void addInstToMergeableList(const CombineInfo &CI,
                              std::list<std::list<CombineInfo>> &MergeableInsts) const;

  std::pair<MachineBasicBlock::iterator, bool> collectMergeableInsts(
      MachineBasicBlock::iterator Begin, MachineBasicBlock::iterator End,
      MemInfoMap &Visited, SmallPtrSet<MachineInstr *, 4> &AnchorList,
      std::list<std::list<CombineInfo>> &MergeableInsts) const;

public:
  static char ID;

  SILoadStoreOptimizer() : MachineFunctionPass(ID) {
    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
  }

  bool optimizeInstsWithSameBaseAddr(std::list<CombineInfo> &MergeList,
                                     bool &OptimizeListAgain);
  bool optimizeBlock(std::list<std::list<CombineInfo>> &MergeableInsts);

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Load Store Optimizer"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();

    MachineFunctionPass::getAnalysisUsage(AU);
  }

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties()
        .set(MachineFunctionProperties::Property::IsSSA);
  }
};

static unsigned getOpcodeWidth(const MachineInstr &MI, const SIInstrInfo &TII) {
  const unsigned Opc = MI.getOpcode();

  if (TII.isMUBUF(Opc)) {
    // FIXME: Handle d16 correctly
    return AMDGPU::getMUBUFElements(Opc);
  }
  if (TII.isMIMG(MI)) {
    uint64_t DMaskImm =
        TII.getNamedOperand(MI, AMDGPU::OpName::dmask)->getImm();
    return countPopulation(DMaskImm);
  }
  if (TII.isMTBUF(Opc)) {
    return AMDGPU::getMTBUFElements(Opc);
  }

  switch (Opc) {
  case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
    return 1;
  case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
    return 2;
  case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
    return 4;
  case AMDGPU::S_BUFFER_LOAD_DWORDX8_IMM:
    return 8;
  case AMDGPU::DS_READ_B32:      LLVM_FALLTHROUGH;
  case AMDGPU::DS_READ_B32_gfx9: LLVM_FALLTHROUGH;
  case AMDGPU::DS_WRITE_B32:     LLVM_FALLTHROUGH;
  case AMDGPU::DS_WRITE_B32_gfx9:
    return 1;
  case AMDGPU::DS_READ_B64:      LLVM_FALLTHROUGH;
  case AMDGPU::DS_READ_B64_gfx9: LLVM_FALLTHROUGH;
  case AMDGPU::DS_WRITE_B64:     LLVM_FALLTHROUGH;
  case AMDGPU::DS_WRITE_B64_gfx9:
    return 2;
  default:
    return 0;
  }
}

/// Maps instruction opcode to enum InstClassEnum.
static InstClassEnum getInstClass(unsigned Opc, const SIInstrInfo &TII) {
  switch (Opc) {
  default:
    if (TII.isMUBUF(Opc)) {
      switch (AMDGPU::getMUBUFBaseOpcode(Opc)) {
      default:
        return UNKNOWN;
      case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
      case AMDGPU::BUFFER_LOAD_DWORD_OFFEN_exact:
      case AMDGPU::BUFFER_LOAD_DWORD_OFFSET:
      case AMDGPU::BUFFER_LOAD_DWORD_OFFSET_exact:
        return BUFFER_LOAD;
      case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
      case AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact:
      case AMDGPU::BUFFER_STORE_DWORD_OFFSET:
      case AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact:
        return BUFFER_STORE;
      }
    }
    if (TII.isMIMG(Opc)) {
      // Ignore instructions encoded without vaddr.
      if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr) == -1 &&
          AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0) == -1)
        return UNKNOWN;
      // Ignore BVH instructions.
      if (AMDGPU::getMIMGBaseOpcode(Opc)->BVH)
        return UNKNOWN;
      // TODO: Support IMAGE_GET_RESINFO and IMAGE_GET_LOD.
      if (TII.get(Opc).mayStore() || !TII.get(Opc).mayLoad() ||
          TII.isGather4(Opc))
        return UNKNOWN;
      return MIMG;
    }
    if (TII.isMTBUF(Opc)) {
      switch (AMDGPU::getMTBUFBaseOpcode(Opc)) {
      default:
        return UNKNOWN;
      case AMDGPU::TBUFFER_LOAD_FORMAT_X_OFFEN:
      case AMDGPU::TBUFFER_LOAD_FORMAT_X_OFFEN_exact:
      case AMDGPU::TBUFFER_LOAD_FORMAT_X_OFFSET:
      case AMDGPU::TBUFFER_LOAD_FORMAT_X_OFFSET_exact:
        return TBUFFER_LOAD;
      case AMDGPU::TBUFFER_STORE_FORMAT_X_OFFEN:
      case AMDGPU::TBUFFER_STORE_FORMAT_X_OFFEN_exact:
      case AMDGPU::TBUFFER_STORE_FORMAT_X_OFFSET:
      case AMDGPU::TBUFFER_STORE_FORMAT_X_OFFSET_exact:
        return TBUFFER_STORE;
      }
    }
    return UNKNOWN;
  case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX8_IMM:
    return S_BUFFER_LOAD_IMM;
  case AMDGPU::DS_READ_B32:
  case AMDGPU::DS_READ_B32_gfx9:
  case AMDGPU::DS_READ_B64:
  case AMDGPU::DS_READ_B64_gfx9:
    return DS_READ;
  case AMDGPU::DS_WRITE_B32:
  case AMDGPU::DS_WRITE_B32_gfx9:
  case AMDGPU::DS_WRITE_B64:
  case AMDGPU::DS_WRITE_B64_gfx9:
    return DS_WRITE;
  }
}

/// Determines instruction subclass from opcode. Only instructions
/// of the same subclass can be merged together. The merged instruction may
/// have a different subclass but must have the same class.
static unsigned getInstSubclass(unsigned Opc, const SIInstrInfo &TII) {
  switch (Opc) {
  default:
    if (TII.isMUBUF(Opc))
      return AMDGPU::getMUBUFBaseOpcode(Opc);
    if (TII.isMIMG(Opc)) {
      const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opc);
      assert(Info);
      return Info->BaseOpcode;
    }
    if (TII.isMTBUF(Opc))
      return AMDGPU::getMTBUFBaseOpcode(Opc);
    return -1;
  case AMDGPU::DS_READ_B32:
  case AMDGPU::DS_READ_B32_gfx9:
  case AMDGPU::DS_READ_B64:
  case AMDGPU::DS_READ_B64_gfx9:
  case AMDGPU::DS_WRITE_B32:
  case AMDGPU::DS_WRITE_B32_gfx9:
  case AMDGPU::DS_WRITE_B64:
  case AMDGPU::DS_WRITE_B64_gfx9:
    return Opc;
  case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX8_IMM:
    return AMDGPU::S_BUFFER_LOAD_DWORD_IMM;
  }
}

static AddressRegs getRegs(unsigned Opc, const SIInstrInfo &TII) {
  AddressRegs Result;

  if (TII.isMUBUF(Opc)) {
    if (AMDGPU::getMUBUFHasVAddr(Opc))
      Result.VAddr = true;
    if (AMDGPU::getMUBUFHasSrsrc(Opc))
      Result.SRsrc = true;
    if (AMDGPU::getMUBUFHasSoffset(Opc))
      Result.SOffset = true;

    return Result;
  }

  if (TII.isMIMG(Opc)) {
    int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0);
    if (VAddr0Idx >= 0) {
      int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc);
      Result.NumVAddrs = SRsrcIdx - VAddr0Idx;
    } else {
      Result.VAddr = true;
    }
    Result.SRsrc = true;
    const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opc);
    if (Info && AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode)->Sampler)
      Result.SSamp = true;

    return Result;
  }
  if (TII.isMTBUF(Opc)) {
    if (AMDGPU::getMTBUFHasVAddr(Opc))
      Result.VAddr = true;
    if (AMDGPU::getMTBUFHasSrsrc(Opc))
      Result.SRsrc = true;
    if (AMDGPU::getMTBUFHasSoffset(Opc))
      Result.SOffset = true;

    return Result;
  }

  switch (Opc) {
  default:
    return Result;
  case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX8_IMM:
    Result.SBase = true;
    return Result;
  case AMDGPU::DS_READ_B32:
  case AMDGPU::DS_READ_B64:
  case AMDGPU::DS_READ_B32_gfx9:
  case AMDGPU::DS_READ_B64_gfx9:
  case AMDGPU::DS_WRITE_B32:
  case AMDGPU::DS_WRITE_B64:
  case AMDGPU::DS_WRITE_B32_gfx9:
  case AMDGPU::DS_WRITE_B64_gfx9:
    Result.Addr = true;
    return Result;
  }
}

void SILoadStoreOptimizer::CombineInfo::setMI(MachineBasicBlock::iterator MI,
                                              const SILoadStoreOptimizer &LSO) {
  I = MI;
  unsigned Opc = MI->getOpcode();
  InstClass = getInstClass(Opc, *LSO.TII);

  if (InstClass == UNKNOWN)
    return;

  IsAGPR = LSO.TRI->hasAGPRs(LSO.getDataRegClass(*MI));

  switch (InstClass) {
  case DS_READ:
    EltSize =
        (Opc == AMDGPU::DS_READ_B64 || Opc == AMDGPU::DS_READ_B64_gfx9) ? 8
                                                                        : 4;
    break;
  case DS_WRITE:
    EltSize =
        (Opc == AMDGPU::DS_WRITE_B64 || Opc == AMDGPU::DS_WRITE_B64_gfx9) ? 8
                                                                          : 4;
    break;
  case S_BUFFER_LOAD_IMM:
    EltSize = AMDGPU::convertSMRDOffsetUnits(*LSO.STM, 4);
    break;
  default:
    EltSize = 4;
    break;
  }

  if (InstClass == MIMG) {
    DMask = LSO.TII->getNamedOperand(*I, AMDGPU::OpName::dmask)->getImm();
    // Offset is not considered for MIMG instructions.
    Offset = 0;
  } else {
    int OffsetIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::offset);
    Offset = I->getOperand(OffsetIdx).getImm();
  }

  if (InstClass == TBUFFER_LOAD || InstClass == TBUFFER_STORE)
    Format = LSO.TII->getNamedOperand(*I, AMDGPU::OpName::format)->getImm();

  Width = getOpcodeWidth(*I, *LSO.TII);

  if ((InstClass == DS_READ) || (InstClass == DS_WRITE)) {
    Offset &= 0xffff;
  } else if (InstClass != MIMG) {
    CPol = LSO.TII->getNamedOperand(*I, AMDGPU::OpName::cpol)->getImm();
  }

  AddressRegs Regs = getRegs(Opc, *LSO.TII);

  NumAddresses = 0;
  for (unsigned J = 0; J < Regs.NumVAddrs; J++)
    AddrIdx[NumAddresses++] =
        AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0) + J;
  if (Regs.Addr)
    AddrIdx[NumAddresses++] =
        AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::addr);
  if (Regs.SBase)
    AddrIdx[NumAddresses++] =
        AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::sbase);
  if (Regs.SRsrc)
    AddrIdx[NumAddresses++] =
        AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc);
  if (Regs.SOffset)
    AddrIdx[NumAddresses++] =
        AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::soffset);
  if (Regs.VAddr)
    AddrIdx[NumAddresses++] =
        AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr);
  if (Regs.SSamp)
    AddrIdx[NumAddresses++] =
        AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::ssamp);
  assert(NumAddresses <= MaxAddressRegs);

  for (unsigned J = 0; J < NumAddresses; J++)
    AddrReg[J] = &I->getOperand(AddrIdx[J]);
}

} // end anonymous namespace.
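
// A worked example of CombineInfo::setMI() above (illustrative register names,
// not taken from a real test): for the MIR instruction
//   %v:vgpr_32 = DS_READ_B32 %ptr:vgpr_32, 16, 0
// setMI records InstClass = DS_READ, EltSize = 4, Offset = 16 (DS offsets are
// masked to 16 bits), Width = 1, and a single address operand found via
// OpName::addr.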

INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
                      "SI Load Store Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE, "SI Load Store Optimizer",
                    false, false)

char SILoadStoreOptimizer::ID = 0;

char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;

FunctionPass *llvm::createSILoadStoreOptimizerPass() {
  return new SILoadStoreOptimizer();
}

static void moveInstsAfter(MachineBasicBlock::iterator I,
                           ArrayRef<MachineInstr *> InstsToMove) {
  MachineBasicBlock *MBB = I->getParent();
  ++I;
  for (MachineInstr *MI : InstsToMove) {
    MI->removeFromParent();
    MBB->insert(I, MI);
  }
}

static void addDefsUsesToList(const MachineInstr &MI,
                              DenseSet<Register> &RegDefs,
                              DenseSet<Register> &PhysRegUses) {
  for (const MachineOperand &Op : MI.operands()) {
    if (Op.isReg()) {
      if (Op.isDef())
        RegDefs.insert(Op.getReg());
      else if (Op.readsReg() && Op.getReg().isPhysical())
        PhysRegUses.insert(Op.getReg());
    }
  }
}

static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
                                      MachineBasicBlock::iterator B,
                                      AliasAnalysis *AA) {
  // RAW or WAR - cannot reorder
  // WAW - cannot reorder
  // RAR - safe to reorder
  return !(A->mayStore() || B->mayStore()) || !A->mayAlias(AA, *B, true);
}

// Add MI and its defs to the lists if MI reads one of the defs that are
// already in the list. Returns true in that case.
static bool addToListsIfDependent(MachineInstr &MI, DenseSet<Register> &RegDefs,
                                  DenseSet<Register> &PhysRegUses,
                                  SmallVectorImpl<MachineInstr *> &Insts) {
  for (MachineOperand &Use : MI.operands()) {
    // If one of the defs is read, then there is a use of Def between I and the
    // instruction that I will potentially be merged with. We will need to move
    // this instruction after the merged instructions.
    //
    // Similarly, if there is a def which is read by an instruction that is to
    // be moved for merging, then we need to move the def-instruction as well.
    // This can only happen for physical registers such as M0; virtual
    // registers are in SSA form.
    if (Use.isReg() && ((Use.readsReg() && RegDefs.count(Use.getReg())) ||
                        (Use.isDef() && RegDefs.count(Use.getReg())) ||
                        (Use.isDef() && Use.getReg().isPhysical() &&
                         PhysRegUses.count(Use.getReg())))) {
      Insts.push_back(&MI);
      addDefsUsesToList(MI, RegDefs, PhysRegUses);
      return true;
    }
  }

  return false;
}

static bool canMoveInstsAcrossMemOp(MachineInstr &MemOp,
                                    ArrayRef<MachineInstr *> InstsToMove,
                                    AliasAnalysis *AA) {
  assert(MemOp.mayLoadOrStore());

  for (MachineInstr *InstToMove : InstsToMove) {
    if (!InstToMove->mayLoadOrStore())
      continue;
    if (!memAccessesCanBeReordered(MemOp, *InstToMove, AA))
      return false;
  }
  return true;
}

// This function assumes that \p A and \p B are identical except for
// size and offset, and they reference adjacent memory.
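// For example (illustrative values): combining a 4-byte memoperand at offset
// 16 with an adjacent 4-byte memoperand at offset 20 yields one 8-byte
// memoperand at offset 16.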
static MachineMemOperand *combineKnownAdjacentMMOs(MachineFunction &MF,
                                                   const MachineMemOperand *A,
                                                   const MachineMemOperand *B) {
  unsigned MinOffset = std::min(A->getOffset(), B->getOffset());
  unsigned Size = A->getSize() + B->getSize();
  // This function adds the offset parameter to the existing offset for A,
  // so we pass 0 here as the offset and then manually set it to the correct
  // value after the call.
  MachineMemOperand *MMO = MF.getMachineMemOperand(A, 0, Size);
  MMO->setOffset(MinOffset);
  return MMO;
}

bool SILoadStoreOptimizer::dmasksCanBeCombined(const CombineInfo &CI,
                                               const SIInstrInfo &TII,
                                               const CombineInfo &Paired) {
  assert(CI.InstClass == MIMG);

  // Ignore instructions with tfe/lwe set.
  const auto *TFEOp = TII.getNamedOperand(*CI.I, AMDGPU::OpName::tfe);
  const auto *LWEOp = TII.getNamedOperand(*CI.I, AMDGPU::OpName::lwe);

  if ((TFEOp && TFEOp->getImm()) || (LWEOp && LWEOp->getImm()))
    return false;

  // Check other optional immediate operands for equality.
  unsigned OperandsToMatch[] = {AMDGPU::OpName::cpol, AMDGPU::OpName::d16,
                                AMDGPU::OpName::unorm, AMDGPU::OpName::da,
                                AMDGPU::OpName::r128, AMDGPU::OpName::a16};

  for (auto op : OperandsToMatch) {
    int Idx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), op);
    if (AMDGPU::getNamedOperandIdx(Paired.I->getOpcode(), op) != Idx)
      return false;
    if (Idx != -1 &&
        CI.I->getOperand(Idx).getImm() != Paired.I->getOperand(Idx).getImm())
      return false;
  }

  // Check DMask for overlaps.
  unsigned MaxMask = std::max(CI.DMask, Paired.DMask);
  unsigned MinMask = std::min(CI.DMask, Paired.DMask);

  unsigned AllowedBitsForMin = llvm::countTrailingZeros(MaxMask);
  if ((1u << AllowedBitsForMin) <= MinMask)
    return false;

  return true;
}

static unsigned getBufferFormatWithCompCount(unsigned OldFormat,
                                             unsigned ComponentCount,
                                             const GCNSubtarget &STI) {
  if (ComponentCount > 4)
    return 0;

  const llvm::AMDGPU::GcnBufferFormatInfo *OldFormatInfo =
      llvm::AMDGPU::getGcnBufferFormatInfo(OldFormat, STI);
  if (!OldFormatInfo)
    return 0;

  const llvm::AMDGPU::GcnBufferFormatInfo *NewFormatInfo =
      llvm::AMDGPU::getGcnBufferFormatInfo(OldFormatInfo->BitsPerComp,
                                           ComponentCount,
                                           OldFormatInfo->NumFormat, STI);

  if (!NewFormatInfo)
    return 0;

  assert(NewFormatInfo->NumFormat == OldFormatInfo->NumFormat &&
         NewFormatInfo->BitsPerComp == OldFormatInfo->BitsPerComp);

  return NewFormatInfo->Format;
}

// Return the value in the inclusive range [Lo,Hi] that is aligned to the
// highest power of two. Note that the result is well defined for all inputs
// including corner cases like:
// - if Lo == Hi, return that value
// - if Lo == 0, return 0 (even though the "- 1" below underflows)
// - if Lo > Hi, return 0 (as if the range wrapped around)
static uint32_t mostAlignedValueInRange(uint32_t Lo, uint32_t Hi) {
  return Hi & maskLeadingOnes<uint32_t>(countLeadingZeros((Lo - 1) ^ Hi) + 1);
}
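
// A quick sanity check of the formula above (illustrative, not asserted
// anywhere): for Lo = 5 and Hi = 9, (Lo - 1) ^ Hi = 0b1101, so the mask keeps
// bit 3 and above, and the result is 8 -- the only multiple of 8 in [5, 9].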

bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI,
                                                const GCNSubtarget &STI,
                                                CombineInfo &Paired,
                                                bool Modify) {
  assert(CI.InstClass != MIMG);

  // XXX - Would the same offset be OK? Is there any reason this would happen
  // or be useful?
  if (CI.Offset == Paired.Offset)
    return false;

  // This won't be valid if the offset isn't aligned.
  if ((CI.Offset % CI.EltSize != 0) || (Paired.Offset % CI.EltSize != 0))
    return false;

  if (CI.InstClass == TBUFFER_LOAD || CI.InstClass == TBUFFER_STORE) {

    const llvm::AMDGPU::GcnBufferFormatInfo *Info0 =
        llvm::AMDGPU::getGcnBufferFormatInfo(CI.Format, STI);
    if (!Info0)
      return false;
    const llvm::AMDGPU::GcnBufferFormatInfo *Info1 =
        llvm::AMDGPU::getGcnBufferFormatInfo(Paired.Format, STI);
    if (!Info1)
      return false;

    if (Info0->BitsPerComp != Info1->BitsPerComp ||
        Info0->NumFormat != Info1->NumFormat)
      return false;

    // TODO: Should be possible to support more formats, but if format loads
    // are not dword-aligned, the merged load might not be valid.
    if (Info0->BitsPerComp != 32)
      return false;

    if (getBufferFormatWithCompCount(CI.Format, CI.Width + Paired.Width, STI) == 0)
      return false;
  }

  uint32_t EltOffset0 = CI.Offset / CI.EltSize;
  uint32_t EltOffset1 = Paired.Offset / CI.EltSize;
  CI.UseST64 = false;
  CI.BaseOff = 0;

  // Handle all non-DS instructions.
  if ((CI.InstClass != DS_READ) && (CI.InstClass != DS_WRITE)) {
    return (EltOffset0 + CI.Width == EltOffset1 ||
            EltOffset1 + Paired.Width == EltOffset0) &&
           CI.CPol == Paired.CPol;
  }

  // If the offset in elements doesn't fit in 8 bits, we might be able to use
  // the stride 64 versions.
  if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64) == 0 &&
      isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
    if (Modify) {
      CI.Offset = EltOffset0 / 64;
      Paired.Offset = EltOffset1 / 64;
      CI.UseST64 = true;
    }
    return true;
  }

  // Check if the new offsets fit in the reduced 8-bit range.
  if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
    if (Modify) {
      CI.Offset = EltOffset0;
      Paired.Offset = EltOffset1;
    }
    return true;
  }

  // Try to shift base address to decrease offsets.
  uint32_t Min = std::min(EltOffset0, EltOffset1);
  uint32_t Max = std::max(EltOffset0, EltOffset1);

  const uint32_t Mask = maskTrailingOnes<uint32_t>(8) * 64;
  if (((Max - Min) & ~Mask) == 0) {
    if (Modify) {
      // From the range of values we could use for BaseOff, choose the one that
      // is aligned to the highest power of two, to maximise the chance that
      // the same offset can be reused for other load/store pairs.
      uint32_t BaseOff = mostAlignedValueInRange(Max - 0xff * 64, Min);
      // Copy the low bits of the offsets, so that when we adjust them by
      // subtracting BaseOff they will be multiples of 64.
      BaseOff |= Min & maskTrailingOnes<uint32_t>(6);
      CI.BaseOff = BaseOff * CI.EltSize;
      CI.Offset = (EltOffset0 - BaseOff) / 64;
      Paired.Offset = (EltOffset1 - BaseOff) / 64;
      CI.UseST64 = true;
    }
    return true;
  }

  if (isUInt<8>(Max - Min)) {
    if (Modify) {
      // From the range of values we could use for BaseOff, choose the one that
      // is aligned to the highest power of two, to maximise the chance that
      // the same offset can be reused for other load/store pairs.
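      // Illustrative: for element offsets 0x101 and 0x1ff (Max - Min = 0xfe),
      // mostAlignedValueInRange(0x100, 0x101) yields BaseOff = 0x100, so the
      // new offsets become 0x1 and 0xff.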
      uint32_t BaseOff = mostAlignedValueInRange(Max - 0xff, Min);
      CI.BaseOff = BaseOff * CI.EltSize;
      CI.Offset = EltOffset0 - BaseOff;
      Paired.Offset = EltOffset1 - BaseOff;
    }
    return true;
  }

  return false;
}

bool SILoadStoreOptimizer::widthsFit(const GCNSubtarget &STM,
                                     const CombineInfo &CI,
                                     const CombineInfo &Paired) {
  const unsigned Width = (CI.Width + Paired.Width);
  switch (CI.InstClass) {
  default:
    return (Width <= 4) && (STM.hasDwordx3LoadStores() || (Width != 3));
  case S_BUFFER_LOAD_IMM:
    switch (Width) {
    default:
      return false;
    case 2:
    case 4:
    case 8:
      return true;
    }
  }
}

const TargetRegisterClass *
SILoadStoreOptimizer::getDataRegClass(const MachineInstr &MI) const {
  if (const auto *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst)) {
    return TRI->getRegClassForReg(*MRI, Dst->getReg());
  }
  if (const auto *Src = TII->getNamedOperand(MI, AMDGPU::OpName::vdata)) {
    return TRI->getRegClassForReg(*MRI, Src->getReg());
  }
  if (const auto *Src = TII->getNamedOperand(MI, AMDGPU::OpName::data0)) {
    return TRI->getRegClassForReg(*MRI, Src->getReg());
  }
  if (const auto *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst)) {
    return TRI->getRegClassForReg(*MRI, Dst->getReg());
  }
  if (const auto *Src = TII->getNamedOperand(MI, AMDGPU::OpName::sdata)) {
    return TRI->getRegClassForReg(*MRI, Src->getReg());
  }
  return nullptr;
}

/// This function assumes that CI comes before Paired in a basic block.
bool SILoadStoreOptimizer::checkAndPrepareMerge(
    CombineInfo &CI, CombineInfo &Paired,
    SmallVectorImpl<MachineInstr *> &InstsToMove) {
  // If another instruction has already been merged into CI, it may now be a
  // type that we can't do any further merging into.
  if (CI.InstClass == UNKNOWN || Paired.InstClass == UNKNOWN)
    return false;
  assert(CI.InstClass == Paired.InstClass);

  // Check both offsets (or masks for MIMG) can be combined and fit in the
  // reduced range.
  if (CI.InstClass == MIMG) {
    if (!dmasksCanBeCombined(CI, *TII, Paired))
      return false;
  } else {
    if (!widthsFit(*STM, CI, Paired) || !offsetsCanBeCombined(CI, *STM, Paired))
      return false;
  }

  const unsigned Opc = CI.I->getOpcode();
  const unsigned InstSubclass = getInstSubclass(Opc, *TII);

  DenseSet<Register> RegDefsToMove;
  DenseSet<Register> PhysRegUsesToMove;
  addDefsUsesToList(*CI.I, RegDefsToMove, PhysRegUsesToMove);

  MachineBasicBlock::iterator E = std::next(Paired.I);
  MachineBasicBlock::iterator MBBI = std::next(CI.I);
  MachineBasicBlock::iterator MBBE = CI.I->getParent()->end();
  for (; MBBI != E; ++MBBI) {

    if (MBBI == MBBE) {
      // CombineInfo::Order is a hint on the instruction ordering within the
      // basic block. This hint suggests that CI precedes Paired, which is
      // true most of the time. However, moveInstsAfter() processing a
      // previous list may have changed this order in a situation when it
      // moves an instruction which exists in some other merge list.
      // In this case it must be dependent.
      return false;
    }

    if ((getInstClass(MBBI->getOpcode(), *TII) != CI.InstClass) ||
        (getInstSubclass(MBBI->getOpcode(), *TII) != InstSubclass)) {
      // This is not a matching instruction, but we can keep looking as
      // long as one of these conditions is met:
      // 1. It is safe to move I down past MBBI.
      // 2. It is safe to move MBBI down past the instruction that I will
      //    be merged into.

      if (MBBI->mayLoadOrStore() &&
          (!memAccessesCanBeReordered(*CI.I, *MBBI, AA) ||
           !canMoveInstsAcrossMemOp(*MBBI, InstsToMove, AA))) {
        // We fail condition #1, but we may still be able to satisfy condition
        // #2. Add this instruction to the move list and then we will check
        // if condition #2 holds once we have selected the matching
        // instruction.
        InstsToMove.push_back(&*MBBI);
        addDefsUsesToList(*MBBI, RegDefsToMove, PhysRegUsesToMove);
        continue;
      }

      // When we match I with another DS instruction we will be moving I down
      // to the location of the matched instruction, so any uses of I will
      // need to be moved down as well.
      addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
                            InstsToMove);
      continue;
    }

    // Handle a case like
    //   DS_WRITE_B32 addr, v, idx0
    //   w = DS_READ_B32 addr, idx0
    //   DS_WRITE_B32 addr, f(w), idx1
    // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
    // merging of the two writes.
    if (addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
                              InstsToMove))
      continue;

    if (&*MBBI == &*Paired.I) {
      // We need to go through the list of instructions that we plan to
      // move and make sure they are all safe to move down past the merged
      // instruction.
      if (canMoveInstsAcrossMemOp(*MBBI, InstsToMove, AA)) {

        // Call offsetsCanBeCombined with modify = true so that the offsets
        // are correct for the new instruction. This should return true,
        // because this function should only be called on CombineInfo objects
        // that have already been confirmed to be mergeable.
        if (CI.InstClass != MIMG)
          offsetsCanBeCombined(CI, *STM, Paired, true);
        return true;
      }
      return false;
    }

    // We've found a load/store that we couldn't merge for some reason.
    // We could potentially keep looking, but we'd need to make sure that
    // it was safe to move I and also all the instructions in InstsToMove
    // down past this instruction.
    // Check if we can move I across MBBI and if we can move all I's users.
    if (!memAccessesCanBeReordered(*CI.I, *MBBI, AA) ||
        !canMoveInstsAcrossMemOp(*MBBI, InstsToMove, AA))
      break;
  }
  return false;
}

unsigned SILoadStoreOptimizer::read2Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64;
  return (EltSize == 4) ? AMDGPU::DS_READ2_B32_gfx9 : AMDGPU::DS_READ2_B64_gfx9;
}

unsigned SILoadStoreOptimizer::read2ST64Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32 : AMDGPU::DS_READ2ST64_B64;

  return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32_gfx9
                        : AMDGPU::DS_READ2ST64_B64_gfx9;
}

MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeRead2Pair(CombineInfo &CI, CombineInfo &Paired,
    const SmallVectorImpl<MachineInstr *> &InstsToMove) {
  MachineBasicBlock *MBB = CI.I->getParent();
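
  // Sketch of the rewrite this performs (illustrative register names):
  //   %a:vgpr_32 = DS_READ_B32 %ptr, 4, 0
  //   %b:vgpr_32 = DS_READ_B32 %ptr, 8, 0
  // becomes
  //   %d:vreg_64 = DS_READ2_B32 %ptr, 1, 2, 0
  //   %a = COPY %d.sub0
  //   %b = COPY %d.sub1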

  // Be careful, since the addresses could be subregisters themselves in weird
  // cases, like vectors of pointers.
  const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);

  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
  const auto *Dest1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdst);

  unsigned NewOffset0 = CI.Offset;
  unsigned NewOffset1 = Paired.Offset;
  unsigned Opc =
      CI.UseST64 ? read2ST64Opcode(CI.EltSize) : read2Opcode(CI.EltSize);

  unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(SubRegIdx0, SubRegIdx1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) && "Computed offset doesn't fit");

  const MCInstrDesc &Read2Desc = TII->get(Opc);

  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);
  Register DestReg = MRI->createVirtualRegister(SuperRC);

  DebugLoc DL = CI.I->getDebugLoc();

  Register BaseReg = AddrReg->getReg();
  unsigned BaseSubReg = AddrReg->getSubReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    Register ImmReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
    BuildMI(*MBB, Paired.I, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
        .addImm(CI.BaseOff);

    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;

    TII->getAddNoCarry(*MBB, Paired.I, DL, BaseReg)
        .addReg(ImmReg)
        .addReg(AddrReg->getReg(), 0, BaseSubReg)
        .addImm(0); // clamp bit
    BaseSubReg = 0;
  }

  MachineInstrBuilder Read2 =
      BuildMI(*MBB, Paired.I, DL, Read2Desc, DestReg)
          .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr
          .addImm(NewOffset0)                        // offset0
          .addImm(NewOffset1)                        // offset1
          .addImm(0)                                 // gds
          .cloneMergedMemRefs({&*CI.I, &*Paired.I});

  (void)Read2;

  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);

  // Copy to the old destination registers.
  BuildMI(*MBB, Paired.I, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, Paired.I, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, InstsToMove);

  CI.I->eraseFromParent();
  Paired.I->eraseFromParent();

  LLVM_DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  return Read2;
}

unsigned SILoadStoreOptimizer::write2Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32 : AMDGPU::DS_WRITE2_B64;
  return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32_gfx9
                        : AMDGPU::DS_WRITE2_B64_gfx9;
}

unsigned SILoadStoreOptimizer::write2ST64Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32
                          : AMDGPU::DS_WRITE2ST64_B64;

  return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32_gfx9
                        : AMDGPU::DS_WRITE2ST64_B64_gfx9;
}

MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeWrite2Pair(CombineInfo &CI, CombineInfo &Paired,
    const SmallVectorImpl<MachineInstr *> &InstsToMove) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be sure to use .add() and not .addReg() with these. We want to be sure we
  // preserve the subregister index and any register flags set on them.
  const MachineOperand *AddrReg =
      TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
  const MachineOperand *Data0 =
      TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
  const MachineOperand *Data1 =
      TII->getNamedOperand(*Paired.I, AMDGPU::OpName::data0);

  unsigned NewOffset0 = CI.Offset;
  unsigned NewOffset1 = Paired.Offset;
  unsigned Opc =
      CI.UseST64 ? write2ST64Opcode(CI.EltSize) : write2Opcode(CI.EltSize);

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(Data0, Data1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) && "Computed offset doesn't fit");

  const MCInstrDesc &Write2Desc = TII->get(Opc);
  DebugLoc DL = CI.I->getDebugLoc();

  Register BaseReg = AddrReg->getReg();
  unsigned BaseSubReg = AddrReg->getSubReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    Register ImmReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
    BuildMI(*MBB, Paired.I, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
        .addImm(CI.BaseOff);

    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;

    TII->getAddNoCarry(*MBB, Paired.I, DL, BaseReg)
        .addReg(ImmReg)
        .addReg(AddrReg->getReg(), 0, BaseSubReg)
        .addImm(0); // clamp bit
    BaseSubReg = 0;
  }

  MachineInstrBuilder Write2 =
      BuildMI(*MBB, Paired.I, DL, Write2Desc)
          .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr
          .add(*Data0)                               // data0
          .add(*Data1)                               // data1
          .addImm(NewOffset0)                        // offset0
          .addImm(NewOffset1)                        // offset1
          .addImm(0)                                 // gds
          .cloneMergedMemRefs({&*CI.I, &*Paired.I});

  moveInstsAfter(Write2, InstsToMove);

  CI.I->eraseFromParent();
  Paired.I->eraseFromParent();

  LLVM_DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  return Write2;
}

MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeImagePair(CombineInfo &CI, CombineInfo &Paired,
    const SmallVectorImpl<MachineInstr *> &InstsToMove) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  const unsigned Opcode = getNewOpcode(CI, Paired);

  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);

  Register DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedDMask = CI.DMask | Paired.DMask;
  unsigned DMaskIdx =
      AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AMDGPU::OpName::dmask);

  auto MIB = BuildMI(*MBB, Paired.I, DL, TII->get(Opcode), DestReg);
  for (unsigned I = 1, E = (*CI.I).getNumOperands(); I != E; ++I) {
    if (I == DMaskIdx)
      MIB.addImm(MergedDMask);
    else
      MIB.add((*CI.I).getOperand(I));
  }

  // It shouldn't be possible to get this far if the two instructions
  // don't have a single memoperand, because MachineInstr::mayAlias()
  // will return true if this is the case.
  assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand());

  const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
  const MachineMemOperand *MMOb = *Paired.I->memoperands_begin();

  MachineInstr *New =
      MIB.addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));

  unsigned SubRegIdx0, SubRegIdx1;
  std::tie(SubRegIdx0, SubRegIdx1) = getSubRegIdxs(CI, Paired);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Dest1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdata);

  BuildMI(*MBB, Paired.I, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, Paired.I, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, InstsToMove);

  CI.I->eraseFromParent();
  Paired.I->eraseFromParent();
  return New;
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeSBufferLoadImmPair(
    CombineInfo &CI, CombineInfo &Paired,
    const SmallVectorImpl<MachineInstr *> &InstsToMove) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  const unsigned Opcode = getNewOpcode(CI, Paired);

  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);

  Register DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset, Paired.Offset);

  // It shouldn't be possible to get this far if the two instructions
  // don't have a single memoperand, because MachineInstr::mayAlias()
  // will return true if this is the case.
  assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand());

  const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
  const MachineMemOperand *MMOb = *Paired.I->memoperands_begin();

  MachineInstr *New =
      BuildMI(*MBB, Paired.I, DL, TII->get(Opcode), DestReg)
          .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::sbase))
          .addImm(MergedOffset) // offset
          .addImm(CI.CPol)      // cpol
          .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));

  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI, Paired);
  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::sdst);
  const auto *Dest1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::sdst);

  BuildMI(*MBB, Paired.I, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, Paired.I, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, InstsToMove);

  CI.I->eraseFromParent();
  Paired.I->eraseFromParent();
  return New;
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeBufferLoadPair(
    CombineInfo &CI, CombineInfo &Paired,
    const SmallVectorImpl<MachineInstr *> &InstsToMove) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();

  const unsigned Opcode = getNewOpcode(CI, Paired);

  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);

  // Copy to the new source register.
  Register DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset, Paired.Offset);

  auto MIB = BuildMI(*MBB, Paired.I, DL, TII->get(Opcode), DestReg);

  AddressRegs Regs = getRegs(Opcode, *TII);

  if (Regs.VAddr)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  // It shouldn't be possible to get this far if the two instructions
  // don't have a single memoperand, because MachineInstr::mayAlias()
  // will return true if this is the case.
  assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand());

  const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
  const MachineMemOperand *MMOb = *Paired.I->memoperands_begin();

  MachineInstr *New =
      MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
          .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
          .addImm(MergedOffset) // offset
          .addImm(CI.CPol)      // cpol
          .addImm(0)            // tfe
          .addImm(0)            // swz
          .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));

  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI, Paired);
  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Dest1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdata);

  BuildMI(*MBB, Paired.I, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, Paired.I, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, InstsToMove);

  CI.I->eraseFromParent();
  Paired.I->eraseFromParent();
  return New;
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeTBufferLoadPair(
    CombineInfo &CI, CombineInfo &Paired,
    const SmallVectorImpl<MachineInstr *> &InstsToMove) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();

  const unsigned Opcode = getNewOpcode(CI, Paired);

  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);

  // Copy to the new source register.
  Register DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset, Paired.Offset);

  auto MIB = BuildMI(*MBB, Paired.I, DL, TII->get(Opcode), DestReg);

  AddressRegs Regs = getRegs(Opcode, *TII);

  if (Regs.VAddr)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  unsigned JoinedFormat =
      getBufferFormatWithCompCount(CI.Format, CI.Width + Paired.Width, *STM);

  // It shouldn't be possible to get this far if the two instructions
  // don't have a single memoperand, because MachineInstr::mayAlias()
  // will return true if this is the case.
  assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand());

  const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
  const MachineMemOperand *MMOb = *Paired.I->memoperands_begin();

  MachineInstr *New =
      MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
          .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
          .addImm(MergedOffset) // offset
          .addImm(JoinedFormat) // format
          .addImm(CI.CPol)      // cpol
          .addImm(0)            // tfe
          .addImm(0)            // swz
          .addMemOperand(
              combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));

  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI, Paired);
  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Dest1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdata);

  BuildMI(*MBB, Paired.I, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, Paired.I, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, InstsToMove);

  CI.I->eraseFromParent();
  Paired.I->eraseFromParent();
  return New;
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeTBufferStorePair(
    CombineInfo &CI, CombineInfo &Paired,
    const SmallVectorImpl<MachineInstr *> &InstsToMove) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();

  const unsigned Opcode = getNewOpcode(CI, Paired);

  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI, Paired);
  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);

  // Copy to the new source register.
  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);
  Register SrcReg = MRI->createVirtualRegister(SuperRC);

  const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Src1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdata);

  BuildMI(*MBB, Paired.I, DL, TII->get(AMDGPU::REG_SEQUENCE), SrcReg)
      .add(*Src0)
      .addImm(SubRegIdx0)
      .add(*Src1)
      .addImm(SubRegIdx1);

  auto MIB = BuildMI(*MBB, Paired.I, DL, TII->get(Opcode))
                 .addReg(SrcReg, RegState::Kill);

  AddressRegs Regs = getRegs(Opcode, *TII);

  if (Regs.VAddr)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  unsigned JoinedFormat =
      getBufferFormatWithCompCount(CI.Format, CI.Width + Paired.Width, *STM);

  // It shouldn't be possible to get this far if the two instructions
  // don't have a single memoperand, because MachineInstr::mayAlias()
  // will return true if this is the case.
  assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand());

  const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
  const MachineMemOperand *MMOb = *Paired.I->memoperands_begin();

  MachineInstr *New =
      MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
          .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
          .addImm(std::min(CI.Offset, Paired.Offset)) // offset
          .addImm(JoinedFormat)                       // format
          .addImm(CI.CPol)                            // cpol
          .addImm(0)                                  // tfe
          .addImm(0)                                  // swz
          .addMemOperand(
              combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));

  moveInstsAfter(MIB, InstsToMove);

  CI.I->eraseFromParent();
  Paired.I->eraseFromParent();
  return New;
}

unsigned SILoadStoreOptimizer::getNewOpcode(const CombineInfo &CI,
                                            const CombineInfo &Paired) {
  const unsigned Width = CI.Width + Paired.Width;

  switch (CI.InstClass) {
  default:
    assert(CI.InstClass == BUFFER_LOAD || CI.InstClass == BUFFER_STORE);
    // FIXME: Handle d16 correctly
    return AMDGPU::getMUBUFOpcode(AMDGPU::getMUBUFBaseOpcode(CI.I->getOpcode()),
                                  Width);
  case TBUFFER_LOAD:
  case TBUFFER_STORE:
    return AMDGPU::getMTBUFOpcode(AMDGPU::getMTBUFBaseOpcode(CI.I->getOpcode()),
                                  Width);

  case UNKNOWN:
    llvm_unreachable("Unknown instruction class");
  case S_BUFFER_LOAD_IMM:
    switch (Width) {
    default:
      return 0;
    case 2:
      return AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;
    case 4:
      return AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM;
    case 8:
      return AMDGPU::S_BUFFER_LOAD_DWORDX8_IMM;
    }
  case MIMG:
    assert((countPopulation(CI.DMask | Paired.DMask) == Width) &&
           "No overlaps");
    return AMDGPU::getMaskedMIMGOp(CI.I->getOpcode(), Width);
  }
}

std::pair<unsigned, unsigned>
SILoadStoreOptimizer::getSubRegIdxs(const CombineInfo &CI,
                                    const CombineInfo &Paired) {
  bool ReverseOrder;
  if (CI.InstClass == MIMG) {
    assert(
        (countPopulation(CI.DMask | Paired.DMask) == CI.Width + Paired.Width) &&
        "No overlaps");
    ReverseOrder = CI.DMask > Paired.DMask;
  } else {
    ReverseOrder = CI.Offset > Paired.Offset;
  }

  unsigned Idx0;
  unsigned Idx1;

  static const unsigned Idxs[5][4] = {
      {AMDGPU::sub0, AMDGPU::sub0_sub1, AMDGPU::sub0_sub1_sub2,
       AMDGPU::sub0_sub1_sub2_sub3},
      {AMDGPU::sub1, AMDGPU::sub1_sub2, AMDGPU::sub1_sub2_sub3,
       AMDGPU::sub1_sub2_sub3_sub4},
      {AMDGPU::sub2, AMDGPU::sub2_sub3, AMDGPU::sub2_sub3_sub4,
       AMDGPU::sub2_sub3_sub4_sub5},
      {AMDGPU::sub3, AMDGPU::sub3_sub4, AMDGPU::sub3_sub4_sub5,
       AMDGPU::sub3_sub4_sub5_sub6},
      {AMDGPU::sub4, AMDGPU::sub4_sub5, AMDGPU::sub4_sub5_sub6,
       AMDGPU::sub4_sub5_sub6_sub7},
  };

  assert(CI.Width >= 1 && CI.Width <= 4);
  assert(Paired.Width >= 1 && Paired.Width <= 4);
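
  // Illustrative: merging a width-2 CI with a width-1 Paired in source order
  // gives Idx0 = Idxs[0][1] = sub0_sub1 and Idx1 = Idxs[2][0] = sub2; with
  // ReverseOrder the roles flip, so Paired gets sub0 and CI gets sub1_sub2.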

  if (ReverseOrder) {
    Idx1 = Idxs[0][Paired.Width - 1];
    Idx0 = Idxs[Paired.Width][CI.Width - 1];
  } else {
    Idx0 = Idxs[0][CI.Width - 1];
    Idx1 = Idxs[CI.Width][Paired.Width - 1];
  }

  return std::make_pair(Idx0, Idx1);
}

const TargetRegisterClass *
SILoadStoreOptimizer::getTargetRegisterClass(const CombineInfo &CI,
                                             const CombineInfo &Paired) {
  if (CI.InstClass == S_BUFFER_LOAD_IMM) {
    switch (CI.Width + Paired.Width) {
    default:
      return nullptr;
    case 2:
      return &AMDGPU::SReg_64_XEXECRegClass;
    case 4:
      return &AMDGPU::SGPR_128RegClass;
    case 8:
      return &AMDGPU::SGPR_256RegClass;
    case 16:
      return &AMDGPU::SGPR_512RegClass;
    }
  }

  unsigned BitWidth = 32 * (CI.Width + Paired.Width);
  return TRI->isAGPRClass(getDataRegClass(*CI.I))
             ? TRI->getAGPRClassForBitWidth(BitWidth)
             : TRI->getVGPRClassForBitWidth(BitWidth);
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeBufferStorePair(
    CombineInfo &CI, CombineInfo &Paired,
    const SmallVectorImpl<MachineInstr *> &InstsToMove) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();

  const unsigned Opcode = getNewOpcode(CI, Paired);

  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI, Paired);
  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);

  // Copy to the new source register.
  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);
  Register SrcReg = MRI->createVirtualRegister(SuperRC);

  const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Src1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdata);

  BuildMI(*MBB, Paired.I, DL, TII->get(AMDGPU::REG_SEQUENCE), SrcReg)
      .add(*Src0)
      .addImm(SubRegIdx0)
      .add(*Src1)
      .addImm(SubRegIdx1);

  auto MIB = BuildMI(*MBB, Paired.I, DL, TII->get(Opcode))
                 .addReg(SrcReg, RegState::Kill);

  AddressRegs Regs = getRegs(Opcode, *TII);

  if (Regs.VAddr)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  // It shouldn't be possible to get this far if the two instructions
  // don't have a single memoperand, because MachineInstr::mayAlias()
  // will return true if this is the case.

const TargetRegisterClass *
SILoadStoreOptimizer::getTargetRegisterClass(const CombineInfo &CI,
                                             const CombineInfo &Paired) {
  if (CI.InstClass == S_BUFFER_LOAD_IMM) {
    switch (CI.Width + Paired.Width) {
    default:
      return nullptr;
    case 2:
      return &AMDGPU::SReg_64_XEXECRegClass;
    case 4:
      return &AMDGPU::SGPR_128RegClass;
    case 8:
      return &AMDGPU::SGPR_256RegClass;
    case 16:
      return &AMDGPU::SGPR_512RegClass;
    }
  }

  unsigned BitWidth = 32 * (CI.Width + Paired.Width);
  return TRI->isAGPRClass(getDataRegClass(*CI.I))
             ? TRI->getAGPRClassForBitWidth(BitWidth)
             : TRI->getVGPRClassForBitWidth(BitWidth);
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeBufferStorePair(
    CombineInfo &CI, CombineInfo &Paired,
    const SmallVectorImpl<MachineInstr *> &InstsToMove) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();

  const unsigned Opcode = getNewOpcode(CI, Paired);

  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI, Paired);
  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);

  // Copy to the new source register.
  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);
  Register SrcReg = MRI->createVirtualRegister(SuperRC);

  const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Src1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdata);

  BuildMI(*MBB, Paired.I, DL, TII->get(AMDGPU::REG_SEQUENCE), SrcReg)
      .add(*Src0)
      .addImm(SubRegIdx0)
      .add(*Src1)
      .addImm(SubRegIdx1);

  auto MIB = BuildMI(*MBB, Paired.I, DL, TII->get(Opcode))
                 .addReg(SrcReg, RegState::Kill);

  AddressRegs Regs = getRegs(Opcode, *TII);

  if (Regs.VAddr)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  // It shouldn't be possible to get this far if the two instructions
  // don't have a single memoperand, because MachineInstr::mayAlias()
  // will return true if this is the case.
  assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand());

  const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
  const MachineMemOperand *MMOb = *Paired.I->memoperands_begin();

  MachineInstr *New =
      MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
          .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
          .addImm(std::min(CI.Offset, Paired.Offset)) // offset
          .addImm(CI.CPol)                            // cpol
          .addImm(0)                                  // tfe
          .addImm(0)                                  // swz
          .addMemOperand(
              combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));

  moveInstsAfter(MIB, InstsToMove);

  CI.I->eraseFromParent();
  Paired.I->eraseFromParent();
  return New;
}
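
// The merged store produced above has this overall shape (illustrative
// assembly; register numbers invented):
//   buffer_store_dword v0, ..., offset:4
//   buffer_store_dword v1, ..., offset:8
//   ==>
//   v[2:3] = REG_SEQUENCE v0, sub0, v1, sub1
//   buffer_store_dwordx2 v[2:3], ..., offset:4
// REG_SEQUENCE packs both data operands into one super-register, and the new
// store uses the smaller of the two original offsets.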

MachineOperand
SILoadStoreOptimizer::createRegOrImm(int32_t Val, MachineInstr &MI) const {
  APInt V(32, Val, true);
  if (TII->isInlineConstant(V))
    return MachineOperand::CreateImm(Val);

  Register Reg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  MachineInstr *Mov =
      BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
              TII->get(AMDGPU::S_MOV_B32), Reg)
          .addImm(Val);
  (void)Mov;
  LLVM_DEBUG(dbgs() << " "; Mov->dump());
  return MachineOperand::CreateReg(Reg, false);
}

// Compute base address using Addr and return the final register.
Register SILoadStoreOptimizer::computeBase(MachineInstr &MI,
                                           const MemAddress &Addr) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineBasicBlock::iterator MBBI = MI.getIterator();
  DebugLoc DL = MI.getDebugLoc();

  assert((TRI->getRegSizeInBits(Addr.Base.LoReg, *MRI) == 32 ||
          Addr.Base.LoSubReg) &&
         "Expected 32-bit Base-Register-Low!!");

  assert((TRI->getRegSizeInBits(Addr.Base.HiReg, *MRI) == 32 ||
          Addr.Base.HiSubReg) &&
         "Expected 32-bit Base-Register-Hi!!");

  LLVM_DEBUG(dbgs() << " Re-Computed Anchor-Base:\n");
  MachineOperand OffsetLo = createRegOrImm(static_cast<int32_t>(Addr.Offset), MI);
  MachineOperand OffsetHi =
      createRegOrImm(static_cast<int32_t>(Addr.Offset >> 32), MI);

  const auto *CarryRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
  Register CarryReg = MRI->createVirtualRegister(CarryRC);
  Register DeadCarryReg = MRI->createVirtualRegister(CarryRC);

  Register DestSub0 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register DestSub1 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  MachineInstr *LoHalf =
      BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADD_CO_U32_e64), DestSub0)
          .addReg(CarryReg, RegState::Define)
          .addReg(Addr.Base.LoReg, 0, Addr.Base.LoSubReg)
          .add(OffsetLo)
          .addImm(0); // clamp bit
  (void)LoHalf;
  LLVM_DEBUG(dbgs() << " "; LoHalf->dump(););

  MachineInstr *HiHalf =
      BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADDC_U32_e64), DestSub1)
          .addReg(DeadCarryReg, RegState::Define | RegState::Dead)
          .addReg(Addr.Base.HiReg, 0, Addr.Base.HiSubReg)
          .add(OffsetHi)
          .addReg(CarryReg, RegState::Kill)
          .addImm(0); // clamp bit
  (void)HiHalf;
  LLVM_DEBUG(dbgs() << " "; HiHalf->dump(););

  Register FullDestReg = MRI->createVirtualRegister(TRI->getVGPR64Class());
  MachineInstr *FullBase =
      BuildMI(*MBB, MBBI, DL, TII->get(TargetOpcode::REG_SEQUENCE), FullDestReg)
          .addReg(DestSub0)
          .addImm(AMDGPU::sub0)
          .addReg(DestSub1)
          .addImm(AMDGPU::sub1);
  (void)FullBase;
  LLVM_DEBUG(dbgs() << " "; FullBase->dump(); dbgs() << "\n";);

  return FullDestReg;
}
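
// The sequence emitted by computeBase() has this shape (illustrative MIR;
// virtual register names invented):
//   %lo:vgpr_32, %carry:sreg_64_xexec =
//       V_ADD_CO_U32_e64 %Base.LoReg, OffsetLo, 0
//   %hi:vgpr_32, %dead:sreg_64_xexec =
//       V_ADDC_U32_e64 %Base.HiReg, OffsetHi, killed %carry, 0
//   %full:vreg_64 = REG_SEQUENCE %lo, %subreg.sub0, %hi, %subreg.sub1
// i.e. a 64-bit add of Addr.Offset to the base, split into a carry-producing
// low half and a carry-consuming high half.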

// Update base and offset with the NewBase and NewOffset in MI.
void SILoadStoreOptimizer::updateBaseAndOffset(MachineInstr &MI,
                                               Register NewBase,
                                               int32_t NewOffset) const {
  auto Base = TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
  Base->setReg(NewBase);
  Base->setIsKill(false);
  TII->getNamedOperand(MI, AMDGPU::OpName::offset)->setImm(NewOffset);
}

Optional<int32_t>
SILoadStoreOptimizer::extractConstOffset(const MachineOperand &Op) const {
  if (Op.isImm())
    return Op.getImm();

  if (!Op.isReg())
    return None;

  MachineInstr *Def = MRI->getUniqueVRegDef(Op.getReg());
  if (!Def || Def->getOpcode() != AMDGPU::S_MOV_B32 ||
      !Def->getOperand(1).isImm())
    return None;

  return Def->getOperand(1).getImm();
}

// Analyze Base and extract:
// - 32-bit base registers and subregisters
// - 64-bit constant offset
// Expecting the base computation as:
//   %OFFSET0:sgpr_32 = S_MOV_B32 8000
//   %LO:vgpr_32, %c:sreg_64_xexec =
//       V_ADD_CO_U32_e64 %BASE_LO:vgpr_32, %OFFSET0:sgpr_32,
//   %HI:vgpr_32, = V_ADDC_U32_e64 %BASE_HI:vgpr_32, 0, killed %c:sreg_64_xexec
//   %Base:vreg_64 =
//       REG_SEQUENCE %LO:vgpr_32, %subreg.sub0, %HI:vgpr_32, %subreg.sub1
void SILoadStoreOptimizer::processBaseWithConstOffset(const MachineOperand &Base,
                                                      MemAddress &Addr) const {
  if (!Base.isReg())
    return;

  MachineInstr *Def = MRI->getUniqueVRegDef(Base.getReg());
  if (!Def || Def->getOpcode() != AMDGPU::REG_SEQUENCE ||
      Def->getNumOperands() != 5)
    return;

  MachineOperand BaseLo = Def->getOperand(1);
  MachineOperand BaseHi = Def->getOperand(3);
  if (!BaseLo.isReg() || !BaseHi.isReg())
    return;

  MachineInstr *BaseLoDef = MRI->getUniqueVRegDef(BaseLo.getReg());
  MachineInstr *BaseHiDef = MRI->getUniqueVRegDef(BaseHi.getReg());

  if (!BaseLoDef || BaseLoDef->getOpcode() != AMDGPU::V_ADD_CO_U32_e64 ||
      !BaseHiDef || BaseHiDef->getOpcode() != AMDGPU::V_ADDC_U32_e64)
    return;

  const auto *Src0 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src0);
  const auto *Src1 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src1);

  auto Offset0P = extractConstOffset(*Src0);
  if (Offset0P)
    BaseLo = *Src1;
  else {
    if (!(Offset0P = extractConstOffset(*Src1)))
      return;
    BaseLo = *Src0;
  }

  Src0 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src0);
  Src1 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src1);

  if (Src0->isImm())
    std::swap(Src0, Src1);

  if (!Src1->isImm())
    return;

  uint64_t Offset1 = Src1->getImm();
  BaseHi = *Src0;

  Addr.Base.LoReg = BaseLo.getReg();
  Addr.Base.HiReg = BaseHi.getReg();
  Addr.Base.LoSubReg = BaseLo.getSubReg();
  Addr.Base.HiSubReg = BaseHi.getSubReg();
  Addr.Offset = (*Offset0P & 0x00000000ffffffff) | (Offset1 << 32);
}
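
// A short numeric check of the reassembly above (illustrative): with a
// low-half immediate of 0x1000 and a high-half immediate of 0x1, the
// recovered constant is (0x1000 & 0xffffffff) | (0x1 << 32) = 0x100001000.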

bool SILoadStoreOptimizer::promoteConstantOffsetToImm(
    MachineInstr &MI,
    MemInfoMap &Visited,
    SmallPtrSet<MachineInstr *, 4> &AnchorList) const {

  if (!(MI.mayLoad() ^ MI.mayStore()))
    return false;

  // TODO: Support flat and scratch.
  if (AMDGPU::getGlobalSaddrOp(MI.getOpcode()) < 0)
    return false;

  if (MI.mayLoad() &&
      TII->getNamedOperand(MI, AMDGPU::OpName::vdata) != nullptr)
    return false;

  if (AnchorList.count(&MI))
    return false;

  LLVM_DEBUG(dbgs() << "\nTryToPromoteConstantOffsetToImmFor "; MI.dump());

  if (TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm()) {
    LLVM_DEBUG(dbgs() << " Const-offset is already promoted.\n";);
    return false;
  }

  // Step 1: Find the base registers and a 64-bit constant offset.
  MachineOperand &Base = *TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
  MemAddress MAddr;
  if (Visited.find(&MI) == Visited.end()) {
    processBaseWithConstOffset(Base, MAddr);
    Visited[&MI] = MAddr;
  } else
    MAddr = Visited[&MI];

  if (MAddr.Offset == 0) {
    LLVM_DEBUG(dbgs() << " Failed to extract constant-offset or there are no"
                         " constant offsets that can be promoted.\n";);
    return false;
  }

  LLVM_DEBUG(dbgs() << " BASE: {" << MAddr.Base.HiReg << ", "
                    << MAddr.Base.LoReg << "} Offset: " << MAddr.Offset << "\n\n";);

  // Step 2: Traverse through MI's basic block and find an anchor (an
  // instruction with the same base registers) whose offset has the highest
  // 13-bit distance from MI's offset.
  // E.g. (64-bit loads)
  // bb:
  //   addr1 = &a + 4096;   load1 = load(addr1,  0)
  //   addr2 = &a + 6144;   load2 = load(addr2,  0)
  //   addr3 = &a + 8192;   load3 = load(addr3,  0)
  //   addr4 = &a + 10240;  load4 = load(addr4,  0)
  //   addr5 = &a + 12288;  load5 = load(addr5,  0)
  //
  // Starting from the first load, the optimization will try to find a new
  // base from which (&a + 4096) has a 13-bit distance. Both &a + 6144 and
  // &a + 8192 have a 13-bit distance from &a + 4096. The heuristic picks
  // &a + 8192 as the new base (anchor), because the maximum distance can
  // presumably accommodate more intermediate bases.
  //
  // Step 3: move (&a + 8192) above load1. Compute and promote offsets from
  // (&a + 8192) for load1, load2 and load4.
  //   addr = &a + 8192
  //   load1 = load(addr, -4096)
  //   load2 = load(addr, -2048)
  //   load3 = load(addr,  0)
  //   load4 = load(addr,  2048)
  //   addr5 = &a + 12288;  load5 = load(addr5,  0)
  //
  MachineInstr *AnchorInst = nullptr;
  MemAddress AnchorAddr;
  uint32_t MaxDist = std::numeric_limits<uint32_t>::min();
  SmallVector<std::pair<MachineInstr *, int64_t>, 4> InstsWCommonBase;

  MachineBasicBlock *MBB = MI.getParent();
  MachineBasicBlock::iterator E = MBB->end();
  MachineBasicBlock::iterator MBBI = MI.getIterator();
  ++MBBI;
  const SITargetLowering *TLI =
      static_cast<const SITargetLowering *>(STM->getTargetLowering());

  for ( ; MBBI != E; ++MBBI) {
    MachineInstr &MINext = *MBBI;
    // TODO: Support finding an anchor (with the same base) from store
    // addresses or any other load addresses where the opcodes are different.
    if (MINext.getOpcode() != MI.getOpcode() ||
        TII->getNamedOperand(MINext, AMDGPU::OpName::offset)->getImm())
      continue;

    const MachineOperand &BaseNext =
        *TII->getNamedOperand(MINext, AMDGPU::OpName::vaddr);
    MemAddress MAddrNext;
    if (Visited.find(&MINext) == Visited.end()) {
      processBaseWithConstOffset(BaseNext, MAddrNext);
      Visited[&MINext] = MAddrNext;
    } else
      MAddrNext = Visited[&MINext];

    if (MAddrNext.Base.LoReg != MAddr.Base.LoReg ||
        MAddrNext.Base.HiReg != MAddr.Base.HiReg ||
        MAddrNext.Base.LoSubReg != MAddr.Base.LoSubReg ||
        MAddrNext.Base.HiSubReg != MAddr.Base.HiSubReg)
      continue;

    InstsWCommonBase.push_back(std::make_pair(&MINext, MAddrNext.Offset));

    int64_t Dist = MAddr.Offset - MAddrNext.Offset;
    TargetLoweringBase::AddrMode AM;
    AM.HasBaseReg = true;
    AM.BaseOffs = Dist;
    if (TLI->isLegalGlobalAddressingMode(AM) &&
        (uint32_t)std::abs(Dist) > MaxDist) {
      MaxDist = std::abs(Dist);

      AnchorAddr = MAddrNext;
      AnchorInst = &MINext;
    }
  }

  if (AnchorInst) {
    LLVM_DEBUG(dbgs() << " Anchor-Inst(with max-distance from Offset): ";
               AnchorInst->dump());
    LLVM_DEBUG(dbgs() << " Anchor-Offset from BASE: "
                      << AnchorAddr.Offset << "\n\n");

    // Instead of moving up, just re-compute the anchor instruction's base
    // address.
    Register Base = computeBase(MI, AnchorAddr);

    updateBaseAndOffset(MI, Base, MAddr.Offset - AnchorAddr.Offset);
    LLVM_DEBUG(dbgs() << " After promotion: "; MI.dump(););

    for (auto P : InstsWCommonBase) {
      TargetLoweringBase::AddrMode AM;
      AM.HasBaseReg = true;
      AM.BaseOffs = P.second - AnchorAddr.Offset;

      if (TLI->isLegalGlobalAddressingMode(AM)) {
        LLVM_DEBUG(dbgs() << " Promote Offset(" << P.second;
                   dbgs() << ")"; P.first->dump());
        updateBaseAndOffset(*P.first, Base, P.second - AnchorAddr.Offset);
        LLVM_DEBUG(dbgs() << " After promotion: "; P.first->dump());
      }
    }
    AnchorList.insert(AnchorInst);
    return true;
  }

  return false;
}
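
// Offset arithmetic for the Step 2/Step 3 example above (illustrative): with
// the anchor at &a + 8192, the promoted immediates are 4096 - 8192 = -4096,
// 6144 - 8192 = -2048 and 10240 - 8192 = 2048, all of which pass the
// isLegalGlobalAddressingMode() check for a signed 13-bit offset;
// 12288 - 8192 = 4096 does not fit, so load5 keeps its own base.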

void SILoadStoreOptimizer::addInstToMergeableList(const CombineInfo &CI,
                 std::list<std::list<CombineInfo> > &MergeableInsts) const {
  for (std::list<CombineInfo> &AddrList : MergeableInsts) {
    if (AddrList.front().InstClass == CI.InstClass &&
        AddrList.front().IsAGPR == CI.IsAGPR &&
        AddrList.front().hasSameBaseAddress(*CI.I)) {
      AddrList.emplace_back(CI);
      return;
    }
  }

  // Base address not found, so add a new list.
  MergeableInsts.emplace_back(1, CI);
}

std::pair<MachineBasicBlock::iterator, bool>
SILoadStoreOptimizer::collectMergeableInsts(
    MachineBasicBlock::iterator Begin, MachineBasicBlock::iterator End,
    MemInfoMap &Visited, SmallPtrSet<MachineInstr *, 4> &AnchorList,
    std::list<std::list<CombineInfo>> &MergeableInsts) const {
  bool Modified = false;

  // Sort potentially mergeable instructions into lists, one list per base
  // address.
  unsigned Order = 0;
  MachineBasicBlock::iterator BlockI = Begin;
  for (; BlockI != End; ++BlockI) {
    MachineInstr &MI = *BlockI;

    // We run this before checking if an address is mergeable, because it can
    // produce better code even if the instructions aren't mergeable.
    if (promoteConstantOffsetToImm(MI, Visited, AnchorList))
      Modified = true;

    // Treat volatile accesses, ordered accesses and unmodeled side effects as
    // barriers. We can look after this barrier for separate merges.
    if (MI.hasOrderedMemoryRef() || MI.hasUnmodeledSideEffects()) {
      LLVM_DEBUG(dbgs() << "Breaking search on barrier: " << MI);

      // Search will resume after this instruction in a separate merge list.
      ++BlockI;
      break;
    }

    const InstClassEnum InstClass = getInstClass(MI.getOpcode(), *TII);
    if (InstClass == UNKNOWN)
      continue;

    // Do not merge VMEM buffer instructions with "swizzled" bit set.
    int Swizzled =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::swz);
    if (Swizzled != -1 && MI.getOperand(Swizzled).getImm())
      continue;

    CombineInfo CI;
    CI.setMI(MI, *this);
    CI.Order = Order++;

    if (!CI.hasMergeableAddress(*MRI))
      continue;

    if (CI.InstClass == DS_WRITE && CI.IsAGPR) {
      // FIXME: nothing is illegal in a ds_write2 opcode with two AGPR data
      // operands. However we are reporting that ds_write2 shall have
      // only VGPR data so that machine copy propagation does not
      // create an illegal instruction with a VGPR and AGPR sources.
      // Consequently, if we create such an instruction, the verifier
      // will complain.
      continue;
    }

    LLVM_DEBUG(dbgs() << "Mergeable: " << MI);

    addInstToMergeableList(CI, MergeableInsts);
  }

  // At this point we have lists of mergeable instructions.
  //
  // Part 2: Sort each list by offset, so that candidates for merging sit
  // next to each other, and discard any list with fewer than two entries,
  // since a merge needs at least a pair.

  for (std::list<std::list<CombineInfo>>::iterator I = MergeableInsts.begin(),
       E = MergeableInsts.end(); I != E;) {

    std::list<CombineInfo> &MergeList = *I;
    if (MergeList.size() <= 1) {
      // This means we have found only one instruction with a given address
      // that can be merged, and we need at least 2 instructions to do a
      // merge, so this list can be discarded.
      I = MergeableInsts.erase(I);
      continue;
    }

    // Sort the lists by offsets; this way mergeable instructions will be
    // adjacent to each other in the list, which will make it easier to find
    // matches.
    MergeList.sort(
        [] (const CombineInfo &A, const CombineInfo &B) {
          return A.Offset < B.Offset;
        });
    ++I;
  }

  return std::make_pair(BlockI, Modified);
}
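
// For instance (illustrative): a section containing three loads at offsets
// 8, 0 and 4 from one base plus a single store from another base yields two
// lists; the store's one-element list is discarded, and the load list is
// sorted to offsets {0, 4, 8} so that merge candidates end up adjacent.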

// Scan through looking for adjacent LDS operations with constant offsets from
// the same base register. We rely on the scheduler to do the hard work of
// clustering nearby loads, and assume these are all adjacent.
bool SILoadStoreOptimizer::optimizeBlock(
                       std::list<std::list<CombineInfo> > &MergeableInsts) {
  bool Modified = false;

  for (std::list<std::list<CombineInfo>>::iterator I = MergeableInsts.begin(),
       E = MergeableInsts.end(); I != E;) {
    std::list<CombineInfo> &MergeList = *I;

    bool OptimizeListAgain = false;
    if (!optimizeInstsWithSameBaseAddr(MergeList, OptimizeListAgain)) {
      // We weren't able to make any changes, so delete the list so we don't
      // process the same instructions the next time we try to optimize this
      // block.
      I = MergeableInsts.erase(I);
      continue;
    }

    Modified = true;

    // We made changes, but also determined that there were no more
    // optimization opportunities, so we don't need to reprocess the list.
    if (!OptimizeListAgain) {
      I = MergeableInsts.erase(I);
      continue;
    }
    OptimizeAgain = true;
  }
  return Modified;
}

bool
SILoadStoreOptimizer::optimizeInstsWithSameBaseAddr(
    std::list<CombineInfo> &MergeList,
    bool &OptimizeListAgain) {
  if (MergeList.empty())
    return false;

  bool Modified = false;

  for (auto I = MergeList.begin(), Next = std::next(I); Next != MergeList.end();
       Next = std::next(I)) {

    auto First = I;
    auto Second = Next;

    if ((*First).Order > (*Second).Order)
      std::swap(First, Second);
    CombineInfo &CI = *First;
    CombineInfo &Paired = *Second;

    SmallVector<MachineInstr *, 8> InstsToMove;
    if (!checkAndPrepareMerge(CI, Paired, InstsToMove)) {
      ++I;
      continue;
    }

    Modified = true;

    LLVM_DEBUG(dbgs() << "Merging: " << *CI.I << " with: " << *Paired.I);

    MachineBasicBlock::iterator NewMI;
    switch (CI.InstClass) {
    default:
      llvm_unreachable("unknown InstClass");
      break;
    case DS_READ:
      NewMI = mergeRead2Pair(CI, Paired, InstsToMove);
      break;
    case DS_WRITE:
      NewMI = mergeWrite2Pair(CI, Paired, InstsToMove);
      break;
    case S_BUFFER_LOAD_IMM:
      NewMI = mergeSBufferLoadImmPair(CI, Paired, InstsToMove);
      OptimizeListAgain |= CI.Width + Paired.Width < 8;
      break;
    case BUFFER_LOAD:
      NewMI = mergeBufferLoadPair(CI, Paired, InstsToMove);
      OptimizeListAgain |= CI.Width + Paired.Width < 4;
      break;
    case BUFFER_STORE:
      NewMI = mergeBufferStorePair(CI, Paired, InstsToMove);
      OptimizeListAgain |= CI.Width + Paired.Width < 4;
      break;
    case MIMG:
      NewMI = mergeImagePair(CI, Paired, InstsToMove);
      OptimizeListAgain |= CI.Width + Paired.Width < 4;
      break;
    case TBUFFER_LOAD:
      NewMI = mergeTBufferLoadPair(CI, Paired, InstsToMove);
      OptimizeListAgain |= CI.Width + Paired.Width < 4;
      break;
    case TBUFFER_STORE:
      NewMI = mergeTBufferStorePair(CI, Paired, InstsToMove);
      OptimizeListAgain |= CI.Width + Paired.Width < 4;
      break;
    }
    CI.setMI(NewMI, *this);
    CI.Order = Paired.Order;
    if (I == Second)
      I = Next;

    MergeList.erase(Second);
  }

  return Modified;
}
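
// The OptimizeListAgain/OptimizeAgain flags drive iterative widening
// (illustrative): four s_buffer_load_dword from one base first merge
// pairwise into two S_BUFFER_LOAD_DWORDX2_IMM instructions (width
// 1 + 1 < 8 keeps the list alive), and a second round of optimizeBlock()
// merges those into a single S_BUFFER_LOAD_DWORDX4_IMM.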

bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  STM = &MF.getSubtarget<GCNSubtarget>();
  if (!STM->loadStoreOptEnabled())
    return false;

  TII = STM->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  LLVM_DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");

  bool Modified = false;

  // Contains the list of instructions for which constant offsets are being
  // promoted to the IMM. This is tracked for an entire block at a time.
  SmallPtrSet<MachineInstr *, 4> AnchorList;
  MemInfoMap Visited;

  for (MachineBasicBlock &MBB : MF) {
    MachineBasicBlock::iterator SectionEnd;
    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;
         I = SectionEnd) {
      bool CollectModified;
      std::list<std::list<CombineInfo>> MergeableInsts;

      // First pass: Collect list of all instructions we know how to merge in
      // a subset of the block.
      std::tie(SectionEnd, CollectModified) =
          collectMergeableInsts(I, E, Visited, AnchorList, MergeableInsts);

      Modified |= CollectModified;

      do {
        OptimizeAgain = false;
        Modified |= optimizeBlock(MergeableInsts);
      } while (OptimizeAgain);
    }

    Visited.clear();
    AnchorList.clear();
  }

  return Modified;
}