//===- SILoadStoreOptimizer.cpp -------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass tries to fuse DS instructions with nearby immediate offsets.
// This will fuse operations such as
//  ds_read_b32 v0, v2 offset:16
//  ds_read_b32 v1, v2 offset:32
// ==>
//   ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
//
// The same is done for certain SMEM and VMEM opcodes, e.g.:
//  s_buffer_load_dword s4, s[0:3], 4
//  s_buffer_load_dword s5, s[0:3], 8
// ==>
//  s_buffer_load_dwordx2 s[4:5], s[0:3], 4
//
// Future improvements:
//
// - This currently relies on the scheduler to place loads and stores next to
//   each other, and then only merges adjacent pairs of instructions. It would
//   be good to be more flexible with interleaved instructions, and possibly run
//   before scheduling. It currently misses stores of constants because loading
//   the constant into the data register is placed between the stores, although
//   this is arguably a scheduling problem.
//
// - Live interval recomputing seems inefficient. This currently only matches
//   one pair, and recomputes live intervals and moves on to the next pair. It
//   would be better to compute a list of all merges that need to occur.
//
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads has offsets that are too large to fit in the 8-bit offset
//   fields, but are close enough to each other to fit in 8 bits, we can add to
//   the base pointer and use the new reduced offsets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-load-store-opt"

namespace {

class SILoadStoreOptimizer : public MachineFunctionPass {
  enum InstClassEnum {
    DS_READ_WRITE,
    S_BUFFER_LOAD_IMM,
    BUFFER_LOAD_OFFEN,
    BUFFER_LOAD_OFFSET,
    BUFFER_STORE_OFFEN,
    BUFFER_STORE_OFFSET,
  };

  struct CombineInfo {
    MachineBasicBlock::iterator I;
    MachineBasicBlock::iterator Paired;
    unsigned EltSize;
    unsigned Offset0;
    unsigned Offset1;
    unsigned BaseOff;
    InstClassEnum InstClass;
    bool GLC0;
    bool GLC1;
    bool SLC0;
    bool SLC1;
    bool UseST64;
    bool IsX2;
    SmallVector<MachineInstr*, 8> InstsToMove;
  };

private:
  const SISubtarget *STM = nullptr;
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  AliasAnalysis *AA = nullptr;
  unsigned CreatedX2;

  static bool offsetsCanBeCombined(CombineInfo &CI);

  bool findMatchingInst(CombineInfo &CI);

  unsigned read2Opcode(unsigned EltSize) const;
  unsigned read2ST64Opcode(unsigned EltSize) const;
  MachineBasicBlock::iterator mergeRead2Pair(CombineInfo &CI);

  unsigned write2Opcode(unsigned EltSize) const;
  unsigned write2ST64Opcode(unsigned EltSize) const;
  MachineBasicBlock::iterator mergeWrite2Pair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeSBufferLoadImmPair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeBufferLoadPair(CombineInfo &CI);
  unsigned promoteBufferStoreOpcode(const MachineInstr &I, bool &IsX2,
                                    bool &IsOffen) const;
  MachineBasicBlock::iterator mergeBufferStorePair(CombineInfo &CI);

public:
  static char ID;

  SILoadStoreOptimizer() : MachineFunctionPass(ID) {
    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
  }

  bool optimizeBlock(MachineBasicBlock &MBB);

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Load Store Optimizer"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();

    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace.

INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
                      "SI Load Store Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE,
                    "SI Load Store Optimizer", false, false)

char SILoadStoreOptimizer::ID = 0;

char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;

FunctionPass *llvm::createSILoadStoreOptimizerPass() {
  return new SILoadStoreOptimizer();
}

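// Move every instruction in InstsToMove to the point immediately after I,
// preserving their original relative order.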
static void moveInstsAfter(MachineBasicBlock::iterator I,
                           ArrayRef<MachineInstr*> InstsToMove) {
  MachineBasicBlock *MBB = I->getParent();
  ++I;
  for (MachineInstr *MI : InstsToMove) {
    MI->removeFromParent();
    MBB->insert(I, MI);
  }
}

static void addDefsToList(const MachineInstr &MI, DenseSet<unsigned> &Defs) {
  // XXX: Should this be looking for implicit defs?
  for (const MachineOperand &Def : MI.defs())
    Defs.insert(Def.getReg());
}

static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
                                      MachineBasicBlock::iterator B,
                                      const SIInstrInfo *TII,
                                      AliasAnalysis *AA) {
  // RAW or WAR - cannot reorder
  // WAW - cannot reorder
  // RAR - safe to reorder
  return !(A->mayStore() || B->mayStore()) ||
    TII->areMemAccessesTriviallyDisjoint(*A, *B, AA);
}

// Add MI and its defs to the lists if MI reads one of the defs that are
// already in the list. Returns true in that case.
static bool
addToListsIfDependent(MachineInstr &MI,
                      DenseSet<unsigned> &Defs,
                      SmallVectorImpl<MachineInstr*> &Insts) {
  for (MachineOperand &Use : MI.operands()) {
    // If one of the defs is read, then there is a use of Def between I and the
    // instruction that I will potentially be merged with. We will need to move
    // this instruction after the merged instructions.

    if (Use.isReg() && Use.readsReg() && Defs.count(Use.getReg())) {
      Insts.push_back(&MI);
      addDefsToList(MI, Defs);
      return true;
    }
  }

  return false;
}

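// Return true if it is safe to move every memory access in InstsToMove past
// MemOp, i.e. none of them may alias with it.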
static bool
canMoveInstsAcrossMemOp(MachineInstr &MemOp,
                        ArrayRef<MachineInstr*> InstsToMove,
                        const SIInstrInfo *TII,
                        AliasAnalysis *AA) {
  assert(MemOp.mayLoadOrStore());

  for (MachineInstr *InstToMove : InstsToMove) {
    if (!InstToMove->mayLoadOrStore())
      continue;
    if (!memAccessesCanBeReordered(MemOp, *InstToMove, TII, AA))
      return false;
  }
  return true;
}

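// Decide whether the two offsets recorded in CI can be encoded in a single
// merged instruction. For DS instructions this rewrites CI.Offset0/Offset1
// into the encoded (element or stride-64) units and may set CI.BaseOff and
// CI.UseST64; for SMEM/VMEM it only checks that the two accesses are adjacent
// and have matching cache policy bits.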
bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI) {
  // XXX - Would the same offset be OK? Is there any reason this would happen or
  // be useful?
  if (CI.Offset0 == CI.Offset1)
    return false;

  // This won't be valid if the offset isn't aligned.
  if ((CI.Offset0 % CI.EltSize != 0) || (CI.Offset1 % CI.EltSize != 0))
    return false;

  unsigned EltOffset0 = CI.Offset0 / CI.EltSize;
  unsigned EltOffset1 = CI.Offset1 / CI.EltSize;
  CI.UseST64 = false;
  CI.BaseOff = 0;

  // Handle SMEM and VMEM instructions.
  if (CI.InstClass != DS_READ_WRITE) {
    unsigned Diff = CI.IsX2 ? 2 : 1;
    return (EltOffset0 + Diff == EltOffset1 ||
            EltOffset1 + Diff == EltOffset0) &&
           CI.GLC0 == CI.GLC1 &&
           (CI.InstClass == S_BUFFER_LOAD_IMM || CI.SLC0 == CI.SLC1);
  }

  // If the offset in elements doesn't fit in 8 bits, we might be able to use
  // the stride 64 versions.
  if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64) == 0 &&
      isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
    CI.Offset0 = EltOffset0 / 64;
    CI.Offset1 = EltOffset1 / 64;
    CI.UseST64 = true;
    return true;
  }

  // Check if the new offsets fit in the reduced 8-bit range.
  if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
    CI.Offset0 = EltOffset0;
    CI.Offset1 = EltOffset1;
    return true;
  }

  // Try to shift base address to decrease offsets.
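  // For example (illustrative values only), with EltSize == 4, byte offsets
  // 1024 and 1040 give element offsets 256 and 260, which do not fit in
  // 8 bits; rebasing on BaseOff = 1024 leaves encodable offsets 0 and 4.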
  unsigned OffsetDiff = std::abs((int)EltOffset1 - (int)EltOffset0);
  CI.BaseOff = std::min(CI.Offset0, CI.Offset1);

  if ((OffsetDiff % 64 == 0) && isUInt<8>(OffsetDiff / 64)) {
    CI.Offset0 = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64;
    CI.Offset1 = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64;
    CI.UseST64 = true;
    return true;
  }

  if (isUInt<8>(OffsetDiff)) {
    CI.Offset0 = EltOffset0 - CI.BaseOff / CI.EltSize;
    CI.Offset1 = EltOffset1 - CI.BaseOff / CI.EltSize;
    return true;
  }

  return false;
}

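// Scan forward from CI.I for an instruction with the same opcode and base
// address operands that can legally be merged with it. On success, fill in
// CI.Paired, the two offsets and the cache policy bits, and return true.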
bool SILoadStoreOptimizer::findMatchingInst(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  MachineBasicBlock::iterator E = MBB->end();
  MachineBasicBlock::iterator MBBI = CI.I;

  unsigned AddrOpName[3] = {0};
  int AddrIdx[3];
  const MachineOperand *AddrReg[3];
  unsigned NumAddresses = 0;

  switch (CI.InstClass) {
  case DS_READ_WRITE:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::addr;
    break;
  case S_BUFFER_LOAD_IMM:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::sbase;
    break;
  case BUFFER_LOAD_OFFEN:
  case BUFFER_STORE_OFFEN:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::srsrc;
    AddrOpName[NumAddresses++] = AMDGPU::OpName::vaddr;
    AddrOpName[NumAddresses++] = AMDGPU::OpName::soffset;
    break;
  case BUFFER_LOAD_OFFSET:
  case BUFFER_STORE_OFFSET:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::srsrc;
    AddrOpName[NumAddresses++] = AMDGPU::OpName::soffset;
    break;
  }

  for (unsigned i = 0; i < NumAddresses; i++) {
    AddrIdx[i] = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AddrOpName[i]);
    AddrReg[i] = &CI.I->getOperand(AddrIdx[i]);

    // We only ever merge operations with the same base address register, so
    // don't bother scanning forward if there are no other uses.
    if (AddrReg[i]->isReg() &&
        (TargetRegisterInfo::isPhysicalRegister(AddrReg[i]->getReg()) ||
         MRI->hasOneNonDBGUse(AddrReg[i]->getReg())))
      return false;
  }

  ++MBBI;

  DenseSet<unsigned> DefsToMove;
  addDefsToList(*CI.I, DefsToMove);

  for ( ; MBBI != E; ++MBBI) {
    if (MBBI->getOpcode() != CI.I->getOpcode()) {
      // This is not a matching instruction, but we can keep looking as
      // long as one of these conditions is met:
      // 1. It is safe to move I down past MBBI.
      // 2. It is safe to move MBBI down past the instruction that I will
      //    be merged into.

      if (MBBI->hasUnmodeledSideEffects()) {
        // We can't re-order this instruction with respect to other memory
        // operations, so we fail both conditions mentioned above.
        return false;
      }

      if (MBBI->mayLoadOrStore() &&
          (!memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA) ||
           !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))) {
        // We fail condition #1, but we may still be able to satisfy condition
        // #2.  Add this instruction to the move list and then we will check
        // if condition #2 holds once we have selected the matching instruction.
        CI.InstsToMove.push_back(&*MBBI);
        addDefsToList(*MBBI, DefsToMove);
        continue;
      }

      // When we match I with another memory instruction we will be moving I
      // down to the location of the matched instruction, so any uses of I will
      // need to be moved down as well.
      addToListsIfDependent(*MBBI, DefsToMove, CI.InstsToMove);
      continue;
    }

    // Don't merge volatiles.
    if (MBBI->hasOrderedMemoryRef())
      return false;

    // Handle a case like
    //   DS_WRITE_B32 addr, v, idx0
    //   w = DS_READ_B32 addr, idx0
    //   DS_WRITE_B32 addr, f(w), idx1
    // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
    // merging of the two writes.
    if (addToListsIfDependent(*MBBI, DefsToMove, CI.InstsToMove))
      continue;

    bool Match = true;
    for (unsigned i = 0; i < NumAddresses; i++) {
      const MachineOperand &AddrRegNext = MBBI->getOperand(AddrIdx[i]);

      if (AddrReg[i]->isImm() || AddrRegNext.isImm()) {
        if (AddrReg[i]->isImm() != AddrRegNext.isImm() ||
            AddrReg[i]->getImm() != AddrRegNext.getImm()) {
          Match = false;
          break;
        }
        continue;
      }

      // Check same base pointer. Be careful of subregisters, which can occur
      // with vectors of pointers.
      if (AddrReg[i]->getReg() != AddrRegNext.getReg() ||
          AddrReg[i]->getSubReg() != AddrRegNext.getSubReg()) {
        Match = false;
        break;
      }
    }

    if (Match) {
      int OffsetIdx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(),
                                                 AMDGPU::OpName::offset);
      CI.Offset0 = CI.I->getOperand(OffsetIdx).getImm();
      CI.Offset1 = MBBI->getOperand(OffsetIdx).getImm();
      CI.Paired = MBBI;

      if (CI.InstClass == DS_READ_WRITE) {
        CI.Offset0 &= 0xffff;
        CI.Offset1 &= 0xffff;
      } else {
        CI.GLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::glc)->getImm();
        CI.GLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::glc)->getImm();
        if (CI.InstClass != S_BUFFER_LOAD_IMM) {
          CI.SLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::slc)->getImm();
          CI.SLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::slc)->getImm();
        }
      }

      // Check both offsets fit in the reduced range.
      // We also need to go through the list of instructions that we plan to
      // move and make sure they are all safe to move down past the merged
      // instruction.
      if (offsetsCanBeCombined(CI))
        if (canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
          return true;
    }

    // We've found a load/store that we couldn't merge for some reason.
    // We could potentially keep looking, but we'd need to make sure that
    // it was safe to move I and also all the instructions in InstsToMove
    // down past this instruction.
    // Check if we can move I across MBBI and if we can move all I's users.
    if (!memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA) ||
        !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
      break;
  }
  return false;
}

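// Select the ds_read2 opcode for the element size. Subtargets that do not
// require M0 to be initialized for LDS access use the gfx9 encodings.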
unsigned SILoadStoreOptimizer::read2Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64;
  return (EltSize == 4) ? AMDGPU::DS_READ2_B32_gfx9 : AMDGPU::DS_READ2_B64_gfx9;
}

unsigned SILoadStoreOptimizer::read2ST64Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32 : AMDGPU::DS_READ2ST64_B64;

  return (EltSize == 4) ?
    AMDGPU::DS_READ2ST64_B32_gfx9 : AMDGPU::DS_READ2ST64_B64_gfx9;
}

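// Replace the two DS reads in CI with a single ds_read2 (or ds_read2st64),
// then copy each half of the wide result back into the original destination
// registers. Returns an iterator to the instruction following the erased CI.I.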
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be careful, since the addresses could be subregisters themselves in weird
  // cases, like vectors of pointers.
  const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);

  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdst);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = CI.UseST64 ?
    read2ST64Opcode(CI.EltSize) : read2Opcode(CI.EltSize);

  unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(SubRegIdx0, SubRegIdx1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Read2Desc = TII->get(Opc);

  const TargetRegisterClass *SuperRC
    = (CI.EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);

  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = AddrReg->getReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    unsigned ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
      .addImm(CI.BaseOff);

    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;

    TII->getAddNoCarry(*MBB, CI.Paired, DL, BaseReg)
      .addReg(ImmReg)
      .addReg(AddrReg->getReg());
  }

  MachineInstrBuilder Read2 =
    BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
      .addReg(BaseReg, BaseRegFlags) // addr
      .addImm(NewOffset0)            // offset0
      .addImm(NewOffset1)            // offset1
      .addImm(0)                     // gds
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  (void)Read2;

  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);

  // Copy to the old destination registers.
  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  return Next;
}

unsigned SILoadStoreOptimizer::write2Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32 : AMDGPU::DS_WRITE2_B64;
  return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32_gfx9 : AMDGPU::DS_WRITE2_B64_gfx9;
}

unsigned SILoadStoreOptimizer::write2ST64Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32 : AMDGPU::DS_WRITE2ST64_B64;

  return (EltSize == 4) ?
    AMDGPU::DS_WRITE2ST64_B32_gfx9 : AMDGPU::DS_WRITE2ST64_B64_gfx9;
}

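// Replace the two DS writes in CI with a single ds_write2 (or ds_write2st64)
// that carries both data operands. Returns an iterator to the instruction
// following the erased CI.I.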
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be sure to use .add(), and not .addReg(), with these operands. We want to
  // be sure we preserve the subregister index and any register flags set on
  // them.
  const MachineOperand *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
  const MachineOperand *Data0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
  const MachineOperand *Data1
    = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::data0);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = CI.UseST64 ?
    write2ST64Opcode(CI.EltSize) : write2Opcode(CI.EltSize);

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(Data0, Data1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Write2Desc = TII->get(Opc);
  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = AddrReg->getReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    unsigned ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
      .addImm(CI.BaseOff);

    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;

    TII->getAddNoCarry(*MBB, CI.Paired, DL, BaseReg)
      .addReg(ImmReg)
      .addReg(AddrReg->getReg());
  }

  MachineInstrBuilder Write2 =
    BuildMI(*MBB, CI.Paired, DL, Write2Desc)
      .addReg(BaseReg, BaseRegFlags) // addr
      .add(*Data0)                   // data0
      .add(*Data1)                   // data1
      .addImm(NewOffset0)            // offset0
      .addImm(NewOffset1)            // offset1
      .addImm(0)                     // gds
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  moveInstsAfter(Write2, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  return Next;
}

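// Merge the two s_buffer_load_dword(x2) instructions in CI into a single load
// of twice the width, then copy the subregisters of the wide result back into
// the original destinations.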
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeSBufferLoadImmPair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  unsigned Opcode = CI.IsX2 ? AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM :
                              AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;

  const TargetRegisterClass *SuperRC =
    CI.IsX2 ? &AMDGPU::SReg_128RegClass : &AMDGPU::SReg_64_XEXECRegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);

  BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg)
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::sbase))
      .addImm(MergedOffset) // offset
      .addImm(CI.GLC0)      // glc
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
  unsigned SubRegIdx1 = CI.IsX2 ? AMDGPU::sub2_sub3 : AMDGPU::sub1;

  // Handle descending offsets
  if (CI.Offset0 > CI.Offset1)
    std::swap(SubRegIdx0, SubRegIdx1);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::sdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::sdst);

  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

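// Merge the two buffer_load_dword(x2) offen/offset instructions in CI into a
// single load of twice the width, then copy the subregisters of the wide
// result back into the original destinations.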
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeBufferLoadPair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  unsigned Opcode;

  if (CI.InstClass == BUFFER_LOAD_OFFEN) {
    Opcode = CI.IsX2 ? AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN :
                       AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN;
  } else {
    Opcode = CI.IsX2 ? AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET :
                       AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
  }

  const TargetRegisterClass *SuperRC =
    CI.IsX2 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);

  auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg);

  if (CI.InstClass == BUFFER_LOAD_OFFEN)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
      .addImm(MergedOffset) // offset
      .addImm(CI.GLC0)      // glc
      .addImm(CI.SLC0)      // slc
      .addImm(0)            // tfe
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
  unsigned SubRegIdx1 = CI.IsX2 ? AMDGPU::sub2_sub3 : AMDGPU::sub1;

  // Handle descending offsets
  if (CI.Offset0 > CI.Offset1)
    std::swap(SubRegIdx0, SubRegIdx1);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);

  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

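// Return the buffer-store opcode that is twice as wide as I's opcode, and set
// IsX2/IsOffen to describe the original instruction. Returns 0 if I is not a
// mergeable buffer store.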
unsigned SILoadStoreOptimizer::promoteBufferStoreOpcode(
  const MachineInstr &I, bool &IsX2, bool &IsOffen) const {
  IsX2 = false;
  IsOffen = false;

  switch (I.getOpcode()) {
  case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFEN;
  case AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact:
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFEN_exact;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN:
    IsX2 = true;
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFEN;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN_exact:
    IsX2 = true;
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFEN_exact;
  case AMDGPU::BUFFER_STORE_DWORD_OFFSET:
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact:
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET_exact;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFSET:
    IsX2 = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFSET_exact:
    IsX2 = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET_exact;
  }
  return 0;
}

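// Merge the two buffer stores in CI into a single store of twice the width.
// The two data operands are first combined into a wider register with a
// REG_SEQUENCE.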
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeBufferStorePair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  bool Unused1, Unused2;
  unsigned Opcode = promoteBufferStoreOpcode(*CI.I, Unused1, Unused2);

  unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
  unsigned SubRegIdx1 = CI.IsX2 ? AMDGPU::sub2_sub3 : AMDGPU::sub1;

  // Handle descending offsets
  if (CI.Offset0 > CI.Offset1)
    std::swap(SubRegIdx0, SubRegIdx1);

  // Copy to the new source register.
  const TargetRegisterClass *SuperRC =
    CI.IsX2 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass;
  unsigned SrcReg = MRI->createVirtualRegister(SuperRC);

  const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Src1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);

  BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::REG_SEQUENCE), SrcReg)
      .add(*Src0)
      .addImm(SubRegIdx0)
      .add(*Src1)
      .addImm(SubRegIdx1);

  auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode))
      .addReg(SrcReg, RegState::Kill);

  if (CI.InstClass == BUFFER_STORE_OFFEN)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
      .addImm(std::min(CI.Offset0, CI.Offset1)) // offset
      .addImm(CI.GLC0)      // glc
      .addImm(CI.SLC0)      // slc
      .addImm(0)            // tfe
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  moveInstsAfter(MIB, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

// Scan through looking for adjacent load/store operations with constant
// offsets from the same base register. We rely on the scheduler to do the
// hard work of clustering nearby loads, and assume these are all adjacent.
bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
  bool Modified = false;

  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
    MachineInstr &MI = *I;

    // Don't combine if volatile.
    if (MI.hasOrderedMemoryRef()) {
      ++I;
      continue;
    }

    CombineInfo CI;
    CI.I = I;
    unsigned Opc = MI.getOpcode();
    if (Opc == AMDGPU::DS_READ_B32 || Opc == AMDGPU::DS_READ_B64 ||
        Opc == AMDGPU::DS_READ_B32_gfx9 || Opc == AMDGPU::DS_READ_B64_gfx9) {

      CI.InstClass = DS_READ_WRITE;
      CI.EltSize =
        (Opc == AMDGPU::DS_READ_B64 || Opc == AMDGPU::DS_READ_B64_gfx9) ? 8 : 4;

      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeRead2Pair(CI);
      } else {
        ++I;
      }

      continue;
    } else if (Opc == AMDGPU::DS_WRITE_B32 || Opc == AMDGPU::DS_WRITE_B64 ||
               Opc == AMDGPU::DS_WRITE_B32_gfx9 ||
               Opc == AMDGPU::DS_WRITE_B64_gfx9) {
      CI.InstClass = DS_READ_WRITE;
      CI.EltSize
        = (Opc == AMDGPU::DS_WRITE_B64 || Opc == AMDGPU::DS_WRITE_B64_gfx9) ? 8 : 4;

      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeWrite2Pair(CI);
      } else {
        ++I;
      }

      continue;
    }
    if (STM->hasSBufferLoadStoreAtomicDwordxN() &&
        (Opc == AMDGPU::S_BUFFER_LOAD_DWORD_IMM ||
         Opc == AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM)) {
      // EltSize is in units of the offset encoding.
      CI.InstClass = S_BUFFER_LOAD_IMM;
      CI.EltSize = AMDGPU::getSMRDEncodedOffset(*STM, 4);
      CI.IsX2 = Opc == AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeSBufferLoadImmPair(CI);
        if (!CI.IsX2)
          CreatedX2++;
      } else {
        ++I;
      }
      continue;
    }
    if (Opc == AMDGPU::BUFFER_LOAD_DWORD_OFFEN ||
        Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN ||
        Opc == AMDGPU::BUFFER_LOAD_DWORD_OFFSET ||
        Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET) {
      if (Opc == AMDGPU::BUFFER_LOAD_DWORD_OFFEN ||
          Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN)
        CI.InstClass = BUFFER_LOAD_OFFEN;
      else
        CI.InstClass = BUFFER_LOAD_OFFSET;

      CI.EltSize = 4;
      CI.IsX2 = Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN ||
                Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeBufferLoadPair(CI);
        if (!CI.IsX2)
          CreatedX2++;
      } else {
        ++I;
      }
      continue;
    }

    bool StoreIsX2, IsOffen;
    if (promoteBufferStoreOpcode(*I, StoreIsX2, IsOffen)) {
      CI.InstClass = IsOffen ? BUFFER_STORE_OFFEN : BUFFER_STORE_OFFSET;
      CI.EltSize = 4;
      CI.IsX2 = StoreIsX2;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeBufferStorePair(CI);
        if (!CI.IsX2)
          CreatedX2++;
      } else {
        ++I;
      }
      continue;
    }

    ++I;
  }

  return Modified;
}

bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  STM = &MF.getSubtarget<SISubtarget>();
  if (!STM->loadStoreOptEnabled())
    return false;

  TII = STM->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  assert(MRI->isSSA() && "Must be run on SSA");

  DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");

  bool Modified = false;

  for (MachineBasicBlock &MBB : MF) {
    CreatedX2 = 0;
    Modified |= optimizeBlock(MBB);

    // Run again to convert x2 to x4.
    if (CreatedX2 >= 1)
      Modified |= optimizeBlock(MBB);
  }

  return Modified;
}