//===- SILoadStoreOptimizer.cpp -------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass tries to fuse DS instructions with nearby immediate offsets.
// This will fuse operations such as
//  ds_read_b32 v0, v2 offset:16
//  ds_read_b32 v1, v2 offset:32
// ==>
//   ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
//
// The same is done for certain SMEM and VMEM opcodes, e.g.:
//  s_buffer_load_dword s4, s[0:3], 4
//  s_buffer_load_dword s5, s[0:3], 8
// ==>
//  s_buffer_load_dwordx2 s[4:5], s[0:3], 4
//
// Future improvements:
//
// - This currently relies on the scheduler to place loads and stores next to
//   each other, and then only merges adjacent pairs of instructions. It would
//   be good to be more flexible with interleaved instructions, and possibly run
//   before scheduling. It currently misses stores of constants because the
//   load of the constant into the data register is placed between the stores,
//   although this is arguably a scheduling problem.
//
// - Recomputing live intervals seems inefficient: this currently matches only
//   one pair, recomputes the live intervals, and then moves on to the next
//   pair. It would be better to compute a list of all merges that need to
//   occur.
//
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads have offsets that are too large to fit in the 8-bit
//   offset field, but whose differences from a common base do fit, we can add
//   to the base pointer and use the new, reduced offsets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-load-store-opt"

namespace {

class SILoadStoreOptimizer : public MachineFunctionPass {
  enum InstClassEnum {
    DS_READ_WRITE,
    S_BUFFER_LOAD_IMM,
    BUFFER_LOAD_OFFEN,
    BUFFER_LOAD_OFFSET,
    BUFFER_STORE_OFFEN,
    BUFFER_STORE_OFFSET,
  };

  struct CombineInfo {
    MachineBasicBlock::iterator I;
    MachineBasicBlock::iterator Paired;
    unsigned EltSize;
    unsigned Offset0;
    unsigned Offset1;
    unsigned BaseOff;
    InstClassEnum InstClass;
    bool GLC0;
    bool GLC1;
    bool SLC0;
    bool SLC1;
    bool UseST64;
    bool IsX2;
    SmallVector<MachineInstr*, 8> InstsToMove;
  };
private:
  const SISubtarget *STM = nullptr;
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  AliasAnalysis *AA = nullptr;
  unsigned CreatedX2;

  static bool offsetsCanBeCombined(CombineInfo &CI);

  bool findMatchingInst(CombineInfo &CI);

  unsigned read2Opcode(unsigned EltSize) const;
  unsigned read2ST64Opcode(unsigned EltSize) const;
  MachineBasicBlock::iterator mergeRead2Pair(CombineInfo &CI);

  unsigned write2Opcode(unsigned EltSize) const;
  unsigned write2ST64Opcode(unsigned EltSize) const;
  MachineBasicBlock::iterator mergeWrite2Pair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeSBufferLoadImmPair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeBufferLoadPair(CombineInfo &CI);
  unsigned promoteBufferStoreOpcode(const MachineInstr &I, bool &IsX2,
                                    bool &IsOffen) const;
  MachineBasicBlock::iterator mergeBufferStorePair(CombineInfo &CI);

public:
  static char ID;

  SILoadStoreOptimizer() : MachineFunctionPass(ID) {
    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
  }

  bool optimizeBlock(MachineBasicBlock &MBB);

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Load Store Optimizer"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();

    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace.

INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
                      "SI Load Store Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE,
                    "SI Load Store Optimizer", false, false)

char SILoadStoreOptimizer::ID = 0;

char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;

FunctionPass *llvm::createSILoadStoreOptimizerPass() {
  return new SILoadStoreOptimizer();
}

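// Move each instruction in \p InstsToMove to immediately after \p I,
// preserving their relative order.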
static void moveInstsAfter(MachineBasicBlock::iterator I,
                           ArrayRef<MachineInstr*> InstsToMove) {
  MachineBasicBlock *MBB = I->getParent();
  ++I;
  for (MachineInstr *MI : InstsToMove) {
    MI->removeFromParent();
    MBB->insert(I, MI);
  }
}

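// Record every register defined by \p MI in \p Defs.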
static void addDefsToList(const MachineInstr &MI, DenseSet<unsigned> &Defs) {
  for (const MachineOperand &Def : MI.operands()) {
    if (Def.isReg() && Def.isDef())
      Defs.insert(Def.getReg());
  }
}

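// Return true if the memory accesses \p A and \p B can be swapped: either
// neither of them writes memory, or the target can prove the two accesses
// are trivially disjoint.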
static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
                                      MachineBasicBlock::iterator B,
                                      const SIInstrInfo *TII,
                                      AliasAnalysis *AA) {
  // RAW or WAR - cannot reorder
  // WAW - cannot reorder
  // RAR - safe to reorder
  return !(A->mayStore() || B->mayStore()) ||
    TII->areMemAccessesTriviallyDisjoint(*A, *B, AA);
}

// Add MI and its defs to the lists if MI reads one of the defs that are
// already in the list. Returns true in that case.
static bool
addToListsIfDependent(MachineInstr &MI,
                      DenseSet<unsigned> &Defs,
                      SmallVectorImpl<MachineInstr*> &Insts) {
  for (MachineOperand &Use : MI.operands()) {
    // If one of the defs is read, then there is a use of Def between I and the
    // instruction that I will potentially be merged with. We will need to move
    // this instruction after the merged instructions.

    if (Use.isReg() && Use.readsReg() && Defs.count(Use.getReg())) {
      Insts.push_back(&MI);
      addDefsToList(MI, Defs);
      return true;
    }
  }

  return false;
}

static bool
canMoveInstsAcrossMemOp(MachineInstr &MemOp,
                        ArrayRef<MachineInstr*> InstsToMove,
                        const SIInstrInfo *TII,
                        AliasAnalysis *AA) {
  assert(MemOp.mayLoadOrStore());

  for (MachineInstr *InstToMove : InstsToMove) {
    if (!InstToMove->mayLoadOrStore())
      continue;
    if (!memAccessesCanBeReordered(MemOp, *InstToMove, TII, AA))
      return false;
  }
  return true;
}

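// Decide whether the two offsets recorded in \p CI can be encoded together in
// a single merged instruction, rewriting CI.Offset0/Offset1 (and, for DS
// instructions, possibly CI.BaseOff and CI.UseST64) into the encodable form.
//
// Illustrative DS case (values chosen for illustration only): two ds_read_b32
// at byte offsets 1024 and 1028 give element offsets 256 and 257, which do
// not fit in 8 bits and are not both multiples of 64.  The fallback below
// then picks BaseOff = 1024, so the pair can be encoded as offset0:0
// offset1:1 relative to an adjusted base address.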
bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI) {
  // XXX - Would the same offset be OK? Is there any reason this would happen or
  // be useful?
  if (CI.Offset0 == CI.Offset1)
    return false;

  // This won't be valid if the offset isn't aligned.
  if ((CI.Offset0 % CI.EltSize != 0) || (CI.Offset1 % CI.EltSize != 0))
    return false;

  unsigned EltOffset0 = CI.Offset0 / CI.EltSize;
  unsigned EltOffset1 = CI.Offset1 / CI.EltSize;
  CI.UseST64 = false;
  CI.BaseOff = 0;

  // Handle SMEM and VMEM instructions.
  if (CI.InstClass != DS_READ_WRITE) {
    unsigned Diff = CI.IsX2 ? 2 : 1;
    return (EltOffset0 + Diff == EltOffset1 ||
            EltOffset1 + Diff == EltOffset0) &&
           CI.GLC0 == CI.GLC1 &&
           (CI.InstClass == S_BUFFER_LOAD_IMM || CI.SLC0 == CI.SLC1);
  }

  // If the offset in elements doesn't fit in 8 bits, we might still be able
  // to use the stride 64 versions.
  if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64) == 0 &&
      isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
    CI.Offset0 = EltOffset0 / 64;
    CI.Offset1 = EltOffset1 / 64;
    CI.UseST64 = true;
    return true;
  }

  // Check if the element offsets fit in the 8-bit range directly.
  if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
    CI.Offset0 = EltOffset0;
    CI.Offset1 = EltOffset1;
    return true;
  }

  // Try to shift the base address to decrease the offsets.
  unsigned OffsetDiff = std::abs((int)EltOffset1 - (int)EltOffset0);
  CI.BaseOff = std::min(CI.Offset0, CI.Offset1);

  if ((OffsetDiff % 64 == 0) && isUInt<8>(OffsetDiff / 64)) {
    CI.Offset0 = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64;
    CI.Offset1 = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64;
    CI.UseST64 = true;
    return true;
  }

  if (isUInt<8>(OffsetDiff)) {
    CI.Offset0 = EltOffset0 - CI.BaseOff / CI.EltSize;
    CI.Offset1 = EltOffset1 - CI.BaseOff / CI.EltSize;
    return true;
  }

  return false;
}

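// Scan forward from CI.I for another instruction with the same opcode and the
// same base address operands whose offset can be combined with CI.I's.  On
// success, CI.Paired and the offset/cache-bit fields are filled in, and any
// intervening instructions that must be moved below the merge point have been
// collected in CI.InstsToMove.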
bool SILoadStoreOptimizer::findMatchingInst(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  MachineBasicBlock::iterator E = MBB->end();
  MachineBasicBlock::iterator MBBI = CI.I;

  unsigned AddrOpName[3] = {0};
  int AddrIdx[3];
  const MachineOperand *AddrReg[3];
  unsigned NumAddresses = 0;

  switch (CI.InstClass) {
  case DS_READ_WRITE:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::addr;
    break;
  case S_BUFFER_LOAD_IMM:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::sbase;
    break;
  case BUFFER_LOAD_OFFEN:
  case BUFFER_STORE_OFFEN:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::srsrc;
    AddrOpName[NumAddresses++] = AMDGPU::OpName::vaddr;
    AddrOpName[NumAddresses++] = AMDGPU::OpName::soffset;
    break;
  case BUFFER_LOAD_OFFSET:
  case BUFFER_STORE_OFFSET:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::srsrc;
    AddrOpName[NumAddresses++] = AMDGPU::OpName::soffset;
    break;
  }

  for (unsigned i = 0; i < NumAddresses; i++) {
    AddrIdx[i] = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AddrOpName[i]);
    AddrReg[i] = &CI.I->getOperand(AddrIdx[i]);

    // We only ever merge operations with the same base address register, so
    // don't bother scanning forward if there are no other uses.
    if (AddrReg[i]->isReg() &&
        (TargetRegisterInfo::isPhysicalRegister(AddrReg[i]->getReg()) ||
         MRI->hasOneNonDBGUse(AddrReg[i]->getReg())))
      return false;
  }

  ++MBBI;

  DenseSet<unsigned> DefsToMove;
  addDefsToList(*CI.I, DefsToMove);

  for ( ; MBBI != E; ++MBBI) {
    if (MBBI->getOpcode() != CI.I->getOpcode()) {
      // This is not a matching instruction, but we can keep looking as long
      // as one of these conditions is met:
      // 1. It is safe to move I down past MBBI.
      // 2. It is safe to move MBBI down past the instruction that I will
      //    be merged into.

      if (MBBI->hasUnmodeledSideEffects()) {
        // We can't re-order this instruction with respect to other memory
        // operations, so we fail both conditions mentioned above.
        return false;
      }

      if (MBBI->mayLoadOrStore() &&
          (!memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA) ||
           !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))) {
        // We fail condition #1, but we may still be able to satisfy condition
        // #2.  Add this instruction to the move list and then we will check
        // if condition #2 holds once we have selected the matching instruction.
        CI.InstsToMove.push_back(&*MBBI);
        addDefsToList(*MBBI, DefsToMove);
        continue;
      }

      // When we match I with another memory instruction we will be moving I
      // down to the location of the matched instruction, so any uses of I
      // will need to be moved down as well.
      addToListsIfDependent(*MBBI, DefsToMove, CI.InstsToMove);
      continue;
    }


    // Don't merge volatiles.
    if (MBBI->hasOrderedMemoryRef())
      return false;

    // Handle a case like
    //   DS_WRITE_B32 addr, v, idx0
    //   w = DS_READ_B32 addr, idx0
    //   DS_WRITE_B32 addr, f(w), idx1
    // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
    // merging of the two writes.
    if (addToListsIfDependent(*MBBI, DefsToMove, CI.InstsToMove))
      continue;

    bool Match = true;
    for (unsigned i = 0; i < NumAddresses; i++) {
      const MachineOperand &AddrRegNext = MBBI->getOperand(AddrIdx[i]);

      if (AddrReg[i]->isImm() || AddrRegNext.isImm()) {
        if (AddrReg[i]->isImm() != AddrRegNext.isImm() ||
            AddrReg[i]->getImm() != AddrRegNext.getImm()) {
          Match = false;
          break;
        }
        continue;
      }

      // Check same base pointer. Be careful of subregisters, which can occur
      // with vectors of pointers.
      if (AddrReg[i]->getReg() != AddrRegNext.getReg() ||
          AddrReg[i]->getSubReg() != AddrRegNext.getSubReg()) {
        Match = false;
        break;
      }
    }


    if (Match) {
      int OffsetIdx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(),
                                                 AMDGPU::OpName::offset);
      CI.Offset0 = CI.I->getOperand(OffsetIdx).getImm();
      CI.Offset1 = MBBI->getOperand(OffsetIdx).getImm();
      CI.Paired = MBBI;

      if (CI.InstClass == DS_READ_WRITE) {
        CI.Offset0 &= 0xffff;
        CI.Offset1 &= 0xffff;
      } else {
        CI.GLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::glc)->getImm();
        CI.GLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::glc)->getImm();
        if (CI.InstClass != S_BUFFER_LOAD_IMM) {
          CI.SLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::slc)->getImm();
          CI.SLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::slc)->getImm();
        }
      }

      // Check that both offsets fit in the reduced range.
      // We also need to go through the list of instructions that we plan to
      // move and make sure they are all safe to move down past the merged
      // instruction.
      if (offsetsCanBeCombined(CI))
        if (canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
          return true;
    }

    // We've found a load/store that we couldn't merge for some reason.
    // We could potentially keep looking, but we'd need to make sure that
    // it was safe to move I and all the instructions in InstsToMove down
    // past this instruction, so check whether we can move I across MBBI
    // and whether we can move all of I's users.
    if (!memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA) ||
        !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
      break;
  }
  return false;
}

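// Pick the ds_read2 opcode for the given element size. Targets that still
// require M0 to be initialized for LDS access use the pre-gfx9 variants;
// otherwise the gfx9 forms are used. The same scheme applies to the st64,
// write2, and write2st64 helpers below.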
unsigned SILoadStoreOptimizer::read2Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64;
  return (EltSize == 4) ? AMDGPU::DS_READ2_B32_gfx9 : AMDGPU::DS_READ2_B64_gfx9;
}

unsigned SILoadStoreOptimizer::read2ST64Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32 : AMDGPU::DS_READ2ST64_B64;

  return (EltSize == 4) ?
    AMDGPU::DS_READ2ST64_B32_gfx9 : AMDGPU::DS_READ2ST64_B64_gfx9;
}

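// Replace CI.I and CI.Paired with a single ds_read2 / ds_read2st64 that loads
// into a new wide register, then copy the two halves of that register back
// into the original destination registers so downstream users are unchanged.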
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be careful, since the addresses could be subregisters themselves in weird
  // cases, like vectors of pointers.
  const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);

  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdst);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = CI.UseST64 ?
    read2ST64Opcode(CI.EltSize) : read2Opcode(CI.EltSize);

  unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(SubRegIdx0, SubRegIdx1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Read2Desc = TII->get(Opc);

  const TargetRegisterClass *SuperRC
    = (CI.EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);

  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = AddrReg->getReg();
  unsigned BaseRegFlags = 0;
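  // If offsetsCanBeCombined() chose a nonzero base offset, materialize it in
  // an SGPR and add it to the original address so that the reduced offsets
  // encoded below stay within range.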
  if (CI.BaseOff) {
    unsigned ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
      .addImm(CI.BaseOff);

    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;

    TII->getAddNoCarry(*MBB, CI.Paired, DL, BaseReg)
      .addReg(ImmReg)
      .addReg(AddrReg->getReg());
  }

  MachineInstrBuilder Read2 =
    BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
      .addReg(BaseReg, BaseRegFlags) // addr
      .addImm(NewOffset0)            // offset0
      .addImm(NewOffset1)            // offset1
      .addImm(0)                     // gds
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  (void)Read2;

  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);

  // Copy to the old destination registers.
  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  return Next;
}

unsigned SILoadStoreOptimizer::write2Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32 : AMDGPU::DS_WRITE2_B64;
  return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32_gfx9
                        : AMDGPU::DS_WRITE2_B64_gfx9;
}

unsigned SILoadStoreOptimizer::write2ST64Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32
                          : AMDGPU::DS_WRITE2ST64_B64;

  return (EltSize == 4) ?
    AMDGPU::DS_WRITE2ST64_B32_gfx9 : AMDGPU::DS_WRITE2ST64_B64_gfx9;
}

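// Replace CI.I and CI.Paired with a single ds_write2 / ds_write2st64 that
// stores both original data operands.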
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be sure to use .add(), and not .addReg(), with these. We want to be sure
  // we preserve the subregister index and any register flags set on them.
  const MachineOperand *AddrReg =
    TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
  const MachineOperand *Data0 =
    TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
  const MachineOperand *Data1
    = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::data0);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = CI.UseST64 ?
    write2ST64Opcode(CI.EltSize) : write2Opcode(CI.EltSize);

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(Data0, Data1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Write2Desc = TII->get(Opc);
  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = AddrReg->getReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    unsigned ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
      .addImm(CI.BaseOff);

    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;

    TII->getAddNoCarry(*MBB, CI.Paired, DL, BaseReg)
      .addReg(ImmReg)
      .addReg(AddrReg->getReg());
  }

  MachineInstrBuilder Write2 =
    BuildMI(*MBB, CI.Paired, DL, Write2Desc)
      .addReg(BaseReg, BaseRegFlags) // addr
      .add(*Data0)                   // data0
      .add(*Data1)                   // data1
      .addImm(NewOffset0)            // offset0
      .addImm(NewOffset1)            // offset1
      .addImm(0)                     // gds
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  moveInstsAfter(Write2, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  return Next;
}

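// Merge two s_buffer_load_dword(x2) into a single wider s_buffer_load, then
// copy the sub-registers of the new result back to the original destinations.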
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeSBufferLoadImmPair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  unsigned Opcode = CI.IsX2 ? AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM :
                              AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;

  const TargetRegisterClass *SuperRC =
    CI.IsX2 ? &AMDGPU::SReg_128RegClass : &AMDGPU::SReg_64_XEXECRegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);

  BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg)
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::sbase))
      .addImm(MergedOffset) // offset
      .addImm(CI.GLC0)      // glc
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
  unsigned SubRegIdx1 = CI.IsX2 ? AMDGPU::sub2_sub3 : AMDGPU::sub1;

  // Handle descending offsets.
  if (CI.Offset0 > CI.Offset1)
    std::swap(SubRegIdx0, SubRegIdx1);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::sdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::sdst);

  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

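// Merge two buffer_load_dword(x2) (OFFEN or OFFSET form) into a single wider
// load and copy the halves of the new result back to the original vdata
// destinations.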
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeBufferLoadPair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  unsigned Opcode;

  if (CI.InstClass == BUFFER_LOAD_OFFEN) {
    Opcode = CI.IsX2 ? AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN :
                       AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN;
  } else {
    Opcode = CI.IsX2 ? AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET :
                       AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
  }

  const TargetRegisterClass *SuperRC =
    CI.IsX2 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);

  auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg);

  if (CI.InstClass == BUFFER_LOAD_OFFEN)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
      .addImm(MergedOffset) // offset
      .addImm(CI.GLC0)      // glc
      .addImm(CI.SLC0)      // slc
      .addImm(0)            // tfe
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
  unsigned SubRegIdx1 = CI.IsX2 ? AMDGPU::sub2_sub3 : AMDGPU::sub1;

  // Handle descending offsets.
  if (CI.Offset0 > CI.Offset1)
    std::swap(SubRegIdx0, SubRegIdx1);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);

  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

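// Return the next wider (x2 or x4) variant of a mergeable buffer-store
// opcode, reporting whether the input is already an x2 store and whether it
// uses the OFFEN addressing form. Returns 0 for opcodes this pass does not
// handle.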
unsigned SILoadStoreOptimizer::promoteBufferStoreOpcode(
  const MachineInstr &I, bool &IsX2, bool &IsOffen) const {
  IsX2 = false;
  IsOffen = false;

  switch (I.getOpcode()) {
  case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFEN;
  case AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact:
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFEN_exact;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN:
    IsX2 = true;
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFEN;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN_exact:
    IsX2 = true;
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFEN_exact;
  case AMDGPU::BUFFER_STORE_DWORD_OFFSET:
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact:
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET_exact;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFSET:
    IsX2 = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFSET_exact:
    IsX2 = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET_exact;
  }
  return 0;
}

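// Merge two buffer stores: build the combined source with a REG_SEQUENCE of
// the two original vdata operands, then emit the wider store in place of the
// pair.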
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeBufferStorePair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  bool Unused1, Unused2;
  unsigned Opcode = promoteBufferStoreOpcode(*CI.I, Unused1, Unused2);

  unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
  unsigned SubRegIdx1 = CI.IsX2 ? AMDGPU::sub2_sub3 : AMDGPU::sub1;

  // Handle descending offsets.
  if (CI.Offset0 > CI.Offset1)
    std::swap(SubRegIdx0, SubRegIdx1);

  // Copy to the new source register.
  const TargetRegisterClass *SuperRC =
    CI.IsX2 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass;
  unsigned SrcReg = MRI->createVirtualRegister(SuperRC);

  const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Src1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);

  BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::REG_SEQUENCE), SrcReg)
      .add(*Src0)
      .addImm(SubRegIdx0)
      .add(*Src1)
      .addImm(SubRegIdx1);

  auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode))
      .addReg(SrcReg, RegState::Kill);

  if (CI.InstClass == BUFFER_STORE_OFFEN)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
      .addImm(std::min(CI.Offset0, CI.Offset1)) // offset
      .addImm(CI.GLC0)      // glc
      .addImm(CI.SLC0)      // slc
      .addImm(0)            // tfe
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  moveInstsAfter(MIB, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

// Scan through looking for adjacent memory operations with constant offsets
// from the same base register. We rely on the scheduler to do the hard work
// of clustering nearby loads and stores, and assume these are all adjacent.
bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
  bool Modified = false;

  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
    MachineInstr &MI = *I;

    // Don't combine if volatile.
    if (MI.hasOrderedMemoryRef()) {
      ++I;
      continue;
    }

    CombineInfo CI;
    CI.I = I;
    unsigned Opc = MI.getOpcode();
    if (Opc == AMDGPU::DS_READ_B32 || Opc == AMDGPU::DS_READ_B64 ||
        Opc == AMDGPU::DS_READ_B32_gfx9 || Opc == AMDGPU::DS_READ_B64_gfx9) {

      CI.InstClass = DS_READ_WRITE;
      CI.EltSize =
        (Opc == AMDGPU::DS_READ_B64 || Opc == AMDGPU::DS_READ_B64_gfx9) ? 8 : 4;

      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeRead2Pair(CI);
      } else {
        ++I;
      }

      continue;
    } else if (Opc == AMDGPU::DS_WRITE_B32 || Opc == AMDGPU::DS_WRITE_B64 ||
               Opc == AMDGPU::DS_WRITE_B32_gfx9 ||
               Opc == AMDGPU::DS_WRITE_B64_gfx9) {
      CI.InstClass = DS_READ_WRITE;
      CI.EltSize = (Opc == AMDGPU::DS_WRITE_B64 ||
                    Opc == AMDGPU::DS_WRITE_B64_gfx9) ? 8 : 4;

      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeWrite2Pair(CI);
      } else {
        ++I;
      }

      continue;
    }
    if (Opc == AMDGPU::S_BUFFER_LOAD_DWORD_IMM ||
        Opc == AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM) {
      // EltSize is in units of the offset encoding.
      CI.InstClass = S_BUFFER_LOAD_IMM;
      CI.EltSize = AMDGPU::getSMRDEncodedOffset(*STM, 4);
      CI.IsX2 = Opc == AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeSBufferLoadImmPair(CI);
        if (!CI.IsX2)
          CreatedX2++;
      } else {
        ++I;
      }
      continue;
    }
    if (Opc == AMDGPU::BUFFER_LOAD_DWORD_OFFEN ||
        Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN ||
        Opc == AMDGPU::BUFFER_LOAD_DWORD_OFFSET ||
        Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET) {
      if (Opc == AMDGPU::BUFFER_LOAD_DWORD_OFFEN ||
          Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN)
        CI.InstClass = BUFFER_LOAD_OFFEN;
      else
        CI.InstClass = BUFFER_LOAD_OFFSET;

      CI.EltSize = 4;
      CI.IsX2 = Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN ||
                Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeBufferLoadPair(CI);
        if (!CI.IsX2)
          CreatedX2++;
      } else {
        ++I;
      }
      continue;
    }

    bool StoreIsX2, IsOffen;
    if (promoteBufferStoreOpcode(*I, StoreIsX2, IsOffen)) {
      CI.InstClass = IsOffen ? BUFFER_STORE_OFFEN : BUFFER_STORE_OFFSET;
      CI.EltSize = 4;
      CI.IsX2 = StoreIsX2;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeBufferStorePair(CI);
        if (!CI.IsX2)
          CreatedX2++;
      } else {
        ++I;
      }
      continue;
    }

    ++I;
  }

  return Modified;
}

bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  STM = &MF.getSubtarget<SISubtarget>();
  if (!STM->loadStoreOptEnabled())
    return false;

  TII = STM->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  assert(MRI->isSSA() && "Must be run on SSA");

  DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");

  bool Modified = false;

  for (MachineBasicBlock &MBB : MF) {
    CreatedX2 = 0;
    Modified |= optimizeBlock(MBB);

    // Run again to convert x2 to x4.
    if (CreatedX2 >= 1)
      Modified |= optimizeBlock(MBB);
  }

  return Modified;
}