//===- SILoadStoreOptimizer.cpp -------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass tries to fuse DS instructions with nearby immediate offsets.
// This will fuse operations such as
//  ds_read_b32 v0, v2 offset:16
//  ds_read_b32 v1, v2 offset:32
// ==>
//   ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
//
// The same is done for certain SMEM and VMEM opcodes, e.g.:
//  s_buffer_load_dword s4, s[0:3], 4
//  s_buffer_load_dword s5, s[0:3], 8
// ==>
//  s_buffer_load_dwordx2 s[4:5], s[0:3], 4
//
// Future improvements:
//
// - This currently relies on the scheduler to place loads and stores next to
//   each other, and then only merges adjacent pairs of instructions. It would
//   be good to be more flexible with interleaved instructions, and possibly run
//   before scheduling. It currently misses stores of constants because loading
//   the constant into the data register is placed between the stores, although
//   this is arguably a scheduling problem.
//
// - Live interval recomputing seems inefficient. This currently only matches
//   one pair, and recomputes live intervals and moves on to the next pair. It
//   would be better to compute a list of all merges that need to occur.
//
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads has offsets that are too large to fit in the 8-bit
//   offsets, but are close enough together to fit in 8 bits, we can add to the
//   base pointer and use the new reduced offsets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-load-store-opt"

namespace {

class SILoadStoreOptimizer : public MachineFunctionPass {
  enum InstClassEnum {
    DS_READ_WRITE,
    S_BUFFER_LOAD_IMM,
    BUFFER_LOAD_OFFEN,
    BUFFER_LOAD_OFFSET,
    BUFFER_STORE_OFFEN,
    BUFFER_STORE_OFFSET,
  };

  struct CombineInfo {
    MachineBasicBlock::iterator I;
    MachineBasicBlock::iterator Paired;
    unsigned EltSize;
    unsigned Offset0;
    unsigned Offset1;
    unsigned BaseOff;
    InstClassEnum InstClass;
    bool GLC0;
    bool GLC1;
    bool SLC0;
    bool SLC1;
    bool UseST64;
    bool IsX2;
    SmallVector<MachineInstr*, 8> InstsToMove;
  };

private:
  const SISubtarget *STM = nullptr;
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  AliasAnalysis *AA = nullptr;
  unsigned CreatedX2;

  static bool offsetsCanBeCombined(CombineInfo &CI);

  bool findMatchingInst(CombineInfo &CI);

  unsigned read2Opcode(unsigned EltSize) const;
  unsigned read2ST64Opcode(unsigned EltSize) const;
  MachineBasicBlock::iterator mergeRead2Pair(CombineInfo &CI);

  unsigned write2Opcode(unsigned EltSize) const;
  unsigned write2ST64Opcode(unsigned EltSize) const;
  MachineBasicBlock::iterator mergeWrite2Pair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeSBufferLoadImmPair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeBufferLoadPair(CombineInfo &CI);
  unsigned promoteBufferStoreOpcode(const MachineInstr &I, bool &IsX2,
                                    bool &IsOffen) const;
  MachineBasicBlock::iterator mergeBufferStorePair(CombineInfo &CI);

public:
  static char ID;

  SILoadStoreOptimizer() : MachineFunctionPass(ID) {
    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
  }

  bool optimizeBlock(MachineBasicBlock &MBB);

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Load / Store Optimizer"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();

    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace.

INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
                      "SI Load / Store Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE,
                    "SI Load / Store Optimizer", false, false)

char SILoadStoreOptimizer::ID = 0;

char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;

FunctionPass *llvm::createSILoadStoreOptimizerPass() {
  return new SILoadStoreOptimizer();
}

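// Move all instructions in InstsToMove so they immediately follow I, keeping
// their relative order. This is how instructions that depend on (or alias) a
// merged access are sunk below the point where the pair is fused.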
static void moveInstsAfter(MachineBasicBlock::iterator I,
                           ArrayRef<MachineInstr*> InstsToMove) {
  MachineBasicBlock *MBB = I->getParent();
  ++I;
  for (MachineInstr *MI : InstsToMove) {
    MI->removeFromParent();
    MBB->insert(I, MI);
  }
}

static void addDefsToList(const MachineInstr &MI, DenseSet<unsigned> &Defs) {
  // XXX: Should this be looking for implicit defs?
  for (const MachineOperand &Def : MI.defs())
    Defs.insert(Def.getReg());
}

static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
                                      MachineBasicBlock::iterator B,
                                      const SIInstrInfo *TII,
                                      AliasAnalysis *AA) {
  // RAW or WAR - cannot reorder
  // WAW - cannot reorder
  // RAR - safe to reorder
  return !(A->mayStore() || B->mayStore()) ||
    TII->areMemAccessesTriviallyDisjoint(*A, *B, AA);
}

// Add MI and its defs to the lists if MI reads one of the defs that are
// already in the list. Returns true in that case.
static bool
addToListsIfDependent(MachineInstr &MI,
                      DenseSet<unsigned> &Defs,
                      SmallVectorImpl<MachineInstr*> &Insts) {
  for (MachineOperand &Use : MI.operands()) {
    // If one of the defs is read, then there is a use of Def between I and the
    // instruction that I will potentially be merged with. We will need to move
    // this instruction after the merged instructions.

    if (Use.isReg() && Use.readsReg() && Defs.count(Use.getReg())) {
      Insts.push_back(&MI);
      addDefsToList(MI, Defs);
      return true;
    }
  }

  return false;
}

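// Return true if every memory operation in InstsToMove can be safely moved
// past the memory access MemOp; non-memory instructions in the list never
// conflict with it.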
static bool
canMoveInstsAcrossMemOp(MachineInstr &MemOp,
                        ArrayRef<MachineInstr*> InstsToMove,
                        const SIInstrInfo *TII,
                        AliasAnalysis *AA) {
  assert(MemOp.mayLoadOrStore());

  for (MachineInstr *InstToMove : InstsToMove) {
    if (!InstToMove->mayLoadOrStore())
      continue;
    if (!memAccessesCanBeReordered(MemOp, *InstToMove, TII, AA))
      return false;
  }
  return true;
}

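// Check whether the two offsets in CI can be encoded in a single merged
// instruction. For SMEM/VMEM this only requires adjacent offsets and matching
// cache-policy bits; for DS the offsets are rewritten into element or
// stride-64 units, possibly relative to a new BaseOff added to the base
// pointer.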
bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI) {
  // XXX - Would the same offset be OK? Is there any reason this would happen or
  // be useful?
  if (CI.Offset0 == CI.Offset1)
    return false;

  // This won't be valid if the offset isn't aligned.
  if ((CI.Offset0 % CI.EltSize != 0) || (CI.Offset1 % CI.EltSize != 0))
    return false;

  unsigned EltOffset0 = CI.Offset0 / CI.EltSize;
  unsigned EltOffset1 = CI.Offset1 / CI.EltSize;
  CI.UseST64 = false;
  CI.BaseOff = 0;

  // Handle SMEM and VMEM instructions.
  if (CI.InstClass != DS_READ_WRITE) {
    unsigned Diff = CI.IsX2 ? 2 : 1;
    return (EltOffset0 + Diff == EltOffset1 ||
            EltOffset1 + Diff == EltOffset0) &&
           CI.GLC0 == CI.GLC1 &&
           (CI.InstClass == S_BUFFER_LOAD_IMM || CI.SLC0 == CI.SLC1);
  }

  // If the offset in elements doesn't fit in 8-bits, we might be able to use
  // the stride 64 versions.
  if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64) == 0 &&
      isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
    CI.Offset0 = EltOffset0 / 64;
    CI.Offset1 = EltOffset1 / 64;
    CI.UseST64 = true;
    return true;
  }

  // Check if the new offsets fit in the reduced 8-bit range.
  if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
    CI.Offset0 = EltOffset0;
    CI.Offset1 = EltOffset1;
    return true;
  }

  // Try to shift base address to decrease offsets.
  unsigned OffsetDiff = std::abs((int)EltOffset1 - (int)EltOffset0);
  CI.BaseOff = std::min(CI.Offset0, CI.Offset1);

  if ((OffsetDiff % 64 == 0) && isUInt<8>(OffsetDiff / 64)) {
    CI.Offset0 = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64;
    CI.Offset1 = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64;
    CI.UseST64 = true;
    return true;
  }

  if (isUInt<8>(OffsetDiff)) {
    CI.Offset0 = EltOffset0 - CI.BaseOff / CI.EltSize;
    CI.Offset1 = EltOffset1 - CI.BaseOff / CI.EltSize;
    return true;
  }

  return false;
}

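// Scan forward from CI.I for another instruction with the same opcode and the
// same base address operands that can be merged with it. On success CI.Paired
// and the offset/cache-policy fields are filled in, and CI.InstsToMove holds
// the intervening instructions that must be sunk below the merged access.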
bool SILoadStoreOptimizer::findMatchingInst(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  MachineBasicBlock::iterator E = MBB->end();
  MachineBasicBlock::iterator MBBI = CI.I;

  unsigned AddrOpName[3] = {0};
  int AddrIdx[3];
  const MachineOperand *AddrReg[3];
  unsigned NumAddresses = 0;

  switch (CI.InstClass) {
  case DS_READ_WRITE:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::addr;
    break;
  case S_BUFFER_LOAD_IMM:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::sbase;
    break;
  case BUFFER_LOAD_OFFEN:
  case BUFFER_STORE_OFFEN:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::srsrc;
    AddrOpName[NumAddresses++] = AMDGPU::OpName::vaddr;
    AddrOpName[NumAddresses++] = AMDGPU::OpName::soffset;
    break;
  case BUFFER_LOAD_OFFSET:
  case BUFFER_STORE_OFFSET:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::srsrc;
    AddrOpName[NumAddresses++] = AMDGPU::OpName::soffset;
    break;
  }

  for (unsigned i = 0; i < NumAddresses; i++) {
    AddrIdx[i] = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AddrOpName[i]);
    AddrReg[i] = &CI.I->getOperand(AddrIdx[i]);

    // We only ever merge operations with the same base address register, so
    // don't bother scanning forward if there are no other uses.
    if (AddrReg[i]->isReg() &&
        (TargetRegisterInfo::isPhysicalRegister(AddrReg[i]->getReg()) ||
         MRI->hasOneNonDBGUse(AddrReg[i]->getReg())))
      return false;
  }

  ++MBBI;

  DenseSet<unsigned> DefsToMove;
  addDefsToList(*CI.I, DefsToMove);

  for ( ; MBBI != E; ++MBBI) {
    if (MBBI->getOpcode() != CI.I->getOpcode()) {
      // This is not a matching instruction, but we can keep looking as
      // long as one of these conditions is met:
      // 1. It is safe to move I down past MBBI.
      // 2. It is safe to move MBBI down past the instruction that I will
      //    be merged into.

      if (MBBI->hasUnmodeledSideEffects()) {
        // We can't re-order this instruction with respect to other memory
        // operations, so we fail both conditions mentioned above.
        return false;
      }

      if (MBBI->mayLoadOrStore() &&
          (!memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA) ||
           !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))) {
        // We fail condition #1, but we may still be able to satisfy condition
        // #2.  Add this instruction to the move list and then we will check
        // if condition #2 holds once we have selected the matching instruction.
        CI.InstsToMove.push_back(&*MBBI);
        addDefsToList(*MBBI, DefsToMove);
        continue;
      }

      // When we match I with another instruction we will be moving I down to
      // the location of the matched instruction, so any uses of I will need to
      // be moved down as well.
      addToListsIfDependent(*MBBI, DefsToMove, CI.InstsToMove);
      continue;
    }

    // Don't merge volatiles.
    if (MBBI->hasOrderedMemoryRef())
      return false;

    // Handle a case like
    //   DS_WRITE_B32 addr, v, idx0
    //   w = DS_READ_B32 addr, idx0
    //   DS_WRITE_B32 addr, f(w), idx1
    // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
    // merging of the two writes.
    if (addToListsIfDependent(*MBBI, DefsToMove, CI.InstsToMove))
      continue;

    bool Match = true;
    for (unsigned i = 0; i < NumAddresses; i++) {
      const MachineOperand &AddrRegNext = MBBI->getOperand(AddrIdx[i]);

      if (AddrReg[i]->isImm() || AddrRegNext.isImm()) {
        if (AddrReg[i]->isImm() != AddrRegNext.isImm() ||
            AddrReg[i]->getImm() != AddrRegNext.getImm()) {
          Match = false;
          break;
        }
        continue;
      }

      // Check same base pointer. Be careful of subregisters, which can occur
      // with vectors of pointers.
      if (AddrReg[i]->getReg() != AddrRegNext.getReg() ||
          AddrReg[i]->getSubReg() != AddrRegNext.getSubReg()) {
        Match = false;
        break;
      }
    }

    if (Match) {
      int OffsetIdx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(),
                                                 AMDGPU::OpName::offset);
      CI.Offset0 = CI.I->getOperand(OffsetIdx).getImm();
      CI.Offset1 = MBBI->getOperand(OffsetIdx).getImm();
      CI.Paired = MBBI;

      if (CI.InstClass == DS_READ_WRITE) {
        CI.Offset0 &= 0xffff;
        CI.Offset1 &= 0xffff;
      } else {
        CI.GLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::glc)->getImm();
        CI.GLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::glc)->getImm();
        if (CI.InstClass != S_BUFFER_LOAD_IMM) {
          CI.SLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::slc)->getImm();
          CI.SLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::slc)->getImm();
        }
      }

      // Check both offsets fit in the reduced range.
      // We also need to go through the list of instructions that we plan to
      // move and make sure they are all safe to move down past the merged
      // instruction.
      if (offsetsCanBeCombined(CI))
        if (canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
          return true;
    }

    // We've found a load/store that we couldn't merge for some reason.
    // We could potentially keep looking, but we'd need to make sure that
    // it was safe to move I and also all the instructions in InstsToMove
    // down past this instruction.
    // Check if we can move I across MBBI and if we can move all I's users.
    if (!memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA) ||
        !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
      break;
  }
  return false;
}

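// Pick the ds_read2 (or ds_read2st64) opcode for the given element size.
// Subtargets that still require M0 to be initialized for LDS access use the
// original encodings; newer subtargets use the _gfx9 variants.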
unsigned SILoadStoreOptimizer::read2Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64;
  return (EltSize == 4) ? AMDGPU::DS_READ2_B32_gfx9 : AMDGPU::DS_READ2_B64_gfx9;
}

unsigned SILoadStoreOptimizer::read2ST64Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32 : AMDGPU::DS_READ2ST64_B64;

  return (EltSize == 4) ?
    AMDGPU::DS_READ2ST64_B32_gfx9 : AMDGPU::DS_READ2ST64_B64_gfx9;
}

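// Fuse CI.I and CI.Paired into a single ds_read2 / ds_read2st64 that loads
// into a new wide register, then copy each half back to the original
// destination registers and erase the two original loads.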
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be careful, since the addresses could be subregisters themselves in weird
  // cases, like vectors of pointers.
  const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);

  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdst);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = CI.UseST64 ?
    read2ST64Opcode(CI.EltSize) : read2Opcode(CI.EltSize);

  unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(SubRegIdx0, SubRegIdx1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Read2Desc = TII->get(Opc);

  const TargetRegisterClass *SuperRC
    = (CI.EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);

  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = AddrReg->getReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::V_ADD_I32_e32), BaseReg)
           .addImm(CI.BaseOff)
           .addReg(AddrReg->getReg());
  }

  MachineInstrBuilder Read2 =
    BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
      .addReg(BaseReg, BaseRegFlags) // addr
      .addImm(NewOffset0)            // offset0
      .addImm(NewOffset1)            // offset1
      .addImm(0)                     // gds
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  (void)Read2;

  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);

  // Copy to the old destination registers.
  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  return Next;
}

unsigned SILoadStoreOptimizer::write2Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32 : AMDGPU::DS_WRITE2_B64;
  return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32_gfx9
                        : AMDGPU::DS_WRITE2_B64_gfx9;
}

unsigned SILoadStoreOptimizer::write2ST64Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32
                          : AMDGPU::DS_WRITE2ST64_B64;

  return (EltSize == 4) ?
    AMDGPU::DS_WRITE2ST64_B32_gfx9 : AMDGPU::DS_WRITE2ST64_B64_gfx9;
}

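// Fuse CI.I and CI.Paired into a single ds_write2 / ds_write2st64 that stores
// both original data operands, then erase the two original stores.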
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be sure to use .add(), and not .addReg() with these. We want to be sure we
  // preserve the subregister index and any register flags set on them.
  const MachineOperand *Addr = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
  const MachineOperand *Data0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
  const MachineOperand *Data1
    = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::data0);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = CI.UseST64 ?
    write2ST64Opcode(CI.EltSize) : write2Opcode(CI.EltSize);

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(Data0, Data1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Write2Desc = TII->get(Opc);
  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = Addr->getReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::V_ADD_I32_e32), BaseReg)
           .addImm(CI.BaseOff)
           .addReg(Addr->getReg());
  }

  MachineInstrBuilder Write2 =
    BuildMI(*MBB, CI.Paired, DL, Write2Desc)
      .addReg(BaseReg, BaseRegFlags) // addr
      .add(*Data0)                   // data0
      .add(*Data1)                   // data1
      .addImm(NewOffset0)            // offset0
      .addImm(NewOffset1)            // offset1
      .addImm(0)                     // gds
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  moveInstsAfter(Write2, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  return Next;
}

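// Merge two s_buffer_load_dword(x2) instructions into the next wider variant
// and copy the subregisters of the new result back to the original
// destinations.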
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeSBufferLoadImmPair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  unsigned Opcode = CI.IsX2 ? AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM :
                              AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;

  const TargetRegisterClass *SuperRC =
    CI.IsX2 ? &AMDGPU::SReg_128RegClass : &AMDGPU::SReg_64_XEXECRegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);

  BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg)
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::sbase))
      .addImm(MergedOffset) // offset
      .addImm(CI.GLC0)      // glc
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
  unsigned SubRegIdx1 = CI.IsX2 ? AMDGPU::sub2_sub3 : AMDGPU::sub1;

  // Handle descending offsets.
  if (CI.Offset0 > CI.Offset1)
    std::swap(SubRegIdx0, SubRegIdx1);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::sdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::sdst);

  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

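// Merge two buffer_load_dword(x2) instructions (offen or offset form) into the
// next wider variant and copy the halves of the new result back to the
// original destinations.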
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeBufferLoadPair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  unsigned Opcode;

  if (CI.InstClass == BUFFER_LOAD_OFFEN) {
    Opcode = CI.IsX2 ? AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN :
                       AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN;
  } else {
    Opcode = CI.IsX2 ? AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET :
                       AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
  }

  const TargetRegisterClass *SuperRC =
    CI.IsX2 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);

  auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg);

  if (CI.InstClass == BUFFER_LOAD_OFFEN)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
      .addImm(MergedOffset) // offset
      .addImm(CI.GLC0)      // glc
      .addImm(CI.SLC0)      // slc
      .addImm(0)            // tfe
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
  unsigned SubRegIdx1 = CI.IsX2 ? AMDGPU::sub2_sub3 : AMDGPU::sub1;

  // Handle descending offsets.
  if (CI.Offset0 > CI.Offset1)
    std::swap(SubRegIdx0, SubRegIdx1);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);

  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

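// Map a buffer-store opcode to its double-width equivalent, reporting whether
// the input was already a dwordx2 store and whether it uses the offen
// addressing form. Returns 0 for opcodes this pass does not merge.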
unsigned SILoadStoreOptimizer::promoteBufferStoreOpcode(
  const MachineInstr &I, bool &IsX2, bool &IsOffen) const {
  IsX2 = false;
  IsOffen = false;

  switch (I.getOpcode()) {
  case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFEN;
  case AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact:
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFEN_exact;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN:
    IsX2 = true;
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFEN;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN_exact:
    IsX2 = true;
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFEN_exact;
  case AMDGPU::BUFFER_STORE_DWORD_OFFSET:
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact:
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET_exact;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFSET:
    IsX2 = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFSET_exact:
    IsX2 = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET_exact;
  }
  return 0;
}

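// Merge two buffer_store_dword(x2) instructions: build the combined data in a
// new wide register with a REG_SEQUENCE, emit the wider store, and erase the
// two original stores.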
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeBufferStorePair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  bool Unused1, Unused2;
  unsigned Opcode = promoteBufferStoreOpcode(*CI.I, Unused1, Unused2);

  unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
  unsigned SubRegIdx1 = CI.IsX2 ? AMDGPU::sub2_sub3 : AMDGPU::sub1;

  // Handle descending offsets.
  if (CI.Offset0 > CI.Offset1)
    std::swap(SubRegIdx0, SubRegIdx1);

  // Copy to the new source register.
  const TargetRegisterClass *SuperRC =
    CI.IsX2 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass;
  unsigned SrcReg = MRI->createVirtualRegister(SuperRC);

  const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Src1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);

  BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::REG_SEQUENCE), SrcReg)
      .add(*Src0)
      .addImm(SubRegIdx0)
      .add(*Src1)
      .addImm(SubRegIdx1);

  auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode))
      .addReg(SrcReg, RegState::Kill);

  if (CI.InstClass == BUFFER_STORE_OFFEN)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
      .addImm(std::min(CI.Offset0, CI.Offset1)) // offset
      .addImm(CI.GLC0)      // glc
      .addImm(CI.SLC0)      // slc
      .addImm(0)            // tfe
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  moveInstsAfter(MIB, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

// Scan through looking for adjacent LDS operations with constant offsets from
// the same base register. We rely on the scheduler to do the hard work of
// clustering nearby loads, and assume these are all adjacent.
bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
  bool Modified = false;

  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
    MachineInstr &MI = *I;

    // Don't combine if volatile.
    if (MI.hasOrderedMemoryRef()) {
      ++I;
      continue;
    }

    CombineInfo CI;
    CI.I = I;
    unsigned Opc = MI.getOpcode();
    if (Opc == AMDGPU::DS_READ_B32 || Opc == AMDGPU::DS_READ_B64 ||
        Opc == AMDGPU::DS_READ_B32_gfx9 || Opc == AMDGPU::DS_READ_B64_gfx9) {

      CI.InstClass = DS_READ_WRITE;
      CI.EltSize =
        (Opc == AMDGPU::DS_READ_B64 || Opc == AMDGPU::DS_READ_B64_gfx9) ? 8 : 4;

      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeRead2Pair(CI);
      } else {
        ++I;
      }

      continue;
    } else if (Opc == AMDGPU::DS_WRITE_B32 || Opc == AMDGPU::DS_WRITE_B64 ||
               Opc == AMDGPU::DS_WRITE_B32_gfx9 ||
               Opc == AMDGPU::DS_WRITE_B64_gfx9) {
      CI.InstClass = DS_READ_WRITE;
      CI.EltSize =
        (Opc == AMDGPU::DS_WRITE_B64 || Opc == AMDGPU::DS_WRITE_B64_gfx9) ? 8 : 4;

      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeWrite2Pair(CI);
      } else {
        ++I;
      }

      continue;
    }
    if (STM->hasSBufferLoadStoreAtomicDwordxN() &&
        (Opc == AMDGPU::S_BUFFER_LOAD_DWORD_IMM ||
         Opc == AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM)) {
      // EltSize is in units of the offset encoding.
      CI.InstClass = S_BUFFER_LOAD_IMM;
      CI.EltSize = AMDGPU::getSMRDEncodedOffset(*STM, 4);
      CI.IsX2 = Opc == AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeSBufferLoadImmPair(CI);
        if (!CI.IsX2)
          CreatedX2++;
      } else {
        ++I;
      }
      continue;
    }
    if (Opc == AMDGPU::BUFFER_LOAD_DWORD_OFFEN ||
        Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN ||
        Opc == AMDGPU::BUFFER_LOAD_DWORD_OFFSET ||
        Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET) {
      if (Opc == AMDGPU::BUFFER_LOAD_DWORD_OFFEN ||
          Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN)
        CI.InstClass = BUFFER_LOAD_OFFEN;
      else
        CI.InstClass = BUFFER_LOAD_OFFSET;

      CI.EltSize = 4;
      CI.IsX2 = Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN ||
                Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeBufferLoadPair(CI);
        if (!CI.IsX2)
          CreatedX2++;
      } else {
        ++I;
      }
      continue;
    }

    bool StoreIsX2, IsOffen;
    if (promoteBufferStoreOpcode(*I, StoreIsX2, IsOffen)) {
      CI.InstClass = IsOffen ? BUFFER_STORE_OFFEN : BUFFER_STORE_OFFSET;
      CI.EltSize = 4;
      CI.IsX2 = StoreIsX2;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeBufferStorePair(CI);
        if (!CI.IsX2)
          CreatedX2++;
      } else {
        ++I;
      }
      continue;
    }

    ++I;
  }

  return Modified;
}

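// Set up the subtarget and register info, then run optimizeBlock over every
// block. A block is optimized a second time when the first pass created new
// dwordx2 accesses, so they can be merged again into dwordx4 forms.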
bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  STM = &MF.getSubtarget<SISubtarget>();
  if (!STM->loadStoreOptEnabled())
    return false;

  TII = STM->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  assert(MRI->isSSA() && "Must be run on SSA");

  DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");

  bool Modified = false;

  for (MachineBasicBlock &MBB : MF) {
    CreatedX2 = 0;
    Modified |= optimizeBlock(MBB);

    // Run again to convert x2 to x4.
    if (CreatedX2 >= 1)
      Modified |= optimizeBlock(MBB);
  }

  return Modified;
}