1 //===- SILoadStoreOptimizer.cpp -------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
// This pass tries to fuse DS instructions with nearby immediate offsets.
10 // This will fuse operations such as
11 //  ds_read_b32 v0, v2 offset:16
12 //  ds_read_b32 v1, v2 offset:32
13 // ==>
14 //   ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
15 //
16 // The same is done for certain SMEM and VMEM opcodes, e.g.:
17 //  s_buffer_load_dword s4, s[0:3], 4
18 //  s_buffer_load_dword s5, s[0:3], 8
19 // ==>
20 //  s_buffer_load_dwordx2 s[4:5], s[0:3], 4
21 //
// This pass also tries to promote a constant offset to the immediate by
// adjusting the base. It tries to use a base from nearby instructions that
// allows the access to have a 13-bit constant offset, which is then promoted
// to the immediate.
26 // E.g.
27 //  s_movk_i32 s0, 0x1800
28 //  v_add_co_u32_e32 v0, vcc, s0, v2
29 //  v_addc_co_u32_e32 v1, vcc, 0, v6, vcc
30 //
31 //  s_movk_i32 s0, 0x1000
32 //  v_add_co_u32_e32 v5, vcc, s0, v2
33 //  v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
34 //  global_load_dwordx2 v[5:6], v[5:6], off
35 //  global_load_dwordx2 v[0:1], v[0:1], off
36 // =>
37 //  s_movk_i32 s0, 0x1000
38 //  v_add_co_u32_e32 v5, vcc, s0, v2
39 //  v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
40 //  global_load_dwordx2 v[5:6], v[5:6], off
41 //  global_load_dwordx2 v[0:1], v[5:6], off offset:2048
42 //
43 // Future improvements:
44 //
45 // - This currently relies on the scheduler to place loads and stores next to
46 //   each other, and then only merges adjacent pairs of instructions. It would
47 //   be good to be more flexible with interleaved instructions, and possibly run
//   before scheduling. It currently misses stores of constants because loading
49 //   the constant into the data register is placed between the stores, although
50 //   this is arguably a scheduling problem.
51 //
// - Recomputing live intervals seems inefficient. This currently only matches
//   one pair, recomputes live intervals, and then moves on to the next pair. It
//   would be better to compute a list of all merges that need to occur.
55 //
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads has offsets that are too large to fit in the 8-bit
//   offset field, but the offsets are close enough to each other that their
//   differences fit, we can add to the base pointer and use the new, reduced
//   offsets.
60 //
61 //===----------------------------------------------------------------------===//
62 
63 #include "AMDGPU.h"
64 #include "AMDGPUSubtarget.h"
65 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
66 #include "SIInstrInfo.h"
67 #include "SIRegisterInfo.h"
68 #include "Utils/AMDGPUBaseInfo.h"
69 #include "llvm/ADT/ArrayRef.h"
70 #include "llvm/ADT/SmallVector.h"
71 #include "llvm/ADT/StringRef.h"
72 #include "llvm/Analysis/AliasAnalysis.h"
73 #include "llvm/CodeGen/MachineBasicBlock.h"
74 #include "llvm/CodeGen/MachineFunction.h"
75 #include "llvm/CodeGen/MachineFunctionPass.h"
76 #include "llvm/CodeGen/MachineInstr.h"
77 #include "llvm/CodeGen/MachineInstrBuilder.h"
78 #include "llvm/CodeGen/MachineOperand.h"
79 #include "llvm/CodeGen/MachineRegisterInfo.h"
80 #include "llvm/IR/DebugLoc.h"
81 #include "llvm/Pass.h"
82 #include "llvm/Support/Debug.h"
83 #include "llvm/Support/MathExtras.h"
84 #include "llvm/Support/raw_ostream.h"
85 #include <algorithm>
86 #include <cassert>
87 #include <cstdlib>
88 #include <iterator>
89 #include <utility>
90 
91 using namespace llvm;
92 
93 #define DEBUG_TYPE "si-load-store-opt"
94 
95 namespace {
96 enum InstClassEnum {
97   UNKNOWN,
98   DS_READ,
99   DS_WRITE,
100   S_BUFFER_LOAD_IMM,
101   BUFFER_LOAD_OFFEN = AMDGPU::BUFFER_LOAD_DWORD_OFFEN,
102   BUFFER_LOAD_OFFSET = AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
103   BUFFER_STORE_OFFEN = AMDGPU::BUFFER_STORE_DWORD_OFFEN,
104   BUFFER_STORE_OFFSET = AMDGPU::BUFFER_STORE_DWORD_OFFSET,
105   BUFFER_LOAD_OFFEN_exact = AMDGPU::BUFFER_LOAD_DWORD_OFFEN_exact,
106   BUFFER_LOAD_OFFSET_exact = AMDGPU::BUFFER_LOAD_DWORD_OFFSET_exact,
107   BUFFER_STORE_OFFEN_exact = AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact,
108   BUFFER_STORE_OFFSET_exact = AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact,
109 };
110 
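// Bit flags describing which address operands an instruction carries; used by
// getRegs() to decide which operands to compare in findMatchingInst().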
111 enum RegisterEnum {
112   SBASE = 0x1,
113   SRSRC = 0x2,
114   SOFFSET = 0x4,
115   VADDR = 0x8,
116   ADDR = 0x10,
117 };
118 
119 class SILoadStoreOptimizer : public MachineFunctionPass {
120   struct CombineInfo {
121     MachineBasicBlock::iterator I;
122     MachineBasicBlock::iterator Paired;
123     unsigned EltSize;
124     unsigned Offset0;
125     unsigned Offset1;
126     unsigned Width0;
127     unsigned Width1;
128     unsigned BaseOff;
129     InstClassEnum InstClass;
130     bool GLC0;
131     bool GLC1;
132     bool SLC0;
133     bool SLC1;
134     bool DLC0;
135     bool DLC1;
136     bool UseST64;
137     SmallVector<MachineInstr *, 8> InstsToMove;
138   };
139 
140   struct BaseRegisters {
141     unsigned LoReg = 0;
142     unsigned HiReg = 0;
143 
144     unsigned LoSubReg = 0;
145     unsigned HiSubReg = 0;
146   };
147 
148   struct MemAddress {
149     BaseRegisters Base;
150     int64_t Offset = 0;
151   };
152 
153   using MemInfoMap = DenseMap<MachineInstr *, MemAddress>;
154 
155 private:
156   const GCNSubtarget *STM = nullptr;
157   const SIInstrInfo *TII = nullptr;
158   const SIRegisterInfo *TRI = nullptr;
159   MachineRegisterInfo *MRI = nullptr;
160   AliasAnalysis *AA = nullptr;
161   bool OptimizeAgain;
162 
163   static bool offsetsCanBeCombined(CombineInfo &CI);
164   static bool widthsFit(const GCNSubtarget &STM, const CombineInfo &CI);
165   static unsigned getNewOpcode(const CombineInfo &CI);
166   static std::pair<unsigned, unsigned> getSubRegIdxs(const CombineInfo &CI);
167   const TargetRegisterClass *getTargetRegisterClass(const CombineInfo &CI);
168   unsigned getOpcodeWidth(const MachineInstr &MI) const;
169   InstClassEnum getInstClass(unsigned Opc) const;
170   unsigned getRegs(unsigned Opc) const;
171 
172   bool findMatchingInst(CombineInfo &CI);
173 
174   unsigned read2Opcode(unsigned EltSize) const;
175   unsigned read2ST64Opcode(unsigned EltSize) const;
176   MachineBasicBlock::iterator mergeRead2Pair(CombineInfo &CI);
177 
178   unsigned write2Opcode(unsigned EltSize) const;
179   unsigned write2ST64Opcode(unsigned EltSize) const;
180   MachineBasicBlock::iterator mergeWrite2Pair(CombineInfo &CI);
181   MachineBasicBlock::iterator mergeSBufferLoadImmPair(CombineInfo &CI);
182   MachineBasicBlock::iterator mergeBufferLoadPair(CombineInfo &CI);
183   MachineBasicBlock::iterator mergeBufferStorePair(CombineInfo &CI);
184 
185   void updateBaseAndOffset(MachineInstr &I, unsigned NewBase,
186                            int32_t NewOffset);
187   unsigned computeBase(MachineInstr &MI, const MemAddress &Addr);
188   MachineOperand createRegOrImm(int32_t Val, MachineInstr &MI);
189   Optional<int32_t> extractConstOffset(const MachineOperand &Op);
190   void processBaseWithConstOffset(const MachineOperand &Base, MemAddress &Addr);
  /// Promotes a constant offset to the immediate by adjusting the base. It
  /// tries to use a base from nearby instructions that allows the access to
  /// have a 13-bit constant offset, which is then promoted to the immediate.
194   bool promoteConstantOffsetToImm(MachineInstr &CI,
195                                   MemInfoMap &Visited,
196                                   SmallPtrSet<MachineInstr *, 4> &Promoted);
197 
198 public:
199   static char ID;
200 
201   SILoadStoreOptimizer() : MachineFunctionPass(ID) {
202     initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
203   }
204 
205   bool optimizeBlock(MachineBasicBlock &MBB);
206 
207   bool runOnMachineFunction(MachineFunction &MF) override;
208 
209   StringRef getPassName() const override { return "SI Load Store Optimizer"; }
210 
211   void getAnalysisUsage(AnalysisUsage &AU) const override {
212     AU.setPreservesCFG();
213     AU.addRequired<AAResultsWrapperPass>();
214 
215     MachineFunctionPass::getAnalysisUsage(AU);
216   }
217 };
218 
219 } // end anonymous namespace.
220 
221 INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
222                       "SI Load Store Optimizer", false, false)
223 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
224 INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE, "SI Load Store Optimizer",
225                     false, false)
226 
227 char SILoadStoreOptimizer::ID = 0;
228 
229 char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;
230 
231 FunctionPass *llvm::createSILoadStoreOptimizerPass() {
232   return new SILoadStoreOptimizer();
233 }
234 
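// Move each instruction in \p InstsToMove to just after \p I, preserving their
// relative order.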
235 static void moveInstsAfter(MachineBasicBlock::iterator I,
236                            ArrayRef<MachineInstr *> InstsToMove) {
237   MachineBasicBlock *MBB = I->getParent();
238   ++I;
239   for (MachineInstr *MI : InstsToMove) {
240     MI->removeFromParent();
241     MBB->insert(I, MI);
242   }
243 }
244 
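// Record the registers defined by \p MI, and the physical registers it reads,
// so that later instructions depending on them can be detected.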
245 static void addDefsUsesToList(const MachineInstr &MI,
246                               DenseSet<unsigned> &RegDefs,
247                               DenseSet<unsigned> &PhysRegUses) {
248   for (const MachineOperand &Op : MI.operands()) {
249     if (Op.isReg()) {
250       if (Op.isDef())
251         RegDefs.insert(Op.getReg());
252       else if (Op.readsReg() && Register::isPhysicalRegister(Op.getReg()))
253         PhysRegUses.insert(Op.getReg());
254     }
255   }
256 }
257 
258 static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
259                                       MachineBasicBlock::iterator B,
260                                       AliasAnalysis *AA) {
261   // RAW or WAR - cannot reorder
262   // WAW - cannot reorder
263   // RAR - safe to reorder
264   return !(A->mayStore() || B->mayStore()) || !A->mayAlias(AA, *B, true);
265 }
266 
267 // Add MI and its defs to the lists if MI reads one of the defs that are
268 // already in the list. Returns true in that case.
269 static bool addToListsIfDependent(MachineInstr &MI, DenseSet<unsigned> &RegDefs,
270                                   DenseSet<unsigned> &PhysRegUses,
271                                   SmallVectorImpl<MachineInstr *> &Insts) {
272   for (MachineOperand &Use : MI.operands()) {
273     // If one of the defs is read, then there is a use of Def between I and the
274     // instruction that I will potentially be merged with. We will need to move
275     // this instruction after the merged instructions.
276     //
277     // Similarly, if there is a def which is read by an instruction that is to
278     // be moved for merging, then we need to move the def-instruction as well.
279     // This can only happen for physical registers such as M0; virtual
280     // registers are in SSA form.
281     if (Use.isReg() &&
282         ((Use.readsReg() && RegDefs.count(Use.getReg())) ||
283          (Use.isDef() && RegDefs.count(Use.getReg())) ||
284          (Use.isDef() && Register::isPhysicalRegister(Use.getReg()) &&
285           PhysRegUses.count(Use.getReg())))) {
286       Insts.push_back(&MI);
287       addDefsUsesToList(MI, RegDefs, PhysRegUses);
288       return true;
289     }
290   }
291 
292   return false;
293 }
294 
295 static bool canMoveInstsAcrossMemOp(MachineInstr &MemOp,
296                                     ArrayRef<MachineInstr *> InstsToMove,
297                                     AliasAnalysis *AA) {
298   assert(MemOp.mayLoadOrStore());
299 
300   for (MachineInstr *InstToMove : InstsToMove) {
301     if (!InstToMove->mayLoadOrStore())
302       continue;
303     if (!memAccessesCanBeReordered(MemOp, *InstToMove, AA))
304       return false;
305   }
306   return true;
307 }
308 
// This function assumes that \p A and \p B are identical except for
// size and offset, and that they reference adjacent memory.
311 static MachineMemOperand *combineKnownAdjacentMMOs(MachineFunction &MF,
312                                                    const MachineMemOperand *A,
313                                                    const MachineMemOperand *B) {
314   unsigned MinOffset = std::min(A->getOffset(), B->getOffset());
315   unsigned Size = A->getSize() + B->getSize();
316   // This function adds the offset parameter to the existing offset for A,
317   // so we pass 0 here as the offset and then manually set it to the correct
318   // value after the call.
319   MachineMemOperand *MMO = MF.getMachineMemOperand(A, 0, Size);
320   MMO->setOffset(MinOffset);
321   return MMO;
322 }
323 
324 bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI) {
325   // XXX - Would the same offset be OK? Is there any reason this would happen or
326   // be useful?
327   if (CI.Offset0 == CI.Offset1)
328     return false;
329 
330   // This won't be valid if the offset isn't aligned.
331   if ((CI.Offset0 % CI.EltSize != 0) || (CI.Offset1 % CI.EltSize != 0))
332     return false;
333 
334   unsigned EltOffset0 = CI.Offset0 / CI.EltSize;
335   unsigned EltOffset1 = CI.Offset1 / CI.EltSize;
336   CI.UseST64 = false;
337   CI.BaseOff = 0;
338 
339   // Handle SMEM and VMEM instructions.
340   if ((CI.InstClass != DS_READ) && (CI.InstClass != DS_WRITE)) {
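    // SMEM and VMEM accesses merge only when they are exactly adjacent and
    // their GLC/DLC (and, except for S_BUFFER_LOAD_IMM, SLC) bits match.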
341     return (EltOffset0 + CI.Width0 == EltOffset1 ||
342             EltOffset1 + CI.Width1 == EltOffset0) &&
343            CI.GLC0 == CI.GLC1 && CI.DLC0 == CI.DLC1 &&
344            (CI.InstClass == S_BUFFER_LOAD_IMM || CI.SLC0 == CI.SLC1);
345   }
346 
347   // If the offset in elements doesn't fit in 8-bits, we might be able to use
348   // the stride 64 versions.
349   if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64) == 0 &&
350       isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
351     CI.Offset0 = EltOffset0 / 64;
352     CI.Offset1 = EltOffset1 / 64;
353     CI.UseST64 = true;
354     return true;
355   }
356 
357   // Check if the new offsets fit in the reduced 8-bit range.
358   if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
359     CI.Offset0 = EltOffset0;
360     CI.Offset1 = EltOffset1;
361     return true;
362   }
363 
364   // Try to shift base address to decrease offsets.
365   unsigned OffsetDiff = std::abs((int)EltOffset1 - (int)EltOffset0);
366   CI.BaseOff = std::min(CI.Offset0, CI.Offset1);
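  // BaseOff is in bytes; the offsets computed below are in element units
  // relative to BaseOff / EltSize (further scaled by 64 in the stride-64 case).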
367 
368   if ((OffsetDiff % 64 == 0) && isUInt<8>(OffsetDiff / 64)) {
369     CI.Offset0 = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64;
370     CI.Offset1 = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64;
371     CI.UseST64 = true;
372     return true;
373   }
374 
375   if (isUInt<8>(OffsetDiff)) {
376     CI.Offset0 = EltOffset0 - CI.BaseOff / CI.EltSize;
377     CI.Offset1 = EltOffset1 - CI.BaseOff / CI.EltSize;
378     return true;
379   }
380 
381   return false;
382 }
383 
384 bool SILoadStoreOptimizer::widthsFit(const GCNSubtarget &STM,
385                                      const CombineInfo &CI) {
386   const unsigned Width = (CI.Width0 + CI.Width1);
387   switch (CI.InstClass) {
388   default:
389     return (Width <= 4) && (STM.hasDwordx3LoadStores() || (Width != 3));
390   case S_BUFFER_LOAD_IMM:
391     switch (Width) {
392     default:
393       return false;
394     case 2:
395     case 4:
396       return true;
397     }
398   }
399 }
400 
401 unsigned SILoadStoreOptimizer::getOpcodeWidth(const MachineInstr &MI) const {
402   const unsigned Opc = MI.getOpcode();
403 
404   if (TII->isMUBUF(MI)) {
405     return AMDGPU::getMUBUFDwords(Opc);
406   }
407 
408   switch (Opc) {
409   default:
410     return 0;
411   case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
412     return 1;
413   case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
414     return 2;
415   case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
416     return 4;
417   }
418 }
419 
420 InstClassEnum SILoadStoreOptimizer::getInstClass(unsigned Opc) const {
421   if (TII->isMUBUF(Opc)) {
422     const int baseOpcode = AMDGPU::getMUBUFBaseOpcode(Opc);
423 
424     // If we couldn't identify the opcode, bail out.
425     if (baseOpcode == -1) {
426       return UNKNOWN;
427     }
428 
429     switch (baseOpcode) {
430     default:
431       return UNKNOWN;
432     case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
433       return BUFFER_LOAD_OFFEN;
434     case AMDGPU::BUFFER_LOAD_DWORD_OFFSET:
435       return BUFFER_LOAD_OFFSET;
436     case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
437       return BUFFER_STORE_OFFEN;
438     case AMDGPU::BUFFER_STORE_DWORD_OFFSET:
439       return BUFFER_STORE_OFFSET;
440     case AMDGPU::BUFFER_LOAD_DWORD_OFFEN_exact:
441       return BUFFER_LOAD_OFFEN_exact;
442     case AMDGPU::BUFFER_LOAD_DWORD_OFFSET_exact:
443       return BUFFER_LOAD_OFFSET_exact;
444     case AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact:
445       return BUFFER_STORE_OFFEN_exact;
446     case AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact:
447       return BUFFER_STORE_OFFSET_exact;
448     }
449   }
450 
451   switch (Opc) {
452   default:
453     return UNKNOWN;
454   case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
455   case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
456   case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
457     return S_BUFFER_LOAD_IMM;
458   case AMDGPU::DS_READ_B32:
459   case AMDGPU::DS_READ_B64:
460   case AMDGPU::DS_READ_B32_gfx9:
461   case AMDGPU::DS_READ_B64_gfx9:
462     return DS_READ;
463   case AMDGPU::DS_WRITE_B32:
464   case AMDGPU::DS_WRITE_B64:
465   case AMDGPU::DS_WRITE_B32_gfx9:
466   case AMDGPU::DS_WRITE_B64_gfx9:
467     return DS_WRITE;
468   }
469 }
470 
471 unsigned SILoadStoreOptimizer::getRegs(unsigned Opc) const {
472   if (TII->isMUBUF(Opc)) {
473     unsigned result = 0;
474 
475     if (AMDGPU::getMUBUFHasVAddr(Opc)) {
476       result |= VADDR;
477     }
478 
479     if (AMDGPU::getMUBUFHasSrsrc(Opc)) {
480       result |= SRSRC;
481     }
482 
483     if (AMDGPU::getMUBUFHasSoffset(Opc)) {
484       result |= SOFFSET;
485     }
486 
487     return result;
488   }
489 
490   switch (Opc) {
491   default:
492     return 0;
493   case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
494   case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
495   case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
496     return SBASE;
497   case AMDGPU::DS_READ_B32:
498   case AMDGPU::DS_READ_B64:
499   case AMDGPU::DS_READ_B32_gfx9:
500   case AMDGPU::DS_READ_B64_gfx9:
501   case AMDGPU::DS_WRITE_B32:
502   case AMDGPU::DS_WRITE_B64:
503   case AMDGPU::DS_WRITE_B32_gfx9:
504   case AMDGPU::DS_WRITE_B64_gfx9:
505     return ADDR;
506   }
507 }
508 
509 bool SILoadStoreOptimizer::findMatchingInst(CombineInfo &CI) {
510   MachineBasicBlock *MBB = CI.I->getParent();
511   MachineBasicBlock::iterator E = MBB->end();
512   MachineBasicBlock::iterator MBBI = CI.I;
513 
514   const unsigned Opc = CI.I->getOpcode();
515   const InstClassEnum InstClass = getInstClass(Opc);
516 
517   if (InstClass == UNKNOWN) {
518     return false;
519   }
520 
521   const unsigned Regs = getRegs(Opc);
522 
523   unsigned AddrOpName[5] = {0};
524   int AddrIdx[5];
525   const MachineOperand *AddrReg[5];
526   unsigned NumAddresses = 0;
527 
528   if (Regs & ADDR) {
529     AddrOpName[NumAddresses++] = AMDGPU::OpName::addr;
530   }
531 
532   if (Regs & SBASE) {
533     AddrOpName[NumAddresses++] = AMDGPU::OpName::sbase;
534   }
535 
536   if (Regs & SRSRC) {
537     AddrOpName[NumAddresses++] = AMDGPU::OpName::srsrc;
538   }
539 
540   if (Regs & SOFFSET) {
541     AddrOpName[NumAddresses++] = AMDGPU::OpName::soffset;
542   }
543 
544   if (Regs & VADDR) {
545     AddrOpName[NumAddresses++] = AMDGPU::OpName::vaddr;
546   }
547 
548   for (unsigned i = 0; i < NumAddresses; i++) {
549     AddrIdx[i] = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AddrOpName[i]);
550     AddrReg[i] = &CI.I->getOperand(AddrIdx[i]);
551 
552     // We only ever merge operations with the same base address register, so
553     // don't bother scanning forward if there are no other uses.
554     if (AddrReg[i]->isReg() &&
555         (Register::isPhysicalRegister(AddrReg[i]->getReg()) ||
556          MRI->hasOneNonDBGUse(AddrReg[i]->getReg())))
557       return false;
558   }
559 
560   ++MBBI;
561 
562   DenseSet<unsigned> RegDefsToMove;
563   DenseSet<unsigned> PhysRegUsesToMove;
564   addDefsUsesToList(*CI.I, RegDefsToMove, PhysRegUsesToMove);
565 
566   for (; MBBI != E; ++MBBI) {
567     const bool IsDS = (InstClass == DS_READ) || (InstClass == DS_WRITE);
568 
569     if ((getInstClass(MBBI->getOpcode()) != InstClass) ||
570         (IsDS && (MBBI->getOpcode() != Opc))) {
571       // This is not a matching DS instruction, but we can keep looking as
      // long as one of these conditions is met:
573       // 1. It is safe to move I down past MBBI.
574       // 2. It is safe to move MBBI down past the instruction that I will
575       //    be merged into.
576 
577       if (MBBI->hasUnmodeledSideEffects()) {
578         // We can't re-order this instruction with respect to other memory
579         // operations, so we fail both conditions mentioned above.
580         return false;
581       }
582 
583       if (MBBI->mayLoadOrStore() &&
584           (!memAccessesCanBeReordered(*CI.I, *MBBI, AA) ||
585            !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))) {
586         // We fail condition #1, but we may still be able to satisfy condition
587         // #2.  Add this instruction to the move list and then we will check
588         // if condition #2 holds once we have selected the matching instruction.
589         CI.InstsToMove.push_back(&*MBBI);
590         addDefsUsesToList(*MBBI, RegDefsToMove, PhysRegUsesToMove);
591         continue;
592       }
593 
594       // When we match I with another DS instruction we will be moving I down
      // to the location of the matched instruction, so any uses of I will need
      // to be moved down as well.
597       addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
598                             CI.InstsToMove);
599       continue;
600     }
601 
602     // Don't merge volatiles.
603     if (MBBI->hasOrderedMemoryRef())
604       return false;
605 
606     // Handle a case like
607     //   DS_WRITE_B32 addr, v, idx0
608     //   w = DS_READ_B32 addr, idx0
609     //   DS_WRITE_B32 addr, f(w), idx1
610     // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
611     // merging of the two writes.
612     if (addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
613                               CI.InstsToMove))
614       continue;
615 
616     bool Match = true;
617     for (unsigned i = 0; i < NumAddresses; i++) {
618       const MachineOperand &AddrRegNext = MBBI->getOperand(AddrIdx[i]);
619 
620       if (AddrReg[i]->isImm() || AddrRegNext.isImm()) {
621         if (AddrReg[i]->isImm() != AddrRegNext.isImm() ||
622             AddrReg[i]->getImm() != AddrRegNext.getImm()) {
623           Match = false;
624           break;
625         }
626         continue;
627       }
628 
629       // Check same base pointer. Be careful of subregisters, which can occur
630       // with vectors of pointers.
631       if (AddrReg[i]->getReg() != AddrRegNext.getReg() ||
632           AddrReg[i]->getSubReg() != AddrRegNext.getSubReg()) {
633         Match = false;
634         break;
635       }
636     }
637 
638     if (Match) {
639       int OffsetIdx =
640           AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AMDGPU::OpName::offset);
641       CI.Offset0 = CI.I->getOperand(OffsetIdx).getImm();
642       CI.Width0 = getOpcodeWidth(*CI.I);
643       CI.Offset1 = MBBI->getOperand(OffsetIdx).getImm();
644       CI.Width1 = getOpcodeWidth(*MBBI);
645       CI.Paired = MBBI;
646 
647       if ((CI.InstClass == DS_READ) || (CI.InstClass == DS_WRITE)) {
648         CI.Offset0 &= 0xffff;
649         CI.Offset1 &= 0xffff;
650       } else {
651         CI.GLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::glc)->getImm();
652         CI.GLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::glc)->getImm();
653         if (CI.InstClass != S_BUFFER_LOAD_IMM) {
654           CI.SLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::slc)->getImm();
655           CI.SLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::slc)->getImm();
656         }
657         CI.DLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::dlc)->getImm();
658         CI.DLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::dlc)->getImm();
659       }
660 
      // Check that both offsets fit in the reduced range.
662       // We also need to go through the list of instructions that we plan to
663       // move and make sure they are all safe to move down past the merged
664       // instruction.
665       if (widthsFit(*STM, CI) && offsetsCanBeCombined(CI))
666         if (canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))
667           return true;
668     }
669 
    // We've found a load/store that we couldn't merge for some reason.
    // We could potentially keep looking, but we'd need to make sure that
    // it was safe to move I and also all the instructions in InstsToMove
    // down past this instruction.
    // Check if we can move I across MBBI and if we can move all of I's users.
675     if (!memAccessesCanBeReordered(*CI.I, *MBBI, AA) ||
676         !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))
677       break;
678   }
679   return false;
680 }
681 
682 unsigned SILoadStoreOptimizer::read2Opcode(unsigned EltSize) const {
683   if (STM->ldsRequiresM0Init())
684     return (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64;
685   return (EltSize == 4) ? AMDGPU::DS_READ2_B32_gfx9 : AMDGPU::DS_READ2_B64_gfx9;
686 }
687 
688 unsigned SILoadStoreOptimizer::read2ST64Opcode(unsigned EltSize) const {
689   if (STM->ldsRequiresM0Init())
690     return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32 : AMDGPU::DS_READ2ST64_B64;
691 
692   return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32_gfx9
693                         : AMDGPU::DS_READ2ST64_B64_gfx9;
694 }
695 
696 MachineBasicBlock::iterator
697 SILoadStoreOptimizer::mergeRead2Pair(CombineInfo &CI) {
698   MachineBasicBlock *MBB = CI.I->getParent();
699 
700   // Be careful, since the addresses could be subregisters themselves in weird
701   // cases, like vectors of pointers.
702   const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
703 
704   const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
705   const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdst);
706 
707   unsigned NewOffset0 = CI.Offset0;
708   unsigned NewOffset1 = CI.Offset1;
709   unsigned Opc =
710       CI.UseST64 ? read2ST64Opcode(CI.EltSize) : read2Opcode(CI.EltSize);
711 
712   unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
713   unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;
714 
715   if (NewOffset0 > NewOffset1) {
716     // Canonicalize the merged instruction so the smaller offset comes first.
717     std::swap(NewOffset0, NewOffset1);
718     std::swap(SubRegIdx0, SubRegIdx1);
719   }
720 
721   assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
722          (NewOffset0 != NewOffset1) && "Computed offset doesn't fit");
723 
724   const MCInstrDesc &Read2Desc = TII->get(Opc);
725 
726   const TargetRegisterClass *SuperRC =
727       (CI.EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
728   unsigned DestReg = MRI->createVirtualRegister(SuperRC);
729 
730   DebugLoc DL = CI.I->getDebugLoc();
731 
732   unsigned BaseReg = AddrReg->getReg();
733   unsigned BaseSubReg = AddrReg->getSubReg();
734   unsigned BaseRegFlags = 0;
735   if (CI.BaseOff) {
736     unsigned ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
737     BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
738         .addImm(CI.BaseOff);
739 
740     BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
741     BaseRegFlags = RegState::Kill;
742 
743     TII->getAddNoCarry(*MBB, CI.Paired, DL, BaseReg)
744         .addReg(ImmReg)
745         .addReg(AddrReg->getReg(), 0, BaseSubReg)
746         .addImm(0); // clamp bit
747     BaseSubReg = 0;
748   }
749 
750   MachineInstrBuilder Read2 =
751       BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
752           .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr
753           .addImm(NewOffset0)                        // offset0
754           .addImm(NewOffset1)                        // offset1
755           .addImm(0)                                 // gds
756           .cloneMergedMemRefs({&*CI.I, &*CI.Paired});
757 
758   (void)Read2;
759 
760   const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
761 
762   // Copy to the old destination registers.
763   BuildMI(*MBB, CI.Paired, DL, CopyDesc)
764       .add(*Dest0) // Copy to same destination including flags and sub reg.
765       .addReg(DestReg, 0, SubRegIdx0);
766   MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
767                             .add(*Dest1)
768                             .addReg(DestReg, RegState::Kill, SubRegIdx1);
769 
770   moveInstsAfter(Copy1, CI.InstsToMove);
771 
772   MachineBasicBlock::iterator Next = std::next(CI.I);
773   CI.I->eraseFromParent();
774   CI.Paired->eraseFromParent();
775 
776   LLVM_DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
777   return Next;
778 }
779 
780 unsigned SILoadStoreOptimizer::write2Opcode(unsigned EltSize) const {
781   if (STM->ldsRequiresM0Init())
782     return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32 : AMDGPU::DS_WRITE2_B64;
783   return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32_gfx9
784                         : AMDGPU::DS_WRITE2_B64_gfx9;
785 }
786 
787 unsigned SILoadStoreOptimizer::write2ST64Opcode(unsigned EltSize) const {
788   if (STM->ldsRequiresM0Init())
789     return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32
790                           : AMDGPU::DS_WRITE2ST64_B64;
791 
792   return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32_gfx9
793                         : AMDGPU::DS_WRITE2ST64_B64_gfx9;
794 }
795 
796 MachineBasicBlock::iterator
797 SILoadStoreOptimizer::mergeWrite2Pair(CombineInfo &CI) {
798   MachineBasicBlock *MBB = CI.I->getParent();
799 
  // Be sure to use .add() rather than .addReg() with these operands. We want
  // to be sure we preserve the subregister index and any register flags set on
  // them.
802   const MachineOperand *AddrReg =
803       TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
804   const MachineOperand *Data0 =
805       TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
806   const MachineOperand *Data1 =
807       TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::data0);
808 
809   unsigned NewOffset0 = CI.Offset0;
810   unsigned NewOffset1 = CI.Offset1;
811   unsigned Opc =
812       CI.UseST64 ? write2ST64Opcode(CI.EltSize) : write2Opcode(CI.EltSize);
813 
814   if (NewOffset0 > NewOffset1) {
815     // Canonicalize the merged instruction so the smaller offset comes first.
816     std::swap(NewOffset0, NewOffset1);
817     std::swap(Data0, Data1);
818   }
819 
820   assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
821          (NewOffset0 != NewOffset1) && "Computed offset doesn't fit");
822 
823   const MCInstrDesc &Write2Desc = TII->get(Opc);
824   DebugLoc DL = CI.I->getDebugLoc();
825 
826   unsigned BaseReg = AddrReg->getReg();
827   unsigned BaseSubReg = AddrReg->getSubReg();
828   unsigned BaseRegFlags = 0;
829   if (CI.BaseOff) {
830     unsigned ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
831     BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
832         .addImm(CI.BaseOff);
833 
834     BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
835     BaseRegFlags = RegState::Kill;
836 
837     TII->getAddNoCarry(*MBB, CI.Paired, DL, BaseReg)
838         .addReg(ImmReg)
839         .addReg(AddrReg->getReg(), 0, BaseSubReg)
840         .addImm(0); // clamp bit
841     BaseSubReg = 0;
842   }
843 
844   MachineInstrBuilder Write2 =
845       BuildMI(*MBB, CI.Paired, DL, Write2Desc)
846           .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr
847           .add(*Data0)                               // data0
848           .add(*Data1)                               // data1
849           .addImm(NewOffset0)                        // offset0
850           .addImm(NewOffset1)                        // offset1
851           .addImm(0)                                 // gds
852           .cloneMergedMemRefs({&*CI.I, &*CI.Paired});
853 
854   moveInstsAfter(Write2, CI.InstsToMove);
855 
856   MachineBasicBlock::iterator Next = std::next(CI.I);
857   CI.I->eraseFromParent();
858   CI.Paired->eraseFromParent();
859 
860   LLVM_DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
861   return Next;
862 }
863 
864 MachineBasicBlock::iterator
865 SILoadStoreOptimizer::mergeSBufferLoadImmPair(CombineInfo &CI) {
866   MachineBasicBlock *MBB = CI.I->getParent();
867   DebugLoc DL = CI.I->getDebugLoc();
868   const unsigned Opcode = getNewOpcode(CI);
869 
870   const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);
871 
872   unsigned DestReg = MRI->createVirtualRegister(SuperRC);
873   unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);
874 
875   // It shouldn't be possible to get this far if the two instructions
876   // don't have a single memoperand, because MachineInstr::mayAlias()
877   // will return true if this is the case.
878   assert(CI.I->hasOneMemOperand() && CI.Paired->hasOneMemOperand());
879 
880   const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
881   const MachineMemOperand *MMOb = *CI.Paired->memoperands_begin();
882 
883   BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg)
884       .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::sbase))
885       .addImm(MergedOffset) // offset
886       .addImm(CI.GLC0)      // glc
887       .addImm(CI.DLC0)      // dlc
888       .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));
889 
890   std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI);
891   const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
892   const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);
893 
894   // Copy to the old destination registers.
895   const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
896   const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::sdst);
897   const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::sdst);
898 
899   BuildMI(*MBB, CI.Paired, DL, CopyDesc)
900       .add(*Dest0) // Copy to same destination including flags and sub reg.
901       .addReg(DestReg, 0, SubRegIdx0);
902   MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
903                             .add(*Dest1)
904                             .addReg(DestReg, RegState::Kill, SubRegIdx1);
905 
906   moveInstsAfter(Copy1, CI.InstsToMove);
907 
908   MachineBasicBlock::iterator Next = std::next(CI.I);
909   CI.I->eraseFromParent();
910   CI.Paired->eraseFromParent();
911   return Next;
912 }
913 
914 MachineBasicBlock::iterator
915 SILoadStoreOptimizer::mergeBufferLoadPair(CombineInfo &CI) {
916   MachineBasicBlock *MBB = CI.I->getParent();
917   DebugLoc DL = CI.I->getDebugLoc();
918 
919   const unsigned Opcode = getNewOpcode(CI);
920 
921   const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);
922 
923   // Copy to the new source register.
924   unsigned DestReg = MRI->createVirtualRegister(SuperRC);
925   unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);
926 
927   auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg);
928 
929   const unsigned Regs = getRegs(Opcode);
930 
931   if (Regs & VADDR)
932     MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));
933 
934   // It shouldn't be possible to get this far if the two instructions
935   // don't have a single memoperand, because MachineInstr::mayAlias()
936   // will return true if this is the case.
937   assert(CI.I->hasOneMemOperand() && CI.Paired->hasOneMemOperand());
938 
939   const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
940   const MachineMemOperand *MMOb = *CI.Paired->memoperands_begin();
941 
942   MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
943       .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
944       .addImm(MergedOffset) // offset
945       .addImm(CI.GLC0)      // glc
946       .addImm(CI.SLC0)      // slc
947       .addImm(0)            // tfe
948       .addImm(CI.DLC0)      // dlc
949       .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));
950 
951   std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI);
952   const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
953   const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);
954 
955   // Copy to the old destination registers.
956   const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
957   const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
958   const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);
959 
960   BuildMI(*MBB, CI.Paired, DL, CopyDesc)
961       .add(*Dest0) // Copy to same destination including flags and sub reg.
962       .addReg(DestReg, 0, SubRegIdx0);
963   MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
964                             .add(*Dest1)
965                             .addReg(DestReg, RegState::Kill, SubRegIdx1);
966 
967   moveInstsAfter(Copy1, CI.InstsToMove);
968 
969   MachineBasicBlock::iterator Next = std::next(CI.I);
970   CI.I->eraseFromParent();
971   CI.Paired->eraseFromParent();
972   return Next;
973 }
974 
975 unsigned SILoadStoreOptimizer::getNewOpcode(const CombineInfo &CI) {
976   const unsigned Width = CI.Width0 + CI.Width1;
977 
978   switch (CI.InstClass) {
979   default:
980     return AMDGPU::getMUBUFOpcode(CI.InstClass, Width);
981   case UNKNOWN:
982     llvm_unreachable("Unknown instruction class");
983   case S_BUFFER_LOAD_IMM:
984     switch (Width) {
985     default:
986       return 0;
987     case 2:
988       return AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;
989     case 4:
990       return AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM;
991     }
992   }
993 }
994 
995 std::pair<unsigned, unsigned>
996 SILoadStoreOptimizer::getSubRegIdxs(const CombineInfo &CI) {
997   if (CI.Offset0 > CI.Offset1) {
998     switch (CI.Width0) {
999     default:
1000       return std::make_pair(0, 0);
1001     case 1:
1002       switch (CI.Width1) {
1003       default:
1004         return std::make_pair(0, 0);
1005       case 1:
1006         return std::make_pair(AMDGPU::sub1, AMDGPU::sub0);
1007       case 2:
1008         return std::make_pair(AMDGPU::sub2, AMDGPU::sub0_sub1);
1009       case 3:
1010         return std::make_pair(AMDGPU::sub3, AMDGPU::sub0_sub1_sub2);
1011       }
1012     case 2:
1013       switch (CI.Width1) {
1014       default:
1015         return std::make_pair(0, 0);
1016       case 1:
1017         return std::make_pair(AMDGPU::sub1_sub2, AMDGPU::sub0);
1018       case 2:
1019         return std::make_pair(AMDGPU::sub2_sub3, AMDGPU::sub0_sub1);
1020       }
1021     case 3:
1022       switch (CI.Width1) {
1023       default:
1024         return std::make_pair(0, 0);
1025       case 1:
1026         return std::make_pair(AMDGPU::sub1_sub2_sub3, AMDGPU::sub0);
1027       }
1028     }
1029   } else {
1030     switch (CI.Width0) {
1031     default:
1032       return std::make_pair(0, 0);
1033     case 1:
1034       switch (CI.Width1) {
1035       default:
1036         return std::make_pair(0, 0);
1037       case 1:
1038         return std::make_pair(AMDGPU::sub0, AMDGPU::sub1);
1039       case 2:
1040         return std::make_pair(AMDGPU::sub0, AMDGPU::sub1_sub2);
1041       case 3:
1042         return std::make_pair(AMDGPU::sub0, AMDGPU::sub1_sub2_sub3);
1043       }
1044     case 2:
1045       switch (CI.Width1) {
1046       default:
1047         return std::make_pair(0, 0);
1048       case 1:
1049         return std::make_pair(AMDGPU::sub0_sub1, AMDGPU::sub2);
1050       case 2:
1051         return std::make_pair(AMDGPU::sub0_sub1, AMDGPU::sub2_sub3);
1052       }
1053     case 3:
1054       switch (CI.Width1) {
1055       default:
1056         return std::make_pair(0, 0);
1057       case 1:
1058         return std::make_pair(AMDGPU::sub0_sub1_sub2, AMDGPU::sub3);
1059       }
1060     }
1061   }
1062 }
1063 
1064 const TargetRegisterClass *
1065 SILoadStoreOptimizer::getTargetRegisterClass(const CombineInfo &CI) {
1066   if (CI.InstClass == S_BUFFER_LOAD_IMM) {
1067     switch (CI.Width0 + CI.Width1) {
1068     default:
1069       return nullptr;
1070     case 2:
1071       return &AMDGPU::SReg_64_XEXECRegClass;
1072     case 4:
1073       return &AMDGPU::SReg_128RegClass;
1074     case 8:
1075       return &AMDGPU::SReg_256RegClass;
1076     case 16:
1077       return &AMDGPU::SReg_512RegClass;
1078     }
1079   } else {
1080     switch (CI.Width0 + CI.Width1) {
1081     default:
1082       return nullptr;
1083     case 2:
1084       return &AMDGPU::VReg_64RegClass;
1085     case 3:
1086       return &AMDGPU::VReg_96RegClass;
1087     case 4:
1088       return &AMDGPU::VReg_128RegClass;
1089     }
1090   }
1091 }
1092 
1093 MachineBasicBlock::iterator
1094 SILoadStoreOptimizer::mergeBufferStorePair(CombineInfo &CI) {
1095   MachineBasicBlock *MBB = CI.I->getParent();
1096   DebugLoc DL = CI.I->getDebugLoc();
1097 
1098   const unsigned Opcode = getNewOpcode(CI);
1099 
1100   std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI);
1101   const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
1102   const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);
1103 
1104   // Copy to the new source register.
1105   const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);
1106   unsigned SrcReg = MRI->createVirtualRegister(SuperRC);
1107 
1108   const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
1109   const auto *Src1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);
1110 
1111   BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::REG_SEQUENCE), SrcReg)
1112       .add(*Src0)
1113       .addImm(SubRegIdx0)
1114       .add(*Src1)
1115       .addImm(SubRegIdx1);
1116 
1117   auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode))
1118                  .addReg(SrcReg, RegState::Kill);
1119 
1120   const unsigned Regs = getRegs(Opcode);
1121 
1122   if (Regs & VADDR)
1123     MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));
1124 
1125 
1126   // It shouldn't be possible to get this far if the two instructions
1127   // don't have a single memoperand, because MachineInstr::mayAlias()
1128   // will return true if this is the case.
1129   assert(CI.I->hasOneMemOperand() && CI.Paired->hasOneMemOperand());
1130 
1131   const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
1132   const MachineMemOperand *MMOb = *CI.Paired->memoperands_begin();
1133 
1134   MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
1135       .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
1136       .addImm(std::min(CI.Offset0, CI.Offset1)) // offset
1137       .addImm(CI.GLC0)      // glc
1138       .addImm(CI.SLC0)      // slc
1139       .addImm(0)            // tfe
1140       .addImm(CI.DLC0)      // dlc
1141       .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));
1142 
1143   moveInstsAfter(MIB, CI.InstsToMove);
1144 
1145   MachineBasicBlock::iterator Next = std::next(CI.I);
1146   CI.I->eraseFromParent();
1147   CI.Paired->eraseFromParent();
1148   return Next;
1149 }
1150 
1151 MachineOperand
1152 SILoadStoreOptimizer::createRegOrImm(int32_t Val, MachineInstr &MI) {
1153   APInt V(32, Val, true);
1154   if (TII->isInlineConstant(V))
1155     return MachineOperand::CreateImm(Val);
1156 
1157   unsigned Reg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1158   MachineInstr *Mov =
1159   BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
1160           TII->get(AMDGPU::S_MOV_B32), Reg)
1161     .addImm(Val);
1162   (void)Mov;
1163   LLVM_DEBUG(dbgs() << "    "; Mov->dump());
1164   return MachineOperand::CreateReg(Reg, false);
1165 }
1166 
1167 // Compute base address using Addr and return the final register.
1168 unsigned SILoadStoreOptimizer::computeBase(MachineInstr &MI,
1169                                            const MemAddress &Addr) {
1170   MachineBasicBlock *MBB = MI.getParent();
1171   MachineBasicBlock::iterator MBBI = MI.getIterator();
1172   DebugLoc DL = MI.getDebugLoc();
1173 
1174   assert((TRI->getRegSizeInBits(Addr.Base.LoReg, *MRI) == 32 ||
1175           Addr.Base.LoSubReg) &&
1176          "Expected 32-bit Base-Register-Low!!");
1177 
1178   assert((TRI->getRegSizeInBits(Addr.Base.HiReg, *MRI) == 32 ||
1179           Addr.Base.HiSubReg) &&
1180          "Expected 32-bit Base-Register-Hi!!");
1181 
1182   LLVM_DEBUG(dbgs() << "  Re-Computed Anchor-Base:\n");
1183   MachineOperand OffsetLo = createRegOrImm(static_cast<int32_t>(Addr.Offset), MI);
1184   MachineOperand OffsetHi =
1185     createRegOrImm(static_cast<int32_t>(Addr.Offset >> 32), MI);
1186 
1187   const auto *CarryRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
1188   unsigned CarryReg = MRI->createVirtualRegister(CarryRC);
1189   unsigned DeadCarryReg = MRI->createVirtualRegister(CarryRC);
1190 
1191   unsigned DestSub0 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1192   unsigned DestSub1 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
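  // Add the low and high halves separately, feeding the carry-out of the low
  // add into the high add, then reassemble the 64-bit result with a
  // REG_SEQUENCE.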
1193   MachineInstr *LoHalf =
1194     BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADD_I32_e64), DestSub0)
1195       .addReg(CarryReg, RegState::Define)
1196       .addReg(Addr.Base.LoReg, 0, Addr.Base.LoSubReg)
1197       .add(OffsetLo)
1198       .addImm(0); // clamp bit
1199   (void)LoHalf;
1200   LLVM_DEBUG(dbgs() << "    "; LoHalf->dump(););
1201 
1202   MachineInstr *HiHalf =
1203   BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADDC_U32_e64), DestSub1)
1204     .addReg(DeadCarryReg, RegState::Define | RegState::Dead)
1205     .addReg(Addr.Base.HiReg, 0, Addr.Base.HiSubReg)
1206     .add(OffsetHi)
1207     .addReg(CarryReg, RegState::Kill)
1208     .addImm(0); // clamp bit
1209   (void)HiHalf;
1210   LLVM_DEBUG(dbgs() << "    "; HiHalf->dump(););
1211 
1212   unsigned FullDestReg = MRI->createVirtualRegister(&AMDGPU::VReg_64RegClass);
1213   MachineInstr *FullBase =
1214     BuildMI(*MBB, MBBI, DL, TII->get(TargetOpcode::REG_SEQUENCE), FullDestReg)
1215       .addReg(DestSub0)
1216       .addImm(AMDGPU::sub0)
1217       .addReg(DestSub1)
1218       .addImm(AMDGPU::sub1);
1219   (void)FullBase;
1220   LLVM_DEBUG(dbgs() << "    "; FullBase->dump(); dbgs() << "\n";);
1221 
1222   return FullDestReg;
1223 }
1224 
1225 // Update base and offset with the NewBase and NewOffset in MI.
1226 void SILoadStoreOptimizer::updateBaseAndOffset(MachineInstr &MI,
1227                                                unsigned NewBase,
1228                                                int32_t NewOffset) {
1229   TII->getNamedOperand(MI, AMDGPU::OpName::vaddr)->setReg(NewBase);
1230   TII->getNamedOperand(MI, AMDGPU::OpName::offset)->setImm(NewOffset);
1231 }
1232 
1233 Optional<int32_t>
1234 SILoadStoreOptimizer::extractConstOffset(const MachineOperand &Op) {
1235   if (Op.isImm())
1236     return Op.getImm();
1237 
1238   if (!Op.isReg())
1239     return None;
1240 
1241   MachineInstr *Def = MRI->getUniqueVRegDef(Op.getReg());
1242   if (!Def || Def->getOpcode() != AMDGPU::S_MOV_B32 ||
1243       !Def->getOperand(1).isImm())
1244     return None;
1245 
1246   return Def->getOperand(1).getImm();
1247 }
1248 
// Analyze Base and extract:
//  - 32-bit base registers and subregisters
//  - 64-bit constant offset
// Expecting the base computation to look like:
1253 //   %OFFSET0:sgpr_32 = S_MOV_B32 8000
1254 //   %LO:vgpr_32, %c:sreg_64_xexec =
1255 //       V_ADD_I32_e64 %BASE_LO:vgpr_32, %103:sgpr_32,
1256 //   %HI:vgpr_32, = V_ADDC_U32_e64 %BASE_HI:vgpr_32, 0, killed %c:sreg_64_xexec
1257 //   %Base:vreg_64 =
1258 //       REG_SEQUENCE %LO:vgpr_32, %subreg.sub0, %HI:vgpr_32, %subreg.sub1
1259 void SILoadStoreOptimizer::processBaseWithConstOffset(const MachineOperand &Base,
1260                                                       MemAddress &Addr) {
1261   if (!Base.isReg())
1262     return;
1263 
1264   MachineInstr *Def = MRI->getUniqueVRegDef(Base.getReg());
1265   if (!Def || Def->getOpcode() != AMDGPU::REG_SEQUENCE
1266       || Def->getNumOperands() != 5)
1267     return;
1268 
1269   MachineOperand BaseLo = Def->getOperand(1);
1270   MachineOperand BaseHi = Def->getOperand(3);
1271   if (!BaseLo.isReg() || !BaseHi.isReg())
1272     return;
1273 
1274   MachineInstr *BaseLoDef = MRI->getUniqueVRegDef(BaseLo.getReg());
1275   MachineInstr *BaseHiDef = MRI->getUniqueVRegDef(BaseHi.getReg());
1276 
1277   if (!BaseLoDef || BaseLoDef->getOpcode() != AMDGPU::V_ADD_I32_e64 ||
1278       !BaseHiDef || BaseHiDef->getOpcode() != AMDGPU::V_ADDC_U32_e64)
1279     return;
1280 
1281   const auto *Src0 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src0);
1282   const auto *Src1 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src1);
1283 
1284   auto Offset0P = extractConstOffset(*Src0);
1285   if (Offset0P)
1286     BaseLo = *Src1;
1287   else {
1288     if (!(Offset0P = extractConstOffset(*Src1)))
1289       return;
1290     BaseLo = *Src0;
1291   }
1292 
1293   Src0 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src0);
1294   Src1 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src1);
1295 
1296   if (Src0->isImm())
1297     std::swap(Src0, Src1);
1298 
1299   if (!Src1->isImm())
1300     return;
1301 
1302   uint64_t Offset1 = Src1->getImm();
1303   BaseHi = *Src0;
1304 
1305   Addr.Base.LoReg = BaseLo.getReg();
1306   Addr.Base.HiReg = BaseHi.getReg();
1307   Addr.Base.LoSubReg = BaseLo.getSubReg();
1308   Addr.Base.HiSubReg = BaseHi.getSubReg();
1309   Addr.Offset = (*Offset0P & 0x00000000ffffffff) | (Offset1 << 32);
1310 }
1311 
1312 bool SILoadStoreOptimizer::promoteConstantOffsetToImm(
1313     MachineInstr &MI,
1314     MemInfoMap &Visited,
1315     SmallPtrSet<MachineInstr *, 4> &AnchorList) {
1316 
1317   // TODO: Support flat and scratch.
1318   if (AMDGPU::getGlobalSaddrOp(MI.getOpcode()) < 0 ||
1319       TII->getNamedOperand(MI, AMDGPU::OpName::vdata) != NULL)
1320     return false;
1321 
1322   // TODO: Support Store.
1323   if (!MI.mayLoad())
1324     return false;
1325 
1326   if (AnchorList.count(&MI))
1327     return false;
1328 
1329   LLVM_DEBUG(dbgs() << "\nTryToPromoteConstantOffsetToImmFor "; MI.dump());
1330 
1331   if (TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm()) {
1332     LLVM_DEBUG(dbgs() << "  Const-offset is already promoted.\n";);
1333     return false;
1334   }
1335 
  // Step 1: Find the base registers and a 64-bit constant offset.
1337   MachineOperand &Base = *TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
1338   MemAddress MAddr;
1339   if (Visited.find(&MI) == Visited.end()) {
1340     processBaseWithConstOffset(Base, MAddr);
1341     Visited[&MI] = MAddr;
1342   } else
1343     MAddr = Visited[&MI];
1344 
1345   if (MAddr.Offset == 0) {
1346     LLVM_DEBUG(dbgs() << "  Failed to extract constant-offset or there are no"
1347                          " constant offsets that can be promoted.\n";);
1348     return false;
1349   }
1350 
1351   LLVM_DEBUG(dbgs() << "  BASE: {" << MAddr.Base.HiReg << ", "
1352              << MAddr.Base.LoReg << "} Offset: " << MAddr.Offset << "\n\n";);
1353 
  // Step 2: Traverse MI's basic block and find an anchor (an instruction with
  // the same base registers) with the highest 13-bit distance from MI's offset.
  // E.g. (64-bit loads)
1357   // bb:
1358   //   addr1 = &a + 4096;   load1 = load(addr1,  0)
1359   //   addr2 = &a + 6144;   load2 = load(addr2,  0)
1360   //   addr3 = &a + 8192;   load3 = load(addr3,  0)
1361   //   addr4 = &a + 10240;  load4 = load(addr4,  0)
1362   //   addr5 = &a + 12288;  load5 = load(addr5,  0)
1363   //
  // Starting from the first load, the optimization will try to find a new base
  // from which (&a + 4096) has a 13-bit distance. Both &a + 6144 and &a + 8192
  // have a 13-bit distance from &a + 4096. The heuristic picks &a + 8192 as the
  // new base (anchor) because the maximum distance can presumably accommodate
  // more intermediate bases.
1369   //
  // Step 3: Move (&a + 8192) above load1. Compute and promote offsets from
1371   // (&a + 8192) for load1, load2, load4.
1372   //   addr = &a + 8192
1373   //   load1 = load(addr,       -4096)
1374   //   load2 = load(addr,       -2048)
1375   //   load3 = load(addr,       0)
1376   //   load4 = load(addr,       2048)
1377   //   addr5 = &a + 12288;  load5 = load(addr5,  0)
1378   //
1379   MachineInstr *AnchorInst = nullptr;
1380   MemAddress AnchorAddr;
1381   uint32_t MaxDist = std::numeric_limits<uint32_t>::min();
1382   SmallVector<std::pair<MachineInstr *, int64_t>, 4> InstsWCommonBase;
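  // Loads sharing MI's base registers, paired with their constant offsets;
  // those with a legal offset from the chosen anchor are rewritten below.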
1383 
1384   MachineBasicBlock *MBB = MI.getParent();
1385   MachineBasicBlock::iterator E = MBB->end();
1386   MachineBasicBlock::iterator MBBI = MI.getIterator();
1387   ++MBBI;
1388   const SITargetLowering *TLI =
1389     static_cast<const SITargetLowering *>(STM->getTargetLowering());
1390 
1391   for ( ; MBBI != E; ++MBBI) {
1392     MachineInstr &MINext = *MBBI;
    // TODO: Support finding an anchor (with the same base) from store
    // addresses or any other load addresses where the opcodes are different.
1395     if (MINext.getOpcode() != MI.getOpcode() ||
1396         TII->getNamedOperand(MINext, AMDGPU::OpName::offset)->getImm())
1397       continue;
1398 
1399     const MachineOperand &BaseNext =
1400       *TII->getNamedOperand(MINext, AMDGPU::OpName::vaddr);
1401     MemAddress MAddrNext;
1402     if (Visited.find(&MINext) == Visited.end()) {
1403       processBaseWithConstOffset(BaseNext, MAddrNext);
1404       Visited[&MINext] = MAddrNext;
1405     } else
1406       MAddrNext = Visited[&MINext];
1407 
1408     if (MAddrNext.Base.LoReg != MAddr.Base.LoReg ||
1409         MAddrNext.Base.HiReg != MAddr.Base.HiReg ||
1410         MAddrNext.Base.LoSubReg != MAddr.Base.LoSubReg ||
1411         MAddrNext.Base.HiSubReg != MAddr.Base.HiSubReg)
1412       continue;
1413 
1414     InstsWCommonBase.push_back(std::make_pair(&MINext, MAddrNext.Offset));
1415 
1416     int64_t Dist = MAddr.Offset - MAddrNext.Offset;
1417     TargetLoweringBase::AddrMode AM;
1418     AM.HasBaseReg = true;
1419     AM.BaseOffs = Dist;
1420     if (TLI->isLegalGlobalAddressingMode(AM) &&
1421         (uint32_t)std::abs(Dist) > MaxDist) {
1422       MaxDist = std::abs(Dist);
1423 
1424       AnchorAddr = MAddrNext;
1425       AnchorInst = &MINext;
1426     }
1427   }
1428 
1429   if (AnchorInst) {
1430     LLVM_DEBUG(dbgs() << "  Anchor-Inst(with max-distance from Offset): ";
1431                AnchorInst->dump());
1432     LLVM_DEBUG(dbgs() << "  Anchor-Offset from BASE: "
1433                <<  AnchorAddr.Offset << "\n\n");
1434 
    // Instead of moving up, just re-compute the anchor instruction's base
    // address.
1436     unsigned Base = computeBase(MI, AnchorAddr);
1437 
1438     updateBaseAndOffset(MI, Base, MAddr.Offset - AnchorAddr.Offset);
1439     LLVM_DEBUG(dbgs() << "  After promotion: "; MI.dump(););
1440 
1441     for (auto P : InstsWCommonBase) {
1442       TargetLoweringBase::AddrMode AM;
1443       AM.HasBaseReg = true;
1444       AM.BaseOffs = P.second - AnchorAddr.Offset;
1445 
1446       if (TLI->isLegalGlobalAddressingMode(AM)) {
1447         LLVM_DEBUG(dbgs() << "  Promote Offset(" << P.second;
1448                    dbgs() << ")"; P.first->dump());
1449         updateBaseAndOffset(*P.first, Base, P.second - AnchorAddr.Offset);
1450         LLVM_DEBUG(dbgs() << "     After promotion: "; P.first->dump());
1451       }
1452     }
1453     AnchorList.insert(AnchorInst);
1454     return true;
1455   }
1456 
1457   return false;
1458 }
1459 
1460 // Scan through looking for adjacent LDS operations with constant offsets from
1461 // the same base register. We rely on the scheduler to do the hard work of
1462 // clustering nearby loads, and assume these are all adjacent.
1463 bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
1464   bool Modified = false;
1465 
  // Caches the base registers and constant offset computed for each
  // instruction visited by promoteConstantOffsetToImm.
1467   MemInfoMap Visited;
1468   // Contains the list of instructions for which constant offsets are being
1469   // promoted to the IMM.
1470   SmallPtrSet<MachineInstr *, 4> AnchorList;
1471 
1472   for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
1473     MachineInstr &MI = *I;
1474 
1475     if (promoteConstantOffsetToImm(MI, Visited, AnchorList))
1476       Modified = true;
1477 
1478     // Don't combine if volatile.
1479     if (MI.hasOrderedMemoryRef()) {
1480       ++I;
1481       continue;
1482     }
1483 
1484     const unsigned Opc = MI.getOpcode();
1485 
1486     CombineInfo CI;
1487     CI.I = I;
1488     CI.InstClass = getInstClass(Opc);
1489 
1490     switch (CI.InstClass) {
1491     default:
1492       break;
1493     case DS_READ:
1494       CI.EltSize =
1495           (Opc == AMDGPU::DS_READ_B64 || Opc == AMDGPU::DS_READ_B64_gfx9) ? 8
1496                                                                           : 4;
1497       if (findMatchingInst(CI)) {
1498         Modified = true;
1499         I = mergeRead2Pair(CI);
1500       } else {
1501         ++I;
1502       }
1503       continue;
1504     case DS_WRITE:
1505       CI.EltSize =
1506           (Opc == AMDGPU::DS_WRITE_B64 || Opc == AMDGPU::DS_WRITE_B64_gfx9) ? 8
1507                                                                             : 4;
1508       if (findMatchingInst(CI)) {
1509         Modified = true;
1510         I = mergeWrite2Pair(CI);
1511       } else {
1512         ++I;
1513       }
1514       continue;
1515     case S_BUFFER_LOAD_IMM:
1516       CI.EltSize = AMDGPU::getSMRDEncodedOffset(*STM, 4);
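      // The SMRD immediate offset is dword-scaled on SI/CI and byte-scaled on
      // VI and newer, so use the encoded size of a single dword as the element
      // size.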
1517       if (findMatchingInst(CI)) {
1518         Modified = true;
1519         I = mergeSBufferLoadImmPair(CI);
1520         OptimizeAgain |= (CI.Width0 + CI.Width1) < 16;
1521       } else {
1522         ++I;
1523       }
1524       continue;
1525     case BUFFER_LOAD_OFFEN:
1526     case BUFFER_LOAD_OFFSET:
1527     case BUFFER_LOAD_OFFEN_exact:
1528     case BUFFER_LOAD_OFFSET_exact:
1529       CI.EltSize = 4;
1530       if (findMatchingInst(CI)) {
1531         Modified = true;
1532         I = mergeBufferLoadPair(CI);
1533         OptimizeAgain |= (CI.Width0 + CI.Width1) < 4;
1534       } else {
1535         ++I;
1536       }
1537       continue;
1538     case BUFFER_STORE_OFFEN:
1539     case BUFFER_STORE_OFFSET:
1540     case BUFFER_STORE_OFFEN_exact:
1541     case BUFFER_STORE_OFFSET_exact:
1542       CI.EltSize = 4;
1543       if (findMatchingInst(CI)) {
1544         Modified = true;
1545         I = mergeBufferStorePair(CI);
1546         OptimizeAgain |= (CI.Width0 + CI.Width1) < 4;
1547       } else {
1548         ++I;
1549       }
1550       continue;
1551     }
1552 
1553     ++I;
1554   }
1555 
1556   return Modified;
1557 }
1558 
1559 bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
1560   if (skipFunction(MF.getFunction()))
1561     return false;
1562 
1563   STM = &MF.getSubtarget<GCNSubtarget>();
1564   if (!STM->loadStoreOptEnabled())
1565     return false;
1566 
1567   TII = STM->getInstrInfo();
1568   TRI = &TII->getRegisterInfo();
1569 
1570   MRI = &MF.getRegInfo();
1571   AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
1572 
1573   assert(MRI->isSSA() && "Must be run on SSA");
1574 
1575   LLVM_DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");
1576 
1577   bool Modified = false;
1578 
1579   for (MachineBasicBlock &MBB : MF) {
1580     do {
1581       OptimizeAgain = false;
1582       Modified |= optimizeBlock(MBB);
1583     } while (OptimizeAgain);
1584   }
1585 
1586   return Modified;
1587 }
1588