//===- SILoadStoreOptimizer.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass tries to fuse DS instructions with nearby immediate offsets.
// This will fuse operations such as
//  ds_read_b32 v0, v2 offset:16
//  ds_read_b32 v1, v2 offset:32
// ==>
//   ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
//
// The same is done for certain SMEM and VMEM opcodes, e.g.:
//  s_buffer_load_dword s4, s[0:3], 4
//  s_buffer_load_dword s5, s[0:3], 8
// ==>
//  s_buffer_load_dwordx2 s[4:5], s[0:3], 4
//
// This pass also tries to promote constant offsets to the immediate by
// adjusting the base. It tries to use a base from a nearby instruction that
// allows it to have a 13-bit constant offset and then promotes that 13-bit
// offset to the immediate.
// E.g.
//  s_movk_i32 s0, 0x1800
//  v_add_co_u32_e32 v0, vcc, s0, v2
//  v_addc_co_u32_e32 v1, vcc, 0, v6, vcc
//
//  s_movk_i32 s0, 0x1000
//  v_add_co_u32_e32 v5, vcc, s0, v2
//  v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
//  global_load_dwordx2 v[5:6], v[5:6], off
//  global_load_dwordx2 v[0:1], v[0:1], off
// =>
//  s_movk_i32 s0, 0x1000
//  v_add_co_u32_e32 v5, vcc, s0, v2
//  v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
//  global_load_dwordx2 v[5:6], v[5:6], off
//  global_load_dwordx2 v[0:1], v[5:6], off offset:2048
//
// Future improvements:
//
// - This currently misses stores of constants because loading the constant
//   into the data register is placed between the stores, although this is
//   arguably a scheduling problem.
//
// - Live interval recomputing seems inefficient. This currently only matches
//   one pair, recomputes live intervals, and moves on to the next pair. It
//   would be better to compute a list of all merges that need to occur.
//
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads has offsets that are too large to fit in the 8-bit
//   offset field but are close enough together, we can add to the base
//   pointer and use the new, smaller offsets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iterator>
#include <list>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-load-store-opt"

namespace {
enum InstClassEnum {
  UNKNOWN,
  DS_READ,
  DS_WRITE,
  S_BUFFER_LOAD_IMM,
  BUFFER_LOAD,
  BUFFER_STORE,
  MIMG,
};

enum RegisterEnum {
  SBASE = 0x1,
  SRSRC = 0x2,
  SOFFSET = 0x4,
  VADDR = 0x8,
  ADDR = 0x10,
  SSAMP = 0x20,
};

class SILoadStoreOptimizer : public MachineFunctionPass {
  struct CombineInfo {
    MachineBasicBlock::iterator I;
    MachineBasicBlock::iterator Paired;
    unsigned EltSize;
    unsigned Offset0;
    unsigned Offset1;
    unsigned Width0;
    unsigned Width1;
    unsigned BaseOff;
    unsigned DMask0;
    unsigned DMask1;
    InstClassEnum InstClass;
    bool GLC0;
    bool GLC1;
    bool SLC0;
    bool SLC1;
    bool DLC0;
    bool DLC1;
    bool UseST64;
    SmallVector<MachineInstr *, 8> InstsToMove;
    int AddrIdx[5];
    const MachineOperand *AddrReg[5];
    unsigned NumAddresses;

    bool hasSameBaseAddress(const MachineInstr &MI) {
      for (unsigned i = 0; i < NumAddresses; i++) {
        const MachineOperand &AddrRegNext = MI.getOperand(AddrIdx[i]);

        if (AddrReg[i]->isImm() || AddrRegNext.isImm()) {
          if (AddrReg[i]->isImm() != AddrRegNext.isImm() ||
              AddrReg[i]->getImm() != AddrRegNext.getImm()) {
            return false;
          }
          continue;
        }

        // Check same base pointer. Be careful of subregisters, which can occur
        // with vectors of pointers.
        if (AddrReg[i]->getReg() != AddrRegNext.getReg() ||
            AddrReg[i]->getSubReg() != AddrRegNext.getSubReg()) {
          return false;
        }
      }
      return true;
    }

    bool hasMergeableAddress(const MachineRegisterInfo &MRI) {
      for (unsigned i = 0; i < NumAddresses; ++i) {
        const MachineOperand *AddrOp = AddrReg[i];
        // Immediates are always OK.
        if (AddrOp->isImm())
          continue;

        // Don't try to merge addresses that aren't either immediates or
        // registers.
        // TODO: Should be possible to merge FrameIndexes and maybe some other
        // non-register operands.
        if (!AddrOp->isReg())
          return false;

        // TODO: We should be able to merge physical reg addresses.
        if (Register::isPhysicalRegister(AddrOp->getReg()))
          return false;

        // If an address has only one use then there will be no other
        // instructions with the same address, so we can't merge this one.
        if (MRI.hasOneNonDBGUse(AddrOp->getReg()))
          return false;
      }
      return true;
    }

    void setMI(MachineBasicBlock::iterator MI, const SIInstrInfo &TII,
               const GCNSubtarget &STM);
    void setPaired(MachineBasicBlock::iterator MI, const SIInstrInfo &TII);
  };

  struct BaseRegisters {
    unsigned LoReg = 0;
    unsigned HiReg = 0;

    unsigned LoSubReg = 0;
    unsigned HiSubReg = 0;
  };

  struct MemAddress {
    BaseRegisters Base;
    int64_t Offset = 0;
  };

  using MemInfoMap = DenseMap<MachineInstr *, MemAddress>;

private:
  const GCNSubtarget *STM = nullptr;
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  AliasAnalysis *AA = nullptr;
  bool OptimizeAgain;

  static bool dmasksCanBeCombined(const CombineInfo &CI,
                                  const SIInstrInfo &TII);
  static bool offsetsCanBeCombined(CombineInfo &CI);
  static bool widthsFit(const GCNSubtarget &STM, const CombineInfo &CI);
  static unsigned getNewOpcode(const CombineInfo &CI);
  static std::pair<unsigned, unsigned> getSubRegIdxs(const CombineInfo &CI);
  const TargetRegisterClass *getTargetRegisterClass(const CombineInfo &CI);

  bool findMatchingInst(CombineInfo &CI);

  unsigned read2Opcode(unsigned EltSize) const;
  unsigned read2ST64Opcode(unsigned EltSize) const;
  MachineBasicBlock::iterator mergeRead2Pair(CombineInfo &CI);

  unsigned write2Opcode(unsigned EltSize) const;
  unsigned write2ST64Opcode(unsigned EltSize) const;
  MachineBasicBlock::iterator mergeWrite2Pair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeImagePair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeSBufferLoadImmPair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeBufferLoadPair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeBufferStorePair(CombineInfo &CI);

  void updateBaseAndOffset(MachineInstr &I, unsigned NewBase,
                           int32_t NewOffset) const;
  unsigned computeBase(MachineInstr &MI, const MemAddress &Addr) const;
  MachineOperand createRegOrImm(int32_t Val, MachineInstr &MI) const;
  Optional<int32_t> extractConstOffset(const MachineOperand &Op) const;
  void processBaseWithConstOffset(const MachineOperand &Base,
                                  MemAddress &Addr) const;
  /// Promotes constant offset to the immediate by adjusting the base. It
  /// tries to use a base from the nearby instructions that allows it to have
  /// a 13-bit constant offset which gets promoted to the immediate.
  bool promoteConstantOffsetToImm(MachineInstr &MI,
                                  MemInfoMap &Visited,
                                  SmallPtrSet<MachineInstr *, 4> &AnchorList) const;
  void addInstToMergeableList(const CombineInfo &CI,
                  std::list<std::list<CombineInfo> > &MergeableInsts) const;
  bool collectMergeableInsts(MachineBasicBlock &MBB,
                  std::list<std::list<CombineInfo> > &MergeableInsts) const;

public:
  static char ID;

  SILoadStoreOptimizer() : MachineFunctionPass(ID) {
    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
  }

  void removeCombinedInst(std::list<CombineInfo> &MergeList,
                          const MachineInstr &MI);
  bool optimizeInstsWithSameBaseAddr(std::list<CombineInfo> &MergeList,
                                     bool &OptimizeListAgain);
  bool optimizeBlock(std::list<std::list<CombineInfo> > &MergeableInsts);

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Load Store Optimizer"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();

    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

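// Returns the number of dwords accessed by \p MI: the dmask population count
// for MIMG, the MUBUF element count for buffer instructions, and the DWORDX
// width for S_BUFFER loads (0 for anything unrecognized).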
static unsigned getOpcodeWidth(const MachineInstr &MI, const SIInstrInfo &TII) {
  const unsigned Opc = MI.getOpcode();

  if (TII.isMUBUF(Opc)) {
    // FIXME: Handle d16 correctly
    return AMDGPU::getMUBUFElements(Opc);
  }
  if (TII.isMIMG(MI)) {
    uint64_t DMaskImm =
        TII.getNamedOperand(MI, AMDGPU::OpName::dmask)->getImm();
    return countPopulation(DMaskImm);
  }

  switch (Opc) {
  case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
    return 1;
  case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
    return 2;
  case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
    return 4;
  default:
    return 0;
  }
}

/// Maps instruction opcode to enum InstClassEnum.
static InstClassEnum getInstClass(unsigned Opc, const SIInstrInfo &TII) {
  switch (Opc) {
  default:
    if (TII.isMUBUF(Opc)) {
      switch (AMDGPU::getMUBUFBaseOpcode(Opc)) {
      default:
        return UNKNOWN;
      case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
      case AMDGPU::BUFFER_LOAD_DWORD_OFFEN_exact:
      case AMDGPU::BUFFER_LOAD_DWORD_OFFSET:
      case AMDGPU::BUFFER_LOAD_DWORD_OFFSET_exact:
        return BUFFER_LOAD;
      case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
      case AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact:
      case AMDGPU::BUFFER_STORE_DWORD_OFFSET:
      case AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact:
        return BUFFER_STORE;
      }
    }
    if (TII.isMIMG(Opc)) {
      // Ignore instructions encoded without vaddr.
      if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr) == -1)
        return UNKNOWN;
      // TODO: Support IMAGE_GET_RESINFO and IMAGE_GET_LOD.
      if (TII.get(Opc).mayStore() || !TII.get(Opc).mayLoad() ||
          TII.isGather4(Opc))
        return UNKNOWN;
      return MIMG;
    }
    return UNKNOWN;
  case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
    return S_BUFFER_LOAD_IMM;
  case AMDGPU::DS_READ_B32:
  case AMDGPU::DS_READ_B32_gfx9:
  case AMDGPU::DS_READ_B64:
  case AMDGPU::DS_READ_B64_gfx9:
    return DS_READ;
  case AMDGPU::DS_WRITE_B32:
  case AMDGPU::DS_WRITE_B32_gfx9:
  case AMDGPU::DS_WRITE_B64:
  case AMDGPU::DS_WRITE_B64_gfx9:
    return DS_WRITE;
  }
}

/// Determines instruction subclass from opcode. Only instructions
/// of the same subclass can be merged together.
static unsigned getInstSubclass(unsigned Opc, const SIInstrInfo &TII) {
  switch (Opc) {
  default:
    if (TII.isMUBUF(Opc))
      return AMDGPU::getMUBUFBaseOpcode(Opc);
    if (TII.isMIMG(Opc)) {
      const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opc);
      assert(Info);
      return Info->BaseOpcode;
    }
    return -1;
  case AMDGPU::DS_READ_B32:
  case AMDGPU::DS_READ_B32_gfx9:
  case AMDGPU::DS_READ_B64:
  case AMDGPU::DS_READ_B64_gfx9:
  case AMDGPU::DS_WRITE_B32:
  case AMDGPU::DS_WRITE_B32_gfx9:
  case AMDGPU::DS_WRITE_B64:
  case AMDGPU::DS_WRITE_B64_gfx9:
    return Opc;
  case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
    return AMDGPU::S_BUFFER_LOAD_DWORD_IMM;
  }
}

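// Returns a bitmask (RegisterEnum) of the address operands present on \p Opc,
// e.g. VADDR | SRSRC | SOFFSET for a MUBUF opcode that has all three.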
static unsigned getRegs(unsigned Opc, const SIInstrInfo &TII) {
  if (TII.isMUBUF(Opc)) {
    unsigned result = 0;

    if (AMDGPU::getMUBUFHasVAddr(Opc)) {
      result |= VADDR;
    }

    if (AMDGPU::getMUBUFHasSrsrc(Opc)) {
      result |= SRSRC;
    }

    if (AMDGPU::getMUBUFHasSoffset(Opc)) {
      result |= SOFFSET;
    }

    return result;
  }

  if (TII.isMIMG(Opc)) {
    unsigned result = VADDR | SRSRC;
    const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opc);
    if (Info && AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode)->Sampler)
      result |= SSAMP;
    return result;
  }

  switch (Opc) {
  default:
    return 0;
  case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
    return SBASE;
  case AMDGPU::DS_READ_B32:
  case AMDGPU::DS_READ_B64:
  case AMDGPU::DS_READ_B32_gfx9:
  case AMDGPU::DS_READ_B64_gfx9:
  case AMDGPU::DS_WRITE_B32:
  case AMDGPU::DS_WRITE_B64:
  case AMDGPU::DS_WRITE_B32_gfx9:
  case AMDGPU::DS_WRITE_B64_gfx9:
    return ADDR;
  }
}

void SILoadStoreOptimizer::CombineInfo::setMI(MachineBasicBlock::iterator MI,
                                              const SIInstrInfo &TII,
                                              const GCNSubtarget &STM) {
  I = MI;
  unsigned Opc = MI->getOpcode();
  InstClass = getInstClass(Opc, TII);

  if (InstClass == UNKNOWN)
    return;

  switch (InstClass) {
  case DS_READ:
    EltSize =
        (Opc == AMDGPU::DS_READ_B64 || Opc == AMDGPU::DS_READ_B64_gfx9) ? 8
                                                                        : 4;
    break;
  case DS_WRITE:
    EltSize =
        (Opc == AMDGPU::DS_WRITE_B64 || Opc == AMDGPU::DS_WRITE_B64_gfx9) ? 8
                                                                          : 4;
    break;
  case S_BUFFER_LOAD_IMM:
    EltSize = AMDGPU::getSMRDEncodedOffset(STM, 4);
    break;
  default:
    EltSize = 4;
    break;
  }

  if (InstClass == MIMG) {
    DMask0 = TII.getNamedOperand(*I, AMDGPU::OpName::dmask)->getImm();
  } else {
    int OffsetIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::offset);
    Offset0 = I->getOperand(OffsetIdx).getImm();
  }

  Width0 = getOpcodeWidth(*I, TII);

  if ((InstClass == DS_READ) || (InstClass == DS_WRITE)) {
    Offset0 &= 0xffff;
  } else if (InstClass != MIMG) {
    GLC0 = TII.getNamedOperand(*I, AMDGPU::OpName::glc)->getImm();
    if (InstClass != S_BUFFER_LOAD_IMM) {
      SLC0 = TII.getNamedOperand(*I, AMDGPU::OpName::slc)->getImm();
    }
    DLC0 = TII.getNamedOperand(*I, AMDGPU::OpName::dlc)->getImm();
  }

  unsigned AddrOpName[5] = {0};
  NumAddresses = 0;
  const unsigned Regs = getRegs(I->getOpcode(), TII);

  if (Regs & ADDR) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::addr;
  }

  if (Regs & SBASE) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::sbase;
  }

  if (Regs & SRSRC) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::srsrc;
  }

  if (Regs & SOFFSET) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::soffset;
  }

  if (Regs & VADDR) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::vaddr;
  }

  if (Regs & SSAMP) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::ssamp;
  }

  for (unsigned i = 0; i < NumAddresses; i++) {
    AddrIdx[i] = AMDGPU::getNamedOperandIdx(I->getOpcode(), AddrOpName[i]);
    AddrReg[i] = &I->getOperand(AddrIdx[i]);
  }

  InstsToMove.clear();
}

void SILoadStoreOptimizer::CombineInfo::setPaired(
    MachineBasicBlock::iterator MI, const SIInstrInfo &TII) {
  Paired = MI;
  assert(InstClass == getInstClass(Paired->getOpcode(), TII));

  if (InstClass == MIMG) {
    DMask1 = TII.getNamedOperand(*Paired, AMDGPU::OpName::dmask)->getImm();
  } else {
    int OffsetIdx =
        AMDGPU::getNamedOperandIdx(I->getOpcode(), AMDGPU::OpName::offset);
    Offset1 = Paired->getOperand(OffsetIdx).getImm();
  }

  Width1 = getOpcodeWidth(*Paired, TII);
  if ((InstClass == DS_READ) || (InstClass == DS_WRITE)) {
    Offset1 &= 0xffff;
  } else if (InstClass != MIMG) {
    GLC1 = TII.getNamedOperand(*Paired, AMDGPU::OpName::glc)->getImm();
    if (InstClass != S_BUFFER_LOAD_IMM) {
      SLC1 = TII.getNamedOperand(*Paired, AMDGPU::OpName::slc)->getImm();
    }
    DLC1 = TII.getNamedOperand(*Paired, AMDGPU::OpName::dlc)->getImm();
  }
}

} // end anonymous namespace.

INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
                      "SI Load Store Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE, "SI Load Store Optimizer",
                    false, false)

char SILoadStoreOptimizer::ID = 0;

char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;

FunctionPass *llvm::createSILoadStoreOptimizerPass() {
  return new SILoadStoreOptimizer();
}

static void moveInstsAfter(MachineBasicBlock::iterator I,
                           ArrayRef<MachineInstr *> InstsToMove) {
  MachineBasicBlock *MBB = I->getParent();
  ++I;
  for (MachineInstr *MI : InstsToMove) {
    MI->removeFromParent();
    MBB->insert(I, MI);
  }
}

static void addDefsUsesToList(const MachineInstr &MI,
                              DenseSet<unsigned> &RegDefs,
                              DenseSet<unsigned> &PhysRegUses) {
  for (const MachineOperand &Op : MI.operands()) {
    if (Op.isReg()) {
      if (Op.isDef())
        RegDefs.insert(Op.getReg());
      else if (Op.readsReg() && Register::isPhysicalRegister(Op.getReg()))
        PhysRegUses.insert(Op.getReg());
    }
  }
}

static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
                                      MachineBasicBlock::iterator B,
                                      AliasAnalysis *AA) {
  // RAW or WAR - cannot reorder
  // WAW - cannot reorder
  // RAR - safe to reorder
  return !(A->mayStore() || B->mayStore()) || !A->mayAlias(AA, *B, true);
}

// Add MI and its defs to the lists if MI reads one of the defs that are
// already in the list. Returns true in that case.
static bool addToListsIfDependent(MachineInstr &MI, DenseSet<unsigned> &RegDefs,
                                  DenseSet<unsigned> &PhysRegUses,
                                  SmallVectorImpl<MachineInstr *> &Insts) {
  for (MachineOperand &Use : MI.operands()) {
    // If one of the defs is read, then there is a use of Def between I and the
    // instruction that I will potentially be merged with. We will need to move
    // this instruction after the merged instructions.
    //
    // Similarly, if there is a def which is read by an instruction that is to
    // be moved for merging, then we need to move the def-instruction as well.
    // This can only happen for physical registers such as M0; virtual
    // registers are in SSA form.
    if (Use.isReg() &&
        ((Use.readsReg() && RegDefs.count(Use.getReg())) ||
         (Use.isDef() && RegDefs.count(Use.getReg())) ||
         (Use.isDef() && Register::isPhysicalRegister(Use.getReg()) &&
          PhysRegUses.count(Use.getReg())))) {
      Insts.push_back(&MI);
      addDefsUsesToList(MI, RegDefs, PhysRegUses);
      return true;
    }
  }

  return false;
}

static bool canMoveInstsAcrossMemOp(MachineInstr &MemOp,
                                    ArrayRef<MachineInstr *> InstsToMove,
                                    AliasAnalysis *AA) {
  assert(MemOp.mayLoadOrStore());

  for (MachineInstr *InstToMove : InstsToMove) {
    if (!InstToMove->mayLoadOrStore())
      continue;
    if (!memAccessesCanBeReordered(MemOp, *InstToMove, AA))
      return false;
  }
  return true;
}

// This function assumes that \p A and \p B are identical except for size and
// offset, and that they reference adjacent memory.
static MachineMemOperand *combineKnownAdjacentMMOs(MachineFunction &MF,
                                                   const MachineMemOperand *A,
                                                   const MachineMemOperand *B) {
  unsigned MinOffset = std::min(A->getOffset(), B->getOffset());
  unsigned Size = A->getSize() + B->getSize();
  // This function adds the offset parameter to the existing offset for A,
  // so we pass 0 here as the offset and then manually set it to the correct
  // value after the call.
  MachineMemOperand *MMO = MF.getMachineMemOperand(A, 0, Size);
  MMO->setOffset(MinOffset);
  return MMO;
}

bool SILoadStoreOptimizer::dmasksCanBeCombined(const CombineInfo &CI,
                                               const SIInstrInfo &TII) {
  assert(CI.InstClass == MIMG);

  // Ignore instructions with tfe/lwe set.
  const auto *TFEOp = TII.getNamedOperand(*CI.I, AMDGPU::OpName::tfe);
  const auto *LWEOp = TII.getNamedOperand(*CI.I, AMDGPU::OpName::lwe);

  if ((TFEOp && TFEOp->getImm()) || (LWEOp && LWEOp->getImm()))
    return false;

  // Check other optional immediate operands for equality.
  unsigned OperandsToMatch[] = {AMDGPU::OpName::glc, AMDGPU::OpName::slc,
                                AMDGPU::OpName::d16, AMDGPU::OpName::unorm,
                                AMDGPU::OpName::da,  AMDGPU::OpName::r128};

  for (auto op : OperandsToMatch) {
    int Idx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), op);
    if (AMDGPU::getNamedOperandIdx(CI.Paired->getOpcode(), op) != Idx)
      return false;
    if (Idx != -1 &&
        CI.I->getOperand(Idx).getImm() != CI.Paired->getOperand(Idx).getImm())
      return false;
  }

  // Check DMask for overlaps.
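  // For example, DMask0 = 0b0011 and DMask1 = 0b1100 are combinable: the
  // smaller mask (3) lies entirely below the lowest set bit of the larger
  // mask (1 << countTrailingZeros(0b1100) == 4 > 3), while DMask0 = 0b0011
  // and DMask1 = 0b0110 overlap and are rejected.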
  unsigned MaxMask = std::max(CI.DMask0, CI.DMask1);
  unsigned MinMask = std::min(CI.DMask0, CI.DMask1);

  unsigned AllowedBitsForMin = llvm::countTrailingZeros(MaxMask);
  if ((1u << AllowedBitsForMin) <= MinMask)
    return false;

  return true;
}

bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI) {
  assert(CI.InstClass != MIMG);

  // XXX - Would the same offset be OK? Is there any reason this would happen or
  // be useful?
  if (CI.Offset0 == CI.Offset1)
    return false;

  // This won't be valid if the offset isn't aligned.
  if ((CI.Offset0 % CI.EltSize != 0) || (CI.Offset1 % CI.EltSize != 0))
    return false;

  unsigned EltOffset0 = CI.Offset0 / CI.EltSize;
  unsigned EltOffset1 = CI.Offset1 / CI.EltSize;
  CI.UseST64 = false;
  CI.BaseOff = 0;

  // Handle SMEM and VMEM instructions.
  if ((CI.InstClass != DS_READ) && (CI.InstClass != DS_WRITE)) {
    return (EltOffset0 + CI.Width0 == EltOffset1 ||
            EltOffset1 + CI.Width1 == EltOffset0) &&
           CI.GLC0 == CI.GLC1 && CI.DLC0 == CI.DLC1 &&
           (CI.InstClass == S_BUFFER_LOAD_IMM || CI.SLC0 == CI.SLC1);
  }

  // If the offset in elements doesn't fit in 8 bits, we might be able to use
  // the stride 64 versions.
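  // For example, with EltSize == 4, byte offsets 0x6000 and 0x6100 give
  // element offsets 6144 and 6208; both are multiples of 64, and 6144/64 == 96
  // and 6208/64 == 97 fit in 8 bits, so the ST64 form can encode them.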
  if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64) == 0 &&
      isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
    CI.Offset0 = EltOffset0 / 64;
    CI.Offset1 = EltOffset1 / 64;
    CI.UseST64 = true;
    return true;
  }

  // Check if the new offsets fit in the reduced 8-bit range.
  if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
    CI.Offset0 = EltOffset0;
    CI.Offset1 = EltOffset1;
    return true;
  }

  // Try to shift base address to decrease offsets.
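  // For example, with EltSize == 4 and byte offsets 1024 and 1040, the element
  // offsets 256 and 260 do not fit in 8 bits, but their difference (4) does;
  // with BaseOff = 1024 the new element offsets become 0 and 4.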
  unsigned OffsetDiff = std::abs((int)EltOffset1 - (int)EltOffset0);
  CI.BaseOff = std::min(CI.Offset0, CI.Offset1);

  if ((OffsetDiff % 64 == 0) && isUInt<8>(OffsetDiff / 64)) {
    CI.Offset0 = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64;
    CI.Offset1 = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64;
    CI.UseST64 = true;
    return true;
  }

  if (isUInt<8>(OffsetDiff)) {
    CI.Offset0 = EltOffset0 - CI.BaseOff / CI.EltSize;
    CI.Offset1 = EltOffset1 - CI.BaseOff / CI.EltSize;
    return true;
  }

  return false;
}

bool SILoadStoreOptimizer::widthsFit(const GCNSubtarget &STM,
                                     const CombineInfo &CI) {
  const unsigned Width = (CI.Width0 + CI.Width1);
  switch (CI.InstClass) {
  default:
    return (Width <= 4) && (STM.hasDwordx3LoadStores() || (Width != 3));
  case S_BUFFER_LOAD_IMM:
    switch (Width) {
    default:
      return false;
    case 2:
    case 4:
      return true;
    }
  }
}

bool SILoadStoreOptimizer::findMatchingInst(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  MachineBasicBlock::iterator E = MBB->end();
  MachineBasicBlock::iterator MBBI = CI.I;

  const unsigned Opc = CI.I->getOpcode();
  const InstClassEnum InstClass = getInstClass(Opc, *TII);

  if (InstClass == UNKNOWN) {
    return false;
  }
  const unsigned InstSubclass = getInstSubclass(Opc, *TII);

  // Do not merge VMEM buffer instructions with "swizzled" bit set.
  int Swizzled =
      AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AMDGPU::OpName::swz);
  if (Swizzled != -1 && CI.I->getOperand(Swizzled).getImm())
    return false;

  ++MBBI;

  DenseSet<unsigned> RegDefsToMove;
  DenseSet<unsigned> PhysRegUsesToMove;
  addDefsUsesToList(*CI.I, RegDefsToMove, PhysRegUsesToMove);

  for (; MBBI != E; ++MBBI) {

    if ((getInstClass(MBBI->getOpcode(), *TII) != InstClass) ||
        (getInstSubclass(MBBI->getOpcode(), *TII) != InstSubclass)) {
      // This is not a matching instruction, but we can keep looking as
      // long as one of these conditions is met:
      // 1. It is safe to move I down past MBBI.
      // 2. It is safe to move MBBI down past the instruction that I will
      //    be merged into.

      if (MBBI->hasUnmodeledSideEffects()) {
        // We can't re-order this instruction with respect to other memory
        // operations, so we fail both conditions mentioned above.
        return false;
      }

      if (MBBI->mayLoadOrStore() &&
          (!memAccessesCanBeReordered(*CI.I, *MBBI, AA) ||
           !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))) {
        // We fail condition #1, but we may still be able to satisfy condition
        // #2.  Add this instruction to the move list and then we will check
        // if condition #2 holds once we have selected the matching instruction.
        CI.InstsToMove.push_back(&*MBBI);
        addDefsUsesToList(*MBBI, RegDefsToMove, PhysRegUsesToMove);
        continue;
      }

      // When we match I with another DS instruction we will be moving I down
      // to the location of the matched instruction. Any uses of I will need
      // to be moved down as well.
      addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
                            CI.InstsToMove);
      continue;
    }

    // Don't merge volatiles.
    if (MBBI->hasOrderedMemoryRef())
      return false;

    // Handle a case like
    //   DS_WRITE_B32 addr, v, idx0
    //   w = DS_READ_B32 addr, idx0
    //   DS_WRITE_B32 addr, f(w), idx1
    // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
    // merging of the two writes.
    if (addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
                              CI.InstsToMove))
      continue;

    bool Match = CI.hasSameBaseAddress(*MBBI);

    if (Match) {
      CI.setPaired(MBBI, *TII);

      // Check both offsets (or masks for MIMG) can be combined and fit in the
      // reduced range.
      bool canBeCombined =
          CI.InstClass == MIMG
              ? dmasksCanBeCombined(CI, *TII)
              : widthsFit(*STM, CI) && offsetsCanBeCombined(CI);

      // We also need to go through the list of instructions that we plan to
      // move and make sure they are all safe to move down past the merged
      // instruction.
      if (canBeCombined && canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))
        return true;
    }

    // We've found a load/store that we couldn't merge for some reason.
    // We could potentially keep looking, but we'd need to make sure that
    // it was safe to move I and also all the instructions in InstsToMove
    // down past this instruction.
    // Check if we can move I across MBBI and if we can move all I's users.
    if (!memAccessesCanBeReordered(*CI.I, *MBBI, AA) ||
        !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))
      break;
  }
  return false;
}

unsigned SILoadStoreOptimizer::read2Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64;
  return (EltSize == 4) ? AMDGPU::DS_READ2_B32_gfx9 : AMDGPU::DS_READ2_B64_gfx9;
}

unsigned SILoadStoreOptimizer::read2ST64Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32 : AMDGPU::DS_READ2ST64_B64;

  return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32_gfx9
                        : AMDGPU::DS_READ2ST64_B64_gfx9;
}

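// Merge CI.I and CI.Paired into a single ds_read2 (or ds_read2st64), then copy
// the subregisters of the merged result back to the original destinations.
// Using the header's example:
//   ds_read_b32 v0, v2 offset:16
//   ds_read_b32 v1, v2 offset:32
// ==>
//   ds_read2_b32 v[0:1], v2 offset0:4 offset1:8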
MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeRead2Pair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be careful, since the addresses could be subregisters themselves in weird
  // cases, like vectors of pointers.
  const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);

  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdst);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc =
      CI.UseST64 ? read2ST64Opcode(CI.EltSize) : read2Opcode(CI.EltSize);

  unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(SubRegIdx0, SubRegIdx1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) && "Computed offset doesn't fit");

  const MCInstrDesc &Read2Desc = TII->get(Opc);

  const TargetRegisterClass *SuperRC =
      (CI.EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
  Register DestReg = MRI->createVirtualRegister(SuperRC);

  DebugLoc DL = CI.I->getDebugLoc();

  Register BaseReg = AddrReg->getReg();
  unsigned BaseSubReg = AddrReg->getSubReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    Register ImmReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
        .addImm(CI.BaseOff);

    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;

    TII->getAddNoCarry(*MBB, CI.Paired, DL, BaseReg)
        .addReg(ImmReg)
        .addReg(AddrReg->getReg(), 0, BaseSubReg)
        .addImm(0); // clamp bit
    BaseSubReg = 0;
  }

  MachineInstrBuilder Read2 =
      BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
          .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr
          .addImm(NewOffset0)                        // offset0
          .addImm(NewOffset1)                        // offset1
          .addImm(0)                                 // gds
          .cloneMergedMemRefs({&*CI.I, &*CI.Paired});

  (void)Read2;

  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);

  // Copy to the old destination registers.
  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  LLVM_DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  return Read2;
}

unsigned SILoadStoreOptimizer::write2Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32 : AMDGPU::DS_WRITE2_B64;
  return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32_gfx9
                        : AMDGPU::DS_WRITE2_B64_gfx9;
}

unsigned SILoadStoreOptimizer::write2ST64Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32
                          : AMDGPU::DS_WRITE2ST64_B64;

  return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32_gfx9
                        : AMDGPU::DS_WRITE2ST64_B64_gfx9;
}

MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeWrite2Pair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be sure to use .add() and not .addReg() with these operands. We want to
  // be sure we preserve the subregister index and any register flags set on
  // them.
  const MachineOperand *AddrReg =
      TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
  const MachineOperand *Data0 =
      TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
  const MachineOperand *Data1 =
      TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::data0);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc =
      CI.UseST64 ? write2ST64Opcode(CI.EltSize) : write2Opcode(CI.EltSize);

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(Data0, Data1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) && "Computed offset doesn't fit");

  const MCInstrDesc &Write2Desc = TII->get(Opc);
  DebugLoc DL = CI.I->getDebugLoc();

  Register BaseReg = AddrReg->getReg();
  unsigned BaseSubReg = AddrReg->getSubReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    Register ImmReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
        .addImm(CI.BaseOff);

    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;

    TII->getAddNoCarry(*MBB, CI.Paired, DL, BaseReg)
        .addReg(ImmReg)
        .addReg(AddrReg->getReg(), 0, BaseSubReg)
        .addImm(0); // clamp bit
    BaseSubReg = 0;
  }

  MachineInstrBuilder Write2 =
      BuildMI(*MBB, CI.Paired, DL, Write2Desc)
          .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr
          .add(*Data0)                               // data0
          .add(*Data1)                               // data1
          .addImm(NewOffset0)                        // offset0
          .addImm(NewOffset1)                        // offset1
          .addImm(0)                                 // gds
          .cloneMergedMemRefs({&*CI.I, &*CI.Paired});

  moveInstsAfter(Write2, CI.InstsToMove);

  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  LLVM_DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  return Write2;
}

MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeImagePair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  const unsigned Opcode = getNewOpcode(CI);

  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);

  Register DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedDMask = CI.DMask0 | CI.DMask1;
  unsigned DMaskIdx =
      AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AMDGPU::OpName::dmask);

  auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg);
  for (unsigned I = 1, E = (*CI.I).getNumOperands(); I != E; ++I) {
    if (I == DMaskIdx)
      MIB.addImm(MergedDMask);
    else
      MIB.add((*CI.I).getOperand(I));
  }

  // It shouldn't be possible to get this far if the two instructions
  // don't have a single memoperand, because MachineInstr::mayAlias()
  // will return true if this is the case.
  assert(CI.I->hasOneMemOperand() && CI.Paired->hasOneMemOperand());

  const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
  const MachineMemOperand *MMOb = *CI.Paired->memoperands_begin();

  MachineInstr *New = MIB.addMemOperand(
      combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));

  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI);
  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);

  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return New;
}

MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeSBufferLoadImmPair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  const unsigned Opcode = getNewOpcode(CI);

  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);

  Register DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);

  // It shouldn't be possible to get this far if the two instructions
  // don't have a single memoperand, because MachineInstr::mayAlias()
  // will return true if this is the case.
  assert(CI.I->hasOneMemOperand() && CI.Paired->hasOneMemOperand());

  const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
  const MachineMemOperand *MMOb = *CI.Paired->memoperands_begin();

  MachineInstr *New =
    BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg)
        .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::sbase))
        .addImm(MergedOffset) // offset
        .addImm(CI.GLC0)      // glc
        .addImm(CI.DLC0)      // dlc
        .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));

  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI);
  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::sdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::sdst);

  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return New;
}

MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeBufferLoadPair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();

  const unsigned Opcode = getNewOpcode(CI);

  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);

  // Create the new destination register.
  Register DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);

  auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg);

  const unsigned Regs = getRegs(Opcode, *TII);

  if (Regs & VADDR)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  // It shouldn't be possible to get this far if the two instructions
  // don't have a single memoperand, because MachineInstr::mayAlias()
  // will return true if this is the case.
  assert(CI.I->hasOneMemOperand() && CI.Paired->hasOneMemOperand());

  const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
  const MachineMemOperand *MMOb = *CI.Paired->memoperands_begin();

  MachineInstr *New =
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
        .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
        .addImm(MergedOffset) // offset
        .addImm(CI.GLC0)      // glc
        .addImm(CI.SLC0)      // slc
        .addImm(0)            // tfe
        .addImm(CI.DLC0)      // dlc
        .addImm(0)            // swz
        .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));

  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI);
  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);

  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return New;
}

unsigned SILoadStoreOptimizer::getNewOpcode(const CombineInfo &CI) {
  const unsigned Width = CI.Width0 + CI.Width1;

  switch (CI.InstClass) {
  default:
    assert(CI.InstClass == BUFFER_LOAD || CI.InstClass == BUFFER_STORE);
    // FIXME: Handle d16 correctly
    return AMDGPU::getMUBUFOpcode(AMDGPU::getMUBUFBaseOpcode(CI.I->getOpcode()),
                                  Width);
  case UNKNOWN:
    llvm_unreachable("Unknown instruction class");
  case S_BUFFER_LOAD_IMM:
    switch (Width) {
    default:
      return 0;
    case 2:
      return AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;
    case 4:
      return AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM;
    }
  case MIMG:
    assert((countPopulation(CI.DMask0 | CI.DMask1) == Width) && "No overlaps");
    return AMDGPU::getMaskedMIMGOp(CI.I->getOpcode(), Width);
  }
}

std::pair<unsigned, unsigned>
SILoadStoreOptimizer::getSubRegIdxs(const CombineInfo &CI) {

  if (CI.Width0 == 0 || CI.Width1 == 0 || CI.Width0 + CI.Width1 > 4)
    return std::make_pair(0, 0);

  bool ReverseOrder;
  if (CI.InstClass == MIMG) {
    assert((countPopulation(CI.DMask0 | CI.DMask1) == CI.Width0 + CI.Width1) &&
           "No overlaps");
    ReverseOrder = CI.DMask0 > CI.DMask1;
  } else
    ReverseOrder = CI.Offset0 > CI.Offset1;

  static const unsigned Idxs[4][4] = {
      {AMDGPU::sub0, AMDGPU::sub0_sub1, AMDGPU::sub0_sub1_sub2,
       AMDGPU::sub0_sub1_sub2_sub3},
      {AMDGPU::sub1, AMDGPU::sub1_sub2, AMDGPU::sub1_sub2_sub3, 0},
      {AMDGPU::sub2, AMDGPU::sub2_sub3, 0, 0},
      {AMDGPU::sub3, 0, 0, 0},
  };
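  // For example, merging a 1-dword access followed by a 2-dword access (no
  // reordering) yields Idx0 = Idxs[0][0] == sub0 and Idx1 = Idxs[1][1] ==
  // sub1_sub2.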
  unsigned Idx0;
  unsigned Idx1;

  assert(CI.Width0 >= 1 && CI.Width0 <= 3);
  assert(CI.Width1 >= 1 && CI.Width1 <= 3);

  if (ReverseOrder) {
    Idx1 = Idxs[0][CI.Width1 - 1];
    Idx0 = Idxs[CI.Width1][CI.Width0 - 1];
  } else {
    Idx0 = Idxs[0][CI.Width0 - 1];
    Idx1 = Idxs[CI.Width0][CI.Width1 - 1];
  }

  return std::make_pair(Idx0, Idx1);
}

const TargetRegisterClass *
SILoadStoreOptimizer::getTargetRegisterClass(const CombineInfo &CI) {
  if (CI.InstClass == S_BUFFER_LOAD_IMM) {
    switch (CI.Width0 + CI.Width1) {
    default:
      return nullptr;
    case 2:
      return &AMDGPU::SReg_64_XEXECRegClass;
    case 4:
      return &AMDGPU::SGPR_128RegClass;
    case 8:
      return &AMDGPU::SReg_256RegClass;
    case 16:
      return &AMDGPU::SReg_512RegClass;
    }
  } else {
    switch (CI.Width0 + CI.Width1) {
    default:
      return nullptr;
    case 2:
      return &AMDGPU::VReg_64RegClass;
    case 3:
      return &AMDGPU::VReg_96RegClass;
    case 4:
      return &AMDGPU::VReg_128RegClass;
    }
  }
}

MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeBufferStorePair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();

  const unsigned Opcode = getNewOpcode(CI);

  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI);
  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);

  // Copy to the new source register.
  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);
  Register SrcReg = MRI->createVirtualRegister(SuperRC);

  const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Src1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);

  BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::REG_SEQUENCE), SrcReg)
      .add(*Src0)
      .addImm(SubRegIdx0)
      .add(*Src1)
      .addImm(SubRegIdx1);

  auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode))
                 .addReg(SrcReg, RegState::Kill);

  const unsigned Regs = getRegs(Opcode, *TII);

  if (Regs & VADDR)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  // It shouldn't be possible to get this far if the two instructions
  // don't have a single memoperand, because MachineInstr::mayAlias()
  // will return true if this is the case.
  assert(CI.I->hasOneMemOperand() && CI.Paired->hasOneMemOperand());

  const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
  const MachineMemOperand *MMOb = *CI.Paired->memoperands_begin();

  MachineInstr *New =
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
        .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
        .addImm(std::min(CI.Offset0, CI.Offset1)) // offset
        .addImm(CI.GLC0)      // glc
        .addImm(CI.SLC0)      // slc
        .addImm(0)            // tfe
        .addImm(CI.DLC0)      // dlc
        .addImm(0)            // swz
        .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));

  moveInstsAfter(MIB, CI.InstsToMove);

  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return New;
}

MachineOperand
SILoadStoreOptimizer::createRegOrImm(int32_t Val, MachineInstr &MI) const {
  APInt V(32, Val, true);
  if (TII->isInlineConstant(V))
    return MachineOperand::CreateImm(Val);

  Register Reg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  MachineInstr *Mov =
      BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
              TII->get(AMDGPU::S_MOV_B32), Reg)
          .addImm(Val);
  (void)Mov;
  LLVM_DEBUG(dbgs() << "    "; Mov->dump());
  return MachineOperand::CreateReg(Reg, false);
}

// Compute base address using Addr and return the final register.
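// A sketch of the sequence this emits (register names illustrative only):
//   %lo:vgpr_32, %carry = V_ADD_I32_e64 %Addr.Base.LoReg, OffsetLo
//   %hi:vgpr_32 = V_ADDC_U32_e64 %Addr.Base.HiReg, OffsetHi, killed %carry
//   %base:vreg_64 = REG_SEQUENCE %lo, %subreg.sub0, %hi, %subreg.sub1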
unsigned SILoadStoreOptimizer::computeBase(MachineInstr &MI,
                                           const MemAddress &Addr) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineBasicBlock::iterator MBBI = MI.getIterator();
  DebugLoc DL = MI.getDebugLoc();

  assert((TRI->getRegSizeInBits(Addr.Base.LoReg, *MRI) == 32 ||
          Addr.Base.LoSubReg) &&
         "Expected 32-bit Base-Register-Low!!");

  assert((TRI->getRegSizeInBits(Addr.Base.HiReg, *MRI) == 32 ||
          Addr.Base.HiSubReg) &&
         "Expected 32-bit Base-Register-Hi!!");

  LLVM_DEBUG(dbgs() << "  Re-Computed Anchor-Base:\n");
  MachineOperand OffsetLo =
      createRegOrImm(static_cast<int32_t>(Addr.Offset), MI);
  MachineOperand OffsetHi =
      createRegOrImm(static_cast<int32_t>(Addr.Offset >> 32), MI);

  const auto *CarryRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
  Register CarryReg = MRI->createVirtualRegister(CarryRC);
  Register DeadCarryReg = MRI->createVirtualRegister(CarryRC);

  Register DestSub0 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register DestSub1 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  MachineInstr *LoHalf =
      BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADD_I32_e64), DestSub0)
          .addReg(CarryReg, RegState::Define)
          .addReg(Addr.Base.LoReg, 0, Addr.Base.LoSubReg)
          .add(OffsetLo)
          .addImm(0); // clamp bit
  (void)LoHalf;
  LLVM_DEBUG(dbgs() << "    "; LoHalf->dump(););

  MachineInstr *HiHalf =
      BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADDC_U32_e64), DestSub1)
          .addReg(DeadCarryReg, RegState::Define | RegState::Dead)
          .addReg(Addr.Base.HiReg, 0, Addr.Base.HiSubReg)
          .add(OffsetHi)
          .addReg(CarryReg, RegState::Kill)
          .addImm(0); // clamp bit
  (void)HiHalf;
  LLVM_DEBUG(dbgs() << "    "; HiHalf->dump(););

  Register FullDestReg = MRI->createVirtualRegister(&AMDGPU::VReg_64RegClass);
  MachineInstr *FullBase =
      BuildMI(*MBB, MBBI, DL, TII->get(TargetOpcode::REG_SEQUENCE), FullDestReg)
          .addReg(DestSub0)
          .addImm(AMDGPU::sub0)
          .addReg(DestSub1)
          .addImm(AMDGPU::sub1);
  (void)FullBase;
  LLVM_DEBUG(dbgs() << "    "; FullBase->dump(); dbgs() << "\n";);

  return FullDestReg;
}

// Update base and offset with the NewBase and NewOffset in MI.
void SILoadStoreOptimizer::updateBaseAndOffset(MachineInstr &MI,
                                               unsigned NewBase,
                                               int32_t NewOffset) const {
  auto Base = TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
  Base->setReg(NewBase);
  Base->setIsKill(false);
  TII->getNamedOperand(MI, AMDGPU::OpName::offset)->setImm(NewOffset);
}

Optional<int32_t>
SILoadStoreOptimizer::extractConstOffset(const MachineOperand &Op) const {
  if (Op.isImm())
    return Op.getImm();

  if (!Op.isReg())
    return None;

  MachineInstr *Def = MRI->getUniqueVRegDef(Op.getReg());
  if (!Def || Def->getOpcode() != AMDGPU::S_MOV_B32 ||
      !Def->getOperand(1).isImm())
    return None;

  return Def->getOperand(1).getImm();
}

// Analyzes \p Base and extracts:
//  - 32-bit base registers and subregisters
//  - 64-bit constant offset
// Expecting base computation as:
//   %OFFSET0:sgpr_32 = S_MOV_B32 8000
//   %LO:vgpr_32, %c:sreg_64_xexec =
//       V_ADD_I32_e64 %BASE_LO:vgpr_32, %103:sgpr_32,
//   %HI:vgpr_32, = V_ADDC_U32_e64 %BASE_HI:vgpr_32, 0, killed %c:sreg_64_xexec
//   %Base:vreg_64 =
//       REG_SEQUENCE %LO:vgpr_32, %subreg.sub0, %HI:vgpr_32, %subreg.sub1
void SILoadStoreOptimizer::processBaseWithConstOffset(const MachineOperand &Base,
                                                      MemAddress &Addr) const {
  if (!Base.isReg())
    return;

  MachineInstr *Def = MRI->getUniqueVRegDef(Base.getReg());
  if (!Def || Def->getOpcode() != AMDGPU::REG_SEQUENCE
      || Def->getNumOperands() != 5)
    return;

  MachineOperand BaseLo = Def->getOperand(1);
  MachineOperand BaseHi = Def->getOperand(3);
  if (!BaseLo.isReg() || !BaseHi.isReg())
    return;

  MachineInstr *BaseLoDef = MRI->getUniqueVRegDef(BaseLo.getReg());
  MachineInstr *BaseHiDef = MRI->getUniqueVRegDef(BaseHi.getReg());

  if (!BaseLoDef || BaseLoDef->getOpcode() != AMDGPU::V_ADD_I32_e64 ||
      !BaseHiDef || BaseHiDef->getOpcode() != AMDGPU::V_ADDC_U32_e64)
    return;

  const auto *Src0 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src0);
  const auto *Src1 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src1);

  auto Offset0P = extractConstOffset(*Src0);
  if (Offset0P)
    BaseLo = *Src1;
  else {
    if (!(Offset0P = extractConstOffset(*Src1)))
      return;
    BaseLo = *Src0;
  }

  Src0 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src0);
  Src1 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src1);

  if (Src0->isImm())
    std::swap(Src0, Src1);

  if (!Src1->isImm())
    return;

  uint64_t Offset1 = Src1->getImm();
  BaseHi = *Src0;

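  // Record the base registers/subregisters and reassemble the 64-bit
  // constant: the lo add contributed the low 32 bits, the hi add the high 32
  // bits.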
  Addr.Base.LoReg = BaseLo.getReg();
  Addr.Base.HiReg = BaseHi.getReg();
  Addr.Base.LoSubReg = BaseLo.getSubReg();
  Addr.Base.HiSubReg = BaseHi.getSubReg();
  Addr.Offset = (*Offset0P & 0x00000000ffffffff) | (Offset1 << 32);
}

bool SILoadStoreOptimizer::promoteConstantOffsetToImm(
    MachineInstr &MI,
    MemInfoMap &Visited,
    SmallPtrSet<MachineInstr *, 4> &AnchorList) const {

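  // Handle only instructions that are exactly one of load and store; atomic
  // operations that both load and store are skipped.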
  if (!(MI.mayLoad() ^ MI.mayStore()))
    return false;

  // TODO: Support flat and scratch.
  if (AMDGPU::getGlobalSaddrOp(MI.getOpcode()) < 0)
    return false;

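  // A load that also carries a vdata operand is not a plain load; skip it.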
  if (MI.mayLoad() &&
      TII->getNamedOperand(MI, AMDGPU::OpName::vdata) != nullptr)
    return false;

  if (AnchorList.count(&MI))
    return false;

  LLVM_DEBUG(dbgs() << "\nTryToPromoteConstantOffsetToImmFor "; MI.dump());

  if (TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm()) {
    LLVM_DEBUG(dbgs() << "  Const-offset is already promoted.\n";);
    return false;
  }

  // Step1: Find the base-registers and a 64-bit constant offset.
  MachineOperand &Base = *TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
  MemAddress MAddr;
  if (Visited.find(&MI) == Visited.end()) {
    processBaseWithConstOffset(Base, MAddr);
    Visited[&MI] = MAddr;
  } else
    MAddr = Visited[&MI];

  if (MAddr.Offset == 0) {
    LLVM_DEBUG(dbgs() << "  Failed to extract constant-offset or there are no"
                         " constant offsets that can be promoted.\n";);
    return false;
  }

  LLVM_DEBUG(dbgs() << "  BASE: {" << MAddr.Base.HiReg << ", "
             << MAddr.Base.LoReg << "} Offset: " << MAddr.Offset << "\n\n";);

  // Step2: Traverse through MI's basic block and find an anchor (an
  // instruction with the same base registers) whose offset lies at the
  // largest 13-bit distance from MI's offset.
  // E.g. (64-bit loads)
  // bb:
  //   addr1 = &a + 4096;   load1 = load(addr1,  0)
  //   addr2 = &a + 6144;   load2 = load(addr2,  0)
  //   addr3 = &a + 8192;   load3 = load(addr3,  0)
  //   addr4 = &a + 10240;  load4 = load(addr4,  0)
  //   addr5 = &a + 12288;  load5 = load(addr5,  0)
  //
  // Starting from the first load, the optimization will try to find a new base
  // from which (&a + 4096) has a 13-bit distance. Both &a + 6144 and &a + 8192
  // have a 13-bit distance from &a + 4096. The heuristic chooses &a + 8192 as
  // the new base (anchor) because the maximum distance can presumably
  // accommodate more intermediate bases.
  //
  // Step3: move (&a + 8192) above load1. Compute and promote offsets from
  // (&a + 8192) for load1, load2, load4.
  //   addr = &a + 8192
  //   load1 = load(addr,       -4096)
  //   load2 = load(addr,       -2048)
  //   load3 = load(addr,       0)
  //   load4 = load(addr,       2048)
  //   addr5 = &a + 12288;  load5 = load(addr5,  0)
  //
  MachineInstr *AnchorInst = nullptr;
  MemAddress AnchorAddr;
  uint32_t MaxDist = std::numeric_limits<uint32_t>::min();
  SmallVector<std::pair<MachineInstr *, int64_t>, 4> InstsWCommonBase;

  MachineBasicBlock *MBB = MI.getParent();
  MachineBasicBlock::iterator E = MBB->end();
  MachineBasicBlock::iterator MBBI = MI.getIterator();
  ++MBBI;
  const SITargetLowering *TLI =
    static_cast<const SITargetLowering *>(STM->getTargetLowering());

  for ( ; MBBI != E; ++MBBI) {
    MachineInstr &MINext = *MBBI;
    // TODO: Support finding an anchor (with the same base) from store
    // addresses or any other load addresses where the opcodes differ.
    if (MINext.getOpcode() != MI.getOpcode() ||
        TII->getNamedOperand(MINext, AMDGPU::OpName::offset)->getImm())
      continue;

    const MachineOperand &BaseNext =
      *TII->getNamedOperand(MINext, AMDGPU::OpName::vaddr);
    MemAddress MAddrNext;
    if (Visited.find(&MINext) == Visited.end()) {
      processBaseWithConstOffset(BaseNext, MAddrNext);
      Visited[&MINext] = MAddrNext;
    } else
      MAddrNext = Visited[&MINext];

    if (MAddrNext.Base.LoReg != MAddr.Base.LoReg ||
        MAddrNext.Base.HiReg != MAddr.Base.HiReg ||
        MAddrNext.Base.LoSubReg != MAddr.Base.LoSubReg ||
        MAddrNext.Base.HiSubReg != MAddr.Base.HiSubReg)
      continue;

    InstsWCommonBase.push_back(std::make_pair(&MINext, MAddrNext.Offset));

    int64_t Dist = MAddr.Offset - MAddrNext.Offset;
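    // Prefer the farthest candidate whose distance is still encodable as a
    // legal immediate offset (a 13-bit constant for global instructions, as
    // described in the pass header).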
    TargetLoweringBase::AddrMode AM;
    AM.HasBaseReg = true;
    AM.BaseOffs = Dist;
    if (TLI->isLegalGlobalAddressingMode(AM) &&
        (uint32_t)std::abs(Dist) > MaxDist) {
      MaxDist = std::abs(Dist);

      AnchorAddr = MAddrNext;
      AnchorInst = &MINext;
    }
  }

  if (AnchorInst) {
    LLVM_DEBUG(dbgs() << "  Anchor-Inst(with max-distance from Offset): ";
               AnchorInst->dump());
    LLVM_DEBUG(dbgs() << "  Anchor-Offset from BASE: "
               <<  AnchorAddr.Offset << "\n\n");

    // Instead of moving up, just re-compute anchor-instruction's base address.
    unsigned Base = computeBase(MI, AnchorAddr);

    updateBaseAndOffset(MI, Base, MAddr.Offset - AnchorAddr.Offset);
    LLVM_DEBUG(dbgs() << "  After promotion: "; MI.dump(););

    for (auto P : InstsWCommonBase) {
      TargetLoweringBase::AddrMode AM;
      AM.HasBaseReg = true;
      AM.BaseOffs = P.second - AnchorAddr.Offset;

      if (TLI->isLegalGlobalAddressingMode(AM)) {
        LLVM_DEBUG(dbgs() << "  Promote Offset(" << P.second;
                   dbgs() << ")"; P.first->dump());
        updateBaseAndOffset(*P.first, Base, P.second - AnchorAddr.Offset);
        LLVM_DEBUG(dbgs() << "     After promotion: "; P.first->dump());
      }
    }
    AnchorList.insert(AnchorInst);
    return true;
  }

  return false;
}

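// Append CI to the list whose entries share its instruction class and base
// address; start a new list if no existing one matches.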
void SILoadStoreOptimizer::addInstToMergeableList(const CombineInfo &CI,
                 std::list<std::list<CombineInfo> > &MergeableInsts) const {
  for (std::list<CombineInfo> &AddrList : MergeableInsts) {
    if (AddrList.front().InstClass == CI.InstClass &&
        AddrList.front().hasSameBaseAddress(*CI.I)) {
      AddrList.emplace_back(CI);
      return;
    }
  }

  // Base address not found, so add a new list.
  MergeableInsts.emplace_back(1, CI);
}

bool SILoadStoreOptimizer::collectMergeableInsts(MachineBasicBlock &MBB,
                 std::list<std::list<CombineInfo> > &MergeableInsts) const {
  bool Modified = false;
  // Caches the base registers and constant offset computed for each address
  // already analyzed by processBaseWithConstOffset.
  MemInfoMap Visited;
  // Instructions already used as anchors when promoting constant offsets;
  // these are skipped on later visits.
  SmallPtrSet<MachineInstr *, 4> AnchorList;

  // Sort potentially mergeable instructions into lists, one per base address.
  for (MachineInstr &MI : MBB.instrs()) {
    // We run this before checking if an address is mergeable, because it can
    // produce better code even if the instructions aren't mergeable.
    if (promoteConstantOffsetToImm(MI, Visited, AnchorList))
      Modified = true;

    const InstClassEnum InstClass = getInstClass(MI.getOpcode(), *TII);
    if (InstClass == UNKNOWN)
      continue;

    // Don't combine volatile or ordered memory accesses.
    if (MI.hasOrderedMemoryRef())
      continue;

    CombineInfo CI;
    CI.setMI(MI, *TII, *STM);

    if (!CI.hasMergeableAddress(*MRI))
      continue;

    addInstToMergeableList(CI, MergeableInsts);
  }
  return Modified;
}

// Scan through looking for adjacent LDS operations with constant offsets from
// the same base register. We rely on the scheduler to do the hard work of
// clustering nearby loads, and assume these are all adjacent.
bool SILoadStoreOptimizer::optimizeBlock(
                       std::list<std::list<CombineInfo> > &MergeableInsts) {
  bool Modified = false;

  for (std::list<CombineInfo> &MergeList : MergeableInsts) {
    if (MergeList.size() < 2)
      continue;

    bool OptimizeListAgain = false;
    if (!optimizeInstsWithSameBaseAddr(MergeList, OptimizeListAgain)) {
      // We weren't able to make any changes, so clear the list so we don't
      // process the same instructions the next time we try to optimize this
      // block.
      MergeList.clear();
      continue;
    }

    // We made changes, but also determined that there were no more
    // optimization opportunities, so we don't need to reprocess the list.
    if (!OptimizeListAgain)
      MergeList.clear();

    OptimizeAgain |= OptimizeListAgain;
    Modified = true;
  }
  return Modified;
}

void
SILoadStoreOptimizer::removeCombinedInst(std::list<CombineInfo> &MergeList,
                                         const MachineInstr &MI) {

  for (auto CI = MergeList.begin(), E = MergeList.end(); CI != E; ++CI) {
    if (&*CI->I == &MI) {
      MergeList.erase(CI);
      return;
    }
  }
}

bool
SILoadStoreOptimizer::optimizeInstsWithSameBaseAddr(
                                          std::list<CombineInfo> &MergeList,
                                          bool &OptimizeListAgain) {
  bool Modified = false;
  for (auto I = MergeList.begin(); I != MergeList.end(); ++I) {
    CombineInfo &CI = *I;

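    // On a successful merge, the paired instruction is removed from the list
    // and CI is repointed at the merged instruction so it can be considered
    // for another merge while the combined width is below the class maximum
    // (16 dwords for SMEM, 4 for buffer and image ops).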
    switch (CI.InstClass) {
    default:
      break;
    case DS_READ:
      if (findMatchingInst(CI)) {
        Modified = true;
        removeCombinedInst(MergeList, *CI.Paired);
        MachineBasicBlock::iterator NewMI = mergeRead2Pair(CI);
        CI.setMI(NewMI, *TII, *STM);
      }
      break;
    case DS_WRITE:
      if (findMatchingInst(CI)) {
        Modified = true;
        removeCombinedInst(MergeList, *CI.Paired);
        MachineBasicBlock::iterator NewMI = mergeWrite2Pair(CI);
        CI.setMI(NewMI, *TII, *STM);
      }
      break;
    case S_BUFFER_LOAD_IMM:
      if (findMatchingInst(CI)) {
        Modified = true;
        removeCombinedInst(MergeList, *CI.Paired);
        MachineBasicBlock::iterator NewMI = mergeSBufferLoadImmPair(CI);
        CI.setMI(NewMI, *TII, *STM);
        OptimizeListAgain |= (CI.Width0 + CI.Width1) < 16;
      }
      break;
    case BUFFER_LOAD:
      if (findMatchingInst(CI)) {
        Modified = true;
        removeCombinedInst(MergeList, *CI.Paired);
        MachineBasicBlock::iterator NewMI = mergeBufferLoadPair(CI);
        CI.setMI(NewMI, *TII, *STM);
        OptimizeListAgain |= (CI.Width0 + CI.Width1) < 4;
      }
      break;
    case BUFFER_STORE:
      if (findMatchingInst(CI)) {
        Modified = true;
        removeCombinedInst(MergeList, *CI.Paired);
        MachineBasicBlock::iterator NewMI = mergeBufferStorePair(CI);
        CI.setMI(NewMI, *TII, *STM);
        OptimizeListAgain |= (CI.Width0 + CI.Width1) < 4;
      }
      break;
    case MIMG:
      if (findMatchingInst(CI)) {
        Modified = true;
        removeCombinedInst(MergeList, *CI.Paired);
        MachineBasicBlock::iterator NewMI = mergeImagePair(CI);
        CI.setMI(NewMI, *TII, *STM);
        OptimizeListAgain |= (CI.Width0 + CI.Width1) < 4;
      }
      break;
    }
    // Clear the InstsToMove after we have finished searching so we don't have
    // stale values left over if we search for this CI again in another pass
    // over the block.
    CI.InstsToMove.clear();
  }

  return Modified;
}

bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  STM = &MF.getSubtarget<GCNSubtarget>();
  if (!STM->loadStoreOptEnabled())
    return false;

  TII = STM->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  assert(MRI->isSSA() && "Must be run on SSA");

  LLVM_DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");

  bool Modified = false;

  for (MachineBasicBlock &MBB : MF) {
    std::list<std::list<CombineInfo> > MergeableInsts;
    // First pass: Collect list of all instructions we know how to merge.
    Modified |= collectMergeableInsts(MBB, MergeableInsts);
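    // Merging can itself expose new opportunities (e.g. two dwordx2 results
    // may later combine into a dwordx4), so reprocess the lists until no
    // merge requests another round.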
    do {
      OptimizeAgain = false;
      Modified |= optimizeBlock(MergeableInsts);
    } while (OptimizeAgain);
  }

  return Modified;
}