1 //===- SILoadStoreOptimizer.cpp -------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass tries to fuse DS instructions with close by immediate offsets.
10 // This will fuse operations such as
11 //  ds_read_b32 v0, v2 offset:16
12 //  ds_read_b32 v1, v2 offset:32
13 // ==>
14 //   ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
15 //
16 // The same is done for certain SMEM and VMEM opcodes, e.g.:
17 //  s_buffer_load_dword s4, s[0:3], 4
18 //  s_buffer_load_dword s5, s[0:3], 8
19 // ==>
20 //  s_buffer_load_dwordx2 s[4:5], s[0:3], 4
21 //
22 // This pass also tries to promote a constant offset to the immediate by
23 // adjusting the base. It tries to use a base from the nearby instructions that
24 // allows it to have a 13-bit constant offset and then promotes the 13-bit
25 // offset to the immediate.
26 // E.g.
27 //  s_movk_i32 s0, 0x1800
28 //  v_add_co_u32_e32 v0, vcc, s0, v2
29 //  v_addc_co_u32_e32 v1, vcc, 0, v6, vcc
30 //
31 //  s_movk_i32 s0, 0x1000
32 //  v_add_co_u32_e32 v5, vcc, s0, v2
33 //  v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
34 //  global_load_dwordx2 v[5:6], v[5:6], off
35 //  global_load_dwordx2 v[0:1], v[0:1], off
36 // =>
37 //  s_movk_i32 s0, 0x1000
38 //  v_add_co_u32_e32 v5, vcc, s0, v2
39 //  v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
40 //  global_load_dwordx2 v[5:6], v[5:6], off
41 //  global_load_dwordx2 v[0:1], v[5:6], off offset:2048
42 //
43 // Future improvements:
44 //
45 // - This is currently missing stores of constants because loading
46 //   the constant into the data register is placed between the stores, although
47 //   this is arguably a scheduling problem.
48 //
49 // - Live interval recomputing seems inefficient. This currently only matches
50 //   one pair, and recomputes live intervals and moves on to the next pair. It
51 //   would be better to compute a list of all merges that need to occur.
52 //
53 // - With a list of instructions to process, we can also merge more. If a
54 //   cluster of loads has offsets that are too large to fit in the 8-bit
55 //   offset fields, but are close enough together that their differences do
56 //   fit, we can add to the base pointer and use the new reduced offsets.
57 //
58 //===----------------------------------------------------------------------===//
59 
60 #include "AMDGPU.h"
61 #include "GCNSubtarget.h"
62 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
63 #include "llvm/Analysis/AliasAnalysis.h"
64 #include "llvm/CodeGen/MachineFunctionPass.h"
65 #include "llvm/InitializePasses.h"
66 
67 using namespace llvm;
68 
69 #define DEBUG_TYPE "si-load-store-opt"
70 
71 namespace {
72 enum InstClassEnum {
73   UNKNOWN,
74   DS_READ,
75   DS_WRITE,
76   S_BUFFER_LOAD_IMM,
77   BUFFER_LOAD,
78   BUFFER_STORE,
79   MIMG,
80   TBUFFER_LOAD,
81   TBUFFER_STORE,
82 };
83 
84 struct AddressRegs {
85   unsigned char NumVAddrs = 0;
86   bool SBase = false;
87   bool SRsrc = false;
88   bool SOffset = false;
89   bool VAddr = false;
90   bool Addr = false;
91   bool SSamp = false;
92 };
93 
94 // GFX10 image_sample instructions can have 12 vaddrs + srsrc + ssamp.
95 const unsigned MaxAddressRegs = 12 + 1 + 1;
96 
97 class SILoadStoreOptimizer : public MachineFunctionPass {
98   struct CombineInfo {
99     MachineBasicBlock::iterator I;
100     unsigned EltSize;
101     unsigned Offset;
102     unsigned Width;
103     unsigned Format;
104     unsigned BaseOff;
105     unsigned DMask;
106     InstClassEnum InstClass;
107     unsigned CPol = 0;
108     bool UseST64;
109     int AddrIdx[MaxAddressRegs];
110     const MachineOperand *AddrReg[MaxAddressRegs];
111     unsigned NumAddresses;
112     unsigned Order;
113 
114     bool hasSameBaseAddress(const MachineInstr &MI) {
115       for (unsigned i = 0; i < NumAddresses; i++) {
116         const MachineOperand &AddrRegNext = MI.getOperand(AddrIdx[i]);
117 
118         if (AddrReg[i]->isImm() || AddrRegNext.isImm()) {
119           if (AddrReg[i]->isImm() != AddrRegNext.isImm() ||
120               AddrReg[i]->getImm() != AddrRegNext.getImm()) {
121             return false;
122           }
123           continue;
124         }
125 
126         // Check same base pointer. Be careful of subregisters, which can occur
127         // with vectors of pointers.
128         if (AddrReg[i]->getReg() != AddrRegNext.getReg() ||
129             AddrReg[i]->getSubReg() != AddrRegNext.getSubReg()) {
130          return false;
131         }
132       }
133       return true;
134     }
135 
136     bool hasMergeableAddress(const MachineRegisterInfo &MRI) {
137       for (unsigned i = 0; i < NumAddresses; ++i) {
138         const MachineOperand *AddrOp = AddrReg[i];
139         // Immediates are always OK.
140         if (AddrOp->isImm())
141           continue;
142 
143         // Don't try to merge addresses that aren't either immediates or registers.
144         // TODO: Should be possible to merge FrameIndexes and maybe some other
145         // non-register operands.
146         if (!AddrOp->isReg())
147           return false;
148 
149         // TODO: We should be able to merge physical reg addresses.
150         if (AddrOp->getReg().isPhysical())
151           return false;
152 
153         // If an address has only one use then there will be no other
154         // instructions with the same address, so we can't merge this one.
155         if (MRI.hasOneNonDBGUse(AddrOp->getReg()))
156           return false;
157       }
158       return true;
159     }
160 
161     void setMI(MachineBasicBlock::iterator MI, const SIInstrInfo &TII,
162                const GCNSubtarget &STM);
163   };
164 
165   struct BaseRegisters {
166     Register LoReg;
167     Register HiReg;
168 
169     unsigned LoSubReg = 0;
170     unsigned HiSubReg = 0;
171   };
172 
173   struct MemAddress {
174     BaseRegisters Base;
175     int64_t Offset = 0;
176   };
177 
178   using MemInfoMap = DenseMap<MachineInstr *, MemAddress>;
179 
180 private:
181   const GCNSubtarget *STM = nullptr;
182   const SIInstrInfo *TII = nullptr;
183   const SIRegisterInfo *TRI = nullptr;
184   MachineRegisterInfo *MRI = nullptr;
185   AliasAnalysis *AA = nullptr;
186   bool OptimizeAgain;
187 
188   static bool dmasksCanBeCombined(const CombineInfo &CI,
189                                   const SIInstrInfo &TII,
190                                   const CombineInfo &Paired);
191   static bool offsetsCanBeCombined(CombineInfo &CI, const GCNSubtarget &STI,
192                                    CombineInfo &Paired, bool Modify = false);
193   static bool widthsFit(const GCNSubtarget &STI, const CombineInfo &CI,
194                         const CombineInfo &Paired);
195   static unsigned getNewOpcode(const CombineInfo &CI, const CombineInfo &Paired);
196   static std::pair<unsigned, unsigned> getSubRegIdxs(const CombineInfo &CI,
197                                                      const CombineInfo &Paired);
198   const TargetRegisterClass *getTargetRegisterClass(const CombineInfo &CI,
199                                                     const CombineInfo &Paired);
200   const TargetRegisterClass *getDataRegClass(const MachineInstr &MI) const;
201 
202   bool checkAndPrepareMerge(CombineInfo &CI, CombineInfo &Paired,
203                             SmallVectorImpl<MachineInstr *> &InstsToMove);
204 
205   unsigned read2Opcode(unsigned EltSize) const;
206   unsigned read2ST64Opcode(unsigned EltSize) const;
207   MachineBasicBlock::iterator
208   mergeRead2Pair(CombineInfo &CI, CombineInfo &Paired,
209                  const SmallVectorImpl<MachineInstr *> &InstsToMove);
210 
211   unsigned write2Opcode(unsigned EltSize) const;
212   unsigned write2ST64Opcode(unsigned EltSize) const;
213   MachineBasicBlock::iterator
214   mergeWrite2Pair(CombineInfo &CI, CombineInfo &Paired,
215                   const SmallVectorImpl<MachineInstr *> &InstsToMove);
216   MachineBasicBlock::iterator
217   mergeImagePair(CombineInfo &CI, CombineInfo &Paired,
218                  const SmallVectorImpl<MachineInstr *> &InstsToMove);
219   MachineBasicBlock::iterator
220   mergeSBufferLoadImmPair(CombineInfo &CI, CombineInfo &Paired,
221                           const SmallVectorImpl<MachineInstr *> &InstsToMove);
222   MachineBasicBlock::iterator
223   mergeBufferLoadPair(CombineInfo &CI, CombineInfo &Paired,
224                       const SmallVectorImpl<MachineInstr *> &InstsToMove);
225   MachineBasicBlock::iterator
226   mergeBufferStorePair(CombineInfo &CI, CombineInfo &Paired,
227                        const SmallVectorImpl<MachineInstr *> &InstsToMove);
228   MachineBasicBlock::iterator
229   mergeTBufferLoadPair(CombineInfo &CI, CombineInfo &Paired,
230                        const SmallVectorImpl<MachineInstr *> &InstsToMove);
231   MachineBasicBlock::iterator
232   mergeTBufferStorePair(CombineInfo &CI, CombineInfo &Paired,
233                         const SmallVectorImpl<MachineInstr *> &InstsToMove);
234 
235   void updateBaseAndOffset(MachineInstr &I, Register NewBase,
236                            int32_t NewOffset) const;
237   Register computeBase(MachineInstr &MI, const MemAddress &Addr) const;
238   MachineOperand createRegOrImm(int32_t Val, MachineInstr &MI) const;
239   Optional<int32_t> extractConstOffset(const MachineOperand &Op) const;
240   void processBaseWithConstOffset(const MachineOperand &Base, MemAddress &Addr) const;
241   /// Promotes constant offset to the immediate by adjusting the base. It
242   /// tries to use a base from the nearby instructions that allows it to have
243   /// a 13-bit constant offset which gets promoted to the immediate.
244   bool promoteConstantOffsetToImm(MachineInstr &CI,
245                                   MemInfoMap &Visited,
246                                   SmallPtrSet<MachineInstr *, 4> &Promoted) const;
247   void addInstToMergeableList(const CombineInfo &CI,
248                   std::list<std::list<CombineInfo> > &MergeableInsts) const;
249 
250   std::pair<MachineBasicBlock::iterator, bool> collectMergeableInsts(
251       MachineBasicBlock::iterator Begin, MachineBasicBlock::iterator End,
252       MemInfoMap &Visited, SmallPtrSet<MachineInstr *, 4> &AnchorList,
253       std::list<std::list<CombineInfo>> &MergeableInsts) const;
254 
255 public:
256   static char ID;
257 
258   SILoadStoreOptimizer() : MachineFunctionPass(ID) {
259     initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
260   }
261 
262   bool optimizeInstsWithSameBaseAddr(std::list<CombineInfo> &MergeList,
263                                      bool &OptimizeListAgain);
264   bool optimizeBlock(std::list<std::list<CombineInfo> > &MergeableInsts);
265 
266   bool runOnMachineFunction(MachineFunction &MF) override;
267 
268   StringRef getPassName() const override { return "SI Load Store Optimizer"; }
269 
270   void getAnalysisUsage(AnalysisUsage &AU) const override {
271     AU.setPreservesCFG();
272     AU.addRequired<AAResultsWrapperPass>();
273 
274     MachineFunctionPass::getAnalysisUsage(AU);
275   }
276 
277   MachineFunctionProperties getRequiredProperties() const override {
278     return MachineFunctionProperties()
279       .set(MachineFunctionProperties::Property::IsSSA);
280   }
281 };
282 
283 static unsigned getOpcodeWidth(const MachineInstr &MI, const SIInstrInfo &TII) {
284   const unsigned Opc = MI.getOpcode();
285 
286   if (TII.isMUBUF(Opc)) {
287     // FIXME: Handle d16 correctly
288     return AMDGPU::getMUBUFElements(Opc);
289   }
290   if (TII.isMIMG(MI)) {
291     uint64_t DMaskImm =
292         TII.getNamedOperand(MI, AMDGPU::OpName::dmask)->getImm();
293     return countPopulation(DMaskImm);
294   }
295   if (TII.isMTBUF(Opc)) {
296     return AMDGPU::getMTBUFElements(Opc);
297   }
298 
299   switch (Opc) {
300   case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
301     return 1;
302   case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
303     return 2;
304   case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
305     return 4;
306   case AMDGPU::S_BUFFER_LOAD_DWORDX8_IMM:
307     return 8;
308   case AMDGPU::DS_READ_B32:      LLVM_FALLTHROUGH;
309   case AMDGPU::DS_READ_B32_gfx9: LLVM_FALLTHROUGH;
310   case AMDGPU::DS_WRITE_B32:     LLVM_FALLTHROUGH;
311   case AMDGPU::DS_WRITE_B32_gfx9:
312     return 1;
313   case AMDGPU::DS_READ_B64:      LLVM_FALLTHROUGH;
314   case AMDGPU::DS_READ_B64_gfx9: LLVM_FALLTHROUGH;
315   case AMDGPU::DS_WRITE_B64:     LLVM_FALLTHROUGH;
316   case AMDGPU::DS_WRITE_B64_gfx9:
317     return 2;
318   default:
319     return 0;
320   }
321 }
322 
323 /// Maps instruction opcode to enum InstClassEnum.
324 static InstClassEnum getInstClass(unsigned Opc, const SIInstrInfo &TII) {
325   switch (Opc) {
326   default:
327     if (TII.isMUBUF(Opc)) {
328       switch (AMDGPU::getMUBUFBaseOpcode(Opc)) {
329       default:
330         return UNKNOWN;
331       case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
332       case AMDGPU::BUFFER_LOAD_DWORD_OFFEN_exact:
333       case AMDGPU::BUFFER_LOAD_DWORD_OFFSET:
334       case AMDGPU::BUFFER_LOAD_DWORD_OFFSET_exact:
335         return BUFFER_LOAD;
336       case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
337       case AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact:
338       case AMDGPU::BUFFER_STORE_DWORD_OFFSET:
339       case AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact:
340         return BUFFER_STORE;
341       }
342     }
343     if (TII.isMIMG(Opc)) {
344       // Ignore instructions encoded without vaddr.
345       if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr) == -1 &&
346           AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0) == -1)
347         return UNKNOWN;
348       // Ignore BVH instructions
349       if (AMDGPU::getMIMGBaseOpcode(Opc)->BVH)
350         return UNKNOWN;
351       // TODO: Support IMAGE_GET_RESINFO and IMAGE_GET_LOD.
352       if (TII.get(Opc).mayStore() || !TII.get(Opc).mayLoad() ||
353           TII.isGather4(Opc))
354         return UNKNOWN;
355       return MIMG;
356     }
357     if (TII.isMTBUF(Opc)) {
358       switch (AMDGPU::getMTBUFBaseOpcode(Opc)) {
359       default:
360         return UNKNOWN;
361       case AMDGPU::TBUFFER_LOAD_FORMAT_X_OFFEN:
362       case AMDGPU::TBUFFER_LOAD_FORMAT_X_OFFEN_exact:
363       case AMDGPU::TBUFFER_LOAD_FORMAT_X_OFFSET:
364       case AMDGPU::TBUFFER_LOAD_FORMAT_X_OFFSET_exact:
365         return TBUFFER_LOAD;
366       case AMDGPU::TBUFFER_STORE_FORMAT_X_OFFEN:
367       case AMDGPU::TBUFFER_STORE_FORMAT_X_OFFEN_exact:
368       case AMDGPU::TBUFFER_STORE_FORMAT_X_OFFSET:
369       case AMDGPU::TBUFFER_STORE_FORMAT_X_OFFSET_exact:
370         return TBUFFER_STORE;
371       }
372     }
373     return UNKNOWN;
374   case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
375   case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
376   case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
377   case AMDGPU::S_BUFFER_LOAD_DWORDX8_IMM:
378     return S_BUFFER_LOAD_IMM;
379   case AMDGPU::DS_READ_B32:
380   case AMDGPU::DS_READ_B32_gfx9:
381   case AMDGPU::DS_READ_B64:
382   case AMDGPU::DS_READ_B64_gfx9:
383     return DS_READ;
384   case AMDGPU::DS_WRITE_B32:
385   case AMDGPU::DS_WRITE_B32_gfx9:
386   case AMDGPU::DS_WRITE_B64:
387   case AMDGPU::DS_WRITE_B64_gfx9:
388     return DS_WRITE;
389   }
390 }
391 
392 /// Determines instruction subclass from opcode. Only instructions
393 /// of the same subclass can be merged together.
394 static unsigned getInstSubclass(unsigned Opc, const SIInstrInfo &TII) {
395   switch (Opc) {
396   default:
397     if (TII.isMUBUF(Opc))
398       return AMDGPU::getMUBUFBaseOpcode(Opc);
399     if (TII.isMIMG(Opc)) {
400       const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opc);
401       assert(Info);
402       return Info->BaseOpcode;
403     }
404     if (TII.isMTBUF(Opc))
405       return AMDGPU::getMTBUFBaseOpcode(Opc);
406     return -1;
407   case AMDGPU::DS_READ_B32:
408   case AMDGPU::DS_READ_B32_gfx9:
409   case AMDGPU::DS_READ_B64:
410   case AMDGPU::DS_READ_B64_gfx9:
411   case AMDGPU::DS_WRITE_B32:
412   case AMDGPU::DS_WRITE_B32_gfx9:
413   case AMDGPU::DS_WRITE_B64:
414   case AMDGPU::DS_WRITE_B64_gfx9:
415     return Opc;
416   case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
417   case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
418   case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
419   case AMDGPU::S_BUFFER_LOAD_DWORDX8_IMM:
420     return AMDGPU::S_BUFFER_LOAD_DWORD_IMM;
421   }
422 }
423 
424 static AddressRegs getRegs(unsigned Opc, const SIInstrInfo &TII) {
425   AddressRegs Result;
426 
427   if (TII.isMUBUF(Opc)) {
428     if (AMDGPU::getMUBUFHasVAddr(Opc))
429       Result.VAddr = true;
430     if (AMDGPU::getMUBUFHasSrsrc(Opc))
431       Result.SRsrc = true;
432     if (AMDGPU::getMUBUFHasSoffset(Opc))
433       Result.SOffset = true;
434 
435     return Result;
436   }
437 
438   if (TII.isMIMG(Opc)) {
439     int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0);
440     if (VAddr0Idx >= 0) {
441       int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc);
442       Result.NumVAddrs = SRsrcIdx - VAddr0Idx;
443     } else {
444       Result.VAddr = true;
445     }
446     Result.SRsrc = true;
447     const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opc);
448     if (Info && AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode)->Sampler)
449       Result.SSamp = true;
450 
451     return Result;
452   }
453   if (TII.isMTBUF(Opc)) {
454     if (AMDGPU::getMTBUFHasVAddr(Opc))
455       Result.VAddr = true;
456     if (AMDGPU::getMTBUFHasSrsrc(Opc))
457       Result.SRsrc = true;
458     if (AMDGPU::getMTBUFHasSoffset(Opc))
459       Result.SOffset = true;
460 
461     return Result;
462   }
463 
464   switch (Opc) {
465   default:
466     return Result;
467   case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
468   case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
469   case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
470   case AMDGPU::S_BUFFER_LOAD_DWORDX8_IMM:
471     Result.SBase = true;
472     return Result;
473   case AMDGPU::DS_READ_B32:
474   case AMDGPU::DS_READ_B64:
475   case AMDGPU::DS_READ_B32_gfx9:
476   case AMDGPU::DS_READ_B64_gfx9:
477   case AMDGPU::DS_WRITE_B32:
478   case AMDGPU::DS_WRITE_B64:
479   case AMDGPU::DS_WRITE_B32_gfx9:
480   case AMDGPU::DS_WRITE_B64_gfx9:
481     Result.Addr = true;
482     return Result;
483   }
484 }
485 
486 void SILoadStoreOptimizer::CombineInfo::setMI(MachineBasicBlock::iterator MI,
487                                               const SIInstrInfo &TII,
488                                               const GCNSubtarget &STM) {
489   I = MI;
490   unsigned Opc = MI->getOpcode();
491   InstClass = getInstClass(Opc, TII);
492 
493   if (InstClass == UNKNOWN)
494     return;
495 
496   switch (InstClass) {
497   case DS_READ:
498     EltSize =
499           (Opc == AMDGPU::DS_READ_B64 || Opc == AMDGPU::DS_READ_B64_gfx9) ? 8
500                                                                           : 4;
501     break;
502   case DS_WRITE:
503     EltSize =
504           (Opc == AMDGPU::DS_WRITE_B64 || Opc == AMDGPU::DS_WRITE_B64_gfx9) ? 8
505                                                                             : 4;
506     break;
507   case S_BUFFER_LOAD_IMM:
508     EltSize = AMDGPU::convertSMRDOffsetUnits(STM, 4);
509     break;
510   default:
511     EltSize = 4;
512     break;
513   }
514 
515   if (InstClass == MIMG) {
516     DMask = TII.getNamedOperand(*I, AMDGPU::OpName::dmask)->getImm();
517     // Offset is not considered for MIMG instructions.
518     Offset = 0;
519   } else {
520     int OffsetIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::offset);
521     Offset = I->getOperand(OffsetIdx).getImm();
522   }
523 
524   if (InstClass == TBUFFER_LOAD || InstClass == TBUFFER_STORE)
525     Format = TII.getNamedOperand(*I, AMDGPU::OpName::format)->getImm();
526 
527   Width = getOpcodeWidth(*I, TII);
528 
529   if ((InstClass == DS_READ) || (InstClass == DS_WRITE)) {
530     Offset &= 0xffff;
531   } else if (InstClass != MIMG) {
532     CPol = TII.getNamedOperand(*I, AMDGPU::OpName::cpol)->getImm();
533   }
534 
535   AddressRegs Regs = getRegs(Opc, TII);
536 
537   NumAddresses = 0;
538   for (unsigned J = 0; J < Regs.NumVAddrs; J++)
539     AddrIdx[NumAddresses++] =
540         AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0) + J;
541   if (Regs.Addr)
542     AddrIdx[NumAddresses++] =
543         AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::addr);
544   if (Regs.SBase)
545     AddrIdx[NumAddresses++] =
546         AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::sbase);
547   if (Regs.SRsrc)
548     AddrIdx[NumAddresses++] =
549         AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc);
550   if (Regs.SOffset)
551     AddrIdx[NumAddresses++] =
552         AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::soffset);
553   if (Regs.VAddr)
554     AddrIdx[NumAddresses++] =
555         AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr);
556   if (Regs.SSamp)
557     AddrIdx[NumAddresses++] =
558         AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::ssamp);
559   assert(NumAddresses <= MaxAddressRegs);
560 
561   for (unsigned J = 0; J < NumAddresses; J++)
562     AddrReg[J] = &I->getOperand(AddrIdx[J]);
563 }
564 
565 } // end anonymous namespace.
566 
567 INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
568                       "SI Load Store Optimizer", false, false)
569 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
570 INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE, "SI Load Store Optimizer",
571                     false, false)
572 
573 char SILoadStoreOptimizer::ID = 0;
574 
575 char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;
576 
577 FunctionPass *llvm::createSILoadStoreOptimizerPass() {
578   return new SILoadStoreOptimizer();
579 }
580 
581 static void moveInstsAfter(MachineBasicBlock::iterator I,
582                            ArrayRef<MachineInstr *> InstsToMove) {
583   MachineBasicBlock *MBB = I->getParent();
584   ++I;
585   for (MachineInstr *MI : InstsToMove) {
586     MI->removeFromParent();
587     MBB->insert(I, MI);
588   }
589 }
590 
591 static void addDefsUsesToList(const MachineInstr &MI,
592                               DenseSet<Register> &RegDefs,
593                               DenseSet<Register> &PhysRegUses) {
594   for (const MachineOperand &Op : MI.operands()) {
595     if (Op.isReg()) {
596       if (Op.isDef())
597         RegDefs.insert(Op.getReg());
598       else if (Op.readsReg() && Op.getReg().isPhysical())
599         PhysRegUses.insert(Op.getReg());
600     }
601   }
602 }
603 
604 static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
605                                       MachineBasicBlock::iterator B,
606                                       AliasAnalysis *AA) {
607   // RAW or WAR - cannot reorder
608   // WAW - cannot reorder
609   // RAR - safe to reorder
610   return !(A->mayStore() || B->mayStore()) || !A->mayAlias(AA, *B, true);
611 }
612 
613 // Add MI and its defs to the lists if MI reads one of the defs that are
614 // already in the list. Returns true in that case.
615 static bool addToListsIfDependent(MachineInstr &MI, DenseSet<Register> &RegDefs,
616                                   DenseSet<Register> &PhysRegUses,
617                                   SmallVectorImpl<MachineInstr *> &Insts) {
618   for (MachineOperand &Use : MI.operands()) {
619     // If one of the defs is read, then there is a use of Def between I and the
620     // instruction that I will potentially be merged with. We will need to move
621     // this instruction after the merged instructions.
622     //
623     // Similarly, if there is a def which is read by an instruction that is to
624     // be moved for merging, then we need to move the def-instruction as well.
625     // This can only happen for physical registers such as M0; virtual
626     // registers are in SSA form.
627     if (Use.isReg() && ((Use.readsReg() && RegDefs.count(Use.getReg())) ||
628                         (Use.isDef() && RegDefs.count(Use.getReg())) ||
629                         (Use.isDef() && Use.getReg().isPhysical() &&
630                          PhysRegUses.count(Use.getReg())))) {
631       Insts.push_back(&MI);
632       addDefsUsesToList(MI, RegDefs, PhysRegUses);
633       return true;
634     }
635   }
636 
637   return false;
638 }
639 
640 static bool canMoveInstsAcrossMemOp(MachineInstr &MemOp,
641                                     ArrayRef<MachineInstr *> InstsToMove,
642                                     AliasAnalysis *AA) {
643   assert(MemOp.mayLoadOrStore());
644 
645   for (MachineInstr *InstToMove : InstsToMove) {
646     if (!InstToMove->mayLoadOrStore())
647       continue;
648     if (!memAccessesCanBeReordered(MemOp, *InstToMove, AA))
649       return false;
650   }
651   return true;
652 }
653 
654 // This function assumes that \p A and \p B are identical except for
655 // size and offset, and they reference adjacent memory.
656 static MachineMemOperand *combineKnownAdjacentMMOs(MachineFunction &MF,
657                                                    const MachineMemOperand *A,
658                                                    const MachineMemOperand *B) {
659   unsigned MinOffset = std::min(A->getOffset(), B->getOffset());
660   unsigned Size = A->getSize() + B->getSize();
661   // This function adds the offset parameter to the existing offset for A,
662   // so we pass 0 here as the offset and then manually set it to the correct
663   // value after the call.
664   MachineMemOperand *MMO = MF.getMachineMemOperand(A, 0, Size);
665   MMO->setOffset(MinOffset);
666   return MMO;
667 }
668 
669 bool SILoadStoreOptimizer::dmasksCanBeCombined(const CombineInfo &CI,
670                                                const SIInstrInfo &TII,
671                                                const CombineInfo &Paired) {
672   assert(CI.InstClass == MIMG);
673 
674   // Ignore instructions with tfe/lwe set.
675   const auto *TFEOp = TII.getNamedOperand(*CI.I, AMDGPU::OpName::tfe);
676   const auto *LWEOp = TII.getNamedOperand(*CI.I, AMDGPU::OpName::lwe);
677 
678   if ((TFEOp && TFEOp->getImm()) || (LWEOp && LWEOp->getImm()))
679     return false;
680 
681   // Check other optional immediate operands for equality.
682   unsigned OperandsToMatch[] = {AMDGPU::OpName::cpol, AMDGPU::OpName::d16,
683                                 AMDGPU::OpName::unorm, AMDGPU::OpName::da,
684                                 AMDGPU::OpName::r128, AMDGPU::OpName::a16};
685 
686   for (auto op : OperandsToMatch) {
687     int Idx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), op);
688     if (AMDGPU::getNamedOperandIdx(Paired.I->getOpcode(), op) != Idx)
689       return false;
690     if (Idx != -1 &&
691         CI.I->getOperand(Idx).getImm() != Paired.I->getOperand(Idx).getImm())
692       return false;
693   }
694 
695   // Check DMask for overlaps.
696   unsigned MaxMask = std::max(CI.DMask, Paired.DMask);
697   unsigned MinMask = std::min(CI.DMask, Paired.DMask);
698 
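  // All bits of the smaller dmask must lie below the lowest set bit of the
  // larger one, e.g. dmasks 0x3 and 0xc can be combined (the merged dmask is
  // 0xf), while 0x3 and 0x6 cannot.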
699   unsigned AllowedBitsForMin = llvm::countTrailingZeros(MaxMask);
700   if ((1u << AllowedBitsForMin) <= MinMask)
701     return false;
702 
703   return true;
704 }
705 
706 static unsigned getBufferFormatWithCompCount(unsigned OldFormat,
707                                        unsigned ComponentCount,
708                                        const GCNSubtarget &STI) {
709   if (ComponentCount > 4)
710     return 0;
711 
712   const llvm::AMDGPU::GcnBufferFormatInfo *OldFormatInfo =
713       llvm::AMDGPU::getGcnBufferFormatInfo(OldFormat, STI);
714   if (!OldFormatInfo)
715     return 0;
716 
717   const llvm::AMDGPU::GcnBufferFormatInfo *NewFormatInfo =
718       llvm::AMDGPU::getGcnBufferFormatInfo(OldFormatInfo->BitsPerComp,
719                                            ComponentCount,
720                                            OldFormatInfo->NumFormat, STI);
721 
722   if (!NewFormatInfo)
723     return 0;
724 
725   assert(NewFormatInfo->NumFormat == OldFormatInfo->NumFormat &&
726          NewFormatInfo->BitsPerComp == OldFormatInfo->BitsPerComp);
727 
728   return NewFormatInfo->Format;
729 }
730 
731 // Return the value in the inclusive range [Lo,Hi] that is aligned to the
732 // highest power of two. Note that the result is well defined for all inputs
733 // including corner cases like:
734 // - if Lo == Hi, return that value
735 // - if Lo == 0, return 0 (even though the "- 1" below underflows)
736 // - if Lo > Hi, return 0 (as if the range wrapped around)
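// For example, mostAlignedValueInRange(7, 12) == 8 and
// mostAlignedValueInRange(17, 31) == 24.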
737 static uint32_t mostAlignedValueInRange(uint32_t Lo, uint32_t Hi) {
738   return Hi & maskLeadingOnes<uint32_t>(countLeadingZeros((Lo - 1) ^ Hi) + 1);
739 }
740 
741 bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI,
742                                                 const GCNSubtarget &STI,
743                                                 CombineInfo &Paired,
744                                                 bool Modify) {
745   assert(CI.InstClass != MIMG);
746 
747   // XXX - Would the same offset be OK? Is there any reason this would happen or
748   // be useful?
749   if (CI.Offset == Paired.Offset)
750     return false;
751 
752   // This won't be valid if the offset isn't aligned.
753   if ((CI.Offset % CI.EltSize != 0) || (Paired.Offset % CI.EltSize != 0))
754     return false;
755 
756   if (CI.InstClass == TBUFFER_LOAD || CI.InstClass == TBUFFER_STORE) {
757 
758     const llvm::AMDGPU::GcnBufferFormatInfo *Info0 =
759         llvm::AMDGPU::getGcnBufferFormatInfo(CI.Format, STI);
760     if (!Info0)
761       return false;
762     const llvm::AMDGPU::GcnBufferFormatInfo *Info1 =
763         llvm::AMDGPU::getGcnBufferFormatInfo(Paired.Format, STI);
764     if (!Info1)
765       return false;
766 
767     if (Info0->BitsPerComp != Info1->BitsPerComp ||
768         Info0->NumFormat != Info1->NumFormat)
769       return false;
770 
771     // TODO: Should be possible to support more formats, but if format loads
772     // are not dword-aligned, the merged load might not be valid.
773     if (Info0->BitsPerComp != 32)
774       return false;
775 
776     if (getBufferFormatWithCompCount(CI.Format, CI.Width + Paired.Width, STI) == 0)
777       return false;
778   }
779 
780   uint32_t EltOffset0 = CI.Offset / CI.EltSize;
781   uint32_t EltOffset1 = Paired.Offset / CI.EltSize;
782   CI.UseST64 = false;
783   CI.BaseOff = 0;
784 
785   // Handle all non-DS instructions.
786   if ((CI.InstClass != DS_READ) && (CI.InstClass != DS_WRITE)) {
787     return (EltOffset0 + CI.Width == EltOffset1 ||
788             EltOffset1 + Paired.Width == EltOffset0) &&
789            CI.CPol == Paired.CPol;
791   }
792 
793   // If the offset in elements doesn't fit in 8-bits, we might be able to use
794   // the stride 64 versions.
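  // E.g. element offsets 0x100 and 0x180 do not fit in 8 bits, but both are
  // multiples of 64 and the scaled offsets 0x100/64 = 4 and 0x180/64 = 6 do.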
795   if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64) == 0 &&
796       isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
797     if (Modify) {
798       CI.Offset = EltOffset0 / 64;
799       Paired.Offset = EltOffset1 / 64;
800       CI.UseST64 = true;
801     }
802     return true;
803   }
804 
805   // Check if the new offsets fit in the reduced 8-bit range.
806   if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
807     if (Modify) {
808       CI.Offset = EltOffset0;
809       Paired.Offset = EltOffset1;
810     }
811     return true;
812   }
813 
814   // Try to shift base address to decrease offsets.
815   uint32_t Min = std::min(EltOffset0, EltOffset1);
816   uint32_t Max = std::max(EltOffset0, EltOffset1);
817 
818   const uint32_t Mask = maskTrailingOnes<uint32_t>(8) * 64;
819   if (((Max - Min) & ~Mask) == 0) {
820     if (Modify) {
821       // From the range of values we could use for BaseOff, choose the one that
822       // is aligned to the highest power of two, to maximise the chance that
823       // the same offset can be reused for other load/store pairs.
824       uint32_t BaseOff = mostAlignedValueInRange(Max - 0xff * 64, Min);
825       // Copy the low bits of the offsets, so that when we adjust them by
826       // subtracting BaseOff they will be multiples of 64.
827       BaseOff |= Min & maskTrailingOnes<uint32_t>(6);
828       CI.BaseOff = BaseOff * CI.EltSize;
829       CI.Offset = (EltOffset0 - BaseOff) / 64;
830       Paired.Offset = (EltOffset1 - BaseOff) / 64;
831       CI.UseST64 = true;
832     }
833     return true;
834   }
835 
836   if (isUInt<8>(Max - Min)) {
837     if (Modify) {
838       // From the range of values we could use for BaseOff, choose the one that
839       // is aligned to the highest power of two, to maximise the chance that
840       // the same offset can be reused for other load/store pairs.
841       uint32_t BaseOff = mostAlignedValueInRange(Max - 0xff, Min);
842       CI.BaseOff = BaseOff * CI.EltSize;
843       CI.Offset = EltOffset0 - BaseOff;
844       Paired.Offset = EltOffset1 - BaseOff;
845     }
846     return true;
847   }
848 
849   return false;
850 }
851 
852 bool SILoadStoreOptimizer::widthsFit(const GCNSubtarget &STM,
853                                      const CombineInfo &CI,
854                                      const CombineInfo &Paired) {
855   const unsigned Width = (CI.Width + Paired.Width);
856   switch (CI.InstClass) {
857   default:
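    // A merged access can be at most 4 dwords wide, and a 3-dword access is
    // only legal if the subtarget has dwordx3 load/store opcodes.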
858     return (Width <= 4) && (STM.hasDwordx3LoadStores() || (Width != 3));
859   case S_BUFFER_LOAD_IMM:
860     switch (Width) {
861     default:
862       return false;
863     case 2:
864     case 4:
865     case 8:
866       return true;
867     }
868   }
869 }
870 
871 const TargetRegisterClass *
872 SILoadStoreOptimizer::getDataRegClass(const MachineInstr &MI) const {
873   if (const auto *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst)) {
874     return TRI->getRegClassForReg(*MRI, Dst->getReg());
875   }
876   if (const auto *Src = TII->getNamedOperand(MI, AMDGPU::OpName::vdata)) {
877     return TRI->getRegClassForReg(*MRI, Src->getReg());
878   }
879   if (const auto *Src = TII->getNamedOperand(MI, AMDGPU::OpName::data0)) {
880     return TRI->getRegClassForReg(*MRI, Src->getReg());
881   }
882   if (const auto *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst)) {
883     return TRI->getRegClassForReg(*MRI, Dst->getReg());
884   }
885   if (const auto *Src = TII->getNamedOperand(MI, AMDGPU::OpName::sdata)) {
886     return TRI->getRegClassForReg(*MRI, Src->getReg());
887   }
888   return nullptr;
889 }
890 
891 /// This function assumes that CI comes before Paired in a basic block.
892 bool SILoadStoreOptimizer::checkAndPrepareMerge(
893     CombineInfo &CI, CombineInfo &Paired,
894     SmallVectorImpl<MachineInstr *> &InstsToMove) {
895 
896   // Check both offsets (or masks for MIMG) can be combined and fit in the
897   // reduced range.
898   if (CI.InstClass == MIMG && !dmasksCanBeCombined(CI, *TII, Paired))
899     return false;
900 
901   if (CI.InstClass != MIMG &&
902       (!widthsFit(*STM, CI, Paired) || !offsetsCanBeCombined(CI, *STM, Paired)))
903     return false;
904 
905   const unsigned Opc = CI.I->getOpcode();
906   const InstClassEnum InstClass = getInstClass(Opc, *TII);
907 
908   if (InstClass == UNKNOWN) {
909     return false;
910   }
911   const unsigned InstSubclass = getInstSubclass(Opc, *TII);
912 
913   // Do not merge VMEM buffer instructions with "swizzled" bit set.
914   int Swizzled =
915       AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AMDGPU::OpName::swz);
916   if (Swizzled != -1 && CI.I->getOperand(Swizzled).getImm())
917     return false;
918 
919   DenseSet<Register> RegDefsToMove;
920   DenseSet<Register> PhysRegUsesToMove;
921   addDefsUsesToList(*CI.I, RegDefsToMove, PhysRegUsesToMove);
922 
923   const TargetRegisterClass *DataRC = getDataRegClass(*CI.I);
924   bool IsAGPR = TRI->hasAGPRs(DataRC);
925 
926   MachineBasicBlock::iterator E = std::next(Paired.I);
927   MachineBasicBlock::iterator MBBI = std::next(CI.I);
928   MachineBasicBlock::iterator MBBE = CI.I->getParent()->end();
929   for (; MBBI != E; ++MBBI) {
930 
931     if (MBBI == MBBE) {
932       // CombineInfo::Order is a hint on the instruction ordering within the
933       // basic block. This hint suggests that CI precedes Paired, which is
934       // true most of the time. However, moveInstsAfter() processing a
935       // previous list may have changed this order when it moved an
936       // instruction which also exists in some other merge list.
937       // In this case it must be dependent.
938       return false;
939     }
940 
941     if ((getInstClass(MBBI->getOpcode(), *TII) != InstClass) ||
942         (getInstSubclass(MBBI->getOpcode(), *TII) != InstSubclass)) {
943       // This is not a matching instruction, but we can keep looking as
944       // long as one of these conditions is met:
945       // 1. It is safe to move I down past MBBI.
946       // 2. It is safe to move MBBI down past the instruction that I will
947       //    be merged into.
948 
949       if (MBBI->hasUnmodeledSideEffects()) {
950         // We can't re-order this instruction with respect to other memory
951         // operations, so we fail both conditions mentioned above.
952         return false;
953       }
954 
955       if (MBBI->mayLoadOrStore() &&
956           (!memAccessesCanBeReordered(*CI.I, *MBBI, AA) ||
957            !canMoveInstsAcrossMemOp(*MBBI, InstsToMove, AA))) {
958         // We fail condition #1, but we may still be able to satisfy condition
959         // #2.  Add this instruction to the move list and then we will check
960         // if condition #2 holds once we have selected the matching instruction.
961         InstsToMove.push_back(&*MBBI);
962         addDefsUsesToList(*MBBI, RegDefsToMove, PhysRegUsesToMove);
963         continue;
964       }
965 
966       // When we match I with another DS instruction we will be moving I down
967       // to the location of the matched instruction, so any uses of I will need
968       // to be moved down as well.
969       addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
970                             InstsToMove);
971       continue;
972     }
973 
974     int Swizzled =
975         AMDGPU::getNamedOperandIdx(MBBI->getOpcode(), AMDGPU::OpName::swz);
976     if (Swizzled != -1 && MBBI->getOperand(Swizzled).getImm())
977       return false;
978 
979     // Handle a case like
980     //   DS_WRITE_B32 addr, v, idx0
981     //   w = DS_READ_B32 addr, idx0
982     //   DS_WRITE_B32 addr, f(w), idx1
983     // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
984     // merging of the two writes.
985     if (addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
986                               InstsToMove))
987       continue;
988 
989     if (&*MBBI == &*Paired.I) {
990       if (TRI->hasAGPRs(getDataRegClass(*MBBI)) != IsAGPR)
991         return false;
992       // FIXME: nothing is illegal in a ds_write2 opcode with two AGPR data
993       //        operands. However we are reporting that ds_write2 shall have
994       //        only VGPR data so that machine copy propagation does not
995       //        create an illegal instruction with VGPR and AGPR sources.
996       //        Consequently, if we create such an instruction the verifier
997       //        will complain.
998       if (IsAGPR && CI.InstClass == DS_WRITE)
999         return false;
1000 
1001       // We need to go through the list of instructions that we plan to
1002       // move and make sure they are all safe to move down past the merged
1003       // instruction.
1004       if (canMoveInstsAcrossMemOp(*MBBI, InstsToMove, AA)) {
1005 
1006         // Call offsetsCanBeCombined with modify = true so that the offsets are
1007         // correct for the new instruction.  This should return true, because
1008         // this function should only be called on CombineInfo objects that
1009         // have already been confirmed to be mergeable.
1010         if (CI.InstClass != MIMG)
1011           offsetsCanBeCombined(CI, *STM, Paired, true);
1012         return true;
1013       }
1014       return false;
1015     }
1016 
1017     // We've found a load/store that we couldn't merge for some reason.
1018     // We could potentially keep looking, but we'd need to make sure that
1019     // it was safe to move I and also all the instructions in InstsToMove
1020     // down past this instruction. Check if we can move I across MBBI and
1021     // if we can move all of I's users as well.
1022     if (!memAccessesCanBeReordered(*CI.I, *MBBI, AA) ||
1023         !canMoveInstsAcrossMemOp(*MBBI, InstsToMove, AA))
1024       break;
1025   }
1026   return false;
1027 }
1028 
1029 unsigned SILoadStoreOptimizer::read2Opcode(unsigned EltSize) const {
1030   if (STM->ldsRequiresM0Init())
1031     return (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64;
1032   return (EltSize == 4) ? AMDGPU::DS_READ2_B32_gfx9 : AMDGPU::DS_READ2_B64_gfx9;
1033 }
1034 
1035 unsigned SILoadStoreOptimizer::read2ST64Opcode(unsigned EltSize) const {
1036   if (STM->ldsRequiresM0Init())
1037     return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32 : AMDGPU::DS_READ2ST64_B64;
1038 
1039   return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32_gfx9
1040                         : AMDGPU::DS_READ2ST64_B64_gfx9;
1041 }
1042 
1043 MachineBasicBlock::iterator
1044 SILoadStoreOptimizer::mergeRead2Pair(CombineInfo &CI, CombineInfo &Paired,
1045     const SmallVectorImpl<MachineInstr *> &InstsToMove) {
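  // Schematically, a pair like
  //   %v0 = DS_READ_B32 %addr, 0
  //   %v1 = DS_READ_B32 %addr, 4
  // becomes
  //   %pair = DS_READ2_B32 %addr, 0, 1
  //   %v0 = COPY %pair.sub0
  //   %v1 = COPY %pair.sub1
  // (gds and memory operands elided for brevity).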
1046   MachineBasicBlock *MBB = CI.I->getParent();
1047 
1048   // Be careful, since the addresses could be subregisters themselves in weird
1049   // cases, like vectors of pointers.
1050   const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
1051 
1052   const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
1053   const auto *Dest1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdst);
1054 
1055   unsigned NewOffset0 = CI.Offset;
1056   unsigned NewOffset1 = Paired.Offset;
1057   unsigned Opc =
1058       CI.UseST64 ? read2ST64Opcode(CI.EltSize) : read2Opcode(CI.EltSize);
1059 
1060   unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
1061   unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;
1062 
1063   if (NewOffset0 > NewOffset1) {
1064     // Canonicalize the merged instruction so the smaller offset comes first.
1065     std::swap(NewOffset0, NewOffset1);
1066     std::swap(SubRegIdx0, SubRegIdx1);
1067   }
1068 
1069   assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
1070          (NewOffset0 != NewOffset1) && "Computed offset doesn't fit");
1071 
1072   const MCInstrDesc &Read2Desc = TII->get(Opc);
1073 
1074   const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);
1075   Register DestReg = MRI->createVirtualRegister(SuperRC);
1076 
1077   DebugLoc DL = CI.I->getDebugLoc();
1078 
1079   Register BaseReg = AddrReg->getReg();
1080   unsigned BaseSubReg = AddrReg->getSubReg();
1081   unsigned BaseRegFlags = 0;
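  // If a non-zero base offset was chosen by offsetsCanBeCombined(), materialize
  // it in an SGPR and add it to the base address so that the reduced offsets
  // fit in the 8-bit offset fields of the merged instruction.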
1082   if (CI.BaseOff) {
1083     Register ImmReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1084     BuildMI(*MBB, Paired.I, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
1085         .addImm(CI.BaseOff);
1086 
1087     BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1088     BaseRegFlags = RegState::Kill;
1089 
1090     TII->getAddNoCarry(*MBB, Paired.I, DL, BaseReg)
1091         .addReg(ImmReg)
1092         .addReg(AddrReg->getReg(), 0, BaseSubReg)
1093         .addImm(0); // clamp bit
1094     BaseSubReg = 0;
1095   }
1096 
1097   MachineInstrBuilder Read2 =
1098       BuildMI(*MBB, Paired.I, DL, Read2Desc, DestReg)
1099           .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr
1100           .addImm(NewOffset0)                        // offset0
1101           .addImm(NewOffset1)                        // offset1
1102           .addImm(0)                                 // gds
1103           .cloneMergedMemRefs({&*CI.I, &*Paired.I});
1104 
1105   (void)Read2;
1106 
1107   const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
1108 
1109   // Copy to the old destination registers.
1110   BuildMI(*MBB, Paired.I, DL, CopyDesc)
1111       .add(*Dest0) // Copy to same destination including flags and sub reg.
1112       .addReg(DestReg, 0, SubRegIdx0);
1113   MachineInstr *Copy1 = BuildMI(*MBB, Paired.I, DL, CopyDesc)
1114                             .add(*Dest1)
1115                             .addReg(DestReg, RegState::Kill, SubRegIdx1);
1116 
1117   moveInstsAfter(Copy1, InstsToMove);
1118 
1119   CI.I->eraseFromParent();
1120   Paired.I->eraseFromParent();
1121 
1122   LLVM_DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
1123   return Read2;
1124 }
1125 
1126 unsigned SILoadStoreOptimizer::write2Opcode(unsigned EltSize) const {
1127   if (STM->ldsRequiresM0Init())
1128     return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32 : AMDGPU::DS_WRITE2_B64;
1129   return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32_gfx9
1130                         : AMDGPU::DS_WRITE2_B64_gfx9;
1131 }
1132 
1133 unsigned SILoadStoreOptimizer::write2ST64Opcode(unsigned EltSize) const {
1134   if (STM->ldsRequiresM0Init())
1135     return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32
1136                           : AMDGPU::DS_WRITE2ST64_B64;
1137 
1138   return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32_gfx9
1139                         : AMDGPU::DS_WRITE2ST64_B64_gfx9;
1140 }
1141 
1142 MachineBasicBlock::iterator
1143 SILoadStoreOptimizer::mergeWrite2Pair(CombineInfo &CI, CombineInfo &Paired,
1144                                       const SmallVectorImpl<MachineInstr *> &InstsToMove) {
1145   MachineBasicBlock *MBB = CI.I->getParent();
1146 
1147   // Be sure to use .add(), and not .addReg(), with these. We want to be
1148   // sure we preserve the subregister index and any register flags set on them.
1149   const MachineOperand *AddrReg =
1150       TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
1151   const MachineOperand *Data0 =
1152       TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
1153   const MachineOperand *Data1 =
1154       TII->getNamedOperand(*Paired.I, AMDGPU::OpName::data0);
1155 
1156   unsigned NewOffset0 = CI.Offset;
1157   unsigned NewOffset1 = Paired.Offset;
1158   unsigned Opc =
1159       CI.UseST64 ? write2ST64Opcode(CI.EltSize) : write2Opcode(CI.EltSize);
1160 
1161   if (NewOffset0 > NewOffset1) {
1162     // Canonicalize the merged instruction so the smaller offset comes first.
1163     std::swap(NewOffset0, NewOffset1);
1164     std::swap(Data0, Data1);
1165   }
1166 
1167   assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
1168          (NewOffset0 != NewOffset1) && "Computed offset doesn't fit");
1169 
1170   const MCInstrDesc &Write2Desc = TII->get(Opc);
1171   DebugLoc DL = CI.I->getDebugLoc();
1172 
1173   Register BaseReg = AddrReg->getReg();
1174   unsigned BaseSubReg = AddrReg->getSubReg();
1175   unsigned BaseRegFlags = 0;
1176   if (CI.BaseOff) {
1177     Register ImmReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1178     BuildMI(*MBB, Paired.I, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
1179         .addImm(CI.BaseOff);
1180 
1181     BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1182     BaseRegFlags = RegState::Kill;
1183 
1184     TII->getAddNoCarry(*MBB, Paired.I, DL, BaseReg)
1185         .addReg(ImmReg)
1186         .addReg(AddrReg->getReg(), 0, BaseSubReg)
1187         .addImm(0); // clamp bit
1188     BaseSubReg = 0;
1189   }
1190 
1191   MachineInstrBuilder Write2 =
1192       BuildMI(*MBB, Paired.I, DL, Write2Desc)
1193           .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr
1194           .add(*Data0)                               // data0
1195           .add(*Data1)                               // data1
1196           .addImm(NewOffset0)                        // offset0
1197           .addImm(NewOffset1)                        // offset1
1198           .addImm(0)                                 // gds
1199           .cloneMergedMemRefs({&*CI.I, &*Paired.I});
1200 
1201   moveInstsAfter(Write2, InstsToMove);
1202 
1203   CI.I->eraseFromParent();
1204   Paired.I->eraseFromParent();
1205 
1206   LLVM_DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
1207   return Write2;
1208 }
1209 
1210 MachineBasicBlock::iterator
1211 SILoadStoreOptimizer::mergeImagePair(CombineInfo &CI, CombineInfo &Paired,
1212                            const SmallVectorImpl<MachineInstr *> &InstsToMove) {
1213   MachineBasicBlock *MBB = CI.I->getParent();
1214   DebugLoc DL = CI.I->getDebugLoc();
1215   const unsigned Opcode = getNewOpcode(CI, Paired);
1216 
1217   const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);
1218 
1219   Register DestReg = MRI->createVirtualRegister(SuperRC);
1220   unsigned MergedDMask = CI.DMask | Paired.DMask;
1221   unsigned DMaskIdx =
1222       AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AMDGPU::OpName::dmask);
1223 
1224   auto MIB = BuildMI(*MBB, Paired.I, DL, TII->get(Opcode), DestReg);
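  // Copy the source operands from the first instruction, substituting the
  // combined dmask.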
1225   for (unsigned I = 1, E = (*CI.I).getNumOperands(); I != E; ++I) {
1226     if (I == DMaskIdx)
1227       MIB.addImm(MergedDMask);
1228     else
1229       MIB.add((*CI.I).getOperand(I));
1230   }
1231 
1232   // It shouldn't be possible to get this far if the two instructions
1233   // don't have a single memoperand, because MachineInstr::mayAlias()
1234   // will return true if this is the case.
1235   assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand());
1236 
1237   const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
1238   const MachineMemOperand *MMOb = *Paired.I->memoperands_begin();
1239 
1240   MachineInstr *New = MIB.addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));
1241 
1242   unsigned SubRegIdx0, SubRegIdx1;
1243   std::tie(SubRegIdx0, SubRegIdx1) = getSubRegIdxs(CI, Paired);
1244 
1245   // Copy to the old destination registers.
1246   const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
1247   const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
1248   const auto *Dest1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdata);
1249 
1250   BuildMI(*MBB, Paired.I, DL, CopyDesc)
1251       .add(*Dest0) // Copy to same destination including flags and sub reg.
1252       .addReg(DestReg, 0, SubRegIdx0);
1253   MachineInstr *Copy1 = BuildMI(*MBB, Paired.I, DL, CopyDesc)
1254                             .add(*Dest1)
1255                             .addReg(DestReg, RegState::Kill, SubRegIdx1);
1256 
1257   moveInstsAfter(Copy1, InstsToMove);
1258 
1259   CI.I->eraseFromParent();
1260   Paired.I->eraseFromParent();
1261   return New;
1262 }
1263 
1264 MachineBasicBlock::iterator SILoadStoreOptimizer::mergeSBufferLoadImmPair(
1265     CombineInfo &CI, CombineInfo &Paired,
1266     const SmallVectorImpl<MachineInstr *> &InstsToMove) {
1267   MachineBasicBlock *MBB = CI.I->getParent();
1268   DebugLoc DL = CI.I->getDebugLoc();
1269   const unsigned Opcode = getNewOpcode(CI, Paired);
1270 
1271   const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);
1272 
1273   Register DestReg = MRI->createVirtualRegister(SuperRC);
1274   unsigned MergedOffset = std::min(CI.Offset, Paired.Offset);
1275 
1276   // It shouldn't be possible to get this far if the two instructions
1277   // don't have a single memoperand, because MachineInstr::mayAlias()
1278   // will return true if this is the case.
1279   assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand());
1280 
1281   const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
1282   const MachineMemOperand *MMOb = *Paired.I->memoperands_begin();
1283 
1284   MachineInstr *New =
1285     BuildMI(*MBB, Paired.I, DL, TII->get(Opcode), DestReg)
1286         .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::sbase))
1287         .addImm(MergedOffset) // offset
1288         .addImm(CI.CPol)      // cpol
1289         .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));
1290 
1291   std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI, Paired);
1292   const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
1293   const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);
1294 
1295   // Copy to the old destination registers.
1296   const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
1297   const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::sdst);
1298   const auto *Dest1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::sdst);
1299 
1300   BuildMI(*MBB, Paired.I, DL, CopyDesc)
1301       .add(*Dest0) // Copy to same destination including flags and sub reg.
1302       .addReg(DestReg, 0, SubRegIdx0);
1303   MachineInstr *Copy1 = BuildMI(*MBB, Paired.I, DL, CopyDesc)
1304                             .add(*Dest1)
1305                             .addReg(DestReg, RegState::Kill, SubRegIdx1);
1306 
1307   moveInstsAfter(Copy1, InstsToMove);
1308 
1309   CI.I->eraseFromParent();
1310   Paired.I->eraseFromParent();
1311   return New;
1312 }
1313 
1314 MachineBasicBlock::iterator SILoadStoreOptimizer::mergeBufferLoadPair(
1315     CombineInfo &CI, CombineInfo &Paired,
1316     const SmallVectorImpl<MachineInstr *> &InstsToMove) {
1317   MachineBasicBlock *MBB = CI.I->getParent();
1318   DebugLoc DL = CI.I->getDebugLoc();
1319 
1320   const unsigned Opcode = getNewOpcode(CI, Paired);
1321 
1322   const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);
1323 
1324   // Copy to the new source register.
1325   Register DestReg = MRI->createVirtualRegister(SuperRC);
1326   unsigned MergedOffset = std::min(CI.Offset, Paired.Offset);
1327 
1328   auto MIB = BuildMI(*MBB, Paired.I, DL, TII->get(Opcode), DestReg);
1329 
1330   AddressRegs Regs = getRegs(Opcode, *TII);
1331 
1332   if (Regs.VAddr)
1333     MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));
1334 
1335   // It shouldn't be possible to get this far if the two instructions
1336   // don't have a single memoperand, because MachineInstr::mayAlias()
1337   // will return true if this is the case.
1338   assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand());
1339 
1340   const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
1341   const MachineMemOperand *MMOb = *Paired.I->memoperands_begin();
1342 
1343   MachineInstr *New =
1344     MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
1345         .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
1346         .addImm(MergedOffset) // offset
1347         .addImm(CI.CPol)      // cpol
1348         .addImm(0)            // tfe
1349         .addImm(0)            // swz
1350         .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));
1351 
1352   std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI, Paired);
1353   const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
1354   const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);
1355 
1356   // Copy to the old destination registers.
1357   const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
1358   const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
1359   const auto *Dest1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdata);
1360 
1361   BuildMI(*MBB, Paired.I, DL, CopyDesc)
1362       .add(*Dest0) // Copy to same destination including flags and sub reg.
1363       .addReg(DestReg, 0, SubRegIdx0);
1364   MachineInstr *Copy1 = BuildMI(*MBB, Paired.I, DL, CopyDesc)
1365                             .add(*Dest1)
1366                             .addReg(DestReg, RegState::Kill, SubRegIdx1);
1367 
1368   moveInstsAfter(Copy1, InstsToMove);
1369 
1370   CI.I->eraseFromParent();
1371   Paired.I->eraseFromParent();
1372   return New;
1373 }
1374 
1375 MachineBasicBlock::iterator SILoadStoreOptimizer::mergeTBufferLoadPair(
1376     CombineInfo &CI, CombineInfo &Paired,
1377     const SmallVectorImpl<MachineInstr *> &InstsToMove) {
1378   MachineBasicBlock *MBB = CI.I->getParent();
1379   DebugLoc DL = CI.I->getDebugLoc();
1380 
1381   const unsigned Opcode = getNewOpcode(CI, Paired);
1382 
1383   const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);
1384 
1385   // Copy to the new source register.
1386   Register DestReg = MRI->createVirtualRegister(SuperRC);
1387   unsigned MergedOffset = std::min(CI.Offset, Paired.Offset);
1388 
1389   auto MIB = BuildMI(*MBB, Paired.I, DL, TII->get(Opcode), DestReg);
1390 
1391   AddressRegs Regs = getRegs(Opcode, *TII);
1392 
1393   if (Regs.VAddr)
1394     MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));
1395 
1396   unsigned JoinedFormat =
1397       getBufferFormatWithCompCount(CI.Format, CI.Width + Paired.Width, *STM);
1398 
1399   // It shouldn't be possible to get this far if the two instructions
1400   // don't have a single memoperand, because MachineInstr::mayAlias()
1401   // will return true if this is the case.
1402   assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand());
1403 
1404   const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
1405   const MachineMemOperand *MMOb = *Paired.I->memoperands_begin();
1406 
1407   MachineInstr *New =
1408       MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
1409           .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
1410           .addImm(MergedOffset) // offset
1411           .addImm(JoinedFormat) // format
1412           .addImm(CI.CPol)      // cpol
1413           .addImm(0)            // tfe
1414           .addImm(0)            // swz
1415           .addMemOperand(
1416               combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));
1417 
1418   std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI, Paired);
1419   const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
1420   const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);
1421 
1422   // Copy to the old destination registers.
1423   const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
1424   const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
1425   const auto *Dest1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdata);
1426 
1427   BuildMI(*MBB, Paired.I, DL, CopyDesc)
1428       .add(*Dest0) // Copy to same destination including flags and sub reg.
1429       .addReg(DestReg, 0, SubRegIdx0);
1430   MachineInstr *Copy1 = BuildMI(*MBB, Paired.I, DL, CopyDesc)
1431                             .add(*Dest1)
1432                             .addReg(DestReg, RegState::Kill, SubRegIdx1);
1433 
1434   moveInstsAfter(Copy1, InstsToMove);
1435 
1436   CI.I->eraseFromParent();
1437   Paired.I->eraseFromParent();
1438   return New;
1439 }
1440 
1441 MachineBasicBlock::iterator SILoadStoreOptimizer::mergeTBufferStorePair(
1442     CombineInfo &CI, CombineInfo &Paired,
1443     const SmallVectorImpl<MachineInstr *> &InstsToMove) {
1444   MachineBasicBlock *MBB = CI.I->getParent();
1445   DebugLoc DL = CI.I->getDebugLoc();
1446 
1447   const unsigned Opcode = getNewOpcode(CI, Paired);
1448 
1449   std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI, Paired);
1450   const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
1451   const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);
1452 
1453   // Copy to the new source register.
1454   const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);
1455   Register SrcReg = MRI->createVirtualRegister(SuperRC);
1456 
1457   const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
1458   const auto *Src1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdata);
1459 
1460   BuildMI(*MBB, Paired.I, DL, TII->get(AMDGPU::REG_SEQUENCE), SrcReg)
1461       .add(*Src0)
1462       .addImm(SubRegIdx0)
1463       .add(*Src1)
1464       .addImm(SubRegIdx1);
1465 
1466   auto MIB = BuildMI(*MBB, Paired.I, DL, TII->get(Opcode))
1467                  .addReg(SrcReg, RegState::Kill);
1468 
1469   AddressRegs Regs = getRegs(Opcode, *TII);
1470 
1471   if (Regs.VAddr)
1472     MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));
1473 
1474   unsigned JoinedFormat =
1475       getBufferFormatWithCompCount(CI.Format, CI.Width + Paired.Width, *STM);
1476 
1477   // It shouldn't be possible to get this far if the two instructions
1478   // don't have a single memoperand, because MachineInstr::mayAlias()
1479   // will return true if this is the case.
1480   assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand());
1481 
1482   const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
1483   const MachineMemOperand *MMOb = *Paired.I->memoperands_begin();
1484 
1485   MachineInstr *New =
1486       MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
1487           .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
1488           .addImm(std::min(CI.Offset, Paired.Offset)) // offset
1489           .addImm(JoinedFormat)                     // format
1490           .addImm(CI.CPol)                          // cpol
1491           .addImm(0)                                // tfe
1492           .addImm(0)                                // swz
1493           .addMemOperand(
1494               combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));
1495 
1496   moveInstsAfter(MIB, InstsToMove);
1497 
1498   CI.I->eraseFromParent();
1499   Paired.I->eraseFromParent();
1500   return New;
1501 }
1502 
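// Return the opcode to use for the merged instruction of the combined width.
// For example (a sketch of the MUBUF case), merging two
// BUFFER_LOAD_DWORD_OFFSET instructions (combined Width == 2) is expected to
// map to BUFFER_LOAD_DWORDX2_OFFSET via the MUBUF opcode tables.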
1503 unsigned SILoadStoreOptimizer::getNewOpcode(const CombineInfo &CI,
1504                                             const CombineInfo &Paired) {
1505   const unsigned Width = CI.Width + Paired.Width;
1506 
1507   switch (CI.InstClass) {
1508   default:
1509     assert(CI.InstClass == BUFFER_LOAD || CI.InstClass == BUFFER_STORE);
1510     // FIXME: Handle d16 correctly
1511     return AMDGPU::getMUBUFOpcode(AMDGPU::getMUBUFBaseOpcode(CI.I->getOpcode()),
1512                                   Width);
1513   case TBUFFER_LOAD:
1514   case TBUFFER_STORE:
1515     return AMDGPU::getMTBUFOpcode(AMDGPU::getMTBUFBaseOpcode(CI.I->getOpcode()),
1516                                   Width);
1517 
1518   case UNKNOWN:
1519     llvm_unreachable("Unknown instruction class");
1520   case S_BUFFER_LOAD_IMM:
1521     switch (Width) {
1522     default:
1523       return 0;
1524     case 2:
1525       return AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;
1526     case 4:
1527       return AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM;
1528     case 8:
1529       return AMDGPU::S_BUFFER_LOAD_DWORDX8_IMM;
1530     }
1531   case MIMG:
1532     assert((countPopulation(CI.DMask | Paired.DMask) == Width) &&
1533            "No overlaps");
1534     return AMDGPU::getMaskedMIMGOp(CI.I->getOpcode(), Width);
1535   }
1536 }
1537 
1538 std::pair<unsigned, unsigned>
1539 SILoadStoreOptimizer::getSubRegIdxs(const CombineInfo &CI,
1540                                     const CombineInfo &Paired) {
1541   bool ReverseOrder;
1542   if (CI.InstClass == MIMG) {
1543     assert(
1544         (countPopulation(CI.DMask | Paired.DMask) == CI.Width + Paired.Width) &&
1545         "No overlaps");
1546     ReverseOrder = CI.DMask > Paired.DMask;
1547   } else {
1548     ReverseOrder = CI.Offset > Paired.Offset;
1549   }
1550 
1551   unsigned Idx0;
1552   unsigned Idx1;
1553 
1554   static const unsigned Idxs[5][4] = {
      {AMDGPU::sub0, AMDGPU::sub0_sub1, AMDGPU::sub0_sub1_sub2,
       AMDGPU::sub0_sub1_sub2_sub3},
      {AMDGPU::sub1, AMDGPU::sub1_sub2, AMDGPU::sub1_sub2_sub3,
       AMDGPU::sub1_sub2_sub3_sub4},
      {AMDGPU::sub2, AMDGPU::sub2_sub3, AMDGPU::sub2_sub3_sub4,
       AMDGPU::sub2_sub3_sub4_sub5},
      {AMDGPU::sub3, AMDGPU::sub3_sub4, AMDGPU::sub3_sub4_sub5,
       AMDGPU::sub3_sub4_sub5_sub6},
      {AMDGPU::sub4, AMDGPU::sub4_sub5, AMDGPU::sub4_sub5_sub6,
       AMDGPU::sub4_sub5_sub6_sub7},
1560   };
1561 
1562   assert(CI.Width >= 1 && CI.Width <= 4);
1563   assert(Paired.Width >= 1 && Paired.Width <= 4);
1564 
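  // For example, with ReverseOrder == false, CI.Width == 2 and
  // Paired.Width == 1, this selects Idx0 = sub0_sub1 and Idx1 = sub2, i.e.
  // CI's data occupies the low 64 bits of the merged register and Paired's
  // data the following 32 bits.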
1565   if (ReverseOrder) {
1566     Idx1 = Idxs[0][Paired.Width - 1];
1567     Idx0 = Idxs[Paired.Width][CI.Width - 1];
1568   } else {
1569     Idx0 = Idxs[0][CI.Width - 1];
1570     Idx1 = Idxs[CI.Width][Paired.Width - 1];
1571   }
1572 
1573   return std::make_pair(Idx0, Idx1);
1574 }
1575 
1576 const TargetRegisterClass *
1577 SILoadStoreOptimizer::getTargetRegisterClass(const CombineInfo &CI,
1578                                              const CombineInfo &Paired) {
1579   if (CI.InstClass == S_BUFFER_LOAD_IMM) {
1580     switch (CI.Width + Paired.Width) {
1581     default:
1582       return nullptr;
1583     case 2:
1584       return &AMDGPU::SReg_64_XEXECRegClass;
1585     case 4:
1586       return &AMDGPU::SGPR_128RegClass;
1587     case 8:
1588       return &AMDGPU::SGPR_256RegClass;
1589     case 16:
1590       return &AMDGPU::SGPR_512RegClass;
1591     }
1592   }
1593 
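  // For the VMEM classes the merged data operand lives in VGPRs or AGPRs;
  // pick the class that matches the register bank of the original data
  // operand and is wide enough for the combined width.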
1594   unsigned BitWidth = 32 * (CI.Width + Paired.Width);
1595   return TRI->isAGPRClass(getDataRegClass(*CI.I))
1596              ? TRI->getAGPRClassForBitWidth(BitWidth)
1597              : TRI->getVGPRClassForBitWidth(BitWidth);
1598 }
1599 
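// Merge two adjacent MUBUF stores into a single, wider store. The two data
// registers are first glued together with a REG_SEQUENCE. A sketch, assuming
// two dword stores to the same descriptor at offsets 4 and 8:
//  buffer_store_dword v0, v2, s[0:3], 0 offen offset:4
//  buffer_store_dword v1, v2, s[0:3], 0 offen offset:8
// ==>
//  buffer_store_dwordx2 v[0:1], v2, s[0:3], 0 offen offset:4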
1600 MachineBasicBlock::iterator SILoadStoreOptimizer::mergeBufferStorePair(
1601     CombineInfo &CI, CombineInfo &Paired,
1602     const SmallVectorImpl<MachineInstr *> &InstsToMove) {
1603   MachineBasicBlock *MBB = CI.I->getParent();
1604   DebugLoc DL = CI.I->getDebugLoc();
1605 
1606   const unsigned Opcode = getNewOpcode(CI, Paired);
1607 
1608   std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI, Paired);
1609   const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
1610   const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);
1611 
1612   // Copy to the new source register.
1613   const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);
1614   Register SrcReg = MRI->createVirtualRegister(SuperRC);
1615 
1616   const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
1617   const auto *Src1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdata);
1618 
1619   BuildMI(*MBB, Paired.I, DL, TII->get(AMDGPU::REG_SEQUENCE), SrcReg)
1620       .add(*Src0)
1621       .addImm(SubRegIdx0)
1622       .add(*Src1)
1623       .addImm(SubRegIdx1);
1624 
1625   auto MIB = BuildMI(*MBB, Paired.I, DL, TII->get(Opcode))
1626                  .addReg(SrcReg, RegState::Kill);
1627 
1628   AddressRegs Regs = getRegs(Opcode, *TII);
1629 
1630   if (Regs.VAddr)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  // It shouldn't be possible to get this far if the two instructions
1635   // don't have a single memoperand, because MachineInstr::mayAlias()
1636   // will return true if this is the case.
1637   assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand());
1638 
1639   const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
1640   const MachineMemOperand *MMOb = *Paired.I->memoperands_begin();
1641 
1642   MachineInstr *New =
1643     MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
1644         .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
1645         .addImm(std::min(CI.Offset, Paired.Offset)) // offset
1646         .addImm(CI.CPol)      // cpol
1647         .addImm(0)            // tfe
1648         .addImm(0)            // swz
1649         .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));
1650 
1651   moveInstsAfter(MIB, InstsToMove);
1652 
1653   CI.I->eraseFromParent();
1654   Paired.I->eraseFromParent();
1655   return New;
1656 }
1657 
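// Return Val as an immediate operand if it is an inline constant for this
// subtarget; otherwise materialize it into a fresh SGPR with S_MOV_B32 and
// return that register as the operand.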
1658 MachineOperand
1659 SILoadStoreOptimizer::createRegOrImm(int32_t Val, MachineInstr &MI) const {
1660   APInt V(32, Val, true);
1661   if (TII->isInlineConstant(V))
1662     return MachineOperand::CreateImm(Val);
1663 
1664   Register Reg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1665   MachineInstr *Mov =
1666   BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
1667           TII->get(AMDGPU::S_MOV_B32), Reg)
1668     .addImm(Val);
1669   (void)Mov;
1670   LLVM_DEBUG(dbgs() << "    "; Mov->dump());
1671   return MachineOperand::CreateReg(Reg, false);
1672 }
1673 
1674 // Compute base address using Addr and return the final register.
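// The new base is materialized as a 64-bit VALU add split into low and high
// halves, roughly:
//   %lo:vgpr_32, %c:sreg_64_xexec = V_ADD_CO_U32_e64 Addr.Base.LoReg, Offset.lo
//   %hi:vgpr_32 = V_ADDC_U32_e64 Addr.Base.HiReg, Offset.hi, killed %c
//   %base:vreg_64 = REG_SEQUENCE %lo, %subreg.sub0, %hi, %subreg.sub1
// Offset.lo/Offset.hi here are shorthand for the operands produced by
// createRegOrImm from the two halves of Addr.Offset.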
1675 Register SILoadStoreOptimizer::computeBase(MachineInstr &MI,
1676                                            const MemAddress &Addr) const {
1677   MachineBasicBlock *MBB = MI.getParent();
1678   MachineBasicBlock::iterator MBBI = MI.getIterator();
1679   DebugLoc DL = MI.getDebugLoc();
1680 
1681   assert((TRI->getRegSizeInBits(Addr.Base.LoReg, *MRI) == 32 ||
1682           Addr.Base.LoSubReg) &&
1683          "Expected 32-bit Base-Register-Low!!");
1684 
1685   assert((TRI->getRegSizeInBits(Addr.Base.HiReg, *MRI) == 32 ||
1686           Addr.Base.HiSubReg) &&
1687          "Expected 32-bit Base-Register-Hi!!");
1688 
1689   LLVM_DEBUG(dbgs() << "  Re-Computed Anchor-Base:\n");
  MachineOperand OffsetLo =
    createRegOrImm(static_cast<int32_t>(Addr.Offset), MI);
1691   MachineOperand OffsetHi =
1692     createRegOrImm(static_cast<int32_t>(Addr.Offset >> 32), MI);
1693 
1694   const auto *CarryRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
1695   Register CarryReg = MRI->createVirtualRegister(CarryRC);
1696   Register DeadCarryReg = MRI->createVirtualRegister(CarryRC);
1697 
1698   Register DestSub0 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1699   Register DestSub1 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1700   MachineInstr *LoHalf =
1701     BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADD_CO_U32_e64), DestSub0)
1702       .addReg(CarryReg, RegState::Define)
1703       .addReg(Addr.Base.LoReg, 0, Addr.Base.LoSubReg)
1704       .add(OffsetLo)
1705       .addImm(0); // clamp bit
1706   (void)LoHalf;
1707   LLVM_DEBUG(dbgs() << "    "; LoHalf->dump(););
1708 
1709   MachineInstr *HiHalf =
1710   BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADDC_U32_e64), DestSub1)
1711     .addReg(DeadCarryReg, RegState::Define | RegState::Dead)
1712     .addReg(Addr.Base.HiReg, 0, Addr.Base.HiSubReg)
1713     .add(OffsetHi)
1714     .addReg(CarryReg, RegState::Kill)
1715     .addImm(0); // clamp bit
1716   (void)HiHalf;
1717   LLVM_DEBUG(dbgs() << "    "; HiHalf->dump(););
1718 
1719   Register FullDestReg = MRI->createVirtualRegister(TRI->getVGPR64Class());
1720   MachineInstr *FullBase =
1721     BuildMI(*MBB, MBBI, DL, TII->get(TargetOpcode::REG_SEQUENCE), FullDestReg)
1722       .addReg(DestSub0)
1723       .addImm(AMDGPU::sub0)
1724       .addReg(DestSub1)
1725       .addImm(AMDGPU::sub1);
1726   (void)FullBase;
1727   LLVM_DEBUG(dbgs() << "    "; FullBase->dump(); dbgs() << "\n";);
1728 
1729   return FullDestReg;
1730 }
1731 
1732 // Update base and offset with the NewBase and NewOffset in MI.
1733 void SILoadStoreOptimizer::updateBaseAndOffset(MachineInstr &MI,
1734                                                Register NewBase,
1735                                                int32_t NewOffset) const {
1736   auto Base = TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
1737   Base->setReg(NewBase);
1738   Base->setIsKill(false);
1739   TII->getNamedOperand(MI, AMDGPU::OpName::offset)->setImm(NewOffset);
1740 }
1741 
1742 Optional<int32_t>
1743 SILoadStoreOptimizer::extractConstOffset(const MachineOperand &Op) const {
1744   if (Op.isImm())
1745     return Op.getImm();
1746 
1747   if (!Op.isReg())
1748     return None;
1749 
1750   MachineInstr *Def = MRI->getUniqueVRegDef(Op.getReg());
1751   if (!Def || Def->getOpcode() != AMDGPU::S_MOV_B32 ||
1752       !Def->getOperand(1).isImm())
1753     return None;
1754 
1755   return Def->getOperand(1).getImm();
1756 }
1757 
// Analyze Base and extract:
//  - 32-bit base registers and subregisters
//  - 64-bit constant offset
1761 // Expecting base computation as:
1762 //   %OFFSET0:sgpr_32 = S_MOV_B32 8000
1763 //   %LO:vgpr_32, %c:sreg_64_xexec =
1764 //       V_ADD_CO_U32_e64 %BASE_LO:vgpr_32, %103:sgpr_32,
1765 //   %HI:vgpr_32, = V_ADDC_U32_e64 %BASE_HI:vgpr_32, 0, killed %c:sreg_64_xexec
1766 //   %Base:vreg_64 =
1767 //       REG_SEQUENCE %LO:vgpr_32, %subreg.sub0, %HI:vgpr_32, %subreg.sub1
1768 void SILoadStoreOptimizer::processBaseWithConstOffset(const MachineOperand &Base,
1769                                                       MemAddress &Addr) const {
1770   if (!Base.isReg())
1771     return;
1772 
1773   MachineInstr *Def = MRI->getUniqueVRegDef(Base.getReg());
1774   if (!Def || Def->getOpcode() != AMDGPU::REG_SEQUENCE
1775       || Def->getNumOperands() != 5)
1776     return;
1777 
1778   MachineOperand BaseLo = Def->getOperand(1);
1779   MachineOperand BaseHi = Def->getOperand(3);
1780   if (!BaseLo.isReg() || !BaseHi.isReg())
1781     return;
1782 
1783   MachineInstr *BaseLoDef = MRI->getUniqueVRegDef(BaseLo.getReg());
1784   MachineInstr *BaseHiDef = MRI->getUniqueVRegDef(BaseHi.getReg());
1785 
1786   if (!BaseLoDef || BaseLoDef->getOpcode() != AMDGPU::V_ADD_CO_U32_e64 ||
1787       !BaseHiDef || BaseHiDef->getOpcode() != AMDGPU::V_ADDC_U32_e64)
1788     return;
1789 
1790   const auto *Src0 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src0);
1791   const auto *Src1 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src1);
1792 
1793   auto Offset0P = extractConstOffset(*Src0);
1794   if (Offset0P)
1795     BaseLo = *Src1;
1796   else {
1797     if (!(Offset0P = extractConstOffset(*Src1)))
1798       return;
1799     BaseLo = *Src0;
1800   }
1801 
1802   Src0 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src0);
1803   Src1 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src1);
1804 
1805   if (Src0->isImm())
1806     std::swap(Src0, Src1);
1807 
1808   if (!Src1->isImm())
1809     return;
1810 
1811   uint64_t Offset1 = Src1->getImm();
1812   BaseHi = *Src0;
1813 
1814   Addr.Base.LoReg = BaseLo.getReg();
1815   Addr.Base.HiReg = BaseHi.getReg();
1816   Addr.Base.LoSubReg = BaseLo.getSubReg();
1817   Addr.Base.HiSubReg = BaseHi.getSubReg();
1818   Addr.Offset = (*Offset0P & 0x00000000ffffffff) | (Offset1 << 32);
1819 }
1820 
1821 bool SILoadStoreOptimizer::promoteConstantOffsetToImm(
1822     MachineInstr &MI,
1823     MemInfoMap &Visited,
1824     SmallPtrSet<MachineInstr *, 4> &AnchorList) const {
1825 
1826   if (!(MI.mayLoad() ^ MI.mayStore()))
1827     return false;
1828 
1829   // TODO: Support flat and scratch.
1830   if (AMDGPU::getGlobalSaddrOp(MI.getOpcode()) < 0)
1831     return false;
1832 
1833   if (MI.mayLoad() &&
1834       TII->getNamedOperand(MI, AMDGPU::OpName::vdata) != nullptr)
1835     return false;
1836 
1837   if (AnchorList.count(&MI))
1838     return false;
1839 
1840   LLVM_DEBUG(dbgs() << "\nTryToPromoteConstantOffsetToImmFor "; MI.dump());
1841 
1842   if (TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm()) {
1843     LLVM_DEBUG(dbgs() << "  Const-offset is already promoted.\n";);
1844     return false;
1845   }
1846 
  // Step 1: Find the base registers and a 64-bit constant offset.
1848   MachineOperand &Base = *TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
1849   MemAddress MAddr;
1850   if (Visited.find(&MI) == Visited.end()) {
1851     processBaseWithConstOffset(Base, MAddr);
1852     Visited[&MI] = MAddr;
1853   } else
1854     MAddr = Visited[&MI];
1855 
1856   if (MAddr.Offset == 0) {
1857     LLVM_DEBUG(dbgs() << "  Failed to extract constant-offset or there are no"
1858                          " constant offsets that can be promoted.\n";);
1859     return false;
1860   }
1861 
1862   LLVM_DEBUG(dbgs() << "  BASE: {" << MAddr.Base.HiReg << ", "
1863              << MAddr.Base.LoReg << "} Offset: " << MAddr.Offset << "\n\n";);
1864 
  // Step 2: Traverse through MI's basic block and find an anchor (an
  // instruction with the same base registers) whose offset has the largest
  // distance from MI's offset while still fitting in the 13-bit immediate.
1867   // E.g. (64bit loads)
1868   // bb:
1869   //   addr1 = &a + 4096;   load1 = load(addr1,  0)
1870   //   addr2 = &a + 6144;   load2 = load(addr2,  0)
1871   //   addr3 = &a + 8192;   load3 = load(addr3,  0)
1872   //   addr4 = &a + 10240;  load4 = load(addr4,  0)
1873   //   addr5 = &a + 12288;  load5 = load(addr5,  0)
1874   //
  // Starting from the first load, the optimization will try to find a new
  // base from which (&a + 4096) has a 13-bit distance. Both &a + 6144 and
  // &a + 8192 have a 13-bit distance from &a + 4096. The heuristic considers
  // &a + 8192 as the new base (anchor) because of the maximum distance, which
  // can presumably accommodate more intermediate bases.
1880   //
  // Step 3: Move (&a + 8192) above load1. Compute and promote offsets from
  // (&a + 8192) for load1, load2, and load4.
1883   //   addr = &a + 8192
1884   //   load1 = load(addr,       -4096)
1885   //   load2 = load(addr,       -2048)
1886   //   load3 = load(addr,       0)
1887   //   load4 = load(addr,       2048)
1888   //   addr5 = &a + 12288;  load5 = load(addr5,  0)
1889   //
1890   MachineInstr *AnchorInst = nullptr;
1891   MemAddress AnchorAddr;
1892   uint32_t MaxDist = std::numeric_limits<uint32_t>::min();
1893   SmallVector<std::pair<MachineInstr *, int64_t>, 4> InstsWCommonBase;
1894 
1895   MachineBasicBlock *MBB = MI.getParent();
1896   MachineBasicBlock::iterator E = MBB->end();
1897   MachineBasicBlock::iterator MBBI = MI.getIterator();
1898   ++MBBI;
1899   const SITargetLowering *TLI =
1900     static_cast<const SITargetLowering *>(STM->getTargetLowering());
1901 
1902   for ( ; MBBI != E; ++MBBI) {
1903     MachineInstr &MINext = *MBBI;
    // TODO: Support finding an anchor (with the same base) from store
    // addresses or any other load addresses where the opcodes are different.
1906     if (MINext.getOpcode() != MI.getOpcode() ||
1907         TII->getNamedOperand(MINext, AMDGPU::OpName::offset)->getImm())
1908       continue;
1909 
1910     const MachineOperand &BaseNext =
1911       *TII->getNamedOperand(MINext, AMDGPU::OpName::vaddr);
1912     MemAddress MAddrNext;
1913     if (Visited.find(&MINext) == Visited.end()) {
1914       processBaseWithConstOffset(BaseNext, MAddrNext);
1915       Visited[&MINext] = MAddrNext;
1916     } else
1917       MAddrNext = Visited[&MINext];
1918 
1919     if (MAddrNext.Base.LoReg != MAddr.Base.LoReg ||
1920         MAddrNext.Base.HiReg != MAddr.Base.HiReg ||
1921         MAddrNext.Base.LoSubReg != MAddr.Base.LoSubReg ||
1922         MAddrNext.Base.HiSubReg != MAddr.Base.HiSubReg)
1923       continue;
1924 
1925     InstsWCommonBase.push_back(std::make_pair(&MINext, MAddrNext.Offset));
1926 
1927     int64_t Dist = MAddr.Offset - MAddrNext.Offset;
1928     TargetLoweringBase::AddrMode AM;
1929     AM.HasBaseReg = true;
1930     AM.BaseOffs = Dist;
1931     if (TLI->isLegalGlobalAddressingMode(AM) &&
1932         (uint32_t)std::abs(Dist) > MaxDist) {
1933       MaxDist = std::abs(Dist);
1934 
1935       AnchorAddr = MAddrNext;
1936       AnchorInst = &MINext;
1937     }
1938   }
1939 
1940   if (AnchorInst) {
1941     LLVM_DEBUG(dbgs() << "  Anchor-Inst(with max-distance from Offset): ";
1942                AnchorInst->dump());
1943     LLVM_DEBUG(dbgs() << "  Anchor-Offset from BASE: "
1944                <<  AnchorAddr.Offset << "\n\n");
1945 
1946     // Instead of moving up, just re-compute anchor-instruction's base address.
1947     Register Base = computeBase(MI, AnchorAddr);
1948 
1949     updateBaseAndOffset(MI, Base, MAddr.Offset - AnchorAddr.Offset);
1950     LLVM_DEBUG(dbgs() << "  After promotion: "; MI.dump(););
1951 
1952     for (auto P : InstsWCommonBase) {
1953       TargetLoweringBase::AddrMode AM;
1954       AM.HasBaseReg = true;
1955       AM.BaseOffs = P.second - AnchorAddr.Offset;
1956 
1957       if (TLI->isLegalGlobalAddressingMode(AM)) {
1958         LLVM_DEBUG(dbgs() << "  Promote Offset(" << P.second;
1959                    dbgs() << ")"; P.first->dump());
1960         updateBaseAndOffset(*P.first, Base, P.second - AnchorAddr.Offset);
1961         LLVM_DEBUG(dbgs() << "     After promotion: "; P.first->dump());
1962       }
1963     }
1964     AnchorList.insert(AnchorInst);
1965     return true;
1966   }
1967 
1968   return false;
1969 }
1970 
1971 void SILoadStoreOptimizer::addInstToMergeableList(const CombineInfo &CI,
1972                  std::list<std::list<CombineInfo> > &MergeableInsts) const {
1973   for (std::list<CombineInfo> &AddrList : MergeableInsts) {
1974     if (AddrList.front().InstClass == CI.InstClass &&
1975         AddrList.front().hasSameBaseAddress(*CI.I)) {
1976       AddrList.emplace_back(CI);
1977       return;
1978     }
1979   }
1980 
1981   // Base address not found, so add a new list.
1982   MergeableInsts.emplace_back(1, CI);
1983 }
1984 
1985 std::pair<MachineBasicBlock::iterator, bool>
1986 SILoadStoreOptimizer::collectMergeableInsts(
1987     MachineBasicBlock::iterator Begin, MachineBasicBlock::iterator End,
1988     MemInfoMap &Visited, SmallPtrSet<MachineInstr *, 4> &AnchorList,
1989     std::list<std::list<CombineInfo>> &MergeableInsts) const {
1990   bool Modified = false;
1991 
  // Sort potentially mergeable instructions into lists, one list per base
  // address.
1993   unsigned Order = 0;
1994   MachineBasicBlock::iterator BlockI = Begin;
1995   for (; BlockI != End; ++BlockI) {
1996     MachineInstr &MI = *BlockI;
1997 
    // We run this before checking if an address is mergeable, because it can
    // produce better code even if the instructions aren't mergeable.
2000     if (promoteConstantOffsetToImm(MI, Visited, AnchorList))
2001       Modified = true;
2002 
2003     // Don't combine if volatile. We also won't be able to merge across this, so
2004     // break the search. We can look after this barrier for separate merges.
2005     if (MI.hasOrderedMemoryRef()) {
2006       LLVM_DEBUG(dbgs() << "Breaking search on memory fence: " << MI);
2007 
2008       // Search will resume after this instruction in a separate merge list.
2009       ++BlockI;
2010       break;
2011     }
2012 
2013     const InstClassEnum InstClass = getInstClass(MI.getOpcode(), *TII);
2014     if (InstClass == UNKNOWN)
2015       continue;
2016 
2017     CombineInfo CI;
2018     CI.setMI(MI, *TII, *STM);
2019     CI.Order = Order++;
2020 
2021     if (!CI.hasMergeableAddress(*MRI))
2022       continue;
2023 
2024     LLVM_DEBUG(dbgs() << "Mergeable: " << MI);
2025 
2026     addInstToMergeableList(CI, MergeableInsts);
2027   }
2028 
  // At this point we have lists of mergeable instructions.
  //
  // Part 2: Discard any list with fewer than two instructions (a merge needs
  // at least two), and sort the remaining lists by offset.
2035 
2036   for (std::list<std::list<CombineInfo>>::iterator I = MergeableInsts.begin(),
2037                                                    E = MergeableInsts.end(); I != E;) {
2038 
2039     std::list<CombineInfo> &MergeList = *I;
2040     if (MergeList.size() <= 1) {
2041       // This means we have found only one instruction with a given address
2042       // that can be merged, and we need at least 2 instructions to do a merge,
2043       // so this list can be discarded.
2044       I = MergeableInsts.erase(I);
2045       continue;
2046     }
2047 
    // Sort the list by offsets; this way mergeable instructions will be
    // adjacent to each other in the list, which will make it easier to find
    // matches.
2051     MergeList.sort(
2052         [] (const CombineInfo &A, const CombineInfo &B) {
2053           return A.Offset < B.Offset;
2054         });
2055     ++I;
2056   }
2057 
2058   return std::make_pair(BlockI, Modified);
2059 }
2060 
2061 // Scan through looking for adjacent LDS operations with constant offsets from
2062 // the same base register. We rely on the scheduler to do the hard work of
2063 // clustering nearby loads, and assume these are all adjacent.
2064 bool SILoadStoreOptimizer::optimizeBlock(
2065                        std::list<std::list<CombineInfo> > &MergeableInsts) {
2066   bool Modified = false;
2067 
2068   for (std::list<std::list<CombineInfo>>::iterator I = MergeableInsts.begin(),
2069                                                    E = MergeableInsts.end(); I != E;) {
2070     std::list<CombineInfo> &MergeList = *I;
2071 
2072     bool OptimizeListAgain = false;
2073     if (!optimizeInstsWithSameBaseAddr(MergeList, OptimizeListAgain)) {
2074       // We weren't able to make any changes, so delete the list so we don't
2075       // process the same instructions the next time we try to optimize this
2076       // block.
2077       I = MergeableInsts.erase(I);
2078       continue;
2079     }
2080 
2081     Modified = true;
2082 
    // We made changes, but also determined that there were no more
    // optimization opportunities, so we don't need to reprocess the list.
2085     if (!OptimizeListAgain) {
2086       I = MergeableInsts.erase(I);
2087       continue;
2088     }
2089     OptimizeAgain = true;
2090   }
2091   return Modified;
2092 }
2093 
2094 bool
2095 SILoadStoreOptimizer::optimizeInstsWithSameBaseAddr(
2096                                           std::list<CombineInfo> &MergeList,
2097                                           bool &OptimizeListAgain) {
2098   if (MergeList.empty())
2099     return false;
2100 
2101   bool Modified = false;
2102 
2103   for (auto I = MergeList.begin(), Next = std::next(I); Next != MergeList.end();
2104        Next = std::next(I)) {
2105 
2106     auto First = I;
2107     auto Second = Next;
2108 
2109     if ((*First).Order > (*Second).Order)
2110       std::swap(First, Second);
2111     CombineInfo &CI = *First;
2112     CombineInfo &Paired = *Second;
2113 
2114     SmallVector<MachineInstr *, 8> InstsToMove;
2115     if (!checkAndPrepareMerge(CI, Paired, InstsToMove)) {
2116       ++I;
2117       continue;
2118     }
2119 
2120     Modified = true;
2121 
2122     LLVM_DEBUG(dbgs() << "Merging: " << *CI.I << "   with: " << *Paired.I);
2123 
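    // Merge the pair. The merged instruction replaces CI in the list, and if
    // the combined width is still below the widest encodable form for the
    // instruction class, the list is flagged to be scanned again so that the
    // result can be merged further.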
2124     switch (CI.InstClass) {
2125     default:
2126       llvm_unreachable("unknown InstClass");
2127       break;
2128     case DS_READ: {
2129       MachineBasicBlock::iterator NewMI =
2130           mergeRead2Pair(CI, Paired, InstsToMove);
2131       CI.setMI(NewMI, *TII, *STM);
2132       break;
2133     }
2134     case DS_WRITE: {
2135       MachineBasicBlock::iterator NewMI =
2136           mergeWrite2Pair(CI, Paired, InstsToMove);
2137       CI.setMI(NewMI, *TII, *STM);
2138       break;
2139     }
2140     case S_BUFFER_LOAD_IMM: {
2141       MachineBasicBlock::iterator NewMI =
2142           mergeSBufferLoadImmPair(CI, Paired, InstsToMove);
2143       CI.setMI(NewMI, *TII, *STM);
2144       OptimizeListAgain |= (CI.Width + Paired.Width) < 8;
2145       break;
2146     }
2147     case BUFFER_LOAD: {
2148       MachineBasicBlock::iterator NewMI =
2149           mergeBufferLoadPair(CI, Paired, InstsToMove);
2150       CI.setMI(NewMI, *TII, *STM);
2151       OptimizeListAgain |= (CI.Width + Paired.Width) < 4;
2152       break;
2153     }
2154     case BUFFER_STORE: {
2155       MachineBasicBlock::iterator NewMI =
2156           mergeBufferStorePair(CI, Paired, InstsToMove);
2157       CI.setMI(NewMI, *TII, *STM);
2158       OptimizeListAgain |= (CI.Width + Paired.Width) < 4;
2159       break;
2160     }
2161     case MIMG: {
2162       MachineBasicBlock::iterator NewMI =
2163           mergeImagePair(CI, Paired, InstsToMove);
2164       CI.setMI(NewMI, *TII, *STM);
2165       OptimizeListAgain |= (CI.Width + Paired.Width) < 4;
2166       break;
2167     }
2168     case TBUFFER_LOAD: {
2169       MachineBasicBlock::iterator NewMI =
2170           mergeTBufferLoadPair(CI, Paired, InstsToMove);
2171       CI.setMI(NewMI, *TII, *STM);
2172       OptimizeListAgain |= (CI.Width + Paired.Width) < 4;
2173       break;
2174     }
2175     case TBUFFER_STORE: {
2176       MachineBasicBlock::iterator NewMI =
2177           mergeTBufferStorePair(CI, Paired, InstsToMove);
2178       CI.setMI(NewMI, *TII, *STM);
2179       OptimizeListAgain |= (CI.Width + Paired.Width) < 4;
2180       break;
2181     }
2182     }
2183     CI.Order = Paired.Order;
2184     if (I == Second)
2185       I = Next;
2186 
2187     MergeList.erase(Second);
2188   }
2189 
2190   return Modified;
2191 }
2192 
2193 bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
2194   if (skipFunction(MF.getFunction()))
2195     return false;
2196 
2197   STM = &MF.getSubtarget<GCNSubtarget>();
2198   if (!STM->loadStoreOptEnabled())
2199     return false;
2200 
2201   TII = STM->getInstrInfo();
2202   TRI = &TII->getRegisterInfo();
2203 
2204   MRI = &MF.getRegInfo();
2205   AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2206 
2207   LLVM_DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");
2208 
2209   bool Modified = false;
2210 
  // Contains the list of instructions for which constant offsets are being
  // promoted to the immediate. This is tracked for an entire block at a time.
2213   SmallPtrSet<MachineInstr *, 4> AnchorList;
2214   MemInfoMap Visited;
2215 
2216   for (MachineBasicBlock &MBB : MF) {
2217     MachineBasicBlock::iterator SectionEnd;
2218     for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;
2219          I = SectionEnd) {
2220       bool CollectModified;
2221       std::list<std::list<CombineInfo>> MergeableInsts;
2222 
2223       // First pass: Collect list of all instructions we know how to merge in a
2224       // subset of the block.
2225       std::tie(SectionEnd, CollectModified) =
2226           collectMergeableInsts(I, E, Visited, AnchorList, MergeableInsts);
2227 
2228       Modified |= CollectModified;
2229 
2230       do {
2231         OptimizeAgain = false;
2232         Modified |= optimizeBlock(MergeableInsts);
2233       } while (OptimizeAgain);
2234     }
2235 
2236     Visited.clear();
2237     AnchorList.clear();
2238   }
2239 
2240   return Modified;
2241 }
2242