//===- SILoadStoreOptimizer.cpp -------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass tries to fuse DS instructions with nearby immediate offsets.
// This will fuse operations such as
//  ds_read_b32 v0, v2 offset:16
//  ds_read_b32 v1, v2 offset:32
// ==>
//   ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
//
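// The pass merges DS writes the same way (into ds_write2_b32/b64), and it
// uses the stride-64 variants (ds_read2st64/ds_write2st64) when both offsets
// are multiples of 64 elements.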
//
// Future improvements:
//
// - This currently relies on the scheduler to place loads and stores next to
//   each other, and then only merges adjacent pairs of instructions. It would
//   be good to be more flexible with interleaved instructions, and possibly run
//   before scheduling. It currently misses stores of constants because loading
//   the constant into the data register is placed between the stores, although
//   this is arguably a scheduling problem.
//
// - Live interval recomputing seems inefficient. This currently only matches
//   one pair, recomputes live intervals, and moves on to the next pair. It
//   would be better to compute a list of all merges that need to occur.
//
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads have offsets that are too large to fit in the 8-bit
//   offset fields, but are close enough together, we can add to the base
//   pointer and use the new reduced offsets.
//
//===----------------------------------------------------------------------===//

38 #include "AMDGPU.h"
39 #include "AMDGPUSubtarget.h"
40 #include "SIInstrInfo.h"
41 #include "SIRegisterInfo.h"
42 #include "Utils/AMDGPUBaseInfo.h"
43 #include "llvm/ADT/ArrayRef.h"
44 #include "llvm/ADT/SmallVector.h"
45 #include "llvm/ADT/StringRef.h"
46 #include "llvm/Analysis/AliasAnalysis.h"
47 #include "llvm/CodeGen/MachineBasicBlock.h"
48 #include "llvm/CodeGen/MachineFunction.h"
49 #include "llvm/CodeGen/MachineFunctionPass.h"
50 #include "llvm/CodeGen/MachineInstr.h"
51 #include "llvm/CodeGen/MachineInstrBuilder.h"
52 #include "llvm/CodeGen/MachineOperand.h"
53 #include "llvm/CodeGen/MachineRegisterInfo.h"
54 #include "llvm/IR/DebugLoc.h"
55 #include "llvm/Pass.h"
56 #include "llvm/Support/Debug.h"
57 #include "llvm/Support/MathExtras.h"
58 #include "llvm/Support/raw_ostream.h"
59 #include <algorithm>
60 #include <cassert>
61 #include <cstdlib>
62 #include <iterator>
63 #include <utility>
64 
65 using namespace llvm;
66 
67 #define DEBUG_TYPE "si-load-store-opt"
68 
69 namespace {
70 
71 class SILoadStoreOptimizer : public MachineFunctionPass {
72   using CombineInfo = struct {
73     MachineBasicBlock::iterator I;
74     MachineBasicBlock::iterator Paired;
75     unsigned EltSize;
76     unsigned Offset0;
77     unsigned Offset1;
78     unsigned BaseOff;
79     bool UseST64;
80     SmallVector<MachineInstr*, 8> InstsToMove;
81    };
82 
83 private:
84   const SIInstrInfo *TII = nullptr;
85   const SIRegisterInfo *TRI = nullptr;
86   MachineRegisterInfo *MRI = nullptr;
87   AliasAnalysis *AA = nullptr;
88 
89   static bool offsetsCanBeCombined(CombineInfo &CI);
90 
91   bool findMatchingDSInst(CombineInfo &CI);
92 
93   MachineBasicBlock::iterator mergeRead2Pair(CombineInfo &CI);
94 
95   MachineBasicBlock::iterator mergeWrite2Pair(CombineInfo &CI);
96 
97 public:
98   static char ID;
99 
100   SILoadStoreOptimizer() : MachineFunctionPass(ID) {
101     initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
102   }
103 
104   bool optimizeBlock(MachineBasicBlock &MBB);
105 
106   bool runOnMachineFunction(MachineFunction &MF) override;
107 
108   StringRef getPassName() const override { return "SI Load / Store Optimizer"; }
109 
110   void getAnalysisUsage(AnalysisUsage &AU) const override {
111     AU.setPreservesCFG();
112     AU.addRequired<AAResultsWrapperPass>();
113 
114     MachineFunctionPass::getAnalysisUsage(AU);
115   }
116 };
117 
118 } // end anonymous namespace.
119 
120 INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
121                       "SI Load / Store Optimizer", false, false)
122 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
123 INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE,
124                     "SI Load / Store Optimizer", false, false)
125 
126 char SILoadStoreOptimizer::ID = 0;
127 
128 char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;
129 
130 FunctionPass *llvm::createSILoadStoreOptimizerPass() {
131   return new SILoadStoreOptimizer();
132 }
133 
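// Insert each instruction in InstsToMove immediately after I, preserving the
// order in which they appear in the list.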
static void moveInstsAfter(MachineBasicBlock::iterator I,
                           ArrayRef<MachineInstr*> InstsToMove) {
  MachineBasicBlock *MBB = I->getParent();
  ++I;
  for (MachineInstr *MI : InstsToMove) {
    MI->removeFromParent();
    MBB->insert(I, MI);
  }
}

static void addDefsToList(const MachineInstr &MI,
                          SmallVectorImpl<const MachineOperand *> &Defs) {
  for (const MachineOperand &Def : MI.defs()) {
    Defs.push_back(&Def);
  }
}

static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
                                      MachineBasicBlock::iterator B,
                                      const SIInstrInfo *TII,
                                      AliasAnalysis *AA) {
  return (TII->areMemAccessesTriviallyDisjoint(*A, *B, AA) ||
    // RAW or WAR - cannot reorder
    // WAW - cannot reorder
    // RAR - safe to reorder
    !(A->mayStore() || B->mayStore()));
}

// Add MI and its defs to the lists if MI reads one of the defs that are
// already in the list. Returns true in that case.
static bool
addToListsIfDependent(MachineInstr &MI,
                      SmallVectorImpl<const MachineOperand *> &Defs,
                      SmallVectorImpl<MachineInstr*> &Insts) {
  for (const MachineOperand *Def : Defs) {
    bool ReadDef = MI.readsVirtualRegister(Def->getReg());
    // If ReadDef is true, then there is a use of Def between I
    // and the instruction that I will potentially be merged with. We
    // will need to move this instruction after the merged instructions.
    if (ReadDef) {
      Insts.push_back(&MI);
      addDefsToList(MI, Defs);
      return true;
    }
  }

  return false;
}

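// Check that every memory operation in InstsToMove can be moved past MemOp;
// instructions that don't touch memory are skipped.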
static bool
canMoveInstsAcrossMemOp(MachineInstr &MemOp,
                        ArrayRef<MachineInstr*> InstsToMove,
                        const SIInstrInfo *TII,
                        AliasAnalysis *AA) {
  assert(MemOp.mayLoadOrStore());

  for (MachineInstr *InstToMove : InstsToMove) {
    if (!InstToMove->mayLoadOrStore())
      continue;
    if (!memAccessesCanBeReordered(MemOp, *InstToMove, TII, AA))
      return false;
  }
  return true;
}

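// Decide whether the two byte offsets in CI can be encoded in a single
// read2/write2 instruction (plain or stride-64 form), possibly after
// subtracting a common base offset. On success, CI.Offset0/Offset1 are
// rewritten as the values to place in the instruction's offset fields.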
bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI) {
  // XXX - Would the same offset be OK? Is there any reason this would happen or
  // be useful?
  if (CI.Offset0 == CI.Offset1)
    return false;

  // This won't be valid if the offset isn't aligned.
  if ((CI.Offset0 % CI.EltSize != 0) || (CI.Offset1 % CI.EltSize != 0))
    return false;

  unsigned EltOffset0 = CI.Offset0 / CI.EltSize;
  unsigned EltOffset1 = CI.Offset1 / CI.EltSize;
  CI.UseST64 = false;
  CI.BaseOff = 0;

  // If the offsets in elements don't fit in 8 bits, we might be able to use
  // the stride-64 versions.
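  // For example, with EltSize == 4, byte offsets 0 and 8192 give element
  // offsets 0 and 2048. Both are multiples of 64, so they can be encoded in
  // the st64 form as offset0:0 and offset1:32.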
  if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64 == 0) &&
      isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
    CI.Offset0 = EltOffset0 / 64;
    CI.Offset1 = EltOffset1 / 64;
    CI.UseST64 = true;
    return true;
  }

  // Check if the new offsets fit in the reduced 8-bit range.
  if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
    CI.Offset0 = EltOffset0;
    CI.Offset1 = EltOffset1;
    return true;
  }

  // Try to shift base address to decrease offsets.
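  // For example, with EltSize == 4 and byte offsets 4096 and 4100, neither
  // element offset (1024 and 1025) fits in 8 bits, but their difference does.
  // Using the smaller byte offset as a base (added to the address register in
  // the merge functions below) leaves encodable offsets of 0 and 1.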
  unsigned OffsetDiff = std::abs((int)EltOffset1 - (int)EltOffset0);
  CI.BaseOff = std::min(CI.Offset0, CI.Offset1);

  if ((OffsetDiff % 64 == 0) && isUInt<8>(OffsetDiff / 64)) {
    CI.Offset0 = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64;
    CI.Offset1 = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64;
    CI.UseST64 = true;
    return true;
  }

  if (isUInt<8>(OffsetDiff)) {
    CI.Offset0 = EltOffset0 - CI.BaseOff / CI.EltSize;
    CI.Offset1 = EltOffset1 - CI.BaseOff / CI.EltSize;
    return true;
  }

  return false;
}

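// Scan forward from CI.I for a DS instruction with the same opcode and base
// address whose offset can be combined with CI.I's. Instructions that have to
// be moved below the merge point are collected in CI.InstsToMove.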
bool SILoadStoreOptimizer::findMatchingDSInst(CombineInfo &CI) {
  MachineBasicBlock::iterator E = CI.I->getParent()->end();
  MachineBasicBlock::iterator MBBI = CI.I;
  ++MBBI;

  SmallVector<const MachineOperand *, 8> DefsToMove;
  addDefsToList(*CI.I, DefsToMove);

  for ( ; MBBI != E; ++MBBI) {
    if (MBBI->getOpcode() != CI.I->getOpcode()) {
      // This is not a matching DS instruction, but we can keep looking as
      // long as one of these conditions is met:
      // 1. It is safe to move I down past MBBI.
      // 2. It is safe to move MBBI down past the instruction that I will
      //    be merged into.

      if (MBBI->hasUnmodeledSideEffects())
        // We can't re-order this instruction with respect to other memory
        // operations, so we fail both conditions mentioned above.
        return false;

      if (MBBI->mayLoadOrStore() &&
          !memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA)) {
        // We fail condition #1, but we may still be able to satisfy condition
        // #2.  Add this instruction to the move list and then we will check
        // if condition #2 holds once we have selected the matching instruction.
        CI.InstsToMove.push_back(&*MBBI);
        addDefsToList(*MBBI, DefsToMove);
        continue;
      }

      // When we match I with another DS instruction we will be moving I down
      // to the location of the matched instruction, so any uses of I will need
      // to be moved down as well.
      addToListsIfDependent(*MBBI, DefsToMove, CI.InstsToMove);
      continue;
    }

    // Don't merge volatiles.
    if (MBBI->hasOrderedMemoryRef())
      return false;

    // Handle a case like
    //   DS_WRITE_B32 addr, v, idx0
    //   w = DS_READ_B32 addr, idx0
    //   DS_WRITE_B32 addr, f(w), idx1
    // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
    // merging of the two writes.
    if (addToListsIfDependent(*MBBI, DefsToMove, CI.InstsToMove))
      continue;

    int AddrIdx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(),
                                             AMDGPU::OpName::addr);
    const MachineOperand &AddrReg0 = CI.I->getOperand(AddrIdx);
    const MachineOperand &AddrReg1 = MBBI->getOperand(AddrIdx);

    // Check same base pointer. Be careful of subregisters, which can occur with
    // vectors of pointers.
    if (AddrReg0.getReg() == AddrReg1.getReg() &&
        AddrReg0.getSubReg() == AddrReg1.getSubReg()) {
      int OffsetIdx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(),
                                                 AMDGPU::OpName::offset);
      CI.Offset0 = CI.I->getOperand(OffsetIdx).getImm() & 0xffff;
      CI.Offset1 = MBBI->getOperand(OffsetIdx).getImm() & 0xffff;
      CI.Paired = MBBI;

      // Check both offsets fit in the reduced range.
      // We also need to go through the list of instructions that we plan to
      // move and make sure they are all safe to move down past the merged
      // instruction.
      if (offsetsCanBeCombined(CI))
        if (canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
          return true;
    }

    // We've found a load/store that we couldn't merge for some reason.
    // We could potentially keep looking, but we'd need to make sure that
    // it was safe to move I and also all the instructions in InstsToMove
    // down past this instruction.
    // Check if we can move I across MBBI and if we can move all I's users.
    if (!memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA) ||
        !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
      break;
  }
  return false;
}

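// Rewrite the pair (CI.I, CI.Paired) as a single ds_read2 / ds_read2st64 that
// defines a fresh super-register, then copy its two sub-registers back to the
// original destination registers so existing uses stay valid.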
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be careful, since the addresses could be subregisters themselves in weird
  // cases, like vectors of pointers.
  const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);

  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdst);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = (CI.EltSize == 4) ? AMDGPU::DS_READ2_B32
                                   : AMDGPU::DS_READ2_B64;

  if (CI.UseST64)
    Opc = (CI.EltSize == 4) ? AMDGPU::DS_READ2ST64_B32
                            : AMDGPU::DS_READ2ST64_B64;

  unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(SubRegIdx0, SubRegIdx1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Read2Desc = TII->get(Opc);

  const TargetRegisterClass *SuperRC
    = (CI.EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);

  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = AddrReg->getReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::V_ADD_I32_e32), BaseReg)
           .addImm(CI.BaseOff)
           .addReg(AddrReg->getReg());
  }

  MachineInstrBuilder Read2 =
    BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
      .addReg(BaseReg, BaseRegFlags) // addr
      .addImm(NewOffset0)            // offset0
      .addImm(NewOffset1)            // offset1
      .addImm(0)                     // gds
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  (void)Read2;

  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);

  // Copy to the old destination registers.
  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  return Next;
}

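// Rewrite the pair (CI.I, CI.Paired) as a single ds_write2 / ds_write2st64,
// reusing the original data operands so their subregister indices and
// register flags are preserved.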
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be sure to use .add(), and not .addReg() with these. We want to be sure
  // we preserve the subregister index and any register flags set on them.
  const MachineOperand *Addr = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
  const MachineOperand *Data0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
  const MachineOperand *Data1
    = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::data0);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = (CI.EltSize == 4) ? AMDGPU::DS_WRITE2_B32
                                   : AMDGPU::DS_WRITE2_B64;

  if (CI.UseST64)
    Opc = (CI.EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32
                            : AMDGPU::DS_WRITE2ST64_B64;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(Data0, Data1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Write2Desc = TII->get(Opc);
  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = Addr->getReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::V_ADD_I32_e32), BaseReg)
           .addImm(CI.BaseOff)
           .addReg(Addr->getReg());
  }

  MachineInstrBuilder Write2 =
    BuildMI(*MBB, CI.Paired, DL, Write2Desc)
      .addReg(BaseReg, BaseRegFlags) // addr
      .add(*Data0)                   // data0
      .add(*Data1)                   // data1
      .addImm(NewOffset0)            // offset0
      .addImm(NewOffset1)            // offset1
      .addImm(0)                     // gds
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  moveInstsAfter(Write2, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  return Next;
}

// Scan through looking for adjacent LDS operations with constant offsets from
// the same base register. We rely on the scheduler to do the hard work of
// clustering nearby loads, and assume these are all adjacent.
bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
  bool Modified = false;

  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
    MachineInstr &MI = *I;

    // Don't combine if volatile.
    if (MI.hasOrderedMemoryRef()) {
      ++I;
      continue;
    }

    CombineInfo CI;
    CI.I = I;
    unsigned Opc = MI.getOpcode();
    if (Opc == AMDGPU::DS_READ_B32 || Opc == AMDGPU::DS_READ_B64) {
      CI.EltSize = (Opc == AMDGPU::DS_READ_B64) ? 8 : 4;
      if (findMatchingDSInst(CI)) {
        Modified = true;
        I = mergeRead2Pair(CI);
      } else {
        ++I;
      }

      continue;
    } else if (Opc == AMDGPU::DS_WRITE_B32 || Opc == AMDGPU::DS_WRITE_B64) {
      CI.EltSize = (Opc == AMDGPU::DS_WRITE_B64) ? 8 : 4;
      if (findMatchingDSInst(CI)) {
        Modified = true;
        I = mergeWrite2Pair(CI);
      } else {
        ++I;
      }

      continue;
    }

    ++I;
  }

  return Modified;
}

bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
  if (!STM.loadStoreOptEnabled())
    return false;

  TII = STM.getInstrInfo();
  TRI = &TII->getRegisterInfo();

  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");

  bool Modified = false;

  for (MachineBasicBlock &MBB : MF)
    Modified |= optimizeBlock(MBB);

  return Modified;
}