1 //===-- SILoadStoreOptimizer.cpp ------------------------------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
// This pass tries to fuse DS instructions with nearby immediate offsets.
11 // This will fuse operations such as
12 //  ds_read_b32 v0, v2 offset:16
13 //  ds_read_b32 v1, v2 offset:32
14 // ==>
//  ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
//
18 // Future improvements:
19 //
20 // - This currently relies on the scheduler to place loads and stores next to
21 //   each other, and then only merges adjacent pairs of instructions. It would
22 //   be good to be more flexible with interleaved instructions, and possibly run
//   before scheduling. It currently misses stores of constants because loading
24 //   the constant into the data register is placed between the stores, although
25 //   this is arguably a scheduling problem.
26 //
27 // - Live interval recomputing seems inefficient. This currently only matches
28 //   one pair, and recomputes live intervals and moves on to the next pair. It
29 //   would be better to compute a list of all merges that need to occur.
30 //
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads has offsets that are too large to fit in the 8-bit
//   offset fields, but are close enough together, we can add a common amount
//   to the base pointer and use the new, smaller offsets.
35 //
36 //===----------------------------------------------------------------------===//
37 
38 #include "AMDGPU.h"
39 #include "AMDGPUSubtarget.h"
40 #include "SIInstrInfo.h"
41 #include "SIRegisterInfo.h"
42 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
43 #include "llvm/CodeGen/LiveVariables.h"
44 #include "llvm/CodeGen/MachineFunction.h"
45 #include "llvm/CodeGen/MachineFunctionPass.h"
46 #include "llvm/CodeGen/MachineInstrBuilder.h"
47 #include "llvm/CodeGen/MachineRegisterInfo.h"
48 #include "llvm/Support/Debug.h"
49 #include "llvm/Support/raw_ostream.h"
50 #include "llvm/Target/TargetMachine.h"
51 
52 using namespace llvm;
53 
54 #define DEBUG_TYPE "si-load-store-opt"
55 
56 namespace {
57 
58 class SILoadStoreOptimizer : public MachineFunctionPass {
59 private:
60   const SIInstrInfo *TII;
61   const SIRegisterInfo *TRI;
62   MachineRegisterInfo *MRI;
63   AliasAnalysis *AA;
64 
65   static bool offsetsCanBeCombined(unsigned Offset0,
66                                    unsigned Offset1,
67                                    unsigned EltSize);
68 
69   MachineBasicBlock::iterator findMatchingDSInst(
70     MachineBasicBlock::iterator I,
71     unsigned EltSize,
72     SmallVectorImpl<MachineInstr*> &InstsToMove);
73 
74   MachineBasicBlock::iterator mergeRead2Pair(
75     MachineBasicBlock::iterator I,
76     MachineBasicBlock::iterator Paired,
77     unsigned EltSize,
78     ArrayRef<MachineInstr*> InstsToMove);
79 
80   MachineBasicBlock::iterator mergeWrite2Pair(
81     MachineBasicBlock::iterator I,
82     MachineBasicBlock::iterator Paired,
83     unsigned EltSize,
84     ArrayRef<MachineInstr*> InstsToMove);
85 
86 public:
87   static char ID;
88 
89   SILoadStoreOptimizer()
90       : MachineFunctionPass(ID), TII(nullptr), TRI(nullptr), MRI(nullptr),
91         AA(nullptr) {}
92 
  SILoadStoreOptimizer(const TargetMachine &TM_)
      : MachineFunctionPass(ID), TII(nullptr), TRI(nullptr), MRI(nullptr),
        AA(nullptr) {
    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
  }
96 
97   bool optimizeBlock(MachineBasicBlock &MBB);
98 
99   bool runOnMachineFunction(MachineFunction &MF) override;
100 
101   StringRef getPassName() const override { return "SI Load / Store Optimizer"; }
102 
103   void getAnalysisUsage(AnalysisUsage &AU) const override {
104     AU.setPreservesCFG();
105     AU.addRequired<AAResultsWrapperPass>();
106 
107     MachineFunctionPass::getAnalysisUsage(AU);
108   }
109 };
110 
111 } // End anonymous namespace.
112 
113 INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
114                       "SI Load / Store Optimizer", false, false)
115 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
116 INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE,
117                     "SI Load / Store Optimizer", false, false)
118 
119 char SILoadStoreOptimizer::ID = 0;
120 
121 char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;
122 
123 FunctionPass *llvm::createSILoadStoreOptimizerPass(TargetMachine &TM) {
124   return new SILoadStoreOptimizer(TM);
125 }
126 
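// Move each instruction in InstsToMove to the point immediately after I,
// preserving their relative order.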
127 static void moveInstsAfter(MachineBasicBlock::iterator I,
128                            ArrayRef<MachineInstr*> InstsToMove) {
129   MachineBasicBlock *MBB = I->getParent();
130   ++I;
131   for (MachineInstr *MI : InstsToMove) {
132     MI->removeFromParent();
133     MBB->insert(I, MI);
134   }
135 }
136 
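// Record the explicit register defs of MI in Defs so that later instructions
// can be checked for dependencies on them.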
137 static void addDefsToList(const MachineInstr &MI,
138                           SmallVectorImpl<const MachineOperand *> &Defs) {
139   for (const MachineOperand &Def : MI.defs()) {
140     Defs.push_back(&Def);
141   }
142 }
143 
144 // Add MI and its defs to the lists if MI reads one of the defs that are
145 // already in the list. Returns true in that case.
146 static bool
147 addToListsIfDependent(MachineInstr &MI,
148                       SmallVectorImpl<const MachineOperand *> &Defs,
149                       SmallVectorImpl<MachineInstr*> &Insts) {
150   for (const MachineOperand *Def : Defs) {
151     bool ReadDef = MI.readsVirtualRegister(Def->getReg());
152     // If ReadDef is true, then there is a use of Def between I
153     // and the instruction that I will potentially be merged with. We
154     // will need to move this instruction after the merged instructions.
155     if (ReadDef) {
156       Insts.push_back(&MI);
157       addDefsToList(MI, Defs);
158       return true;
159     }
160   }
161 
162   return false;
163 }
164 
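// Returns true if the instructions in InstsToMove can safely be reordered
// across the memory operation MemOp, i.e. none of the loads or stores among
// them may alias it.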
165 static bool
166 canMoveInstsAcrossMemOp(MachineInstr &MemOp,
167                         ArrayRef<MachineInstr*> InstsToMove,
168                         const SIInstrInfo *TII,
169                         AliasAnalysis *AA) {
171   assert(MemOp.mayLoadOrStore());
172 
173   for (MachineInstr *InstToMove : InstsToMove) {
174     if (!InstToMove->mayLoadOrStore())
175       continue;
176     if (!TII->areMemAccessesTriviallyDisjoint(MemOp, *InstToMove, AA))
177       return false;
178   }
179   return true;
180 }
181 
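// Check whether byte offsets Offset0 and Offset1 can be encoded in the two
// 8-bit offset fields of a merged instruction. The fields are in units of
// Size bytes (the element size); the st64 variants scale them by a further
// factor of 64. For example, with Size == 4, byte offsets 16 and 32 become
// element offsets 4 and 8, both of which fit in 8 bits.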
182 bool SILoadStoreOptimizer::offsetsCanBeCombined(unsigned Offset0,
183                                                 unsigned Offset1,
184                                                 unsigned Size) {
185   // XXX - Would the same offset be OK? Is there any reason this would happen or
186   // be useful?
187   if (Offset0 == Offset1)
188     return false;
189 
190   // This won't be valid if the offset isn't aligned.
191   if ((Offset0 % Size != 0) || (Offset1 % Size != 0))
192     return false;
193 
194   unsigned EltOffset0 = Offset0 / Size;
195   unsigned EltOffset1 = Offset1 / Size;
196 
197   // Check if the new offsets fit in the reduced 8-bit range.
198   if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1))
199     return true;
200 
201   // If the offset in elements doesn't fit in 8-bits, we might be able to use
202   // the stride 64 versions.
203   if ((EltOffset0 % 64 != 0) || (EltOffset1 % 64) != 0)
204     return false;
205 
206   return isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64);
207 }
208 
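// Scan forward from I for another DS instruction with the same opcode and
// base address whose offset can be combined with I's. Instructions that must
// be moved below the merge point are collected in InstsToMove. Returns the
// matching instruction, or the end of the block if none is found.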
209 MachineBasicBlock::iterator
210 SILoadStoreOptimizer::findMatchingDSInst(MachineBasicBlock::iterator I,
211                                   unsigned EltSize,
212                                   SmallVectorImpl<MachineInstr*> &InstsToMove) {
213   MachineBasicBlock::iterator E = I->getParent()->end();
214   MachineBasicBlock::iterator MBBI = I;
215   ++MBBI;
216 
217   SmallVector<const MachineOperand *, 8> DefsToMove;
218   addDefsToList(*I, DefsToMove);
219 
  for (; MBBI != E; ++MBBI) {
    if (MBBI->getOpcode() != I->getOpcode()) {
      // This is not a matching DS instruction, but we can keep looking as
      // long as one of these conditions is met:
226       // 1. It is safe to move I down past MBBI.
227       // 2. It is safe to move MBBI down past the instruction that I will
228       //    be merged into.
229 
      if (MBBI->hasUnmodeledSideEffects()) {
        // We can't re-order this instruction with respect to other memory
        // operations, so we fail both conditions mentioned above.
        return E;
      }
234 
235       if (MBBI->mayLoadOrStore() &&
236           !TII->areMemAccessesTriviallyDisjoint(*I, *MBBI, AA)) {
237         // We fail condition #1, but we may still be able to satisfy condition
238         // #2.  Add this instruction to the move list and then we will check
239         // if condition #2 holds once we have selected the matching instruction.
240         InstsToMove.push_back(&*MBBI);
241         addDefsToList(*MBBI, DefsToMove);
242         continue;
243       }
244 
      // When we match I with another DS instruction we will be moving I down
      // to the location of the matched instruction, so any uses of I will need
      // to be moved down as well.
248       addToListsIfDependent(*MBBI, DefsToMove, InstsToMove);
249       continue;
250     }
251 
252     // Don't merge volatiles.
253     if (MBBI->hasOrderedMemoryRef())
254       return E;
255 
256     // Handle a case like
257     //   DS_WRITE_B32 addr, v, idx0
258     //   w = DS_READ_B32 addr, idx0
259     //   DS_WRITE_B32 addr, f(w), idx1
260     // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
261     // merging of the two writes.
262     if (addToListsIfDependent(*MBBI, DefsToMove, InstsToMove))
263       continue;
264 
265     int AddrIdx = AMDGPU::getNamedOperandIdx(I->getOpcode(), AMDGPU::OpName::addr);
266     const MachineOperand &AddrReg0 = I->getOperand(AddrIdx);
267     const MachineOperand &AddrReg1 = MBBI->getOperand(AddrIdx);
268 
269     // Check same base pointer. Be careful of subregisters, which can occur with
270     // vectors of pointers.
271     if (AddrReg0.getReg() == AddrReg1.getReg() &&
272         AddrReg0.getSubReg() == AddrReg1.getSubReg()) {
273       int OffsetIdx = AMDGPU::getNamedOperandIdx(I->getOpcode(),
274                                                  AMDGPU::OpName::offset);
275       unsigned Offset0 = I->getOperand(OffsetIdx).getImm() & 0xffff;
276       unsigned Offset1 = MBBI->getOperand(OffsetIdx).getImm() & 0xffff;
277 
278       // Check both offsets fit in the reduced range.
279       // We also need to go through the list of instructions that we plan to
280       // move and make sure they are all safe to move down past the merged
281       // instruction.
282       if (offsetsCanBeCombined(Offset0, Offset1, EltSize) &&
283           canMoveInstsAcrossMemOp(*MBBI, InstsToMove, TII, AA))
284         return MBBI;
285     }
286 
287     // We've found a load/store that we couldn't merge for some reason.
288     // We could potentially keep looking, but we'd need to make sure that
    // it was safe to move I and also all the instructions in InstsToMove
290     // down past this instruction.
291     // FIXME: This is too conservative.
292     break;
293   }
294   return E;
295 }
296 
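// Merge the DS_READ pair I and Paired into a single read2 (or read2st64)
// instruction that loads into a wider register, then copy each half back to
// the original destination registers.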
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
298   MachineBasicBlock::iterator I,
299   MachineBasicBlock::iterator Paired,
300   unsigned EltSize,
301   ArrayRef<MachineInstr*> InstsToMove) {
302   MachineBasicBlock *MBB = I->getParent();
303 
304   // Be careful, since the addresses could be subregisters themselves in weird
305   // cases, like vectors of pointers.
306   const MachineOperand *AddrReg = TII->getNamedOperand(*I, AMDGPU::OpName::addr);
307 
308   const MachineOperand *Dest0 = TII->getNamedOperand(*I, AMDGPU::OpName::vdst);
309   const MachineOperand *Dest1 = TII->getNamedOperand(*Paired, AMDGPU::OpName::vdst);
310 
311   unsigned Offset0
312     = TII->getNamedOperand(*I, AMDGPU::OpName::offset)->getImm() & 0xffff;
313   unsigned Offset1
314     = TII->getNamedOperand(*Paired, AMDGPU::OpName::offset)->getImm() & 0xffff;
315 
316   unsigned NewOffset0 = Offset0 / EltSize;
317   unsigned NewOffset1 = Offset1 / EltSize;
318   unsigned Opc = (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64;
319 
  // Prefer the st64 form if we can use it, even if we can fit the offset in
  // the non-st64 version. I'm not sure if there's any real reason to do this.
322   bool UseST64 = (NewOffset0 % 64 == 0) && (NewOffset1 % 64 == 0);
323   if (UseST64) {
324     NewOffset0 /= 64;
325     NewOffset1 /= 64;
326     Opc = (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32 : AMDGPU::DS_READ2ST64_B64;
327   }
328 
329   unsigned SubRegIdx0 = (EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
330   unsigned SubRegIdx1 = (EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;
331 
332   if (NewOffset0 > NewOffset1) {
333     // Canonicalize the merged instruction so the smaller offset comes first.
334     std::swap(NewOffset0, NewOffset1);
335     std::swap(SubRegIdx0, SubRegIdx1);
336   }
337 
338   assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
339          (NewOffset0 != NewOffset1) &&
340          "Computed offset doesn't fit");
341 
342   const MCInstrDesc &Read2Desc = TII->get(Opc);
343 
344   const TargetRegisterClass *SuperRC
345     = (EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
346   unsigned DestReg = MRI->createVirtualRegister(SuperRC);
347 
348   DebugLoc DL = I->getDebugLoc();
349   MachineInstrBuilder Read2
350     = BuildMI(*MBB, Paired, DL, Read2Desc, DestReg)
351     .addOperand(*AddrReg) // addr
352     .addImm(NewOffset0) // offset0
353     .addImm(NewOffset1) // offset1
354     .addImm(0) // gds
355     .addMemOperand(*I->memoperands_begin())
356     .addMemOperand(*Paired->memoperands_begin());
357   (void)Read2;
358 
359   const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
360 
361   // Copy to the old destination registers.
362   BuildMI(*MBB, Paired, DL, CopyDesc)
363     .addOperand(*Dest0) // Copy to same destination including flags and sub reg.
364     .addReg(DestReg, 0, SubRegIdx0);
365   MachineInstr *Copy1 = BuildMI(*MBB, Paired, DL, CopyDesc)
366     .addOperand(*Dest1)
367     .addReg(DestReg, RegState::Kill, SubRegIdx1);
368 
369   moveInstsAfter(Copy1, InstsToMove);
370 
371   MachineBasicBlock::iterator Next = std::next(I);
372   I->eraseFromParent();
373   Paired->eraseFromParent();
374 
375   DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
376   return Next;
377 }
378 
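// Merge the DS_WRITE pair I and Paired into a single write2 (or write2st64)
// instruction.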
379 MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
380   MachineBasicBlock::iterator I,
381   MachineBasicBlock::iterator Paired,
382   unsigned EltSize,
383   ArrayRef<MachineInstr*> InstsToMove) {
384   MachineBasicBlock *MBB = I->getParent();
385 
386   // Be sure to use .addOperand(), and not .addReg() with these. We want to be
387   // sure we preserve the subregister index and any register flags set on them.
388   const MachineOperand *Addr = TII->getNamedOperand(*I, AMDGPU::OpName::addr);
389   const MachineOperand *Data0 = TII->getNamedOperand(*I, AMDGPU::OpName::data0);
390   const MachineOperand *Data1
391     = TII->getNamedOperand(*Paired, AMDGPU::OpName::data0);
392 
394   unsigned Offset0
395     = TII->getNamedOperand(*I, AMDGPU::OpName::offset)->getImm() & 0xffff;
396   unsigned Offset1
397     = TII->getNamedOperand(*Paired, AMDGPU::OpName::offset)->getImm() & 0xffff;
398 
399   unsigned NewOffset0 = Offset0 / EltSize;
400   unsigned NewOffset1 = Offset1 / EltSize;
401   unsigned Opc = (EltSize == 4) ? AMDGPU::DS_WRITE2_B32 : AMDGPU::DS_WRITE2_B64;
402 
  // Prefer the st64 form if we can use it, even if we can fit the offset in
  // the non-st64 version. I'm not sure if there's any real reason to do this.
405   bool UseST64 = (NewOffset0 % 64 == 0) && (NewOffset1 % 64 == 0);
406   if (UseST64) {
407     NewOffset0 /= 64;
408     NewOffset1 /= 64;
409     Opc = (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32 : AMDGPU::DS_WRITE2ST64_B64;
410   }
411 
412   if (NewOffset0 > NewOffset1) {
413     // Canonicalize the merged instruction so the smaller offset comes first.
414     std::swap(NewOffset0, NewOffset1);
415     std::swap(Data0, Data1);
416   }
417 
418   assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
419          (NewOffset0 != NewOffset1) &&
420          "Computed offset doesn't fit");
421 
422   const MCInstrDesc &Write2Desc = TII->get(Opc);
423   DebugLoc DL = I->getDebugLoc();
424 
425   MachineInstrBuilder Write2
426     = BuildMI(*MBB, Paired, DL, Write2Desc)
427     .addOperand(*Addr) // addr
428     .addOperand(*Data0) // data0
429     .addOperand(*Data1) // data1
430     .addImm(NewOffset0) // offset0
431     .addImm(NewOffset1) // offset1
432     .addImm(0) // gds
433     .addMemOperand(*I->memoperands_begin())
434     .addMemOperand(*Paired->memoperands_begin());
435 
436   moveInstsAfter(Write2, InstsToMove);
437 
438   MachineBasicBlock::iterator Next = std::next(I);
439   I->eraseFromParent();
440   Paired->eraseFromParent();
441 
442   DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
443   return Next;
444 }
445 
446 // Scan through looking for adjacent LDS operations with constant offsets from
447 // the same base register. We rely on the scheduler to do the hard work of
448 // clustering nearby loads, and assume these are all adjacent.
449 bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
450   bool Modified = false;
451 
452   for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
453     MachineInstr &MI = *I;
454 
455     // Don't combine if volatile.
456     if (MI.hasOrderedMemoryRef()) {
457       ++I;
458       continue;
459     }
460 
461     SmallVector<MachineInstr*, 8> InstsToMove;
462     unsigned Opc = MI.getOpcode();
463     if (Opc == AMDGPU::DS_READ_B32 || Opc == AMDGPU::DS_READ_B64) {
464       unsigned Size = (Opc == AMDGPU::DS_READ_B64) ? 8 : 4;
465       MachineBasicBlock::iterator Match = findMatchingDSInst(I, Size,
466                                                              InstsToMove);
467       if (Match != E) {
468         Modified = true;
469         I = mergeRead2Pair(I, Match, Size, InstsToMove);
470       } else {
471         ++I;
472       }
473 
474       continue;
475     } else if (Opc == AMDGPU::DS_WRITE_B32 || Opc == AMDGPU::DS_WRITE_B64) {
476       unsigned Size = (Opc == AMDGPU::DS_WRITE_B64) ? 8 : 4;
477       MachineBasicBlock::iterator Match = findMatchingDSInst(I, Size,
478                                                              InstsToMove);
479       if (Match != E) {
480         Modified = true;
481         I = mergeWrite2Pair(I, Match, Size, InstsToMove);
482       } else {
483         ++I;
484       }
485 
486       continue;
487     }
488 
489     ++I;
490   }
491 
492   return Modified;
493 }
494 
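// Pass entry point: set up target info and alias analysis results, then try
// to merge DS instructions in each block of the function.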
495 bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
496   if (skipFunction(*MF.getFunction()))
497     return false;
498 
499   const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
500   if (!STM.loadStoreOptEnabled())
501     return false;
502 
503   TII = STM.getInstrInfo();
504   TRI = &TII->getRegisterInfo();
505 
506   MRI = &MF.getRegInfo();
507   AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
508 
509   DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");
510 
511   bool Modified = false;
512 
513   for (MachineBasicBlock &MBB : MF)
514     Modified |= optimizeBlock(MBB);
515 
516   return Modified;
517 }
518