//===-- lib/CodeGen/GlobalISel/CombinerHelper.cpp -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/GlobalISel/Combiner.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

#define DEBUG_TYPE "gi-combine"

using namespace llvm;

CombinerHelper::CombinerHelper(GISelChangeObserver &Observer,
                               MachineIRBuilder &B)
    : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer) {}

void CombinerHelper::scheduleForVisit(MachineInstr &MI) {
  Observer.createdInstr(MI);
}

bool CombinerHelper::tryCombineCopy(MachineInstr &MI) {
  if (MI.getOpcode() != TargetOpcode::COPY)
    return false;
  unsigned DstReg = MI.getOperand(0).getReg();
  unsigned SrcReg = MI.getOperand(1).getReg();
  LLT DstTy = MRI.getType(DstReg);
  LLT SrcTy = MRI.getType(SrcReg);
  // Simple Copy Propagation.
  // a(sx) = COPY b(sx) -> Replace all uses of a with b.
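  //
  // For example (an illustrative sketch; register numbers are arbitrary):
  //    %1:_(s32) = COPY %0:_(s32)
  //    %2:_(s32) = G_ADD %1, %1
  // becomes:
  //    %2:_(s32) = G_ADD %0, %0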
  if (DstTy.isValid() && SrcTy.isValid() && DstTy == SrcTy) {
    MI.eraseFromParent();
    MRI.replaceRegWith(DstReg, SrcReg);
    return true;
  }
  return false;
}

namespace {
struct PreferredTuple {
  LLT Ty;                // The result type of the extend.
  unsigned ExtendOpcode; // G_ANYEXT/G_SEXT/G_ZEXT
  MachineInstr *MI;
};

/// Select a preference between two uses. CurrentUse is the current preference
/// while *ForCandidate describes the attributes of the candidate under
/// consideration.
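///
/// Roughly, the preference order implemented below is: a defined extend
/// (G_SEXT/G_ZEXT) over G_ANYEXT, G_SEXT over G_ZEXT when the result types
/// match, and otherwise the candidate with the larger result type.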
PreferredTuple ChoosePreferredUse(PreferredTuple &CurrentUse,
                                  const LLT &TyForCandidate,
                                  unsigned OpcodeForCandidate,
                                  MachineInstr *MIForCandidate) {
  if (!CurrentUse.Ty.isValid()) {
    if (CurrentUse.ExtendOpcode == OpcodeForCandidate ||
        CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT)
      return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
    return CurrentUse;
  }

  // We permit the extend to hoist through basic blocks but this is only
  // sensible if the target has extending loads. If you end up lowering back
  // into a load and extend during the legalizer then the end result is
  // hoisting the extend up to the load.

  // Prefer defined extensions to undefined extensions as these are more
  // likely to reduce the number of instructions.
  if (OpcodeForCandidate == TargetOpcode::G_ANYEXT &&
      CurrentUse.ExtendOpcode != TargetOpcode::G_ANYEXT)
    return CurrentUse;
  else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT &&
           OpcodeForCandidate != TargetOpcode::G_ANYEXT)
    return {TyForCandidate, OpcodeForCandidate, MIForCandidate};

  // Prefer sign extensions to zero extensions as sign-extensions tend to be
  // more expensive, so folding the sign extension into the load is the
  // bigger saving.
  if (CurrentUse.Ty == TyForCandidate) {
    if (CurrentUse.ExtendOpcode == TargetOpcode::G_SEXT &&
        OpcodeForCandidate == TargetOpcode::G_ZEXT)
      return CurrentUse;
    else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT &&
             OpcodeForCandidate == TargetOpcode::G_SEXT)
      return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
  }

  // This is potentially target specific. We've chosen the largest type
  // because G_TRUNC is usually free. One potential catch with this is that
  // some targets have fewer of the larger registers than the smaller ones,
  // and this choice potentially increases the live-range of the larger value.
  if (TyForCandidate.getSizeInBits() > CurrentUse.Ty.getSizeInBits()) {
    return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
  }
  return CurrentUse;
}

/// Find a suitable place to insert some instructions and insert them. This
/// function accounts for special cases like inserting before a PHI node.
/// The current strategy for inserting before PHIs is to duplicate the
/// instructions for each predecessor. However, while that's OK for G_TRUNC
/// on most targets since it generally requires no code, other targets/cases
/// may want to try harder to find a dominating block.
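///
/// For example, when the use is a PHI operand, the insertion point chosen is
/// in the predecessor block recorded alongside that operand: just after DefMI
/// if that block also contains the def, otherwise at the block's first
/// non-PHI instruction.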
static void InsertInsnsWithoutSideEffectsBeforeUse(
    MachineIRBuilder &Builder, MachineInstr &DefMI, MachineOperand &UseMO,
    std::function<void(MachineBasicBlock *, MachineBasicBlock::iterator)>
        Inserter) {
  MachineInstr &UseMI = *UseMO.getParent();

  MachineBasicBlock *InsertBB = UseMI.getParent();

  // If the use is a PHI then we want the predecessor block instead.
  if (UseMI.isPHI()) {
    MachineOperand *PredBB = std::next(&UseMO);
    InsertBB = PredBB->getMBB();
  }

  // If the block is the same block as the def then we want to insert just
  // after the def instead of at the start of the block.
  if (InsertBB == DefMI.getParent()) {
    MachineBasicBlock::iterator InsertPt = &DefMI;
    Inserter(InsertBB, std::next(InsertPt));
    return;
  }

  // Otherwise we want the start of the BB.
  Inserter(InsertBB, InsertBB->getFirstNonPHI());
}
} // end anonymous namespace

bool CombinerHelper::tryCombineExtendingLoads(MachineInstr &MI) {
  struct InsertionPoint {
    MachineOperand *UseMO;
    MachineBasicBlock *InsertIntoBB;
    MachineBasicBlock::iterator InsertBefore;
    InsertionPoint(MachineOperand *UseMO, MachineBasicBlock *InsertIntoBB,
                   MachineBasicBlock::iterator InsertBefore)
        : UseMO(UseMO), InsertIntoBB(InsertIntoBB), InsertBefore(InsertBefore) {
    }
  };

  // We match the loads and follow the uses to the extend instead of matching
  // the extends and following the def to the load. This is because the load
  // must remain in the same position for correctness (unless we also add code
  // to find a safe place to sink it) whereas the extend is freely movable.
  // It also avoids duplicating the load, which would be incorrect for
  // volatile loads and wasteful in general.

  if (MI.getOpcode() != TargetOpcode::G_LOAD &&
      MI.getOpcode() != TargetOpcode::G_SEXTLOAD &&
      MI.getOpcode() != TargetOpcode::G_ZEXTLOAD)
    return false;

  auto &LoadValue = MI.getOperand(0);
  assert(LoadValue.isReg() && "Result wasn't a register?");

  LLT LoadValueTy = MRI.getType(LoadValue.getReg());
  if (!LoadValueTy.isScalar())
    return false;

  // Find the preferred type aside from the any-extends (unless it's the only
  // one) and non-extending ops. We'll emit an extending load to that type and
  // emit a variant of (extend (trunc X)) for the others according to the
  // relative type sizes. At the same time, pick an extend to use based on the
  // extend involved in the chosen type.
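  //
  // For example (an illustrative case):
  //    %1:_(s8)  = G_LOAD ...
  //    %2:_(s32) = G_ZEXT %1(s8)
  //    %3:_(s32) = G_SEXT %1(s8)
  // Here the G_SEXT use is preferred, so the load is rewritten to a
  // G_SEXTLOAD producing s32 and the G_ZEXT is instead fed from a G_TRUNC of
  // that result.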
  unsigned PreferredOpcode = MI.getOpcode() == TargetOpcode::G_LOAD
                                 ? TargetOpcode::G_ANYEXT
                                 : MI.getOpcode() == TargetOpcode::G_SEXTLOAD
                                       ? TargetOpcode::G_SEXT
                                       : TargetOpcode::G_ZEXT;
  PreferredTuple Preferred = {LLT(), PreferredOpcode, nullptr};
  for (auto &UseMI : MRI.use_instructions(LoadValue.getReg())) {
    if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||
        UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
        UseMI.getOpcode() == TargetOpcode::G_ANYEXT) {
      Preferred = ChoosePreferredUse(Preferred,
                                     MRI.getType(UseMI.getOperand(0).getReg()),
                                     UseMI.getOpcode(), &UseMI);
    }
  }

  // There were no extends.
  if (!Preferred.MI)
    return false;
  // It should be impossible to choose an extend without selecting a different
  // type since by definition the result of an extend is larger.
  assert(Preferred.Ty != LoadValueTy && "Extending to same type?");

  // Rewrite the load to the chosen extending load.
  unsigned ChosenDstReg = Preferred.MI->getOperand(0).getReg();
  MI.setDesc(
      Builder.getTII().get(Preferred.ExtendOpcode == TargetOpcode::G_SEXT
                               ? TargetOpcode::G_SEXTLOAD
                               : Preferred.ExtendOpcode == TargetOpcode::G_ZEXT
                                     ? TargetOpcode::G_ZEXTLOAD
                                     : TargetOpcode::G_LOAD));

  // Rewrite all the uses to fix up the types.
  SmallVector<MachineInstr *, 1> ScheduleForErase;
  SmallVector<InsertionPoint, 4> ScheduleForInsert;
  for (auto &UseMO : MRI.use_operands(LoadValue.getReg())) {
    MachineInstr *UseMI = UseMO.getParent();

    // If the extend is compatible with the preferred extend then we should fix
    // up the type and extend so that it uses the preferred use.
    if (UseMI->getOpcode() == Preferred.ExtendOpcode ||
        UseMI->getOpcode() == TargetOpcode::G_ANYEXT) {
      unsigned UseDstReg = UseMI->getOperand(0).getReg();
      unsigned UseSrcReg = UseMI->getOperand(1).getReg();
      const LLT &UseDstTy = MRI.getType(UseDstReg);
      if (UseDstReg != ChosenDstReg) {
        if (Preferred.Ty == UseDstTy) {
          // If the use has the same type as the preferred use, then merge
          // the vregs and erase the extend. For example:
          //    %1:_(s8) = G_LOAD ...
          //    %2:_(s32) = G_SEXT %1(s8)
          //    %3:_(s32) = G_ANYEXT %1(s8)
          //    ... = ... %3(s32)
          // rewrites to:
          //    %2:_(s32) = G_SEXTLOAD ...
          //    ... = ... %2(s32)
          MRI.replaceRegWith(UseDstReg, ChosenDstReg);
          ScheduleForErase.push_back(UseMO.getParent());
        } else if (Preferred.Ty.getSizeInBits() < UseDstTy.getSizeInBits()) {
          // If the preferred size is smaller, then keep the extend but extend
          // from the result of the extending load. For example:
          //    %1:_(s8) = G_LOAD ...
          //    %2:_(s32) = G_SEXT %1(s8)
          //    %3:_(s64) = G_ANYEXT %1(s8)
          //    ... = ... %3(s64)
          // rewrites to:
          //    %2:_(s32) = G_SEXTLOAD ...
          //    %3:_(s64) = G_ANYEXT %2:_(s32)
          //    ... = ... %3(s64)
          MRI.replaceRegWith(UseSrcReg, ChosenDstReg);
        } else {
          // If the preferred size is larger, then insert a truncate. For
          // example:
          //    %1:_(s8) = G_LOAD ...
          //    %2:_(s64) = G_SEXT %1(s8)
          //    %3:_(s32) = G_ZEXT %1(s8)
          //    ... = ... %3(s32)
          // rewrites to:
          //    %2:_(s64) = G_SEXTLOAD ...
          //    %4:_(s8) = G_TRUNC %2:_(s64)
          //    %3:_(s32) = G_ZEXT %4:_(s8)
          //    ... = ... %3(s32)
          InsertInsnsWithoutSideEffectsBeforeUse(
              Builder, MI, UseMO,
              [&](MachineBasicBlock *InsertIntoBB,
                  MachineBasicBlock::iterator InsertBefore) {
                ScheduleForInsert.emplace_back(&UseMO, InsertIntoBB,
                                               InsertBefore);
              });
        }
        continue;
      }
      // The use is the preferred extend we chose earlier. We're going to
      // update the load to def this value directly so just erase the old
      // extend.
      ScheduleForErase.push_back(UseMO.getParent());
      continue;
    }

    // The use isn't an extend. Truncate back to the type we originally loaded.
    // This is free on many targets.
    InsertInsnsWithoutSideEffectsBeforeUse(
        Builder, MI, UseMO,
        [&](MachineBasicBlock *InsertIntoBB,
            MachineBasicBlock::iterator InsertBefore) {
          ScheduleForInsert.emplace_back(&UseMO, InsertIntoBB, InsertBefore);
        });
  }

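  // Materialize the scheduled truncates now, caching one G_TRUNC per block so
  // that several rewritten uses in the same block share a single instruction.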
  DenseMap<MachineBasicBlock *, MachineInstr *> EmittedInsns;
  for (auto &InsertionInfo : ScheduleForInsert) {
    MachineOperand *UseMO = InsertionInfo.UseMO;
    MachineBasicBlock *InsertIntoBB = InsertionInfo.InsertIntoBB;
    MachineBasicBlock::iterator InsertBefore = InsertionInfo.InsertBefore;

    MachineInstr *PreviouslyEmitted = EmittedInsns.lookup(InsertIntoBB);
    if (PreviouslyEmitted) {
      UseMO->setReg(PreviouslyEmitted->getOperand(0).getReg());
      continue;
    }

    Builder.setInsertPt(*InsertIntoBB, InsertBefore);
    unsigned NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg());
    MachineInstr *NewMI = Builder.buildTrunc(NewDstReg, ChosenDstReg);
    EmittedInsns[InsertIntoBB] = NewMI;
    UseMO->setReg(NewDstReg);
    Observer.createdInstr(*NewMI);
  }
  for (auto &EraseMI : ScheduleForErase) {
    Observer.erasingInstr(*EraseMI);
    EraseMI->eraseFromParent();
  }
  MI.getOperand(0).setReg(ChosenDstReg);

  return true;
}

bool CombinerHelper::tryCombine(MachineInstr &MI) {
  if (tryCombineCopy(MI))
    return true;
  return tryCombineExtendingLoads(MI);
}