//===-- lib/CodeGen/GlobalISel/CombinerHelper.cpp --------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 #include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
9 #include "llvm/CodeGen/GlobalISel/Combiner.h"
10 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
11 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
12 #include "llvm/CodeGen/GlobalISel/Utils.h"
13 #include "llvm/CodeGen/MachineInstr.h"
14 #include "llvm/CodeGen/MachineRegisterInfo.h"
15 #include "llvm/CodeGen/TargetInstrInfo.h"
16 
17 #define DEBUG_TYPE "gi-combiner"
18 
19 using namespace llvm;
20 
21 CombinerHelper::CombinerHelper(GISelChangeObserver &Observer,
22                                MachineIRBuilder &B)
23     : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer) {}
24 
25 void CombinerHelper::replaceRegWith(MachineRegisterInfo &MRI, unsigned FromReg,
26                                     unsigned ToReg) const {
27   Observer.changingAllUsesOfReg(MRI, FromReg);
28 
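  // If ToReg's attributes (class, bank, type) can be constrained to cover
  // FromReg's, rewrite every occurrence of FromReg directly; otherwise
  // connect the two registers with a COPY instead.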
29   if (MRI.constrainRegAttrs(ToReg, FromReg))
30     MRI.replaceRegWith(FromReg, ToReg);
31   else
32     Builder.buildCopy(ToReg, FromReg);
33 
34   Observer.finishedChangingAllUsesOfReg();
35 }
36 
37 void CombinerHelper::replaceRegOpWith(MachineRegisterInfo &MRI,
38                                       MachineOperand &FromRegOp,
39                                       unsigned ToReg) const {
40   assert(FromRegOp.getParent() && "Expected an operand in an MI");
41   Observer.changingInstr(*FromRegOp.getParent());
42 
43   FromRegOp.setReg(ToReg);
44 
45   Observer.changedInstr(*FromRegOp.getParent());
46 }
47 
48 bool CombinerHelper::tryCombineCopy(MachineInstr &MI) {
49   if (MI.getOpcode() != TargetOpcode::COPY)
50     return false;
51   unsigned DstReg = MI.getOperand(0).getReg();
52   unsigned SrcReg = MI.getOperand(1).getReg();
53   LLT DstTy = MRI.getType(DstReg);
54   LLT SrcTy = MRI.getType(SrcReg);
55   // Simple Copy Propagation.
56   // a(sx) = COPY b(sx) -> Replace all uses of a with b.
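  // For instance, a minimal sketch:
  //    %1:_(s32) = COPY %0(s32)
  //    %2:_(s32) = G_ADD %1, %1
  // rewrites to:
  //    %2:_(s32) = G_ADD %0, %0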
57   if (DstTy.isValid() && SrcTy.isValid() && DstTy == SrcTy) {
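    // Erase the COPY first so that its own operands are not rewritten by
    // replaceRegWith below.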
58     MI.eraseFromParent();
59     replaceRegWith(MRI, DstReg, SrcReg);
60     return true;
61   }
62   return false;
63 }
64 
65 namespace {
66 struct PreferredTuple {
67   LLT Ty;                // The result type of the extend.
68   unsigned ExtendOpcode; // G_ANYEXT/G_SEXT/G_ZEXT
69   MachineInstr *MI;
70 };
71 
/// Select a preference between two uses. CurrentUse is the current preference
/// while the *ForCandidate arguments describe the candidate under
/// consideration.
74 PreferredTuple ChoosePreferredUse(PreferredTuple &CurrentUse,
75                                   const LLT &TyForCandidate,
76                                   unsigned OpcodeForCandidate,
77                                   MachineInstr *MIForCandidate) {
78   if (!CurrentUse.Ty.isValid()) {
79     if (CurrentUse.ExtendOpcode == OpcodeForCandidate ||
80         CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT)
81       return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
82     return CurrentUse;
83   }
84 
  // We permit the extend to be hoisted through basic blocks, but this is only
  // sensible if the target has extending loads. If the legalizer later lowers
  // the result back into a load and an extend, the net effect is merely
  // hoisting the extend up to the load.
89 
90   // Prefer defined extensions to undefined extensions as these are more
91   // likely to reduce the number of instructions.
92   if (OpcodeForCandidate == TargetOpcode::G_ANYEXT &&
93       CurrentUse.ExtendOpcode != TargetOpcode::G_ANYEXT)
94     return CurrentUse;
95   else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT &&
96            OpcodeForCandidate != TargetOpcode::G_ANYEXT)
97     return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
98 
  // Prefer sign extensions to zero extensions as sign-extensions tend to be
  // more expensive, so there is more to gain from folding the sign extend
  // into the load.
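  // For instance, a minimal sketch of the shape involved:
  //    %1:_(s8) = G_LOAD %p
  //    %2:_(s32) = G_SEXT %1(s8)
  //    %3:_(s32) = G_ZEXT %1(s8)
  // Choosing the G_SEXT here folds the expensive extend into a G_SEXTLOAD,
  // leaving only the cheaper zero extend to be computed from its result.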
101   if (CurrentUse.Ty == TyForCandidate) {
102     if (CurrentUse.ExtendOpcode == TargetOpcode::G_SEXT &&
103         OpcodeForCandidate == TargetOpcode::G_ZEXT)
104       return CurrentUse;
105     else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT &&
106              OpcodeForCandidate == TargetOpcode::G_SEXT)
107       return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
108   }
109 
  // This is potentially target specific. We've chosen the largest type
  // because G_TRUNC is usually free. One potential catch with this is that
  // some targets have fewer large registers than small ones, so this choice
  // potentially lengthens the live range of the larger value.
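  // For example, given %2:_(s32) = G_SEXT %1 and %3:_(s64) = G_SEXT %1 of the
  // same loaded value %1, we pick s64 and later rebuild the s32 value with a
  // (usually free) G_TRUNC.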
115   if (TyForCandidate.getSizeInBits() > CurrentUse.Ty.getSizeInBits()) {
116     return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
117   }
118   return CurrentUse;
119 }
120 
121 /// Find a suitable place to insert some instructions and insert them. This
122 /// function accounts for special cases like inserting before a PHI node.
/// The current strategy for inserting before PHIs is to duplicate the
/// instructions for each predecessor. However, while that's ok for G_TRUNC
/// on most targets since it generally requires no code, other targets/cases
/// may want to try harder to find a dominating block.
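/// For example, for a use such as %phi:_(s8) = G_PHI %v(s8), %bb.1, ... the
/// new instructions are inserted into the predecessor %bb.1 (after its PHIs,
/// or just after the def if %bb.1 also defines %v) rather than immediately
/// before the G_PHI itself.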
127 static void InsertInsnsWithoutSideEffectsBeforeUse(
128     MachineIRBuilder &Builder, MachineInstr &DefMI, MachineOperand &UseMO,
129     std::function<void(MachineBasicBlock *, MachineBasicBlock::iterator)>
130         Inserter) {
131   MachineInstr &UseMI = *UseMO.getParent();
132 
133   MachineBasicBlock *InsertBB = UseMI.getParent();
134 
  // If the use is a PHI then we want the predecessor block instead. PHI
  // operands come in (value, predecessor) pairs, so the predecessor MBB
  // operand immediately follows the register use.
136   if (UseMI.isPHI()) {
137     MachineOperand *PredBB = std::next(&UseMO);
138     InsertBB = PredBB->getMBB();
139   }
140 
141   // If the block is the same block as the def then we want to insert just after
142   // the def instead of at the start of the block.
143   if (InsertBB == DefMI.getParent()) {
144     MachineBasicBlock::iterator InsertPt = &DefMI;
145     Inserter(InsertBB, std::next(InsertPt));
146     return;
147   }
148 
  // Otherwise we want the start of the BB, after any PHI nodes.
150   Inserter(InsertBB, InsertBB->getFirstNonPHI());
151 }
152 } // end anonymous namespace
153 
154 bool CombinerHelper::tryCombineExtendingLoads(MachineInstr &MI) {
155   struct InsertionPoint {
156     MachineOperand *UseMO;
157     MachineBasicBlock *InsertIntoBB;
158     MachineBasicBlock::iterator InsertBefore;
159     InsertionPoint(MachineOperand *UseMO, MachineBasicBlock *InsertIntoBB,
160                    MachineBasicBlock::iterator InsertBefore)
161         : UseMO(UseMO), InsertIntoBB(InsertIntoBB), InsertBefore(InsertBefore) {
162     }
163   };
164 
  // We match the loads and follow the uses to the extend instead of matching
  // the extends and following the def to the load. This is because the load
  // must remain in the same position for correctness (unless we also add code
  // to find a safe place to sink it) whereas the extend is freely movable.
  // It also avoids duplicating the load, which would be incorrect for
  // volatile accesses and wasteful in general.
171 
172   if (MI.getOpcode() != TargetOpcode::G_LOAD &&
173       MI.getOpcode() != TargetOpcode::G_SEXTLOAD &&
174       MI.getOpcode() != TargetOpcode::G_ZEXTLOAD)
175     return false;
176 
177   auto &LoadValue = MI.getOperand(0);
178   assert(LoadValue.isReg() && "Result wasn't a register?");
179 
180   LLT LoadValueTy = MRI.getType(LoadValue.getReg());
181   if (!LoadValueTy.isScalar())
182     return false;
183 
184   // Most architectures are going to legalize <s8 loads into at least a 1 byte
185   // load, and the MMOs can only describe memory accesses in multiples of bytes.
186   // If we try to perform extload combining on those, we can end up with
187   // %a(s8) = extload %ptr (load 1 byte from %ptr)
188   // ... which is an illegal extload instruction.
189   if (LoadValueTy.getSizeInBits() < 8)
190     return false;
191 
  // Find the preferred type aside from the any-extends (unless it's the only
  // one) and non-extending ops. We'll emit an extending load to that type and
  // emit a variant of (extend (trunc X)) for the others according to the
  // relative type sizes. At the same time, pick an extend to use based on the
  // extend performed by the chosen use.
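  // The load's own opcode seeds the preference: a plain G_LOAD has no
  // preference yet (G_ANYEXT), while G_SEXTLOAD and G_ZEXTLOAD must keep
  // their sign/zero-extending semantics.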
197   unsigned PreferredOpcode = MI.getOpcode() == TargetOpcode::G_LOAD
198                                  ? TargetOpcode::G_ANYEXT
199                                  : MI.getOpcode() == TargetOpcode::G_SEXTLOAD
200                                        ? TargetOpcode::G_SEXT
201                                        : TargetOpcode::G_ZEXT;
202   PreferredTuple Preferred = {LLT(), PreferredOpcode, nullptr};
203   for (auto &UseMI : MRI.use_instructions(LoadValue.getReg())) {
204     if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||
205         UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
206         UseMI.getOpcode() == TargetOpcode::G_ANYEXT) {
207       Preferred = ChoosePreferredUse(Preferred,
208                                      MRI.getType(UseMI.getOperand(0).getReg()),
209                                      UseMI.getOpcode(), &UseMI);
210     }
211   }
212 
  // There were no extends, so there is nothing to do.
  if (!Preferred.MI)
    return false;
  // It should be impossible to choose an extend without selecting a different
  // type since by definition the result of an extend is larger.
218   assert(Preferred.Ty != LoadValueTy && "Extending to same type?");
219 
220   LLVM_DEBUG(dbgs() << "Preferred use is: " << *Preferred.MI);
221 
222   // Rewrite the load to the chosen extending load.
223   unsigned ChosenDstReg = Preferred.MI->getOperand(0).getReg();
224   Observer.changingInstr(MI);
225   MI.setDesc(
226       Builder.getTII().get(Preferred.ExtendOpcode == TargetOpcode::G_SEXT
227                                ? TargetOpcode::G_SEXTLOAD
228                                : Preferred.ExtendOpcode == TargetOpcode::G_ZEXT
229                                      ? TargetOpcode::G_ZEXTLOAD
230                                      : TargetOpcode::G_LOAD));
231 
232   // Rewrite all the uses to fix up the types.
233   SmallVector<MachineInstr *, 1> ScheduleForErase;
234   SmallVector<InsertionPoint, 4> ScheduleForInsert;
235   for (auto &UseMO : MRI.use_operands(LoadValue.getReg())) {
236     MachineInstr *UseMI = UseMO.getParent();
237 
    // If the extend is compatible with the preferred extend then we should fix
    // up the type and extend so that it feeds from the preferred use instead
    // of the original load.
240     if (UseMI->getOpcode() == Preferred.ExtendOpcode ||
241         UseMI->getOpcode() == TargetOpcode::G_ANYEXT) {
242       unsigned UseDstReg = UseMI->getOperand(0).getReg();
243       MachineOperand &UseSrcMO = UseMI->getOperand(1);
244       const LLT &UseDstTy = MRI.getType(UseDstReg);
245       if (UseDstReg != ChosenDstReg) {
246         if (Preferred.Ty == UseDstTy) {
247           // If the use has the same type as the preferred use, then merge
248           // the vregs and erase the extend. For example:
249           //    %1:_(s8) = G_LOAD ...
250           //    %2:_(s32) = G_SEXT %1(s8)
251           //    %3:_(s32) = G_ANYEXT %1(s8)
252           //    ... = ... %3(s32)
253           // rewrites to:
254           //    %2:_(s32) = G_SEXTLOAD ...
255           //    ... = ... %2(s32)
256           replaceRegWith(MRI, UseDstReg, ChosenDstReg);
257           ScheduleForErase.push_back(UseMO.getParent());
258         } else if (Preferred.Ty.getSizeInBits() < UseDstTy.getSizeInBits()) {
259           // If the preferred size is smaller, then keep the extend but extend
260           // from the result of the extending load. For example:
261           //    %1:_(s8) = G_LOAD ...
262           //    %2:_(s32) = G_SEXT %1(s8)
263           //    %3:_(s64) = G_ANYEXT %1(s8)
264           //    ... = ... %3(s64)
          // rewrites to:
266           //    %2:_(s32) = G_SEXTLOAD ...
267           //    %3:_(s64) = G_ANYEXT %2:_(s32)
268           //    ... = ... %3(s64)
269           replaceRegOpWith(MRI, UseSrcMO, ChosenDstReg);
270         } else {
271           // If the preferred size is large, then insert a truncate. For
272           // example:
273           //    %1:_(s8) = G_LOAD ...
274           //    %2:_(s64) = G_SEXT %1(s8)
275           //    %3:_(s32) = G_ZEXT %1(s8)
276           //    ... = ... %3(s32)
          // rewrites to:
          //    %2:_(s64) = G_SEXTLOAD ...
          //    %4:_(s8) = G_TRUNC %2(s64)
          //    %3:_(s32) = G_ZEXT %4(s8)
          //    ... = ... %3(s32)
282           InsertInsnsWithoutSideEffectsBeforeUse(
283               Builder, MI, UseMO,
284               [&](MachineBasicBlock *InsertIntoBB,
285                   MachineBasicBlock::iterator InsertBefore) {
                ScheduleForInsert.emplace_back(&UseMO, InsertIntoBB,
                                               InsertBefore);
287               });
288         }
289         continue;
290       }
      // This use is the preferred extend we chose earlier. We're going to
      // rewrite the load to def this value directly, so just erase the old
      // extend.
294       ScheduleForErase.push_back(UseMO.getParent());
295       continue;
296     }
297 
298     // The use isn't an extend. Truncate back to the type we originally loaded.
299     // This is free on many targets.
300     InsertInsnsWithoutSideEffectsBeforeUse(
301         Builder, MI, UseMO,
302         [&](MachineBasicBlock *InsertIntoBB,
303             MachineBasicBlock::iterator InsertBefore) {
304           ScheduleForInsert.emplace_back(&UseMO, InsertIntoBB, InsertBefore);
305         });
306   }
307 
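  // Remember the G_TRUNC emitted into each block so that later uses scheduled
  // for the same block can reuse it instead of emitting a duplicate.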
308   DenseMap<MachineBasicBlock *, MachineInstr *> EmittedInsns;
309   for (auto &InsertionInfo : ScheduleForInsert) {
310     MachineOperand *UseMO = InsertionInfo.UseMO;
311     MachineBasicBlock *InsertIntoBB = InsertionInfo.InsertIntoBB;
312     MachineBasicBlock::iterator InsertBefore = InsertionInfo.InsertBefore;
313 
314     MachineInstr *PreviouslyEmitted = EmittedInsns.lookup(InsertIntoBB);
315     if (PreviouslyEmitted) {
316       Observer.changingInstr(*UseMO->getParent());
317       UseMO->setReg(PreviouslyEmitted->getOperand(0).getReg());
318       Observer.changedInstr(*UseMO->getParent());
319       continue;
320     }
321 
322     Builder.setInsertPt(*InsertIntoBB, InsertBefore);
323     unsigned NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg());
324     MachineInstr *NewMI = Builder.buildTrunc(NewDstReg, ChosenDstReg);
325     EmittedInsns[InsertIntoBB] = NewMI;
326     replaceRegOpWith(MRI, *UseMO, NewDstReg);
327   }
328   for (auto &EraseMI : ScheduleForErase) {
329     Observer.erasingInstr(*EraseMI);
330     EraseMI->eraseFromParent();
331   }
332   MI.getOperand(0).setReg(ChosenDstReg);
333   Observer.changedInstr(MI);
334 
335   return true;
336 }
337 
338 bool CombinerHelper::tryCombine(MachineInstr &MI) {
339   if (tryCombineCopy(MI))
340     return true;
341   return tryCombineExtendingLoads(MI);
342 }
343