//===-- lib/CodeGen/GlobalISel/CombinerHelper.cpp -------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 #include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
9 #include "llvm/CodeGen/GlobalISel/Combiner.h"
10 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
11 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
12 #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
13 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
14 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
15 #include "llvm/CodeGen/GlobalISel/Utils.h"
16 #include "llvm/CodeGen/MachineDominators.h"
17 #include "llvm/CodeGen/MachineFrameInfo.h"
18 #include "llvm/CodeGen/MachineInstr.h"
19 #include "llvm/CodeGen/MachineMemOperand.h"
20 #include "llvm/CodeGen/MachineRegisterInfo.h"
21 #include "llvm/CodeGen/TargetInstrInfo.h"
22 #include "llvm/CodeGen/TargetLowering.h"
23 #include "llvm/Support/MathExtras.h"
24 #include "llvm/Target/TargetMachine.h"
25 
26 #define DEBUG_TYPE "gi-combiner"
27 
28 using namespace llvm;
29 using namespace MIPatternMatch;
30 
31 // Option to allow testing of the combiner while no targets know about indexed
32 // addressing.
33 static cl::opt<bool>
34     ForceLegalIndexing("force-legal-indexing", cl::Hidden, cl::init(false),
35                        cl::desc("Force all indexed operations to be "
36                                 "legal for the GlobalISel combiner"));
37 
38 CombinerHelper::CombinerHelper(GISelChangeObserver &Observer,
39                                MachineIRBuilder &B, GISelKnownBits *KB,
40                                MachineDominatorTree *MDT,
41                                const LegalizerInfo *LI)
42     : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer),
43       KB(KB), MDT(MDT), LI(LI) {
44   (void)this->KB;
45 }
46 
47 const TargetLowering &CombinerHelper::getTargetLowering() const {
48   return *Builder.getMF().getSubtarget().getTargetLowering();
49 }
50 
51 bool CombinerHelper::isLegalOrBeforeLegalizer(
52     const LegalityQuery &Query) const {
53   return !LI || LI->getAction(Query).Action == LegalizeActions::Legal;
54 }
55 
56 void CombinerHelper::replaceRegWith(MachineRegisterInfo &MRI, Register FromReg,
57                                     Register ToReg) const {
58   Observer.changingAllUsesOfReg(MRI, FromReg);
59 
60   if (MRI.constrainRegAttrs(ToReg, FromReg))
61     MRI.replaceRegWith(FromReg, ToReg);
62   else
63     Builder.buildCopy(ToReg, FromReg);
64 
65   Observer.finishedChangingAllUsesOfReg();
66 }
67 
68 void CombinerHelper::replaceRegOpWith(MachineRegisterInfo &MRI,
69                                       MachineOperand &FromRegOp,
70                                       Register ToReg) const {
71   assert(FromRegOp.getParent() && "Expected an operand in an MI");
72   Observer.changingInstr(*FromRegOp.getParent());
73 
74   FromRegOp.setReg(ToReg);
75 
76   Observer.changedInstr(*FromRegOp.getParent());
77 }
78 
79 bool CombinerHelper::tryCombineCopy(MachineInstr &MI) {
80   if (matchCombineCopy(MI)) {
81     applyCombineCopy(MI);
82     return true;
83   }
84   return false;
85 }
86 bool CombinerHelper::matchCombineCopy(MachineInstr &MI) {
87   if (MI.getOpcode() != TargetOpcode::COPY)
88     return false;
89   Register DstReg = MI.getOperand(0).getReg();
90   Register SrcReg = MI.getOperand(1).getReg();
91   return canReplaceReg(DstReg, SrcReg, MRI);
92 }
93 void CombinerHelper::applyCombineCopy(MachineInstr &MI) {
94   Register DstReg = MI.getOperand(0).getReg();
95   Register SrcReg = MI.getOperand(1).getReg();
96   MI.eraseFromParent();
97   replaceRegWith(MRI, DstReg, SrcReg);
98 }
99 
100 bool CombinerHelper::tryCombineConcatVectors(MachineInstr &MI) {
101   bool IsUndef = false;
102   SmallVector<Register, 4> Ops;
103   if (matchCombineConcatVectors(MI, IsUndef, Ops)) {
104     applyCombineConcatVectors(MI, IsUndef, Ops);
105     return true;
106   }
107   return false;
108 }
109 
110 bool CombinerHelper::matchCombineConcatVectors(MachineInstr &MI, bool &IsUndef,
111                                                SmallVectorImpl<Register> &Ops) {
112   assert(MI.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
113          "Invalid instruction");
114   IsUndef = true;
115   MachineInstr *Undef = nullptr;
116 
  // Walk over all the operands of the concat_vectors and check whether each
  // of them is itself a build_vector or an undef.
  // Then collect their operands in Ops.
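  // For example (vreg numbers are illustrative):
  //   %2:_(<2 x s32>) = G_BUILD_VECTOR %0(s32), %1(s32)
  //   %3:_(<2 x s32>) = G_IMPLICIT_DEF
  //   %4:_(<4 x s32>) = G_CONCAT_VECTORS %2(<2 x s32>), %3(<2 x s32>)
  // is flattened by the apply step into:
  //   %5:_(s32) = G_IMPLICIT_DEF
  //   %4:_(<4 x s32>) = G_BUILD_VECTOR %0(s32), %1(s32), %5(s32), %5(s32)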
120   for (const MachineOperand &MO : MI.uses()) {
121     Register Reg = MO.getReg();
122     MachineInstr *Def = MRI.getVRegDef(Reg);
123     assert(Def && "Operand not defined");
124     switch (Def->getOpcode()) {
125     case TargetOpcode::G_BUILD_VECTOR:
126       IsUndef = false;
127       // Remember the operands of the build_vector to fold
128       // them into the yet-to-build flattened concat vectors.
129       for (const MachineOperand &BuildVecMO : Def->uses())
130         Ops.push_back(BuildVecMO.getReg());
131       break;
132     case TargetOpcode::G_IMPLICIT_DEF: {
133       LLT OpType = MRI.getType(Reg);
134       // Keep one undef value for all the undef operands.
135       if (!Undef) {
136         Builder.setInsertPt(*MI.getParent(), MI);
137         Undef = Builder.buildUndef(OpType.getScalarType());
138       }
139       assert(MRI.getType(Undef->getOperand(0).getReg()) ==
140                  OpType.getScalarType() &&
141              "All undefs should have the same type");
142       // Break the undef vector in as many scalar elements as needed
143       // for the flattening.
144       for (unsigned EltIdx = 0, EltEnd = OpType.getNumElements();
145            EltIdx != EltEnd; ++EltIdx)
146         Ops.push_back(Undef->getOperand(0).getReg());
147       break;
148     }
149     default:
150       return false;
151     }
152   }
153   return true;
154 }
155 void CombinerHelper::applyCombineConcatVectors(
156     MachineInstr &MI, bool IsUndef, const ArrayRef<Register> Ops) {
  // We determined that the concat_vectors can be flattened.
158   // Generate the flattened build_vector.
159   Register DstReg = MI.getOperand(0).getReg();
160   Builder.setInsertPt(*MI.getParent(), MI);
161   Register NewDstReg = MRI.cloneVirtualRegister(DstReg);
162 
  // Note: IsUndef is sort of redundant. We could have determined it by
  // checking that all Ops are undef.  Alternatively, we could have
  // generated a build_vector of undefs and relied on another combine to
  // clean that up.  For now, given we already gather this information
  // in tryCombineConcatVectors, just save compile time and issue the
  // right thing.
169   if (IsUndef)
170     Builder.buildUndef(NewDstReg);
171   else
172     Builder.buildBuildVector(NewDstReg, Ops);
173   MI.eraseFromParent();
174   replaceRegWith(MRI, DstReg, NewDstReg);
175 }
176 
177 bool CombinerHelper::tryCombineShuffleVector(MachineInstr &MI) {
178   SmallVector<Register, 4> Ops;
179   if (matchCombineShuffleVector(MI, Ops)) {
180     applyCombineShuffleVector(MI, Ops);
181     return true;
182   }
183   return false;
184 }
185 
186 bool CombinerHelper::matchCombineShuffleVector(MachineInstr &MI,
187                                                SmallVectorImpl<Register> &Ops) {
188   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
189          "Invalid instruction kind");
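  // We try to recognize a shuffle that is just a concatenation of its source
  // vectors, e.g. (vreg numbers are illustrative) with <2 x s32> sources:
  //   %3:_(<4 x s32>) = G_SHUFFLE_VECTOR %1(<2 x s32>), %2,
  //                                      shufflemask(0, 1, 2, 3)
  // can instead be expressed as:
  //   %3:_(<4 x s32>) = G_CONCAT_VECTORS %1(<2 x s32>), %2(<2 x s32>)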
190   LLT DstType = MRI.getType(MI.getOperand(0).getReg());
191   Register Src1 = MI.getOperand(1).getReg();
192   LLT SrcType = MRI.getType(Src1);
  // As bizarre as it may look, a shuffle vector can actually produce a
  // scalar! This is because at the IR level a <1 x ty> shuffle
  // vector is perfectly valid.
196   unsigned DstNumElts = DstType.isVector() ? DstType.getNumElements() : 1;
197   unsigned SrcNumElts = SrcType.isVector() ? SrcType.getNumElements() : 1;
198 
  // If the resulting vector is smaller than the size of the source
  // vectors being concatenated, we won't be able to turn the
  // shuffle vector into a concat_vectors.
202   //
203   // Note: We may still be able to produce a concat_vectors fed by
204   //       extract_vector_elt and so on. It is less clear that would
205   //       be better though, so don't bother for now.
206   //
  // If the destination is a scalar, the size of the sources doesn't
  // matter; we will lower the shuffle to a plain copy. This will
  // work only if the source and destination have the same size, but
  // that's covered by the next condition.
211   //
  // TODO: If the sizes of the source and destination don't match,
  //       we could still emit an extract_vector_elt in that case.
214   if (DstNumElts < 2 * SrcNumElts && DstNumElts != 1)
215     return false;
216 
217   // Check that the shuffle mask can be broken evenly between the
218   // different sources.
219   if (DstNumElts % SrcNumElts != 0)
220     return false;
221 
222   // Mask length is a multiple of the source vector length.
223   // Check if the shuffle is some kind of concatenation of the input
224   // vectors.
225   unsigned NumConcat = DstNumElts / SrcNumElts;
226   SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
227   ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
228   for (unsigned i = 0; i != DstNumElts; ++i) {
229     int Idx = Mask[i];
230     // Undef value.
231     if (Idx < 0)
232       continue;
233     // Ensure the indices in each SrcType sized piece are sequential and that
234     // the same source is used for the whole piece.
235     if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
236         (ConcatSrcs[i / SrcNumElts] >= 0 &&
237          ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts)))
238       return false;
239     // Remember which source this index came from.
240     ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
241   }
242 
243   // The shuffle is concatenating multiple vectors together.
244   // Collect the different operands for that.
245   Register UndefReg;
246   Register Src2 = MI.getOperand(2).getReg();
247   for (auto Src : ConcatSrcs) {
248     if (Src < 0) {
249       if (!UndefReg) {
250         Builder.setInsertPt(*MI.getParent(), MI);
251         UndefReg = Builder.buildUndef(SrcType).getReg(0);
252       }
253       Ops.push_back(UndefReg);
254     } else if (Src == 0)
255       Ops.push_back(Src1);
256     else
257       Ops.push_back(Src2);
258   }
259   return true;
260 }
261 
262 void CombinerHelper::applyCombineShuffleVector(MachineInstr &MI,
263                                                const ArrayRef<Register> Ops) {
264   Register DstReg = MI.getOperand(0).getReg();
265   Builder.setInsertPt(*MI.getParent(), MI);
266   Register NewDstReg = MRI.cloneVirtualRegister(DstReg);
267 
268   if (Ops.size() == 1)
269     Builder.buildCopy(NewDstReg, Ops[0]);
270   else
271     Builder.buildMerge(NewDstReg, Ops);
272 
273   MI.eraseFromParent();
274   replaceRegWith(MRI, DstReg, NewDstReg);
275 }
276 
277 namespace {
278 
/// Select a preference between two uses. CurrentUse is the current preference
/// while the *ForCandidate arguments describe the candidate under
/// consideration.
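/// For example, if a load is used by both a G_ZEXT to s32 and a G_SEXT to s64,
/// the s64 G_SEXT ends up preferred: neither is an any-extend and the s64
/// candidate is the larger type.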
281 PreferredTuple ChoosePreferredUse(PreferredTuple &CurrentUse,
282                                   const LLT TyForCandidate,
283                                   unsigned OpcodeForCandidate,
284                                   MachineInstr *MIForCandidate) {
285   if (!CurrentUse.Ty.isValid()) {
286     if (CurrentUse.ExtendOpcode == OpcodeForCandidate ||
287         CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT)
288       return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
289     return CurrentUse;
290   }
291 
292   // We permit the extend to hoist through basic blocks but this is only
293   // sensible if the target has extending loads. If you end up lowering back
294   // into a load and extend during the legalizer then the end result is
295   // hoisting the extend up to the load.
296 
297   // Prefer defined extensions to undefined extensions as these are more
298   // likely to reduce the number of instructions.
299   if (OpcodeForCandidate == TargetOpcode::G_ANYEXT &&
300       CurrentUse.ExtendOpcode != TargetOpcode::G_ANYEXT)
301     return CurrentUse;
302   else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT &&
303            OpcodeForCandidate != TargetOpcode::G_ANYEXT)
304     return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
305 
  // Prefer sign extensions to zero extensions as sign-extensions tend to be
  // more expensive, so there is more to be gained by folding them into the
  // load.
308   if (CurrentUse.Ty == TyForCandidate) {
309     if (CurrentUse.ExtendOpcode == TargetOpcode::G_SEXT &&
310         OpcodeForCandidate == TargetOpcode::G_ZEXT)
311       return CurrentUse;
312     else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT &&
313              OpcodeForCandidate == TargetOpcode::G_SEXT)
314       return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
315   }
316 
317   // This is potentially target specific. We've chosen the largest type
318   // because G_TRUNC is usually free. One potential catch with this is that
319   // some targets have a reduced number of larger registers than smaller
320   // registers and this choice potentially increases the live-range for the
321   // larger value.
322   if (TyForCandidate.getSizeInBits() > CurrentUse.Ty.getSizeInBits()) {
323     return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
324   }
325   return CurrentUse;
326 }
327 
328 /// Find a suitable place to insert some instructions and insert them. This
329 /// function accounts for special cases like inserting before a PHI node.
/// The current strategy for inserting before PHIs is to duplicate the
331 /// instructions for each predecessor. However, while that's ok for G_TRUNC
332 /// on most targets since it generally requires no code, other targets/cases may
333 /// want to try harder to find a dominating block.
334 static void InsertInsnsWithoutSideEffectsBeforeUse(
335     MachineIRBuilder &Builder, MachineInstr &DefMI, MachineOperand &UseMO,
336     std::function<void(MachineBasicBlock *, MachineBasicBlock::iterator,
337                        MachineOperand &UseMO)>
338         Inserter) {
339   MachineInstr &UseMI = *UseMO.getParent();
340 
341   MachineBasicBlock *InsertBB = UseMI.getParent();
342 
343   // If the use is a PHI then we want the predecessor block instead.
344   if (UseMI.isPHI()) {
345     MachineOperand *PredBB = std::next(&UseMO);
346     InsertBB = PredBB->getMBB();
347   }
348 
349   // If the block is the same block as the def then we want to insert just after
350   // the def instead of at the start of the block.
351   if (InsertBB == DefMI.getParent()) {
352     MachineBasicBlock::iterator InsertPt = &DefMI;
353     Inserter(InsertBB, std::next(InsertPt), UseMO);
354     return;
355   }
356 
357   // Otherwise we want the start of the BB
358   Inserter(InsertBB, InsertBB->getFirstNonPHI(), UseMO);
359 }
360 } // end anonymous namespace
361 
362 bool CombinerHelper::tryCombineExtendingLoads(MachineInstr &MI) {
363   PreferredTuple Preferred;
364   if (matchCombineExtendingLoads(MI, Preferred)) {
365     applyCombineExtendingLoads(MI, Preferred);
366     return true;
367   }
368   return false;
369 }
370 
371 bool CombinerHelper::matchCombineExtendingLoads(MachineInstr &MI,
372                                                 PreferredTuple &Preferred) {
373   // We match the loads and follow the uses to the extend instead of matching
374   // the extends and following the def to the load. This is because the load
375   // must remain in the same position for correctness (unless we also add code
376   // to find a safe place to sink it) whereas the extend is freely movable.
  // It also prevents us from duplicating the load, which we must not do for
  // the volatile case and which is undesirable for performance anyway.
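  //
  // The overall effect is, for example (vreg numbers are illustrative):
  //   %1:_(s8) = G_LOAD %ptr (load 1)
  //   %2:_(s32) = G_SEXT %1(s8)
  // becomes:
  //   %2:_(s32) = G_SEXTLOAD %ptr (load 1)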
379 
380   if (MI.getOpcode() != TargetOpcode::G_LOAD &&
381       MI.getOpcode() != TargetOpcode::G_SEXTLOAD &&
382       MI.getOpcode() != TargetOpcode::G_ZEXTLOAD)
383     return false;
384 
385   auto &LoadValue = MI.getOperand(0);
386   assert(LoadValue.isReg() && "Result wasn't a register?");
387 
388   LLT LoadValueTy = MRI.getType(LoadValue.getReg());
389   if (!LoadValueTy.isScalar())
390     return false;
391 
  // Most architectures are going to legalize sub-byte (<s8) loads into at
  // least a 1-byte load, and the MMOs can only describe memory accesses in
  // multiples of bytes. If we try to perform extload combining on those, we
  // can end up with
  //   %a(s8) = extload %ptr (load 1 byte from %ptr)
  // ... which is an illegal extload instruction.
397   if (LoadValueTy.getSizeInBits() < 8)
398     return false;
399 
400   // For non power-of-2 types, they will very likely be legalized into multiple
401   // loads. Don't bother trying to match them into extending loads.
402   if (!isPowerOf2_32(LoadValueTy.getSizeInBits()))
403     return false;
404 
  // Find the preferred type aside from the any-extends (unless it's the only
  // one) and non-extending ops. We'll emit an extending load to that type and
  // emit a variant of (extend (trunc X)) for the others according to the
  // relative type sizes. At the same time, pick an extend to use based on the
  // extend involved in the chosen type.
410   unsigned PreferredOpcode = MI.getOpcode() == TargetOpcode::G_LOAD
411                                  ? TargetOpcode::G_ANYEXT
412                                  : MI.getOpcode() == TargetOpcode::G_SEXTLOAD
413                                        ? TargetOpcode::G_SEXT
414                                        : TargetOpcode::G_ZEXT;
415   Preferred = {LLT(), PreferredOpcode, nullptr};
416   for (auto &UseMI : MRI.use_nodbg_instructions(LoadValue.getReg())) {
417     if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||
418         UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
419         (UseMI.getOpcode() == TargetOpcode::G_ANYEXT)) {
420       // Check for legality.
421       if (LI) {
422         LegalityQuery::MemDesc MMDesc;
423         const auto &MMO = **MI.memoperands_begin();
424         MMDesc.SizeInBits = MMO.getSizeInBits();
425         MMDesc.AlignInBits = MMO.getAlign().value() * 8;
426         MMDesc.Ordering = MMO.getOrdering();
427         LLT UseTy = MRI.getType(UseMI.getOperand(0).getReg());
428         LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
429         if (LI->getAction({MI.getOpcode(), {UseTy, SrcTy}, {MMDesc}}).Action !=
430             LegalizeActions::Legal)
431           continue;
432       }
433       Preferred = ChoosePreferredUse(Preferred,
434                                      MRI.getType(UseMI.getOperand(0).getReg()),
435                                      UseMI.getOpcode(), &UseMI);
436     }
437   }
438 
  // There were no extends.
440   if (!Preferred.MI)
441     return false;
  // It should be impossible to choose an extend without selecting a different
  // type since by definition the result of an extend is larger.
444   assert(Preferred.Ty != LoadValueTy && "Extending to same type?");
445 
446   LLVM_DEBUG(dbgs() << "Preferred use is: " << *Preferred.MI);
447   return true;
448 }
449 
450 void CombinerHelper::applyCombineExtendingLoads(MachineInstr &MI,
451                                                 PreferredTuple &Preferred) {
452   // Rewrite the load to the chosen extending load.
453   Register ChosenDstReg = Preferred.MI->getOperand(0).getReg();
454 
455   // Inserter to insert a truncate back to the original type at a given point
456   // with some basic CSE to limit truncate duplication to one per BB.
457   DenseMap<MachineBasicBlock *, MachineInstr *> EmittedInsns;
458   auto InsertTruncAt = [&](MachineBasicBlock *InsertIntoBB,
459                            MachineBasicBlock::iterator InsertBefore,
460                            MachineOperand &UseMO) {
461     MachineInstr *PreviouslyEmitted = EmittedInsns.lookup(InsertIntoBB);
462     if (PreviouslyEmitted) {
463       Observer.changingInstr(*UseMO.getParent());
464       UseMO.setReg(PreviouslyEmitted->getOperand(0).getReg());
465       Observer.changedInstr(*UseMO.getParent());
466       return;
467     }
468 
469     Builder.setInsertPt(*InsertIntoBB, InsertBefore);
470     Register NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg());
471     MachineInstr *NewMI = Builder.buildTrunc(NewDstReg, ChosenDstReg);
472     EmittedInsns[InsertIntoBB] = NewMI;
473     replaceRegOpWith(MRI, UseMO, NewDstReg);
474   };
475 
476   Observer.changingInstr(MI);
477   MI.setDesc(
478       Builder.getTII().get(Preferred.ExtendOpcode == TargetOpcode::G_SEXT
479                                ? TargetOpcode::G_SEXTLOAD
480                                : Preferred.ExtendOpcode == TargetOpcode::G_ZEXT
481                                      ? TargetOpcode::G_ZEXTLOAD
482                                      : TargetOpcode::G_LOAD));
483 
484   // Rewrite all the uses to fix up the types.
485   auto &LoadValue = MI.getOperand(0);
486   SmallVector<MachineOperand *, 4> Uses;
487   for (auto &UseMO : MRI.use_operands(LoadValue.getReg()))
488     Uses.push_back(&UseMO);
489 
490   for (auto *UseMO : Uses) {
491     MachineInstr *UseMI = UseMO->getParent();
492 
493     // If the extend is compatible with the preferred extend then we should fix
494     // up the type and extend so that it uses the preferred use.
495     if (UseMI->getOpcode() == Preferred.ExtendOpcode ||
496         UseMI->getOpcode() == TargetOpcode::G_ANYEXT) {
497       Register UseDstReg = UseMI->getOperand(0).getReg();
498       MachineOperand &UseSrcMO = UseMI->getOperand(1);
499       const LLT UseDstTy = MRI.getType(UseDstReg);
500       if (UseDstReg != ChosenDstReg) {
501         if (Preferred.Ty == UseDstTy) {
502           // If the use has the same type as the preferred use, then merge
503           // the vregs and erase the extend. For example:
504           //    %1:_(s8) = G_LOAD ...
505           //    %2:_(s32) = G_SEXT %1(s8)
506           //    %3:_(s32) = G_ANYEXT %1(s8)
507           //    ... = ... %3(s32)
508           // rewrites to:
509           //    %2:_(s32) = G_SEXTLOAD ...
510           //    ... = ... %2(s32)
511           replaceRegWith(MRI, UseDstReg, ChosenDstReg);
512           Observer.erasingInstr(*UseMO->getParent());
513           UseMO->getParent()->eraseFromParent();
514         } else if (Preferred.Ty.getSizeInBits() < UseDstTy.getSizeInBits()) {
515           // If the preferred size is smaller, then keep the extend but extend
516           // from the result of the extending load. For example:
517           //    %1:_(s8) = G_LOAD ...
518           //    %2:_(s32) = G_SEXT %1(s8)
519           //    %3:_(s64) = G_ANYEXT %1(s8)
520           //    ... = ... %3(s64)
          // rewrites to:
522           //    %2:_(s32) = G_SEXTLOAD ...
523           //    %3:_(s64) = G_ANYEXT %2:_(s32)
524           //    ... = ... %3(s64)
525           replaceRegOpWith(MRI, UseSrcMO, ChosenDstReg);
526         } else {
          // If the preferred size is larger, then insert a truncate. For
          // example:
          //    %1:_(s8) = G_LOAD ...
          //    %2:_(s64) = G_SEXT %1(s8)
          //    %3:_(s32) = G_ANYEXT %1(s8)
          //    ... = ... %3(s32)
          // rewrites to:
          //    %2:_(s64) = G_SEXTLOAD ...
          //    %4:_(s8) = G_TRUNC %2:_(s64)
          //    %3:_(s32) = G_ANYEXT %4:_(s8)
          //    ... = ... %3(s32)
538           InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO,
539                                                  InsertTruncAt);
540         }
541         continue;
542       }
      // This use is the preferred extend we chose earlier (its result register
      // is ChosenDstReg). We're going to update the load to def this value
      // later so just erase the old extend.
546       Observer.erasingInstr(*UseMO->getParent());
547       UseMO->getParent()->eraseFromParent();
548       continue;
549     }
550 
551     // The use isn't an extend. Truncate back to the type we originally loaded.
552     // This is free on many targets.
553     InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO, InsertTruncAt);
554   }
555 
556   MI.getOperand(0).setReg(ChosenDstReg);
557   Observer.changedInstr(MI);
558 }
559 
560 bool CombinerHelper::isPredecessor(const MachineInstr &DefMI,
561                                    const MachineInstr &UseMI) {
562   assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
563          "shouldn't consider debug uses");
564   assert(DefMI.getParent() == UseMI.getParent());
565   if (&DefMI == &UseMI)
566     return false;
567 
568   // Loop through the basic block until we find one of the instructions.
569   MachineBasicBlock::const_iterator I = DefMI.getParent()->begin();
570   for (; &*I != &DefMI && &*I != &UseMI; ++I)
    ;
  return &*I == &DefMI;
574 }
575 
576 bool CombinerHelper::dominates(const MachineInstr &DefMI,
577                                const MachineInstr &UseMI) {
578   assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
579          "shouldn't consider debug uses");
580   if (MDT)
581     return MDT->dominates(&DefMI, &UseMI);
582   else if (DefMI.getParent() != UseMI.getParent())
583     return false;
584 
585   return isPredecessor(DefMI, UseMI);
586 }
587 
588 bool CombinerHelper::matchSextTruncSextLoad(MachineInstr &MI) {
589   assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
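  // Match a G_SEXT_INREG whose source (possibly through a G_TRUNC) comes from
  // a G_SEXTLOAD that already sign-extended from the same width, e.g.
  // (illustrative):
  //   %1:_(s32) = G_SEXTLOAD %ptr (load 2)
  //   %2:_(s32) = G_SEXT_INREG %1, 16
  // Here the G_SEXT_INREG is redundant and can be replaced by a copy of %1.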
590   Register SrcReg = MI.getOperand(1).getReg();
591   Register LoadUser = SrcReg;
592 
593   if (MRI.getType(SrcReg).isVector())
594     return false;
595 
596   Register TruncSrc;
597   if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc))))
598     LoadUser = TruncSrc;
599 
600   uint64_t SizeInBits = MI.getOperand(2).getImm();
601   // If the source is a G_SEXTLOAD from the same bit width, then we don't
602   // need any extend at all, just a truncate.
603   if (auto *LoadMI = getOpcodeDef(TargetOpcode::G_SEXTLOAD, LoadUser, MRI)) {
604     const auto &MMO = **LoadMI->memoperands_begin();
605     // If truncating more than the original extended value, abort.
606     if (TruncSrc && MRI.getType(TruncSrc).getSizeInBits() < MMO.getSizeInBits())
607       return false;
608     if (MMO.getSizeInBits() == SizeInBits)
609       return true;
610   }
611   return false;
612 }
613 
614 bool CombinerHelper::applySextTruncSextLoad(MachineInstr &MI) {
615   assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
616   Builder.setInstrAndDebugLoc(MI);
617   Builder.buildCopy(MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
618   MI.eraseFromParent();
619   return true;
620 }
621 
622 bool CombinerHelper::matchSextInRegOfLoad(
623     MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
624   assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
625 
626   // Only supports scalars for now.
627   if (MRI.getType(MI.getOperand(0).getReg()).isVector())
628     return false;
629 
630   Register SrcReg = MI.getOperand(1).getReg();
631   MachineInstr *LoadDef = getOpcodeDef(TargetOpcode::G_LOAD, SrcReg, MRI);
632   if (!LoadDef || !MRI.hasOneNonDBGUse(LoadDef->getOperand(0).getReg()))
633     return false;
634 
635   // If the sign extend extends from a narrower width than the load's width,
636   // then we can narrow the load width when we combine to a G_SEXTLOAD.
637   auto &MMO = **LoadDef->memoperands_begin();
638   // Don't do this for non-simple loads.
639   if (MMO.isAtomic() || MMO.isVolatile())
640     return false;
641 
642   // Avoid widening the load at all.
643   unsigned NewSizeBits =
644       std::min((uint64_t)MI.getOperand(2).getImm(), MMO.getSizeInBits());
645 
646   // Don't generate G_SEXTLOADs with a < 1 byte width.
647   if (NewSizeBits < 8)
648     return false;
  // Don't bother creating a non-power-of-2 sextload; it will likely be broken
  // up anyway for most targets.
651   if (!isPowerOf2_32(NewSizeBits))
652     return false;
653   MatchInfo = std::make_tuple(LoadDef->getOperand(0).getReg(), NewSizeBits);
654   return true;
655 }
656 
657 bool CombinerHelper::applySextInRegOfLoad(
658     MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
659   assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
660   Register LoadReg;
661   unsigned ScalarSizeBits;
662   std::tie(LoadReg, ScalarSizeBits) = MatchInfo;
663   auto *LoadDef = MRI.getVRegDef(LoadReg);
664   assert(LoadDef && "Expected a load reg");
665 
666   // If we have the following:
667   // %ld = G_LOAD %ptr, (load 2)
668   // %ext = G_SEXT_INREG %ld, 8
669   //    ==>
670   // %ld = G_SEXTLOAD %ptr (load 1)
671 
672   auto &MMO = **LoadDef->memoperands_begin();
673   Builder.setInstrAndDebugLoc(MI);
674   auto &MF = Builder.getMF();
675   auto PtrInfo = MMO.getPointerInfo();
676   auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, ScalarSizeBits / 8);
677   Builder.buildLoadInstr(TargetOpcode::G_SEXTLOAD, MI.getOperand(0).getReg(),
678                          LoadDef->getOperand(1).getReg(), *NewMMO);
679   MI.eraseFromParent();
680   return true;
681 }
682 
683 bool CombinerHelper::findPostIndexCandidate(MachineInstr &MI, Register &Addr,
684                                             Register &Base, Register &Offset) {
685   auto &MF = *MI.getParent()->getParent();
686   const auto &TLI = *MF.getSubtarget().getTargetLowering();
687 
688 #ifndef NDEBUG
689   unsigned Opcode = MI.getOpcode();
690   assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
691          Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
692 #endif
693 
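  // Look for a candidate of the form (illustrative):
  //   %base:_(p0) = ...
  //   ... = G_LOAD %base(p0)                  ; MI, the mem-op
  //   %addr:_(p0) = G_PTR_ADD %base, %offset
  // which can later be combined into a single post-indexed operation that
  // also produces %addr.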
694   Base = MI.getOperand(1).getReg();
695   MachineInstr *BaseDef = MRI.getUniqueVRegDef(Base);
696   if (BaseDef && BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
697     return false;
698 
699   LLVM_DEBUG(dbgs() << "Searching for post-indexing opportunity for: " << MI);
  // FIXME: The following use traversal needs a bail out for pathological
  // cases.
701   for (auto &Use : MRI.use_nodbg_instructions(Base)) {
702     if (Use.getOpcode() != TargetOpcode::G_PTR_ADD)
703       continue;
704 
705     Offset = Use.getOperand(2).getReg();
706     if (!ForceLegalIndexing &&
707         !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ false, MRI)) {
708       LLVM_DEBUG(dbgs() << "    Ignoring candidate with illegal addrmode: "
709                         << Use);
710       continue;
711     }
712 
713     // Make sure the offset calculation is before the potentially indexed op.
714     // FIXME: we really care about dependency here. The offset calculation might
715     // be movable.
716     MachineInstr *OffsetDef = MRI.getUniqueVRegDef(Offset);
717     if (!OffsetDef || !dominates(*OffsetDef, MI)) {
718       LLVM_DEBUG(dbgs() << "    Ignoring candidate with offset after mem-op: "
719                         << Use);
720       continue;
721     }
722 
723     // FIXME: check whether all uses of Base are load/store with foldable
724     // addressing modes. If so, using the normal addr-modes is better than
725     // forming an indexed one.
726 
727     bool MemOpDominatesAddrUses = true;
728     for (auto &PtrAddUse :
729          MRI.use_nodbg_instructions(Use.getOperand(0).getReg())) {
730       if (!dominates(MI, PtrAddUse)) {
731         MemOpDominatesAddrUses = false;
732         break;
733       }
734     }
735 
736     if (!MemOpDominatesAddrUses) {
737       LLVM_DEBUG(
738           dbgs() << "    Ignoring candidate as memop does not dominate uses: "
739                  << Use);
740       continue;
741     }
742 
743     LLVM_DEBUG(dbgs() << "    Found match: " << Use);
744     Addr = Use.getOperand(0).getReg();
745     return true;
746   }
747 
748   return false;
749 }
750 
751 bool CombinerHelper::findPreIndexCandidate(MachineInstr &MI, Register &Addr,
752                                            Register &Base, Register &Offset) {
753   auto &MF = *MI.getParent()->getParent();
754   const auto &TLI = *MF.getSubtarget().getTargetLowering();
755 
756 #ifndef NDEBUG
757   unsigned Opcode = MI.getOpcode();
758   assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
759          Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
760 #endif
761 
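  // Look for a candidate of the form (illustrative):
  //   %addr:_(p0) = G_PTR_ADD %base, %offset
  //   ... = G_LOAD %addr(p0)                  ; MI, the mem-op
  // which can later be combined into a pre-indexed operation that also
  // produces %addr.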
762   Addr = MI.getOperand(1).getReg();
763   MachineInstr *AddrDef = getOpcodeDef(TargetOpcode::G_PTR_ADD, Addr, MRI);
764   if (!AddrDef || MRI.hasOneNonDBGUse(Addr))
765     return false;
766 
767   Base = AddrDef->getOperand(1).getReg();
768   Offset = AddrDef->getOperand(2).getReg();
769 
770   LLVM_DEBUG(dbgs() << "Found potential pre-indexed load_store: " << MI);
771 
772   if (!ForceLegalIndexing &&
773       !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ true, MRI)) {
774     LLVM_DEBUG(dbgs() << "    Skipping, not legal for target");
775     return false;
776   }
777 
778   MachineInstr *BaseDef = getDefIgnoringCopies(Base, MRI);
779   if (BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
780     LLVM_DEBUG(dbgs() << "    Skipping, frame index would need copy anyway.");
781     return false;
782   }
783 
784   if (MI.getOpcode() == TargetOpcode::G_STORE) {
785     // Would require a copy.
786     if (Base == MI.getOperand(0).getReg()) {
787       LLVM_DEBUG(dbgs() << "    Skipping, storing base so need copy anyway.");
788       return false;
789     }
790 
791     // We're expecting one use of Addr in MI, but it could also be the
792     // value stored, which isn't actually dominated by the instruction.
793     if (MI.getOperand(0).getReg() == Addr) {
794       LLVM_DEBUG(dbgs() << "    Skipping, does not dominate all addr uses");
795       return false;
796     }
797   }
798 
799   // FIXME: check whether all uses of the base pointer are constant PtrAdds.
800   // That might allow us to end base's liveness here by adjusting the constant.
801 
802   for (auto &UseMI : MRI.use_nodbg_instructions(Addr)) {
803     if (!dominates(MI, UseMI)) {
804       LLVM_DEBUG(dbgs() << "    Skipping, does not dominate all addr uses.");
805       return false;
806     }
807   }
808 
809   return true;
810 }
811 
812 bool CombinerHelper::tryCombineIndexedLoadStore(MachineInstr &MI) {
813   IndexedLoadStoreMatchInfo MatchInfo;
814   if (matchCombineIndexedLoadStore(MI, MatchInfo)) {
815     applyCombineIndexedLoadStore(MI, MatchInfo);
816     return true;
817   }
818   return false;
819 }
820 
bool CombinerHelper::matchCombineIndexedLoadStore(
    MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) {
822   unsigned Opcode = MI.getOpcode();
823   if (Opcode != TargetOpcode::G_LOAD && Opcode != TargetOpcode::G_SEXTLOAD &&
824       Opcode != TargetOpcode::G_ZEXTLOAD && Opcode != TargetOpcode::G_STORE)
825     return false;
826 
827   // For now, no targets actually support these opcodes so don't waste time
828   // running these unless we're forced to for testing.
829   if (!ForceLegalIndexing)
830     return false;
831 
832   MatchInfo.IsPre = findPreIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base,
833                                           MatchInfo.Offset);
834   if (!MatchInfo.IsPre &&
835       !findPostIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base,
836                               MatchInfo.Offset))
837     return false;
838 
839   return true;
840 }
841 
842 void CombinerHelper::applyCombineIndexedLoadStore(
843     MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) {
844   MachineInstr &AddrDef = *MRI.getUniqueVRegDef(MatchInfo.Addr);
845   MachineIRBuilder MIRBuilder(MI);
846   unsigned Opcode = MI.getOpcode();
847   bool IsStore = Opcode == TargetOpcode::G_STORE;
848   unsigned NewOpcode;
849   switch (Opcode) {
850   case TargetOpcode::G_LOAD:
851     NewOpcode = TargetOpcode::G_INDEXED_LOAD;
852     break;
853   case TargetOpcode::G_SEXTLOAD:
854     NewOpcode = TargetOpcode::G_INDEXED_SEXTLOAD;
855     break;
856   case TargetOpcode::G_ZEXTLOAD:
857     NewOpcode = TargetOpcode::G_INDEXED_ZEXTLOAD;
858     break;
859   case TargetOpcode::G_STORE:
860     NewOpcode = TargetOpcode::G_INDEXED_STORE;
861     break;
862   default:
863     llvm_unreachable("Unknown load/store opcode");
864   }
865 
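  // Build the indexed operation. For example (illustrative), a post-indexed
  // store rewrites:
  //   %addr:_(p0) = G_PTR_ADD %base, %offset
  //   G_STORE %val(s32), %base(p0)
  // into:
  //   %addr:_(p0) = G_INDEXED_STORE %val(s32), %base(p0), %offset(s64), 0
  // where the trailing immediate is 1 for pre-indexed and 0 for post-indexed
  // addressing.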
866   auto MIB = MIRBuilder.buildInstr(NewOpcode);
867   if (IsStore) {
868     MIB.addDef(MatchInfo.Addr);
869     MIB.addUse(MI.getOperand(0).getReg());
870   } else {
871     MIB.addDef(MI.getOperand(0).getReg());
872     MIB.addDef(MatchInfo.Addr);
873   }
874 
875   MIB.addUse(MatchInfo.Base);
876   MIB.addUse(MatchInfo.Offset);
877   MIB.addImm(MatchInfo.IsPre);
878   MI.eraseFromParent();
879   AddrDef.eraseFromParent();
880 
  LLVM_DEBUG(dbgs() << "    Combined to indexed operation");
882 }
883 
884 bool CombinerHelper::matchOptBrCondByInvertingCond(MachineInstr &MI) {
885   if (MI.getOpcode() != TargetOpcode::G_BR)
886     return false;
887 
888   // Try to match the following:
889   // bb1:
890   //   G_BRCOND %c1, %bb2
891   //   G_BR %bb3
892   // bb2:
893   // ...
894   // bb3:
895 
  // The above pattern does not have a fall-through to the successor bb2: a
  // branch is always taken no matter which path is chosen. Here we try to find
  // and replace that pattern with a conditional branch to bb3 and a
  // fall-through to bb2. This is generally better for branch predictors.
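  //
  // i.e. the rewrite produces (illustrative):
  // bb1:
  //   %c1_inv = G_XOR %c1, <true value>   ; invert the condition
  //   G_BRCOND %c1_inv, %bb3
  //   G_BR %bb2
  // bb2:
  // ...
  // bb3:
  // where the G_BR to the layout successor bb2 can usually be removed later.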
900 
901   MachineBasicBlock *MBB = MI.getParent();
902   MachineBasicBlock::iterator BrIt(MI);
903   if (BrIt == MBB->begin())
904     return false;
905   assert(std::next(BrIt) == MBB->end() && "expected G_BR to be a terminator");
906 
907   MachineInstr *BrCond = &*std::prev(BrIt);
908   if (BrCond->getOpcode() != TargetOpcode::G_BRCOND)
909     return false;
910 
911   // Check that the next block is the conditional branch target.
912   if (!MBB->isLayoutSuccessor(BrCond->getOperand(1).getMBB()))
913     return false;
914   return true;
915 }
916 
917 void CombinerHelper::applyOptBrCondByInvertingCond(MachineInstr &MI) {
918   MachineBasicBlock *BrTarget = MI.getOperand(0).getMBB();
919   MachineBasicBlock::iterator BrIt(MI);
920   MachineInstr *BrCond = &*std::prev(BrIt);
921 
922   Builder.setInstrAndDebugLoc(*BrCond);
923   LLT Ty = MRI.getType(BrCond->getOperand(0).getReg());
924   // FIXME: Does int/fp matter for this? If so, we might need to restrict
925   // this to i1 only since we might not know for sure what kind of
926   // compare generated the condition value.
927   auto True = Builder.buildConstant(
928       Ty, getICmpTrueVal(getTargetLowering(), false, false));
929   auto Xor = Builder.buildXor(Ty, BrCond->getOperand(0), True);
930 
931   auto *FallthroughBB = BrCond->getOperand(1).getMBB();
932   Observer.changingInstr(MI);
933   MI.getOperand(0).setMBB(FallthroughBB);
934   Observer.changedInstr(MI);
935 
936   // Change the conditional branch to use the inverted condition and
937   // new target block.
938   Observer.changingInstr(*BrCond);
939   BrCond->getOperand(0).setReg(Xor.getReg(0));
940   BrCond->getOperand(1).setMBB(BrTarget);
941   Observer.changedInstr(*BrCond);
942 }
943 
944 static bool shouldLowerMemFuncForSize(const MachineFunction &MF) {
945   // On Darwin, -Os means optimize for size without hurting performance, so
946   // only really optimize for size when -Oz (MinSize) is used.
947   if (MF.getTarget().getTargetTriple().isOSDarwin())
948     return MF.getFunction().hasMinSize();
949   return MF.getFunction().hasOptSize();
950 }
951 
952 // Returns a list of types to use for memory op lowering in MemOps. A partial
953 // port of findOptimalMemOpLowering in TargetLowering.
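// For example (illustrative), an 11-byte operation with an s64 base type may
// yield MemOps = {s64, s16, s8}, or {s64, s32} if a trailing overlapping
// access is allowed and fast on the target.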
954 static bool findGISelOptimalMemOpLowering(std::vector<LLT> &MemOps,
955                                           unsigned Limit, const MemOp &Op,
956                                           unsigned DstAS, unsigned SrcAS,
957                                           const AttributeList &FuncAttributes,
958                                           const TargetLowering &TLI) {
959   if (Op.isMemcpyWithFixedDstAlign() && Op.getSrcAlign() < Op.getDstAlign())
960     return false;
961 
962   LLT Ty = TLI.getOptimalMemOpLLT(Op, FuncAttributes);
963 
964   if (Ty == LLT()) {
965     // Use the largest scalar type whose alignment constraints are satisfied.
966     // We only need to check DstAlign here as SrcAlign is always greater or
967     // equal to DstAlign (or zero).
968     Ty = LLT::scalar(64);
969     if (Op.isFixedDstAlign())
970       while (Op.getDstAlign() < Ty.getSizeInBytes() &&
971              !TLI.allowsMisalignedMemoryAccesses(Ty, DstAS, Op.getDstAlign()))
972         Ty = LLT::scalar(Ty.getSizeInBytes());
973     assert(Ty.getSizeInBits() > 0 && "Could not find valid type");
974     // FIXME: check for the largest legal type we can load/store to.
975   }
976 
977   unsigned NumMemOps = 0;
978   uint64_t Size = Op.size();
979   while (Size) {
980     unsigned TySize = Ty.getSizeInBytes();
981     while (TySize > Size) {
      // For now, only use non-vector loads / stores for the left-over pieces.
983       LLT NewTy = Ty;
984       // FIXME: check for mem op safety and legality of the types. Not all of
985       // SDAGisms map cleanly to GISel concepts.
986       if (NewTy.isVector())
987         NewTy = NewTy.getSizeInBits() > 64 ? LLT::scalar(64) : LLT::scalar(32);
988       NewTy = LLT::scalar(PowerOf2Floor(NewTy.getSizeInBits() - 1));
989       unsigned NewTySize = NewTy.getSizeInBytes();
990       assert(NewTySize > 0 && "Could not find appropriate type");
991 
992       // If the new LLT cannot cover all of the remaining bits, then consider
993       // issuing a (or a pair of) unaligned and overlapping load / store.
994       bool Fast;
995       // Need to get a VT equivalent for allowMisalignedMemoryAccesses().
996       MVT VT = getMVTForLLT(Ty);
997       if (NumMemOps && Op.allowOverlap() && NewTySize < Size &&
998           TLI.allowsMisalignedMemoryAccesses(
999               VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign().value() : 0,
1000               MachineMemOperand::MONone, &Fast) &&
1001           Fast)
1002         TySize = Size;
1003       else {
1004         Ty = NewTy;
1005         TySize = NewTySize;
1006       }
1007     }
1008 
1009     if (++NumMemOps > Limit)
1010       return false;
1011 
1012     MemOps.push_back(Ty);
1013     Size -= TySize;
1014   }
1015 
1016   return true;
1017 }
1018 
1019 static Type *getTypeForLLT(LLT Ty, LLVMContext &C) {
1020   if (Ty.isVector())
1021     return FixedVectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
1022                                 Ty.getNumElements());
1023   return IntegerType::get(C, Ty.getSizeInBits());
1024 }
1025 
1026 // Get a vectorized representation of the memset value operand, GISel edition.
1027 static Register getMemsetValue(Register Val, LLT Ty, MachineIRBuilder &MIB) {
1028   MachineRegisterInfo &MRI = *MIB.getMRI();
1029   unsigned NumBits = Ty.getScalarSizeInBits();
1030   auto ValVRegAndVal = getConstantVRegValWithLookThrough(Val, MRI);
1031   if (!Ty.isVector() && ValVRegAndVal) {
1032     unsigned KnownVal = ValVRegAndVal->Value;
1033     APInt Scalar = APInt(8, KnownVal);
1034     APInt SplatVal = APInt::getSplat(NumBits, Scalar);
1035     return MIB.buildConstant(Ty, SplatVal).getReg(0);
1036   }
1037 
1038   // Extend the byte value to the larger type, and then multiply by a magic
1039   // value 0x010101... in order to replicate it across every byte.
1040   // Unless it's zero, in which case just emit a larger G_CONSTANT 0.
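  // For a non-zero value, e.g. byte 0xAB replicated into an s32, this yields
  // the constant 0xABABABAB.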
1041   if (ValVRegAndVal && ValVRegAndVal->Value == 0) {
1042     return MIB.buildConstant(Ty, 0).getReg(0);
1043   }
1044 
1045   LLT ExtType = Ty.getScalarType();
1046   auto ZExt = MIB.buildZExtOrTrunc(ExtType, Val);
1047   if (NumBits > 8) {
1048     APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
1049     auto MagicMI = MIB.buildConstant(ExtType, Magic);
1050     Val = MIB.buildMul(ExtType, ZExt, MagicMI).getReg(0);
1051   }
1052 
  // For vector types, splat the value across the elements (G_BUILD_VECTOR).
1054   if (Ty.isVector())
1055     Val = MIB.buildSplatVector(Ty, Val).getReg(0);
1056 
1057   return Val;
1058 }
1059 
1060 bool CombinerHelper::optimizeMemset(MachineInstr &MI, Register Dst,
1061                                     Register Val, unsigned KnownLen,
1062                                     Align Alignment, bool IsVolatile) {
1063   auto &MF = *MI.getParent()->getParent();
1064   const auto &TLI = *MF.getSubtarget().getTargetLowering();
1065   auto &DL = MF.getDataLayout();
1066   LLVMContext &C = MF.getFunction().getContext();
1067 
1068   assert(KnownLen != 0 && "Have a zero length memset length!");
1069 
1070   bool DstAlignCanChange = false;
1071   MachineFrameInfo &MFI = MF.getFrameInfo();
1072   bool OptSize = shouldLowerMemFuncForSize(MF);
1073 
1074   MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
1075   if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
1076     DstAlignCanChange = true;
1077 
1078   unsigned Limit = TLI.getMaxStoresPerMemset(OptSize);
1079   std::vector<LLT> MemOps;
1080 
1081   const auto &DstMMO = **MI.memoperands_begin();
1082   MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();
1083 
1084   auto ValVRegAndVal = getConstantVRegValWithLookThrough(Val, MRI);
1085   bool IsZeroVal = ValVRegAndVal && ValVRegAndVal->Value == 0;
1086 
1087   if (!findGISelOptimalMemOpLowering(MemOps, Limit,
1088                                      MemOp::Set(KnownLen, DstAlignCanChange,
1089                                                 Alignment,
1090                                                 /*IsZeroMemset=*/IsZeroVal,
1091                                                 /*IsVolatile=*/IsVolatile),
1092                                      DstPtrInfo.getAddrSpace(), ~0u,
1093                                      MF.getFunction().getAttributes(), TLI))
1094     return false;
1095 
1096   if (DstAlignCanChange) {
1097     // Get an estimate of the type from the LLT.
1098     Type *IRTy = getTypeForLLT(MemOps[0], C);
1099     Align NewAlign = DL.getABITypeAlign(IRTy);
1100     if (NewAlign > Alignment) {
1101       Alignment = NewAlign;
1102       unsigned FI = FIDef->getOperand(1).getIndex();
1103       // Give the stack frame object a larger alignment if needed.
1104       if (MFI.getObjectAlign(FI) < Alignment)
1105         MFI.setObjectAlignment(FI, Alignment);
1106     }
1107   }
1108 
1109   MachineIRBuilder MIB(MI);
1110   // Find the largest store and generate the bit pattern for it.
1111   LLT LargestTy = MemOps[0];
1112   for (unsigned i = 1; i < MemOps.size(); i++)
1113     if (MemOps[i].getSizeInBits() > LargestTy.getSizeInBits())
1114       LargestTy = MemOps[i];
1115 
1116   // The memset stored value is always defined as an s8, so in order to make it
1117   // work with larger store types we need to repeat the bit pattern across the
1118   // wider type.
1119   Register MemSetValue = getMemsetValue(Val, LargestTy, MIB);
1120 
1121   if (!MemSetValue)
1122     return false;
1123 
1124   // Generate the stores. For each store type in the list, we generate the
1125   // matching store of that type to the destination address.
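  // For example (illustrative), a 16-byte memset with MemOps = {s64, s64}
  // emits two s64 stores, the second at a G_PTR_ADD of Dst by 8 bytes.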
1126   LLT PtrTy = MRI.getType(Dst);
1127   unsigned DstOff = 0;
1128   unsigned Size = KnownLen;
1129   for (unsigned I = 0; I < MemOps.size(); I++) {
1130     LLT Ty = MemOps[I];
1131     unsigned TySize = Ty.getSizeInBytes();
1132     if (TySize > Size) {
      // Issuing an unaligned store that overlaps with the previous store.
      // Adjust the offset accordingly.
1135       assert(I == MemOps.size() - 1 && I != 0);
1136       DstOff -= TySize - Size;
1137     }
1138 
    // If this store is smaller than the largest store, see whether we can get
    // the smaller value for free with a truncate.
1141     Register Value = MemSetValue;
1142     if (Ty.getSizeInBits() < LargestTy.getSizeInBits()) {
1143       MVT VT = getMVTForLLT(Ty);
1144       MVT LargestVT = getMVTForLLT(LargestTy);
1145       if (!LargestTy.isVector() && !Ty.isVector() &&
1146           TLI.isTruncateFree(LargestVT, VT))
1147         Value = MIB.buildTrunc(Ty, MemSetValue).getReg(0);
1148       else
1149         Value = getMemsetValue(Val, Ty, MIB);
1150       if (!Value)
1151         return false;
1152     }
1153 
1154     auto *StoreMMO =
1155         MF.getMachineMemOperand(&DstMMO, DstOff, Ty.getSizeInBytes());
1156 
1157     Register Ptr = Dst;
1158     if (DstOff != 0) {
1159       auto Offset =
1160           MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), DstOff);
1161       Ptr = MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0);
1162     }
1163 
1164     MIB.buildStore(Value, Ptr, *StoreMMO);
1165     DstOff += Ty.getSizeInBytes();
1166     Size -= TySize;
1167   }
1168 
1169   MI.eraseFromParent();
1170   return true;
1171 }
1172 
1173 bool CombinerHelper::optimizeMemcpy(MachineInstr &MI, Register Dst,
1174                                     Register Src, unsigned KnownLen,
1175                                     Align DstAlign, Align SrcAlign,
1176                                     bool IsVolatile) {
1177   auto &MF = *MI.getParent()->getParent();
1178   const auto &TLI = *MF.getSubtarget().getTargetLowering();
1179   auto &DL = MF.getDataLayout();
1180   LLVMContext &C = MF.getFunction().getContext();
1181 
1182   assert(KnownLen != 0 && "Have a zero length memcpy length!");
1183 
1184   bool DstAlignCanChange = false;
1185   MachineFrameInfo &MFI = MF.getFrameInfo();
1186   bool OptSize = shouldLowerMemFuncForSize(MF);
1187   Align Alignment = commonAlignment(DstAlign, SrcAlign);
1188 
1189   MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
1190   if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
1191     DstAlignCanChange = true;
1192 
1193   // FIXME: infer better src pointer alignment like SelectionDAG does here.
1194   // FIXME: also use the equivalent of isMemSrcFromConstant and alwaysinlining
1195   // if the memcpy is in a tail call position.
1196 
1197   unsigned Limit = TLI.getMaxStoresPerMemcpy(OptSize);
1198   std::vector<LLT> MemOps;
1199 
1200   const auto &DstMMO = **MI.memoperands_begin();
1201   const auto &SrcMMO = **std::next(MI.memoperands_begin());
1202   MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();
1203   MachinePointerInfo SrcPtrInfo = SrcMMO.getPointerInfo();
1204 
1205   if (!findGISelOptimalMemOpLowering(
1206           MemOps, Limit,
1207           MemOp::Copy(KnownLen, DstAlignCanChange, Alignment, SrcAlign,
1208                       IsVolatile),
1209           DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
1210           MF.getFunction().getAttributes(), TLI))
1211     return false;
1212 
1213   if (DstAlignCanChange) {
1214     // Get an estimate of the type from the LLT.
1215     Type *IRTy = getTypeForLLT(MemOps[0], C);
1216     Align NewAlign = DL.getABITypeAlign(IRTy);
1217 
1218     // Don't promote to an alignment that would require dynamic stack
1219     // realignment.
1220     const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
1221     if (!TRI->needsStackRealignment(MF))
1222       while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign))
1223         NewAlign = NewAlign / 2;
1224 
1225     if (NewAlign > Alignment) {
1226       Alignment = NewAlign;
1227       unsigned FI = FIDef->getOperand(1).getIndex();
1228       // Give the stack frame object a larger alignment if needed.
1229       if (MFI.getObjectAlign(FI) < Alignment)
1230         MFI.setObjectAlignment(FI, Alignment);
1231     }
1232   }
1233 
1234   LLVM_DEBUG(dbgs() << "Inlining memcpy: " << MI << " into loads & stores\n");
1235 
1236   MachineIRBuilder MIB(MI);
1237   // Now we need to emit a pair of load and stores for each of the types we've
1238   // collected. I.e. for each type, generate a load from the source pointer of
1239   // that type width, and then generate a corresponding store to the dest buffer
  // of that value loaded. This can result in a sequence of loads and stores of
  // mixed types, depending on what the target specifies as good types to use.
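  // For example (illustrative), MemOps = {s64, s32} for a 12-byte copy yields:
  //   an s64 load from Src+0 and an s64 store to Dst+0, then
  //   an s32 load from Src+8 and an s32 store to Dst+8.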
1242   unsigned CurrOffset = 0;
1243   LLT PtrTy = MRI.getType(Src);
1244   unsigned Size = KnownLen;
1245   for (auto CopyTy : MemOps) {
    // Issuing an unaligned load / store pair that overlaps with the previous
    // pair. Adjust the offset accordingly.
1248     if (CopyTy.getSizeInBytes() > Size)
1249       CurrOffset -= CopyTy.getSizeInBytes() - Size;
1250 
1251     // Construct MMOs for the accesses.
1252     auto *LoadMMO =
1253         MF.getMachineMemOperand(&SrcMMO, CurrOffset, CopyTy.getSizeInBytes());
1254     auto *StoreMMO =
1255         MF.getMachineMemOperand(&DstMMO, CurrOffset, CopyTy.getSizeInBytes());
1256 
1257     // Create the load.
1258     Register LoadPtr = Src;
1259     Register Offset;
1260     if (CurrOffset != 0) {
1261       Offset = MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset)
1262                    .getReg(0);
1263       LoadPtr = MIB.buildPtrAdd(PtrTy, Src, Offset).getReg(0);
1264     }
1265     auto LdVal = MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO);
1266 
1267     // Create the store.
1268     Register StorePtr =
1269         CurrOffset == 0 ? Dst : MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0);
1270     MIB.buildStore(LdVal, StorePtr, *StoreMMO);
1271     CurrOffset += CopyTy.getSizeInBytes();
1272     Size -= CopyTy.getSizeInBytes();
1273   }
1274 
1275   MI.eraseFromParent();
1276   return true;
1277 }
1278 
1279 bool CombinerHelper::optimizeMemmove(MachineInstr &MI, Register Dst,
1280                                      Register Src, unsigned KnownLen,
1281                                      Align DstAlign, Align SrcAlign,
1282                                      bool IsVolatile) {
1283   auto &MF = *MI.getParent()->getParent();
1284   const auto &TLI = *MF.getSubtarget().getTargetLowering();
1285   auto &DL = MF.getDataLayout();
1286   LLVMContext &C = MF.getFunction().getContext();
1287 
1288   assert(KnownLen != 0 && "Have a zero length memmove length!");
1289 
1290   bool DstAlignCanChange = false;
1291   MachineFrameInfo &MFI = MF.getFrameInfo();
1292   bool OptSize = shouldLowerMemFuncForSize(MF);
1293   Align Alignment = commonAlignment(DstAlign, SrcAlign);
1294 
1295   MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
1296   if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
1297     DstAlignCanChange = true;
1298 
1299   unsigned Limit = TLI.getMaxStoresPerMemmove(OptSize);
1300   std::vector<LLT> MemOps;
1301 
1302   const auto &DstMMO = **MI.memoperands_begin();
1303   const auto &SrcMMO = **std::next(MI.memoperands_begin());
1304   MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();
1305   MachinePointerInfo SrcPtrInfo = SrcMMO.getPointerInfo();
1306 
  // FIXME: SelectionDAG always passes false for 'AllowOverlap', apparently due
  // to a bug in its findOptimalMemOpLowering implementation. For now do the
  // same thing here.
1310   if (!findGISelOptimalMemOpLowering(
1311           MemOps, Limit,
1312           MemOp::Copy(KnownLen, DstAlignCanChange, Alignment, SrcAlign,
1313                       /*IsVolatile*/ true),
1314           DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
1315           MF.getFunction().getAttributes(), TLI))
1316     return false;
1317 
1318   if (DstAlignCanChange) {
1319     // Get an estimate of the type from the LLT.
1320     Type *IRTy = getTypeForLLT(MemOps[0], C);
1321     Align NewAlign = DL.getABITypeAlign(IRTy);
1322 
1323     // Don't promote to an alignment that would require dynamic stack
1324     // realignment.
1325     const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
1326     if (!TRI->needsStackRealignment(MF))
1327       while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign))
1328         NewAlign = NewAlign / 2;
1329 
1330     if (NewAlign > Alignment) {
1331       Alignment = NewAlign;
1332       unsigned FI = FIDef->getOperand(1).getIndex();
1333       // Give the stack frame object a larger alignment if needed.
1334       if (MFI.getObjectAlign(FI) < Alignment)
1335         MFI.setObjectAlignment(FI, Alignment);
1336     }
1337   }
1338 
1339   LLVM_DEBUG(dbgs() << "Inlining memmove: " << MI << " into loads & stores\n");
1340 
1341   MachineIRBuilder MIB(MI);
  // Memmove requires that we perform all the loads before issuing any of the
  // stores. Apart from that, this loop does essentially the same thing as the
  // memcpy lowering above.
1345   unsigned CurrOffset = 0;
1346   LLT PtrTy = MRI.getType(Src);
1347   SmallVector<Register, 16> LoadVals;
1348   for (auto CopyTy : MemOps) {
1349     // Construct MMO for the load.
1350     auto *LoadMMO =
1351         MF.getMachineMemOperand(&SrcMMO, CurrOffset, CopyTy.getSizeInBytes());
1352 
1353     // Create the load.
1354     Register LoadPtr = Src;
1355     if (CurrOffset != 0) {
1356       auto Offset =
1357           MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset);
1358       LoadPtr = MIB.buildPtrAdd(PtrTy, Src, Offset).getReg(0);
1359     }
1360     LoadVals.push_back(MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO).getReg(0));
1361     CurrOffset += CopyTy.getSizeInBytes();
1362   }
1363 
1364   CurrOffset = 0;
1365   for (unsigned I = 0; I < MemOps.size(); ++I) {
1366     LLT CopyTy = MemOps[I];
1367     // Now store the values loaded.
1368     auto *StoreMMO =
1369         MF.getMachineMemOperand(&DstMMO, CurrOffset, CopyTy.getSizeInBytes());
1370 
1371     Register StorePtr = Dst;
1372     if (CurrOffset != 0) {
1373       auto Offset =
1374           MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset);
1375       StorePtr = MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0);
1376     }
1377     MIB.buildStore(LoadVals[I], StorePtr, *StoreMMO);
1378     CurrOffset += CopyTy.getSizeInBytes();
1379   }
1380   MI.eraseFromParent();
1381   return true;
1382 }
1383 
1384 bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) {
1385   const unsigned Opc = MI.getOpcode();
1386   // This combine is fairly complex so it's not written with a separate
1387   // matcher function.
  assert((Opc == TargetOpcode::G_MEMCPY || Opc == TargetOpcode::G_MEMMOVE ||
          Opc == TargetOpcode::G_MEMSET) &&
         "Expected a memcpy-like instruction");
1390 
1391   auto MMOIt = MI.memoperands_begin();
1392   const MachineMemOperand *MemOp = *MMOIt;
1393   bool IsVolatile = MemOp->isVolatile();
1394   // Don't try to optimize volatile.
1395   if (IsVolatile)
1396     return false;
1397 
1398   Align DstAlign = MemOp->getBaseAlign();
1399   Align SrcAlign;
1400   Register Dst = MI.getOperand(0).getReg();
1401   Register Src = MI.getOperand(1).getReg();
1402   Register Len = MI.getOperand(2).getReg();
1403 
1404   if (Opc != TargetOpcode::G_MEMSET) {
1405     assert(MMOIt != MI.memoperands_end() && "Expected a second MMO on MI");
1406     MemOp = *(++MMOIt);
1407     SrcAlign = MemOp->getBaseAlign();
1408   }
1409 
  // See if this is a constant-length copy.
1411   auto LenVRegAndVal = getConstantVRegValWithLookThrough(Len, MRI);
1412   if (!LenVRegAndVal)
1413     return false; // Leave it to the legalizer to lower it to a libcall.
1414   unsigned KnownLen = LenVRegAndVal->Value;
1415 
1416   if (KnownLen == 0) {
1417     MI.eraseFromParent();
1418     return true;
1419   }
1420 
1421   if (MaxLen && KnownLen > MaxLen)
1422     return false;
1423 
  if (Opc == TargetOpcode::G_MEMCPY)
    return optimizeMemcpy(MI, Dst, Src, KnownLen, DstAlign, SrcAlign,
                          IsVolatile);
  if (Opc == TargetOpcode::G_MEMMOVE)
    return optimizeMemmove(MI, Dst, Src, KnownLen, DstAlign, SrcAlign,
                           IsVolatile);
1428   if (Opc == TargetOpcode::G_MEMSET)
1429     return optimizeMemset(MI, Dst, Src, KnownLen, DstAlign, IsVolatile);
1430   return false;
1431 }
1432 
1433 bool CombinerHelper::matchPtrAddImmedChain(MachineInstr &MI,
1434                                            PtrAddChain &MatchInfo) {
1435   // We're trying to match the following pattern:
1436   //   %t1 = G_PTR_ADD %base, G_CONSTANT imm1
1437   //   %root = G_PTR_ADD %t1, G_CONSTANT imm2
1438   // -->
1439   //   %root = G_PTR_ADD %base, G_CONSTANT (imm1 + imm2)
1440 
1441   if (MI.getOpcode() != TargetOpcode::G_PTR_ADD)
1442     return false;
1443 
1444   Register Add2 = MI.getOperand(1).getReg();
1445   Register Imm1 = MI.getOperand(2).getReg();
1446   auto MaybeImmVal = getConstantVRegValWithLookThrough(Imm1, MRI);
1447   if (!MaybeImmVal)
1448     return false;
1449 
1450   MachineInstr *Add2Def = MRI.getUniqueVRegDef(Add2);
1451   if (!Add2Def || Add2Def->getOpcode() != TargetOpcode::G_PTR_ADD)
1452     return false;
1453 
1454   Register Base = Add2Def->getOperand(1).getReg();
1455   Register Imm2 = Add2Def->getOperand(2).getReg();
1456   auto MaybeImm2Val = getConstantVRegValWithLookThrough(Imm2, MRI);
1457   if (!MaybeImm2Val)
1458     return false;
1459 
1460   // Pass the combined immediate to the apply function.
1461   MatchInfo.Imm = MaybeImmVal->Value + MaybeImm2Val->Value;
1462   MatchInfo.Base = Base;
1463   return true;
1464 }
1465 
1466 bool CombinerHelper::applyPtrAddImmedChain(MachineInstr &MI,
1467                                            PtrAddChain &MatchInfo) {
1468   assert(MI.getOpcode() == TargetOpcode::G_PTR_ADD && "Expected G_PTR_ADD");
1469   MachineIRBuilder MIB(MI);
1470   LLT OffsetTy = MRI.getType(MI.getOperand(2).getReg());
1471   auto NewOffset = MIB.buildConstant(OffsetTy, MatchInfo.Imm);
1472   Observer.changingInstr(MI);
1473   MI.getOperand(1).setReg(MatchInfo.Base);
1474   MI.getOperand(2).setReg(NewOffset.getReg(0));
1475   Observer.changedInstr(MI);
1476   return true;
1477 }
1478 
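// Fold a multiply by a power of two into a shift. Illustrative MIR:
//   %eight:_(s32) = G_CONSTANT i32 8
//   %res:_(s32) = G_MUL %x, %eight
// -->
//   %three:_(s32) = G_CONSTANT i32 3
//   %res:_(s32) = G_SHL %x, %three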
1479 bool CombinerHelper::matchCombineMulToShl(MachineInstr &MI,
1480                                           unsigned &ShiftVal) {
1481   assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
1482   auto MaybeImmVal =
1483       getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
1484   if (!MaybeImmVal || !isPowerOf2_64(MaybeImmVal->Value))
1485     return false;
1486   ShiftVal = Log2_64(MaybeImmVal->Value);
1487   return true;
1488 }
1489 
1490 bool CombinerHelper::applyCombineMulToShl(MachineInstr &MI,
1491                                           unsigned &ShiftVal) {
1492   assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
1493   MachineIRBuilder MIB(MI);
1494   LLT ShiftTy = MRI.getType(MI.getOperand(0).getReg());
1495   auto ShiftCst = MIB.buildConstant(ShiftTy, ShiftVal);
1496   Observer.changingInstr(MI);
1497   MI.setDesc(MIB.getTII().get(TargetOpcode::G_SHL));
1498   MI.getOperand(2).setReg(ShiftCst.getReg(0));
1499   Observer.changedInstr(MI);
1500   return true;
1501 }
1502 
1503 // shl ([sza]ext x), y => zext (shl x, y), if shift does not overflow source
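// For example, with a constant shift amount of 2 and %x known to have at
// least two leading zero bits (illustrative):
//   %ext:_(s64) = G_ZEXT %x:_(s32)
//   %res:_(s64) = G_SHL %ext, %two
// -->
//   %narrow:_(s32) = G_SHL %x, 2
//   %res:_(s64) = G_ZEXT %narrow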
1504 bool CombinerHelper::matchCombineShlOfExtend(MachineInstr &MI,
1505                                              RegisterImmPair &MatchData) {
1506   assert(MI.getOpcode() == TargetOpcode::G_SHL && KB);
1507 
1508   Register LHS = MI.getOperand(1).getReg();
1509 
1510   Register ExtSrc;
1511   if (!mi_match(LHS, MRI, m_GAnyExt(m_Reg(ExtSrc))) &&
1512       !mi_match(LHS, MRI, m_GZExt(m_Reg(ExtSrc))) &&
1513       !mi_match(LHS, MRI, m_GSExt(m_Reg(ExtSrc))))
1514     return false;
1515 
1516   // TODO: Should handle vector splat.
1517   Register RHS = MI.getOperand(2).getReg();
1518   auto MaybeShiftAmtVal = getConstantVRegValWithLookThrough(RHS, MRI);
1519   if (!MaybeShiftAmtVal)
1520     return false;
1521 
1522   if (LI) {
1523     LLT SrcTy = MRI.getType(ExtSrc);
1524 
    // We only really care about the legality of the shift with the shifted
    // value's type. We can pick any type for the constant shift amount, so
    // ask the target what to use. Otherwise we would have to guess and hope
    // it is reported as legal.
1528     LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(SrcTy);
1529     if (!isLegalOrBeforeLegalizer({TargetOpcode::G_SHL, {SrcTy, ShiftAmtTy}}))
1530       return false;
1531   }
1532 
1533   int64_t ShiftAmt = MaybeShiftAmtVal->Value;
1534   MatchData.Reg = ExtSrc;
1535   MatchData.Imm = ShiftAmt;
1536 
1537   unsigned MinLeadingZeros = KB->getKnownZeroes(ExtSrc).countLeadingOnes();
1538   return MinLeadingZeros >= ShiftAmt;
1539 }
1540 
1541 bool CombinerHelper::applyCombineShlOfExtend(MachineInstr &MI,
1542                                              const RegisterImmPair &MatchData) {
1543   Register ExtSrcReg = MatchData.Reg;
1544   int64_t ShiftAmtVal = MatchData.Imm;
1545 
1546   LLT ExtSrcTy = MRI.getType(ExtSrcReg);
1547   Builder.setInstrAndDebugLoc(MI);
1548   auto ShiftAmt = Builder.buildConstant(ExtSrcTy, ShiftAmtVal);
1549   auto NarrowShift =
1550       Builder.buildShl(ExtSrcTy, ExtSrcReg, ShiftAmt, MI.getFlags());
1551   Builder.buildZExt(MI.getOperand(0), NarrowShift);
1552   MI.eraseFromParent();
1553   return true;
1554 }
1555 
1556 static Register peekThroughBitcast(Register Reg,
1557                                    const MachineRegisterInfo &MRI) {
1558   while (mi_match(Reg, MRI, m_GBitcast(m_Reg(Reg))))
1559     ;
1560 
1561   return Reg;
1562 }
1563 
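// Fold an unmerge of a merge-like instruction back to the original source
// registers. Illustrative MIR:
//   %m:_(s64) = G_MERGE_VALUES %a:_(s32), %b:_(s32)
//   %x:_(s32), %y:_(s32) = G_UNMERGE_VALUES %m
// -->
//   uses of %x and %y are rewritten to use %a and %b directly (or via a
//   cast when the unmerge result type only matches in size).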
1564 bool CombinerHelper::matchCombineUnmergeMergeToPlainValues(
1565     MachineInstr &MI, SmallVectorImpl<Register> &Operands) {
1566   assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1567          "Expected an unmerge");
1568   Register SrcReg =
1569       peekThroughBitcast(MI.getOperand(MI.getNumOperands() - 1).getReg(), MRI);
1570 
1571   MachineInstr *SrcInstr = MRI.getVRegDef(SrcReg);
1572   if (SrcInstr->getOpcode() != TargetOpcode::G_MERGE_VALUES &&
1573       SrcInstr->getOpcode() != TargetOpcode::G_BUILD_VECTOR &&
1574       SrcInstr->getOpcode() != TargetOpcode::G_CONCAT_VECTORS)
1575     return false;
1576 
1577   // Check the source type of the merge.
1578   LLT SrcMergeTy = MRI.getType(SrcInstr->getOperand(1).getReg());
1579   LLT Dst0Ty = MRI.getType(MI.getOperand(0).getReg());
1580   bool SameSize = Dst0Ty.getSizeInBits() == SrcMergeTy.getSizeInBits();
1581   if (SrcMergeTy != Dst0Ty && !SameSize)
1582     return false;
1583   // They are the same now (modulo a bitcast).
1584   // We can collect all the src registers.
1585   for (unsigned Idx = 1, EndIdx = SrcInstr->getNumOperands(); Idx != EndIdx;
1586        ++Idx)
1587     Operands.push_back(SrcInstr->getOperand(Idx).getReg());
1588   return true;
1589 }
1590 
1591 bool CombinerHelper::applyCombineUnmergeMergeToPlainValues(
1592     MachineInstr &MI, SmallVectorImpl<Register> &Operands) {
1593   assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1594          "Expected an unmerge");
1595   assert((MI.getNumOperands() - 1 == Operands.size()) &&
1596          "Not enough operands to replace all defs");
1597   unsigned NumElems = MI.getNumOperands() - 1;
1598 
1599   LLT SrcTy = MRI.getType(Operands[0]);
1600   LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
1601   bool CanReuseInputDirectly = DstTy == SrcTy;
1602   Builder.setInstrAndDebugLoc(MI);
1603   for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
1604     Register DstReg = MI.getOperand(Idx).getReg();
1605     Register SrcReg = Operands[Idx];
1606     if (CanReuseInputDirectly)
1607       replaceRegWith(MRI, DstReg, SrcReg);
1608     else
1609       Builder.buildCast(DstReg, SrcReg);
1610   }
1611   MI.eraseFromParent();
1612   return true;
1613 }
1614 
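// Unmerge of a (F)CONSTANT: split the wide constant into one constant per
// destination. Illustrative MIR:
//   %c:_(s64) = G_CONSTANT i64 0x0000000200000001
//   %lo:_(s32), %hi:_(s32) = G_UNMERGE_VALUES %c
// -->
//   %lo:_(s32) = G_CONSTANT i32 1
//   %hi:_(s32) = G_CONSTANT i32 2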
1615 bool CombinerHelper::matchCombineUnmergeConstant(MachineInstr &MI,
1616                                                  SmallVectorImpl<APInt> &Csts) {
1617   unsigned SrcIdx = MI.getNumOperands() - 1;
1618   Register SrcReg = MI.getOperand(SrcIdx).getReg();
1619   MachineInstr *SrcInstr = MRI.getVRegDef(SrcReg);
1620   if (SrcInstr->getOpcode() != TargetOpcode::G_CONSTANT &&
1621       SrcInstr->getOpcode() != TargetOpcode::G_FCONSTANT)
1622     return false;
  // Break down the big constant into smaller ones.
1624   const MachineOperand &CstVal = SrcInstr->getOperand(1);
1625   APInt Val = SrcInstr->getOpcode() == TargetOpcode::G_CONSTANT
1626                   ? CstVal.getCImm()->getValue()
1627                   : CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
1628 
1629   LLT Dst0Ty = MRI.getType(MI.getOperand(0).getReg());
1630   unsigned ShiftAmt = Dst0Ty.getSizeInBits();
1631   // Unmerge a constant.
1632   for (unsigned Idx = 0; Idx != SrcIdx; ++Idx) {
1633     Csts.emplace_back(Val.trunc(ShiftAmt));
1634     Val = Val.lshr(ShiftAmt);
1635   }
1636 
1637   return true;
1638 }
1639 
1640 bool CombinerHelper::applyCombineUnmergeConstant(MachineInstr &MI,
1641                                                  SmallVectorImpl<APInt> &Csts) {
1642   assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1643          "Expected an unmerge");
1644   assert((MI.getNumOperands() - 1 == Csts.size()) &&
1645          "Not enough operands to replace all defs");
1646   unsigned NumElems = MI.getNumOperands() - 1;
1647   Builder.setInstrAndDebugLoc(MI);
1648   for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
1649     Register DstReg = MI.getOperand(Idx).getReg();
1650     Builder.buildConstant(DstReg, Csts[Idx]);
1651   }
1652 
1653   MI.eraseFromParent();
1654   return true;
1655 }
1656 
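// If every definition of an unmerge except the first is dead, the unmerge
// can be replaced by a truncate. Illustrative MIR (with %hi unused):
//   %lo:_(s32), %hi:_(s32) = G_UNMERGE_VALUES %x:_(s64)
// -->
//   %lo:_(s32) = G_TRUNC %x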
1657 bool CombinerHelper::matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) {
1658   assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1659          "Expected an unmerge");
1660   // Check that all the lanes are dead except the first one.
1661   for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {
1662     if (!MRI.use_nodbg_empty(MI.getOperand(Idx).getReg()))
1663       return false;
1664   }
1665   return true;
1666 }
1667 
1668 bool CombinerHelper::applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) {
1669   Builder.setInstrAndDebugLoc(MI);
1670   Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();
  // Truncating a vector is going to truncate every single lane, whereas we
  // want the full low bits of the source.
  // Do the operation on a scalar instead.
1674   LLT SrcTy = MRI.getType(SrcReg);
1675   if (SrcTy.isVector())
1676     SrcReg =
1677         Builder.buildCast(LLT::scalar(SrcTy.getSizeInBits()), SrcReg).getReg(0);
1678 
1679   Register Dst0Reg = MI.getOperand(0).getReg();
1680   LLT Dst0Ty = MRI.getType(Dst0Reg);
1681   if (Dst0Ty.isVector()) {
1682     auto MIB = Builder.buildTrunc(LLT::scalar(Dst0Ty.getSizeInBits()), SrcReg);
1683     Builder.buildCast(Dst0Reg, MIB);
1684   } else
1685     Builder.buildTrunc(Dst0Reg, SrcReg);
1686   MI.eraseFromParent();
1687   return true;
1688 }
1689 
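// Unmerge of a zext: the first definition can be rebuilt from the zext
// source and the remaining definitions are known zero. Illustrative MIR:
//   %ext:_(s64) = G_ZEXT %x:_(s16)
//   %lo:_(s32), %hi:_(s32) = G_UNMERGE_VALUES %ext
// -->
//   %lo:_(s32) = G_ZEXT %x
//   %hi:_(s32) = G_CONSTANT i32 0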
1690 bool CombinerHelper::matchCombineUnmergeZExtToZExt(MachineInstr &MI) {
1691   assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1692          "Expected an unmerge");
1693   Register Dst0Reg = MI.getOperand(0).getReg();
1694   LLT Dst0Ty = MRI.getType(Dst0Reg);
  // G_ZEXT on vectors applies to each lane, so it will affect all the
  // destinations. Therefore we won't be able to simplify the unmerge to just
  // the first definition.
1698   if (Dst0Ty.isVector())
1699     return false;
1700   Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();
1701   LLT SrcTy = MRI.getType(SrcReg);
1702   if (SrcTy.isVector())
1703     return false;
1704 
1705   Register ZExtSrcReg;
1706   if (!mi_match(SrcReg, MRI, m_GZExt(m_Reg(ZExtSrcReg))))
1707     return false;
1708 
  // Finally, we can replace the first definition with a zext of the source if
  // the definition is wide enough to hold all of ZExtSrc's bits.
1712   LLT ZExtSrcTy = MRI.getType(ZExtSrcReg);
1713   return ZExtSrcTy.getSizeInBits() <= Dst0Ty.getSizeInBits();
1714 }
1715 
1716 bool CombinerHelper::applyCombineUnmergeZExtToZExt(MachineInstr &MI) {
1717   assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1718          "Expected an unmerge");
1719 
1720   Register Dst0Reg = MI.getOperand(0).getReg();
1721 
1722   MachineInstr *ZExtInstr =
1723       MRI.getVRegDef(MI.getOperand(MI.getNumDefs()).getReg());
1724   assert(ZExtInstr && ZExtInstr->getOpcode() == TargetOpcode::G_ZEXT &&
1725          "Expecting a G_ZEXT");
1726 
1727   Register ZExtSrcReg = ZExtInstr->getOperand(1).getReg();
1728   LLT Dst0Ty = MRI.getType(Dst0Reg);
1729   LLT ZExtSrcTy = MRI.getType(ZExtSrcReg);
1730 
1731   Builder.setInstrAndDebugLoc(MI);
1732 
1733   if (Dst0Ty.getSizeInBits() > ZExtSrcTy.getSizeInBits()) {
1734     Builder.buildZExt(Dst0Reg, ZExtSrcReg);
1735   } else {
1736     assert(Dst0Ty.getSizeInBits() == ZExtSrcTy.getSizeInBits() &&
1737            "ZExt src doesn't fit in destination");
1738     replaceRegWith(MRI, Dst0Reg, ZExtSrcReg);
1739   }
1740 
1741   Register ZeroReg;
1742   for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {
1743     if (!ZeroReg)
1744       ZeroReg = Builder.buildConstant(Dst0Ty, 0).getReg(0);
1745     replaceRegWith(MRI, MI.getOperand(Idx).getReg(), ZeroReg);
1746   }
1747   MI.eraseFromParent();
1748   return true;
1749 }
1750 
1751 bool CombinerHelper::matchCombineShiftToUnmerge(MachineInstr &MI,
1752                                                 unsigned TargetShiftSize,
1753                                                 unsigned &ShiftVal) {
1754   assert((MI.getOpcode() == TargetOpcode::G_SHL ||
1755           MI.getOpcode() == TargetOpcode::G_LSHR ||
1756           MI.getOpcode() == TargetOpcode::G_ASHR) && "Expected a shift");
1757 
1758   LLT Ty = MRI.getType(MI.getOperand(0).getReg());
  if (Ty.isVector()) // TODO: Handle vector types.
1760     return false;
1761 
1762   // Don't narrow further than the requested size.
1763   unsigned Size = Ty.getSizeInBits();
1764   if (Size <= TargetShiftSize)
1765     return false;
1766 
1767   auto MaybeImmVal =
1768     getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
1769   if (!MaybeImmVal)
1770     return false;
1771 
1772   ShiftVal = MaybeImmVal->Value;
1773   return ShiftVal >= Size / 2 && ShiftVal < Size;
1774 }
1775 
1776 bool CombinerHelper::applyCombineShiftToUnmerge(MachineInstr &MI,
1777                                                 const unsigned &ShiftVal) {
1778   Register DstReg = MI.getOperand(0).getReg();
1779   Register SrcReg = MI.getOperand(1).getReg();
1780   LLT Ty = MRI.getType(SrcReg);
1781   unsigned Size = Ty.getSizeInBits();
1782   unsigned HalfSize = Size / 2;
1783   assert(ShiftVal >= HalfSize);
1784 
1785   LLT HalfTy = LLT::scalar(HalfSize);
1786 
1787   Builder.setInstr(MI);
1788   auto Unmerge = Builder.buildUnmerge(HalfTy, SrcReg);
1789   unsigned NarrowShiftAmt = ShiftVal - HalfSize;
1790 
1791   if (MI.getOpcode() == TargetOpcode::G_LSHR) {
1792     Register Narrowed = Unmerge.getReg(1);
1793 
1794     //  dst = G_LSHR s64:x, C for C >= 32
1795     // =>
1796     //   lo, hi = G_UNMERGE_VALUES x
1797     //   dst = G_MERGE_VALUES (G_LSHR hi, C - 32), 0
1798 
1799     if (NarrowShiftAmt != 0) {
1800       Narrowed = Builder.buildLShr(HalfTy, Narrowed,
1801         Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0);
1802     }
1803 
1804     auto Zero = Builder.buildConstant(HalfTy, 0);
1805     Builder.buildMerge(DstReg, { Narrowed, Zero });
1806   } else if (MI.getOpcode() == TargetOpcode::G_SHL) {
1807     Register Narrowed = Unmerge.getReg(0);
1808     //  dst = G_SHL s64:x, C for C >= 32
1809     // =>
1810     //   lo, hi = G_UNMERGE_VALUES x
1811     //   dst = G_MERGE_VALUES 0, (G_SHL hi, C - 32)
1812     if (NarrowShiftAmt != 0) {
1813       Narrowed = Builder.buildShl(HalfTy, Narrowed,
1814         Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0);
1815     }
1816 
1817     auto Zero = Builder.buildConstant(HalfTy, 0);
1818     Builder.buildMerge(DstReg, { Zero, Narrowed });
1819   } else {
1820     assert(MI.getOpcode() == TargetOpcode::G_ASHR);
1821     auto Hi = Builder.buildAShr(
1822       HalfTy, Unmerge.getReg(1),
1823       Builder.buildConstant(HalfTy, HalfSize - 1));
1824 
1825     if (ShiftVal == HalfSize) {
1826       // (G_ASHR i64:x, 32) ->
1827       //   G_MERGE_VALUES hi_32(x), (G_ASHR hi_32(x), 31)
1828       Builder.buildMerge(DstReg, { Unmerge.getReg(1), Hi });
1829     } else if (ShiftVal == Size - 1) {
1830       // Don't need a second shift.
1831       // (G_ASHR i64:x, 63) ->
1832       //   %narrowed = (G_ASHR hi_32(x), 31)
1833       //   G_MERGE_VALUES %narrowed, %narrowed
1834       Builder.buildMerge(DstReg, { Hi, Hi });
1835     } else {
1836       auto Lo = Builder.buildAShr(
1837         HalfTy, Unmerge.getReg(1),
1838         Builder.buildConstant(HalfTy, ShiftVal - HalfSize));
1839 
      // (G_ASHR i64:x, C) for C >= 32 ->
1841       //   G_MERGE_VALUES (G_ASHR hi_32(x), C - 32), (G_ASHR hi_32(x), 31)
1842       Builder.buildMerge(DstReg, { Lo, Hi });
1843     }
1844   }
1845 
1846   MI.eraseFromParent();
1847   return true;
1848 }
1849 
1850 bool CombinerHelper::tryCombineShiftToUnmerge(MachineInstr &MI,
1851                                               unsigned TargetShiftAmount) {
1852   unsigned ShiftAmt;
1853   if (matchCombineShiftToUnmerge(MI, TargetShiftAmount, ShiftAmt)) {
1854     applyCombineShiftToUnmerge(MI, ShiftAmt);
1855     return true;
1856   }
1857 
1858   return false;
1859 }
1860 
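// inttoptr(ptrtoint x) -> x when the result pointer type matches the type
// of x. Illustrative MIR:
//   %i:_(s64) = G_PTRTOINT %p:_(p0)
//   %q:_(p0) = G_INTTOPTR %i
// -->
//   %q:_(p0) = COPY %p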
1861 bool CombinerHelper::matchCombineI2PToP2I(MachineInstr &MI, Register &Reg) {
1862   assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
1863   Register DstReg = MI.getOperand(0).getReg();
1864   LLT DstTy = MRI.getType(DstReg);
1865   Register SrcReg = MI.getOperand(1).getReg();
1866   return mi_match(SrcReg, MRI,
1867                   m_GPtrToInt(m_all_of(m_SpecificType(DstTy), m_Reg(Reg))));
1868 }
1869 
1870 bool CombinerHelper::applyCombineI2PToP2I(MachineInstr &MI, Register &Reg) {
1871   assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
1872   Register DstReg = MI.getOperand(0).getReg();
1873   Builder.setInstr(MI);
1874   Builder.buildCopy(DstReg, Reg);
1875   MI.eraseFromParent();
1876   return true;
1877 }
1878 
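// ptrtoint(inttoptr x) -> x, zero-extended or truncated to the result
// width. Illustrative MIR:
//   %p:_(p0) = G_INTTOPTR %x:_(s64)
//   %res:_(s32) = G_PTRTOINT %p
// -->
//   %res:_(s32) = G_TRUNC %x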
1879 bool CombinerHelper::matchCombineP2IToI2P(MachineInstr &MI, Register &Reg) {
1880   assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT");
1881   Register SrcReg = MI.getOperand(1).getReg();
1882   return mi_match(SrcReg, MRI, m_GIntToPtr(m_Reg(Reg)));
1883 }
1884 
1885 bool CombinerHelper::applyCombineP2IToI2P(MachineInstr &MI, Register &Reg) {
1886   assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT");
1887   Register DstReg = MI.getOperand(0).getReg();
1888   Builder.setInstr(MI);
1889   Builder.buildZExtOrTrunc(DstReg, Reg);
1890   MI.eraseFromParent();
1891   return true;
1892 }
1893 
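// add(ptrtoint x, y) -> ptrtoint(ptr_add x, y) when the integer type has
// the same width as the pointer. Illustrative MIR (64-bit pointers assumed):
//   %i:_(s64) = G_PTRTOINT %p:_(p0)
//   %res:_(s64) = G_ADD %i, %off
// -->
//   %sum:_(p0) = G_PTR_ADD %p, %off
//   %res:_(s64) = G_PTRTOINT %sum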
1894 bool CombinerHelper::matchCombineAddP2IToPtrAdd(
1895     MachineInstr &MI, std::pair<Register, bool> &PtrReg) {
1896   assert(MI.getOpcode() == TargetOpcode::G_ADD);
1897   Register LHS = MI.getOperand(1).getReg();
1898   Register RHS = MI.getOperand(2).getReg();
1899   LLT IntTy = MRI.getType(LHS);
1900 
1901   // G_PTR_ADD always has the pointer in the LHS, so we may need to commute the
1902   // instruction.
1903   PtrReg.second = false;
1904   for (Register SrcReg : {LHS, RHS}) {
1905     if (mi_match(SrcReg, MRI, m_GPtrToInt(m_Reg(PtrReg.first)))) {
1906       // Don't handle cases where the integer is implicitly converted to the
1907       // pointer width.
1908       LLT PtrTy = MRI.getType(PtrReg.first);
1909       if (PtrTy.getScalarSizeInBits() == IntTy.getScalarSizeInBits())
1910         return true;
1911     }
1912 
1913     PtrReg.second = true;
1914   }
1915 
1916   return false;
1917 }
1918 
1919 bool CombinerHelper::applyCombineAddP2IToPtrAdd(
1920     MachineInstr &MI, std::pair<Register, bool> &PtrReg) {
1921   Register Dst = MI.getOperand(0).getReg();
1922   Register LHS = MI.getOperand(1).getReg();
1923   Register RHS = MI.getOperand(2).getReg();
1924 
1925   const bool DoCommute = PtrReg.second;
1926   if (DoCommute)
1927     std::swap(LHS, RHS);
1928   LHS = PtrReg.first;
1929 
1930   LLT PtrTy = MRI.getType(LHS);
1931 
1932   Builder.setInstrAndDebugLoc(MI);
1933   auto PtrAdd = Builder.buildPtrAdd(PtrTy, LHS, RHS);
1934   Builder.buildPtrToInt(Dst, PtrAdd);
1935   MI.eraseFromParent();
1936   return true;
1937 }
1938 
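// anyext(trunc x) -> x when the anyext result type matches the type of x.
// Illustrative MIR:
//   %t:_(s32) = G_TRUNC %x:_(s64)
//   %res:_(s64) = G_ANYEXT %t
// -->
//   uses of %res are rewritten to use %x directly.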
1939 bool CombinerHelper::matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg) {
1940   assert(MI.getOpcode() == TargetOpcode::G_ANYEXT && "Expected a G_ANYEXT");
1941   Register DstReg = MI.getOperand(0).getReg();
1942   Register SrcReg = MI.getOperand(1).getReg();
1943   LLT DstTy = MRI.getType(DstReg);
1944   return mi_match(SrcReg, MRI,
1945                   m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy))));
1946 }
1947 
1948 bool CombinerHelper::applyCombineAnyExtTrunc(MachineInstr &MI, Register &Reg) {
1949   assert(MI.getOpcode() == TargetOpcode::G_ANYEXT && "Expected a G_ANYEXT");
1950   Register DstReg = MI.getOperand(0).getReg();
1951   MI.eraseFromParent();
1952   replaceRegWith(MRI, DstReg, Reg);
1953   return true;
1954 }
1955 
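// Fold nested extends. Illustrative MIR:
//   %a:_(s32) = G_ZEXT %x:_(s8)
//   %b:_(s64) = G_ANYEXT %a
// -->
//   %b:_(s64) = G_ZEXT %x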
1956 bool CombinerHelper::matchCombineExtOfExt(
1957     MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
1958   assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
1959           MI.getOpcode() == TargetOpcode::G_SEXT ||
1960           MI.getOpcode() == TargetOpcode::G_ZEXT) &&
1961          "Expected a G_[ASZ]EXT");
1962   Register SrcReg = MI.getOperand(1).getReg();
1963   MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
1964   // Match exts with the same opcode, anyext([sz]ext) and sext(zext).
1965   unsigned Opc = MI.getOpcode();
1966   unsigned SrcOpc = SrcMI->getOpcode();
1967   if (Opc == SrcOpc ||
1968       (Opc == TargetOpcode::G_ANYEXT &&
1969        (SrcOpc == TargetOpcode::G_SEXT || SrcOpc == TargetOpcode::G_ZEXT)) ||
1970       (Opc == TargetOpcode::G_SEXT && SrcOpc == TargetOpcode::G_ZEXT)) {
1971     MatchInfo = std::make_tuple(SrcMI->getOperand(1).getReg(), SrcOpc);
1972     return true;
1973   }
1974   return false;
1975 }
1976 
1977 bool CombinerHelper::applyCombineExtOfExt(
1978     MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
1979   assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
1980           MI.getOpcode() == TargetOpcode::G_SEXT ||
1981           MI.getOpcode() == TargetOpcode::G_ZEXT) &&
1982          "Expected a G_[ASZ]EXT");
1983 
1984   Register Reg = std::get<0>(MatchInfo);
1985   unsigned SrcExtOp = std::get<1>(MatchInfo);
1986 
1987   // Combine exts with the same opcode.
1988   if (MI.getOpcode() == SrcExtOp) {
1989     Observer.changingInstr(MI);
1990     MI.getOperand(1).setReg(Reg);
1991     Observer.changedInstr(MI);
1992     return true;
1993   }
1994 
1995   // Combine:
1996   // - anyext([sz]ext x) to [sz]ext x
1997   // - sext(zext x) to zext x
1998   if (MI.getOpcode() == TargetOpcode::G_ANYEXT ||
1999       (MI.getOpcode() == TargetOpcode::G_SEXT &&
2000        SrcExtOp == TargetOpcode::G_ZEXT)) {
2001     Register DstReg = MI.getOperand(0).getReg();
2002     Builder.setInstrAndDebugLoc(MI);
2003     Builder.buildInstr(SrcExtOp, {DstReg}, {Reg});
2004     MI.eraseFromParent();
2005     return true;
2006   }
2007 
2008   return false;
2009 }
2010 
2011 bool CombinerHelper::matchCombineFNegOfFNeg(MachineInstr &MI, Register &Reg) {
2012   assert(MI.getOpcode() == TargetOpcode::G_FNEG && "Expected a G_FNEG");
2013   Register SrcReg = MI.getOperand(1).getReg();
2014   return mi_match(SrcReg, MRI, m_GFNeg(m_Reg(Reg)));
2015 }
2016 
2017 bool CombinerHelper::matchCombineFAbsOfFAbs(MachineInstr &MI, Register &Src) {
2018   assert(MI.getOpcode() == TargetOpcode::G_FABS && "Expected a G_FABS");
2019   Src = MI.getOperand(1).getReg();
2020   Register AbsSrc;
2021   return mi_match(Src, MRI, m_GFabs(m_Reg(AbsSrc)));
2022 }
2023 
2024 bool CombinerHelper::applyCombineFAbsOfFAbs(MachineInstr &MI, Register &Src) {
2025   assert(MI.getOpcode() == TargetOpcode::G_FABS && "Expected a G_FABS");
2026   Register Dst = MI.getOperand(0).getReg();
2027   MI.eraseFromParent();
2028   replaceRegWith(MRI, Dst, Src);
2029   return true;
2030 }
2031 
2032 bool CombinerHelper::matchAnyExplicitUseIsUndef(MachineInstr &MI) {
2033   return any_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
2034     return MO.isReg() &&
2035            getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
2036   });
2037 }
2038 
2039 bool CombinerHelper::matchAllExplicitUsesAreUndef(MachineInstr &MI) {
2040   return all_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
2041     return !MO.isReg() ||
2042            getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
2043   });
2044 }
2045 
2046 bool CombinerHelper::matchUndefShuffleVectorMask(MachineInstr &MI) {
2047   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
2048   ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
2049   return all_of(Mask, [](int Elt) { return Elt < 0; });
2050 }
2051 
2052 bool CombinerHelper::matchUndefStore(MachineInstr &MI) {
2053   assert(MI.getOpcode() == TargetOpcode::G_STORE);
2054   return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(0).getReg(),
2055                       MRI);
2056 }
2057 
2058 bool CombinerHelper::matchUndefSelectCmp(MachineInstr &MI) {
2059   assert(MI.getOpcode() == TargetOpcode::G_SELECT);
2060   return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(1).getReg(),
2061                       MRI);
2062 }
2063 
2064 bool CombinerHelper::matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx) {
2065   assert(MI.getOpcode() == TargetOpcode::G_SELECT);
2066   if (auto MaybeCstCmp =
2067           getConstantVRegValWithLookThrough(MI.getOperand(1).getReg(), MRI)) {
2068     OpIdx = MaybeCstCmp->Value ? 2 : 3;
2069     return true;
2070   }
2071   return false;
2072 }
2073 
2074 bool CombinerHelper::eraseInst(MachineInstr &MI) {
2075   MI.eraseFromParent();
2076   return true;
2077 }
2078 
2079 bool CombinerHelper::matchEqualDefs(const MachineOperand &MOP1,
2080                                     const MachineOperand &MOP2) {
2081   if (!MOP1.isReg() || !MOP2.isReg())
2082     return false;
2083   MachineInstr *I1 = getDefIgnoringCopies(MOP1.getReg(), MRI);
2084   if (!I1)
2085     return false;
2086   MachineInstr *I2 = getDefIgnoringCopies(MOP2.getReg(), MRI);
2087   if (!I2)
2088     return false;
2089 
2090   // Handle a case like this:
2091   //
2092   // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<2 x s64>)
2093   //
2094   // Even though %0 and %1 are produced by the same instruction they are not
2095   // the same values.
2096   if (I1 == I2)
2097     return MOP1.getReg() == MOP2.getReg();
2098 
2099   // If we have an instruction which loads or stores, we can't guarantee that
2100   // it is identical.
2101   //
2102   // For example, we may have
2103   //
2104   // %x1 = G_LOAD %addr (load N from @somewhere)
2105   // ...
2106   // call @foo
2107   // ...
2108   // %x2 = G_LOAD %addr (load N from @somewhere)
2109   // ...
2110   // %or = G_OR %x1, %x2
2111   //
2112   // It's possible that @foo will modify whatever lives at the address we're
2113   // loading from. To be safe, let's just assume that all loads and stores
2114   // are different (unless we have something which is guaranteed to not
2115   // change.)
2116   if (I1->mayLoadOrStore() && !I1->isDereferenceableInvariantLoad(nullptr))
2117     return false;
2118 
2119   // Check for physical registers on the instructions first to avoid cases
2120   // like this:
2121   //
2122   // %a = COPY $physreg
2123   // ...
2124   // SOMETHING implicit-def $physreg
2125   // ...
2126   // %b = COPY $physreg
2127   //
2128   // These copies are not equivalent.
2129   if (any_of(I1->uses(), [](const MachineOperand &MO) {
2130         return MO.isReg() && MO.getReg().isPhysical();
2131       })) {
2132     // Check if we have a case like this:
2133     //
2134     // %a = COPY $physreg
2135     // %b = COPY %a
2136     //
2137     // In this case, I1 and I2 will both be equal to %a = COPY $physreg.
2138     // From that, we know that they must have the same value, since they must
2139     // have come from the same COPY.
2140     return I1->isIdenticalTo(*I2);
2141   }
2142 
2143   // We don't have any physical registers, so we don't necessarily need the
2144   // same vreg defs.
2145   //
2146   // On the off-chance that there's some target instruction feeding into the
2147   // instruction, let's use produceSameValue instead of isIdenticalTo.
2148   return Builder.getTII().produceSameValue(*I1, *I2, &MRI);
2149 }
2150 
2151 bool CombinerHelper::matchConstantOp(const MachineOperand &MOP, int64_t C) {
2152   if (!MOP.isReg())
2153     return false;
2154   // MIPatternMatch doesn't let us look through G_ZEXT etc.
2155   auto ValAndVReg = getConstantVRegValWithLookThrough(MOP.getReg(), MRI);
2156   return ValAndVReg && ValAndVReg->Value == C;
2157 }
2158 
2159 bool CombinerHelper::replaceSingleDefInstWithOperand(MachineInstr &MI,
2160                                                      unsigned OpIdx) {
2161   assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
2162   Register OldReg = MI.getOperand(0).getReg();
2163   Register Replacement = MI.getOperand(OpIdx).getReg();
2164   assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?");
2165   MI.eraseFromParent();
2166   replaceRegWith(MRI, OldReg, Replacement);
2167   return true;
2168 }
2169 
2170 bool CombinerHelper::replaceSingleDefInstWithReg(MachineInstr &MI,
2171                                                  Register Replacement) {
2172   assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
2173   Register OldReg = MI.getOperand(0).getReg();
2174   assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?");
2175   MI.eraseFromParent();
2176   replaceRegWith(MRI, OldReg, Replacement);
2177   return true;
2178 }
2179 
2180 bool CombinerHelper::matchSelectSameVal(MachineInstr &MI) {
2181   assert(MI.getOpcode() == TargetOpcode::G_SELECT);
2182   // Match (cond ? x : x)
2183   return matchEqualDefs(MI.getOperand(2), MI.getOperand(3)) &&
2184          canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(2).getReg(),
2185                        MRI);
2186 }
2187 
2188 bool CombinerHelper::matchBinOpSameVal(MachineInstr &MI) {
2189   return matchEqualDefs(MI.getOperand(1), MI.getOperand(2)) &&
2190          canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(1).getReg(),
2191                        MRI);
2192 }
2193 
2194 bool CombinerHelper::matchOperandIsZero(MachineInstr &MI, unsigned OpIdx) {
2195   return matchConstantOp(MI.getOperand(OpIdx), 0) &&
2196          canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(OpIdx).getReg(),
2197                        MRI);
2198 }
2199 
2200 bool CombinerHelper::matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx) {
2201   MachineOperand &MO = MI.getOperand(OpIdx);
2202   return MO.isReg() &&
2203          getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
2204 }
2205 
2206 bool CombinerHelper::replaceInstWithFConstant(MachineInstr &MI, double C) {
2207   assert(MI.getNumDefs() == 1 && "Expected only one def?");
2208   Builder.setInstr(MI);
2209   Builder.buildFConstant(MI.getOperand(0), C);
2210   MI.eraseFromParent();
2211   return true;
2212 }
2213 
2214 bool CombinerHelper::replaceInstWithConstant(MachineInstr &MI, int64_t C) {
2215   assert(MI.getNumDefs() == 1 && "Expected only one def?");
2216   Builder.setInstr(MI);
2217   Builder.buildConstant(MI.getOperand(0), C);
2218   MI.eraseFromParent();
2219   return true;
2220 }
2221 
2222 bool CombinerHelper::replaceInstWithUndef(MachineInstr &MI) {
2223   assert(MI.getNumDefs() == 1 && "Expected only one def?");
2224   Builder.setInstr(MI);
2225   Builder.buildUndef(MI.getOperand(0));
2226   MI.eraseFromParent();
2227   return true;
2228 }
2229 
2230 bool CombinerHelper::matchSimplifyAddToSub(
2231     MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) {
2232   Register LHS = MI.getOperand(1).getReg();
2233   Register RHS = MI.getOperand(2).getReg();
2234   Register &NewLHS = std::get<0>(MatchInfo);
2235   Register &NewRHS = std::get<1>(MatchInfo);
2236 
2237   // Helper lambda to check for opportunities for
2238   // ((0-A) + B) -> B - A
2239   // (A + (0-B)) -> A - B
2240   auto CheckFold = [&](Register &MaybeSub, Register &MaybeNewLHS) {
2241     int64_t Cst;
2242     if (!mi_match(MaybeSub, MRI, m_GSub(m_ICst(Cst), m_Reg(NewRHS))) ||
2243         Cst != 0)
2244       return false;
2245     NewLHS = MaybeNewLHS;
2246     return true;
2247   };
2248 
2249   return CheckFold(LHS, RHS) || CheckFold(RHS, LHS);
2250 }
2251 
2252 bool CombinerHelper::applySimplifyAddToSub(
2253     MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) {
2254   Builder.setInstr(MI);
2255   Register SubLHS, SubRHS;
2256   std::tie(SubLHS, SubRHS) = MatchInfo;
2257   Builder.buildSub(MI.getOperand(0).getReg(), SubLHS, SubRHS);
2258   MI.eraseFromParent();
2259   return true;
2260 }
2261 
2262 bool CombinerHelper::matchHoistLogicOpWithSameOpcodeHands(
2263     MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) {
2264   // Matches: logic (hand x, ...), (hand y, ...) -> hand (logic x, y), ...
2265   //
  // Creates the new hand + logic instructions (but does not insert them).
2267   //
2268   // On success, MatchInfo is populated with the new instructions. These are
2269   // inserted in applyHoistLogicOpWithSameOpcodeHands.
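  // Illustrative MIR (with G_AND as the logic op and G_ZEXT as the hand op):
  //   %lx:_(s32) = G_ZEXT %x:_(s16)
  //   %ly:_(s32) = G_ZEXT %y:_(s16)
  //   %res:_(s32) = G_AND %lx, %ly
  // -->
  //   %narrow:_(s16) = G_AND %x, %y
  //   %res:_(s32) = G_ZEXT %narrow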
2270   unsigned LogicOpcode = MI.getOpcode();
2271   assert(LogicOpcode == TargetOpcode::G_AND ||
2272          LogicOpcode == TargetOpcode::G_OR ||
2273          LogicOpcode == TargetOpcode::G_XOR);
2274   MachineIRBuilder MIB(MI);
2275   Register Dst = MI.getOperand(0).getReg();
2276   Register LHSReg = MI.getOperand(1).getReg();
2277   Register RHSReg = MI.getOperand(2).getReg();
2278 
  // Don't recompute anything: bail if either hand value has other users.
2280   if (!MRI.hasOneNonDBGUse(LHSReg) || !MRI.hasOneNonDBGUse(RHSReg))
2281     return false;
2282 
2283   // Make sure we have (hand x, ...), (hand y, ...)
2284   MachineInstr *LeftHandInst = getDefIgnoringCopies(LHSReg, MRI);
2285   MachineInstr *RightHandInst = getDefIgnoringCopies(RHSReg, MRI);
2286   if (!LeftHandInst || !RightHandInst)
2287     return false;
2288   unsigned HandOpcode = LeftHandInst->getOpcode();
2289   if (HandOpcode != RightHandInst->getOpcode())
2290     return false;
2291   if (!LeftHandInst->getOperand(1).isReg() ||
2292       !RightHandInst->getOperand(1).isReg())
2293     return false;
2294 
2295   // Make sure the types match up, and if we're doing this post-legalization,
2296   // we end up with legal types.
2297   Register X = LeftHandInst->getOperand(1).getReg();
2298   Register Y = RightHandInst->getOperand(1).getReg();
2299   LLT XTy = MRI.getType(X);
2300   LLT YTy = MRI.getType(Y);
2301   if (XTy != YTy)
2302     return false;
2303   if (!isLegalOrBeforeLegalizer({LogicOpcode, {XTy, YTy}}))
2304     return false;
2305 
2306   // Optional extra source register.
2307   Register ExtraHandOpSrcReg;
2308   switch (HandOpcode) {
2309   default:
2310     return false;
2311   case TargetOpcode::G_ANYEXT:
2312   case TargetOpcode::G_SEXT:
2313   case TargetOpcode::G_ZEXT: {
2314     // Match: logic (ext X), (ext Y) --> ext (logic X, Y)
2315     break;
2316   }
2317   case TargetOpcode::G_AND:
2318   case TargetOpcode::G_ASHR:
2319   case TargetOpcode::G_LSHR:
2320   case TargetOpcode::G_SHL: {
2321     // Match: logic (binop x, z), (binop y, z) -> binop (logic x, y), z
2322     MachineOperand &ZOp = LeftHandInst->getOperand(2);
2323     if (!matchEqualDefs(ZOp, RightHandInst->getOperand(2)))
2324       return false;
2325     ExtraHandOpSrcReg = ZOp.getReg();
2326     break;
2327   }
2328   }
2329 
2330   // Record the steps to build the new instructions.
2331   //
2332   // Steps to build (logic x, y)
2333   auto NewLogicDst = MRI.createGenericVirtualRegister(XTy);
2334   OperandBuildSteps LogicBuildSteps = {
2335       [=](MachineInstrBuilder &MIB) { MIB.addDef(NewLogicDst); },
2336       [=](MachineInstrBuilder &MIB) { MIB.addReg(X); },
2337       [=](MachineInstrBuilder &MIB) { MIB.addReg(Y); }};
2338   InstructionBuildSteps LogicSteps(LogicOpcode, LogicBuildSteps);
2339 
2340   // Steps to build hand (logic x, y), ...z
2341   OperandBuildSteps HandBuildSteps = {
2342       [=](MachineInstrBuilder &MIB) { MIB.addDef(Dst); },
2343       [=](MachineInstrBuilder &MIB) { MIB.addReg(NewLogicDst); }};
2344   if (ExtraHandOpSrcReg.isValid())
2345     HandBuildSteps.push_back(
2346         [=](MachineInstrBuilder &MIB) { MIB.addReg(ExtraHandOpSrcReg); });
2347   InstructionBuildSteps HandSteps(HandOpcode, HandBuildSteps);
2348 
2349   MatchInfo = InstructionStepsMatchInfo({LogicSteps, HandSteps});
2350   return true;
2351 }
2352 
2353 bool CombinerHelper::applyBuildInstructionSteps(
2354     MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) {
2355   assert(MatchInfo.InstrsToBuild.size() &&
2356          "Expected at least one instr to build?");
2357   Builder.setInstr(MI);
2358   for (auto &InstrToBuild : MatchInfo.InstrsToBuild) {
2359     assert(InstrToBuild.Opcode && "Expected a valid opcode?");
2360     assert(InstrToBuild.OperandFns.size() && "Expected at least one operand?");
2361     MachineInstrBuilder Instr = Builder.buildInstr(InstrToBuild.Opcode);
2362     for (auto &OperandFn : InstrToBuild.OperandFns)
2363       OperandFn(Instr);
2364   }
2365   MI.eraseFromParent();
2366   return true;
2367 }
2368 
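// Fold ashr(shl x, C), C into a sign extension in-register. Illustrative
// MIR (s32, C = 24):
//   %c:_(s32) = G_CONSTANT i32 24
//   %shl:_(s32) = G_SHL %x, %c
//   %res:_(s32) = G_ASHR %shl, %c
// -->
//   %res:_(s32) = G_SEXT_INREG %x, 8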
2369 bool CombinerHelper::matchAshrShlToSextInreg(
2370     MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) {
2371   assert(MI.getOpcode() == TargetOpcode::G_ASHR);
2372   int64_t ShlCst, AshrCst;
2373   Register Src;
2374   // FIXME: detect splat constant vectors.
2375   if (!mi_match(MI.getOperand(0).getReg(), MRI,
2376                 m_GAShr(m_GShl(m_Reg(Src), m_ICst(ShlCst)), m_ICst(AshrCst))))
2377     return false;
2378   if (ShlCst != AshrCst)
2379     return false;
2380   if (!isLegalOrBeforeLegalizer(
2381           {TargetOpcode::G_SEXT_INREG, {MRI.getType(Src)}}))
2382     return false;
2383   MatchInfo = std::make_tuple(Src, ShlCst);
2384   return true;
2385 }
2386 bool CombinerHelper::applyAshShlToSextInreg(
2387     MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) {
2388   assert(MI.getOpcode() == TargetOpcode::G_ASHR);
2389   Register Src;
2390   int64_t ShiftAmt;
2391   std::tie(Src, ShiftAmt) = MatchInfo;
2392   unsigned Size = MRI.getType(Src).getScalarSizeInBits();
2393   Builder.setInstrAndDebugLoc(MI);
2394   Builder.buildSExtInReg(MI.getOperand(0).getReg(), Src, Size - ShiftAmt);
2395   MI.eraseFromParent();
2396   return true;
2397 }
2398 
2399 bool CombinerHelper::matchAndWithTrivialMask(MachineInstr &MI,
2400                                              Register &Replacement) {
2401   // Given
2402   //
2403   // %mask:_(sN) = G_CONSTANT iN 000...0111...1
2404   // %x:_(sN) = G_SOMETHING
2405   // %y:_(sN) = G_AND %x, %mask
2406   //
2407   // Eliminate the G_AND when it is known that x & mask == x.
2408   //
2409   // Patterns like this can appear as a result of legalization. E.g.
2410   //
2411   // %cmp:_(s32) = G_ICMP intpred(pred), %x(s32), %y
2412   // %one:_(s32) = G_CONSTANT i32 1
2413   // %and:_(s32) = G_AND %cmp, %one
2414   //
2415   // In this case, G_ICMP only produces a single bit, so x & 1 == x.
2416   assert(MI.getOpcode() == TargetOpcode::G_AND);
2417   if (!KB)
2418     return false;
2419 
2420   // Replacement = %x, AndDst = %y. Check that we can replace AndDst with the
2421   // LHS of the G_AND.
2422   Replacement = MI.getOperand(1).getReg();
2423   Register AndDst = MI.getOperand(0).getReg();
2424   LLT DstTy = MRI.getType(AndDst);
2425 
2426   // FIXME: This should be removed once GISelKnownBits supports vectors.
2427   if (DstTy.isVector())
2428     return false;
2429   if (!canReplaceReg(AndDst, Replacement, MRI))
2430     return false;
2431 
2432   // Check that we have a constant on the RHS of the G_AND, which is of the form
2433   // 000...0111...1.
2434   int64_t Cst;
2435   if (!mi_match(MI.getOperand(2).getReg(), MRI, m_ICst(Cst)))
2436     return false;
2437   APInt Mask(DstTy.getSizeInBits(), Cst);
2438   if (!Mask.isMask())
2439     return false;
2440 
2441   // Now, let's check that x & Mask == x. If this is true, then x & ~Mask == 0.
2442   return KB->maskedValueIsZero(Replacement, ~Mask);
2443 }
2444 
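// Illustrative MIR: if %x is already known to be sign-extended from 8 bits
// (i.e. it has at least 25 sign bits in an s32), then
//   %res:_(s32) = G_SEXT_INREG %x, 8
// is redundant and %res can simply be replaced by %x.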
2445 bool CombinerHelper::matchRedundantSExtInReg(MachineInstr &MI) {
2446   // If the input is already sign extended, just drop the extension.
2447   Register Src = MI.getOperand(1).getReg();
2448   unsigned ExtBits = MI.getOperand(2).getImm();
2449   unsigned TypeSize = MRI.getType(Src).getScalarSizeInBits();
2450   return KB->computeNumSignBits(Src) >= (TypeSize - ExtBits + 1);
2451 }
2452 
2453 static bool isConstValidTrue(const TargetLowering &TLI, unsigned ScalarSizeBits,
2454                              int64_t Cst, bool IsVector, bool IsFP) {
2455   // For i1, Cst will always be -1 regardless of boolean contents.
2456   return (ScalarSizeBits == 1 && Cst == -1) ||
2457          isConstTrueVal(TLI, Cst, IsVector, IsFP);
2458 }
2459 
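// Fold xor(x, true) where x is a tree of comparisons combined with ANDs and
// ORs: invert each comparison and apply De Morgan's laws to the ANDs/ORs.
// Illustrative MIR (net effect):
//   %cmp:_(s1) = G_ICMP intpred(eq), %a, %b
//   %true:_(s1) = G_CONSTANT i1 true
//   %res:_(s1) = G_XOR %cmp, %true
// -->
//   %res:_(s1) = G_ICMP intpred(ne), %a, %b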
2460 bool CombinerHelper::matchNotCmp(MachineInstr &MI,
2461                                  SmallVectorImpl<Register> &RegsToNegate) {
2462   assert(MI.getOpcode() == TargetOpcode::G_XOR);
2463   LLT Ty = MRI.getType(MI.getOperand(0).getReg());
2464   const auto &TLI = *Builder.getMF().getSubtarget().getTargetLowering();
2465   Register XorSrc;
2466   Register CstReg;
2467   // We match xor(src, true) here.
2468   if (!mi_match(MI.getOperand(0).getReg(), MRI,
2469                 m_GXor(m_Reg(XorSrc), m_Reg(CstReg))))
2470     return false;
2471 
2472   if (!MRI.hasOneNonDBGUse(XorSrc))
2473     return false;
2474 
2475   // Check that XorSrc is the root of a tree of comparisons combined with ANDs
  // and ORs. The suffix of RegsToNegate starting from index I is used as a
  // work list of tree nodes to visit.
2478   RegsToNegate.push_back(XorSrc);
2479   // Remember whether the comparisons are all integer or all floating point.
2480   bool IsInt = false;
2481   bool IsFP = false;
2482   for (unsigned I = 0; I < RegsToNegate.size(); ++I) {
2483     Register Reg = RegsToNegate[I];
2484     if (!MRI.hasOneNonDBGUse(Reg))
2485       return false;
2486     MachineInstr *Def = MRI.getVRegDef(Reg);
2487     switch (Def->getOpcode()) {
2488     default:
2489       // Don't match if the tree contains anything other than ANDs, ORs and
2490       // comparisons.
2491       return false;
2492     case TargetOpcode::G_ICMP:
2493       if (IsFP)
2494         return false;
2495       IsInt = true;
2496       // When we apply the combine we will invert the predicate.
2497       break;
2498     case TargetOpcode::G_FCMP:
2499       if (IsInt)
2500         return false;
2501       IsFP = true;
2502       // When we apply the combine we will invert the predicate.
2503       break;
2504     case TargetOpcode::G_AND:
2505     case TargetOpcode::G_OR:
2506       // Implement De Morgan's laws:
2507       // ~(x & y) -> ~x | ~y
2508       // ~(x | y) -> ~x & ~y
2509       // When we apply the combine we will change the opcode and recursively
2510       // negate the operands.
2511       RegsToNegate.push_back(Def->getOperand(1).getReg());
2512       RegsToNegate.push_back(Def->getOperand(2).getReg());
2513       break;
2514     }
2515   }
2516 
2517   // Now we know whether the comparisons are integer or floating point, check
2518   // the constant in the xor.
2519   int64_t Cst;
2520   if (Ty.isVector()) {
2521     MachineInstr *CstDef = MRI.getVRegDef(CstReg);
2522     auto MaybeCst = getBuildVectorConstantSplat(*CstDef, MRI);
2523     if (!MaybeCst)
2524       return false;
2525     if (!isConstValidTrue(TLI, Ty.getScalarSizeInBits(), *MaybeCst, true, IsFP))
2526       return false;
2527   } else {
2528     if (!mi_match(CstReg, MRI, m_ICst(Cst)))
2529       return false;
2530     if (!isConstValidTrue(TLI, Ty.getSizeInBits(), Cst, false, IsFP))
2531       return false;
2532   }
2533 
2534   return true;
2535 }
2536 
2537 bool CombinerHelper::applyNotCmp(MachineInstr &MI,
2538                                  SmallVectorImpl<Register> &RegsToNegate) {
2539   for (Register Reg : RegsToNegate) {
2540     MachineInstr *Def = MRI.getVRegDef(Reg);
2541     Observer.changingInstr(*Def);
    // For each comparison, invert the predicate. For each AND and OR, swap
    // the opcode.
2544     switch (Def->getOpcode()) {
2545     default:
2546       llvm_unreachable("Unexpected opcode");
2547     case TargetOpcode::G_ICMP:
2548     case TargetOpcode::G_FCMP: {
2549       MachineOperand &PredOp = Def->getOperand(1);
2550       CmpInst::Predicate NewP = CmpInst::getInversePredicate(
2551           (CmpInst::Predicate)PredOp.getPredicate());
2552       PredOp.setPredicate(NewP);
2553       break;
2554     }
2555     case TargetOpcode::G_AND:
2556       Def->setDesc(Builder.getTII().get(TargetOpcode::G_OR));
2557       break;
2558     case TargetOpcode::G_OR:
2559       Def->setDesc(Builder.getTII().get(TargetOpcode::G_AND));
2560       break;
2561     }
2562     Observer.changedInstr(*Def);
2563   }
2564 
2565   replaceRegWith(MRI, MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
2566   MI.eraseFromParent();
2567   return true;
2568 }
2569 
2570 bool CombinerHelper::tryCombine(MachineInstr &MI) {
2571   if (tryCombineCopy(MI))
2572     return true;
2573   if (tryCombineExtendingLoads(MI))
2574     return true;
2575   if (tryCombineIndexedLoadStore(MI))
2576     return true;
2577   return false;
2578 }
2579