//===-- lib/CodeGen/GlobalISel/CombinerHelper.cpp -------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 #include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
9 #include "llvm/CodeGen/GlobalISel/Combiner.h"
10 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
11 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
12 #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
13 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
14 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
15 #include "llvm/CodeGen/GlobalISel/Utils.h"
16 #include "llvm/CodeGen/MachineDominators.h"
17 #include "llvm/CodeGen/MachineFrameInfo.h"
18 #include "llvm/CodeGen/MachineInstr.h"
19 #include "llvm/CodeGen/MachineMemOperand.h"
20 #include "llvm/CodeGen/MachineRegisterInfo.h"
21 #include "llvm/CodeGen/TargetInstrInfo.h"
22 #include "llvm/CodeGen/TargetLowering.h"
23 #include "llvm/Support/MathExtras.h"
24 #include "llvm/Target/TargetMachine.h"
25 
26 #define DEBUG_TYPE "gi-combiner"
27 
28 using namespace llvm;
29 using namespace MIPatternMatch;
30 
31 // Option to allow testing of the combiner while no targets know about indexed
32 // addressing.
33 static cl::opt<bool>
34     ForceLegalIndexing("force-legal-indexing", cl::Hidden, cl::init(false),
35                        cl::desc("Force all indexed operations to be "
36                                 "legal for the GlobalISel combiner"));
37 
38 CombinerHelper::CombinerHelper(GISelChangeObserver &Observer,
39                                MachineIRBuilder &B, GISelKnownBits *KB,
40                                MachineDominatorTree *MDT,
41                                const LegalizerInfo *LI)
42     : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer),
43       KB(KB), MDT(MDT), LI(LI) {
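  // Referencing KB here keeps -Wunused-private-field quiet in case no combine
  // in this file uses the known-bits analysis yet.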
44   (void)this->KB;
45 }
46 
47 const TargetLowering &CombinerHelper::getTargetLowering() const {
48   return *Builder.getMF().getSubtarget().getTargetLowering();
49 }
50 
51 bool CombinerHelper::isLegalOrBeforeLegalizer(
52     const LegalityQuery &Query) const {
53   return !LI || LI->getAction(Query).Action == LegalizeActions::Legal;
54 }
55 
56 void CombinerHelper::replaceRegWith(MachineRegisterInfo &MRI, Register FromReg,
57                                     Register ToReg) const {
58   Observer.changingAllUsesOfReg(MRI, FromReg);
59 
60   if (MRI.constrainRegAttrs(ToReg, FromReg))
61     MRI.replaceRegWith(FromReg, ToReg);
62   else
63     Builder.buildCopy(ToReg, FromReg);
64 
65   Observer.finishedChangingAllUsesOfReg();
66 }
67 
68 void CombinerHelper::replaceRegOpWith(MachineRegisterInfo &MRI,
69                                       MachineOperand &FromRegOp,
70                                       Register ToReg) const {
71   assert(FromRegOp.getParent() && "Expected an operand in an MI");
72   Observer.changingInstr(*FromRegOp.getParent());
73 
74   FromRegOp.setReg(ToReg);
75 
76   Observer.changedInstr(*FromRegOp.getParent());
77 }
78 
79 bool CombinerHelper::tryCombineCopy(MachineInstr &MI) {
80   if (matchCombineCopy(MI)) {
81     applyCombineCopy(MI);
82     return true;
83   }
84   return false;
85 }
86 bool CombinerHelper::matchCombineCopy(MachineInstr &MI) {
87   if (MI.getOpcode() != TargetOpcode::COPY)
88     return false;
89   Register DstReg = MI.getOperand(0).getReg();
90   Register SrcReg = MI.getOperand(1).getReg();
91   return canReplaceReg(DstReg, SrcReg, MRI);
92 }
93 void CombinerHelper::applyCombineCopy(MachineInstr &MI) {
94   Register DstReg = MI.getOperand(0).getReg();
95   Register SrcReg = MI.getOperand(1).getReg();
96   MI.eraseFromParent();
97   replaceRegWith(MRI, DstReg, SrcReg);
98 }
99 
100 bool CombinerHelper::tryCombineConcatVectors(MachineInstr &MI) {
101   bool IsUndef = false;
102   SmallVector<Register, 4> Ops;
103   if (matchCombineConcatVectors(MI, IsUndef, Ops)) {
104     applyCombineConcatVectors(MI, IsUndef, Ops);
105     return true;
106   }
107   return false;
108 }
109 
110 bool CombinerHelper::matchCombineConcatVectors(MachineInstr &MI, bool &IsUndef,
111                                                SmallVectorImpl<Register> &Ops) {
112   assert(MI.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
113          "Invalid instruction");
114   IsUndef = true;
115   MachineInstr *Undef = nullptr;
116 
  // Walk over all the operands of the concat_vectors and check whether they
  // are build_vector instructions themselves or undef.
  // Then collect their operands in Ops.
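  // For example (a sketch; register names and types are illustrative):
  //   %x:_(<2 x s32>) = G_BUILD_VECTOR %a(s32), %b(s32)
  //   %y:_(<2 x s32>) = G_IMPLICIT_DEF
  //   %z:_(<4 x s32>) = G_CONCAT_VECTORS %x(<2 x s32>), %y(<2 x s32>)
  // collects Ops = {%a, %b, %u, %u} with a single shared scalar undef %u,
  // which the apply step below turns into one flattened G_BUILD_VECTOR
  // defining %z.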
120   for (const MachineOperand &MO : MI.uses()) {
121     Register Reg = MO.getReg();
122     MachineInstr *Def = MRI.getVRegDef(Reg);
123     assert(Def && "Operand not defined");
124     switch (Def->getOpcode()) {
125     case TargetOpcode::G_BUILD_VECTOR:
126       IsUndef = false;
127       // Remember the operands of the build_vector to fold
128       // them into the yet-to-build flattened concat vectors.
129       for (const MachineOperand &BuildVecMO : Def->uses())
130         Ops.push_back(BuildVecMO.getReg());
131       break;
132     case TargetOpcode::G_IMPLICIT_DEF: {
133       LLT OpType = MRI.getType(Reg);
134       // Keep one undef value for all the undef operands.
135       if (!Undef) {
136         Builder.setInsertPt(*MI.getParent(), MI);
137         Undef = Builder.buildUndef(OpType.getScalarType());
138       }
139       assert(MRI.getType(Undef->getOperand(0).getReg()) ==
140                  OpType.getScalarType() &&
141              "All undefs should have the same type");
142       // Break the undef vector in as many scalar elements as needed
143       // for the flattening.
144       for (unsigned EltIdx = 0, EltEnd = OpType.getNumElements();
145            EltIdx != EltEnd; ++EltIdx)
146         Ops.push_back(Undef->getOperand(0).getReg());
147       break;
148     }
149     default:
150       return false;
151     }
152   }
153   return true;
154 }
155 void CombinerHelper::applyCombineConcatVectors(
156     MachineInstr &MI, bool IsUndef, const ArrayRef<Register> Ops) {
  // We determined that the concat_vectors can be flattened.
158   // Generate the flattened build_vector.
159   Register DstReg = MI.getOperand(0).getReg();
160   Builder.setInsertPt(*MI.getParent(), MI);
161   Register NewDstReg = MRI.cloneVirtualRegister(DstReg);
162 
  // Note: IsUndef is somewhat redundant. We could have determined it by
  // checking that all Ops are undef.  Alternatively, we could have
  // generated a build_vector of undefs and relied on another combine to
  // clean that up.  For now, given that we already gather this information
  // in matchCombineConcatVectors, just save compile time and issue the
  // right thing directly.
169   if (IsUndef)
170     Builder.buildUndef(NewDstReg);
171   else
172     Builder.buildBuildVector(NewDstReg, Ops);
173   MI.eraseFromParent();
174   replaceRegWith(MRI, DstReg, NewDstReg);
175 }
176 
177 bool CombinerHelper::tryCombineShuffleVector(MachineInstr &MI) {
178   SmallVector<Register, 4> Ops;
179   if (matchCombineShuffleVector(MI, Ops)) {
180     applyCombineShuffleVector(MI, Ops);
181     return true;
182   }
183   return false;
184 }
185 
186 bool CombinerHelper::matchCombineShuffleVector(MachineInstr &MI,
187                                                SmallVectorImpl<Register> &Ops) {
188   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
189          "Invalid instruction kind");
190   LLT DstType = MRI.getType(MI.getOperand(0).getReg());
191   Register Src1 = MI.getOperand(1).getReg();
192   LLT SrcType = MRI.getType(Src1);
  // As bizarre as it may look, shuffle vector can actually produce a
  // scalar! This is because at the IR level a <1 x ty> shuffle
  // vector is perfectly valid.
196   unsigned DstNumElts = DstType.isVector() ? DstType.getNumElements() : 1;
197   unsigned SrcNumElts = SrcType.isVector() ? SrcType.getNumElements() : 1;
198 
  // If the resulting vector is smaller than the size of the source
  // vectors being concatenated, we won't be able to replace the
  // shuffle vector with a concat_vectors.
  //
  // Note: We may still be able to produce a concat_vectors fed by
  //       extract_vector_elt and so on. It is less clear that would
  //       be better though, so don't bother for now.
  //
  // If the destination is a scalar, the size of the sources doesn't
  // matter: we will lower the shuffle to a plain copy. This will
  // work only if the source and destination have the same size. But
  // that's covered by the next condition.
  //
  // TODO: If the sizes of the source and destination don't match,
  //       we could still emit an extract_vector_elt in that case.
214   if (DstNumElts < 2 * SrcNumElts && DstNumElts != 1)
215     return false;
216 
217   // Check that the shuffle mask can be broken evenly between the
218   // different sources.
219   if (DstNumElts % SrcNumElts != 0)
220     return false;
221 
222   // Mask length is a multiple of the source vector length.
223   // Check if the shuffle is some kind of concatenation of the input
224   // vectors.
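  // For instance (an illustrative sketch), with two <2 x s32> sources the
  // mask <0, 1, 2, 3> concatenates Src1 and Src2, while <0, 1, -1, -1>
  // concatenates Src1 with an undef vector.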
225   unsigned NumConcat = DstNumElts / SrcNumElts;
226   SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
227   ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
228   for (unsigned i = 0; i != DstNumElts; ++i) {
229     int Idx = Mask[i];
230     // Undef value.
231     if (Idx < 0)
232       continue;
233     // Ensure the indices in each SrcType sized piece are sequential and that
234     // the same source is used for the whole piece.
235     if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
236         (ConcatSrcs[i / SrcNumElts] >= 0 &&
237          ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts)))
238       return false;
239     // Remember which source this index came from.
240     ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
241   }
242 
243   // The shuffle is concatenating multiple vectors together.
244   // Collect the different operands for that.
245   Register UndefReg;
246   Register Src2 = MI.getOperand(2).getReg();
247   for (auto Src : ConcatSrcs) {
248     if (Src < 0) {
249       if (!UndefReg) {
250         Builder.setInsertPt(*MI.getParent(), MI);
251         UndefReg = Builder.buildUndef(SrcType).getReg(0);
252       }
253       Ops.push_back(UndefReg);
254     } else if (Src == 0)
255       Ops.push_back(Src1);
256     else
257       Ops.push_back(Src2);
258   }
259   return true;
260 }
261 
262 void CombinerHelper::applyCombineShuffleVector(MachineInstr &MI,
263                                                const ArrayRef<Register> Ops) {
264   Register DstReg = MI.getOperand(0).getReg();
265   Builder.setInsertPt(*MI.getParent(), MI);
266   Register NewDstReg = MRI.cloneVirtualRegister(DstReg);
267 
268   if (Ops.size() == 1)
269     Builder.buildCopy(NewDstReg, Ops[0]);
270   else
271     Builder.buildMerge(NewDstReg, Ops);
272 
273   MI.eraseFromParent();
274   replaceRegWith(MRI, DstReg, NewDstReg);
275 }
276 
277 namespace {
278 
/// Select a preference between two uses. CurrentUse is the current preference
/// while the *ForCandidate arguments describe the candidate under
/// consideration.
281 PreferredTuple ChoosePreferredUse(PreferredTuple &CurrentUse,
282                                   const LLT TyForCandidate,
283                                   unsigned OpcodeForCandidate,
284                                   MachineInstr *MIForCandidate) {
285   if (!CurrentUse.Ty.isValid()) {
286     if (CurrentUse.ExtendOpcode == OpcodeForCandidate ||
287         CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT)
288       return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
289     return CurrentUse;
290   }
291 
292   // We permit the extend to hoist through basic blocks but this is only
293   // sensible if the target has extending loads. If you end up lowering back
294   // into a load and extend during the legalizer then the end result is
295   // hoisting the extend up to the load.
296 
297   // Prefer defined extensions to undefined extensions as these are more
298   // likely to reduce the number of instructions.
299   if (OpcodeForCandidate == TargetOpcode::G_ANYEXT &&
300       CurrentUse.ExtendOpcode != TargetOpcode::G_ANYEXT)
301     return CurrentUse;
302   else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT &&
303            OpcodeForCandidate != TargetOpcode::G_ANYEXT)
304     return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
305 
  // Prefer sign extensions to zero extensions as sign-extensions tend to be
  // more expensive as standalone instructions, so folding them into the load
  // is the bigger win.
308   if (CurrentUse.Ty == TyForCandidate) {
309     if (CurrentUse.ExtendOpcode == TargetOpcode::G_SEXT &&
310         OpcodeForCandidate == TargetOpcode::G_ZEXT)
311       return CurrentUse;
312     else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT &&
313              OpcodeForCandidate == TargetOpcode::G_SEXT)
314       return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
315   }
316 
317   // This is potentially target specific. We've chosen the largest type
318   // because G_TRUNC is usually free. One potential catch with this is that
319   // some targets have a reduced number of larger registers than smaller
320   // registers and this choice potentially increases the live-range for the
321   // larger value.
322   if (TyForCandidate.getSizeInBits() > CurrentUse.Ty.getSizeInBits()) {
323     return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
324   }
325   return CurrentUse;
326 }
327 
328 /// Find a suitable place to insert some instructions and insert them. This
329 /// function accounts for special cases like inserting before a PHI node.
330 /// The current strategy for inserting before PHI's is to duplicate the
331 /// instructions for each predecessor. However, while that's ok for G_TRUNC
332 /// on most targets since it generally requires no code, other targets/cases may
333 /// want to try harder to find a dominating block.
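/// For instance (an illustrative sketch), if the operand being rewritten is
/// %a in
///   %phi:_(s32) = G_PHI %a(s32), %bb.1, %b(s32), %bb.2
/// the new instructions are inserted in %bb.1 (the corresponding predecessor)
/// rather than immediately in front of the PHI itself.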
334 static void InsertInsnsWithoutSideEffectsBeforeUse(
335     MachineIRBuilder &Builder, MachineInstr &DefMI, MachineOperand &UseMO,
336     std::function<void(MachineBasicBlock *, MachineBasicBlock::iterator,
337                        MachineOperand &UseMO)>
338         Inserter) {
339   MachineInstr &UseMI = *UseMO.getParent();
340 
341   MachineBasicBlock *InsertBB = UseMI.getParent();
342 
343   // If the use is a PHI then we want the predecessor block instead.
344   if (UseMI.isPHI()) {
345     MachineOperand *PredBB = std::next(&UseMO);
346     InsertBB = PredBB->getMBB();
347   }
348 
349   // If the block is the same block as the def then we want to insert just after
350   // the def instead of at the start of the block.
351   if (InsertBB == DefMI.getParent()) {
352     MachineBasicBlock::iterator InsertPt = &DefMI;
353     Inserter(InsertBB, std::next(InsertPt), UseMO);
354     return;
355   }
356 
357   // Otherwise we want the start of the BB
358   Inserter(InsertBB, InsertBB->getFirstNonPHI(), UseMO);
359 }
360 } // end anonymous namespace
361 
362 bool CombinerHelper::tryCombineExtendingLoads(MachineInstr &MI) {
363   PreferredTuple Preferred;
364   if (matchCombineExtendingLoads(MI, Preferred)) {
365     applyCombineExtendingLoads(MI, Preferred);
366     return true;
367   }
368   return false;
369 }
370 
371 bool CombinerHelper::matchCombineExtendingLoads(MachineInstr &MI,
372                                                 PreferredTuple &Preferred) {
373   // We match the loads and follow the uses to the extend instead of matching
374   // the extends and following the def to the load. This is because the load
375   // must remain in the same position for correctness (unless we also add code
376   // to find a safe place to sink it) whereas the extend is freely movable.
377   // It also prevents us from duplicating the load for the volatile case or just
378   // for performance.
379 
380   if (MI.getOpcode() != TargetOpcode::G_LOAD &&
381       MI.getOpcode() != TargetOpcode::G_SEXTLOAD &&
382       MI.getOpcode() != TargetOpcode::G_ZEXTLOAD)
383     return false;
384 
385   auto &LoadValue = MI.getOperand(0);
386   assert(LoadValue.isReg() && "Result wasn't a register?");
387 
388   LLT LoadValueTy = MRI.getType(LoadValue.getReg());
389   if (!LoadValueTy.isScalar())
390     return false;
391 
392   // Most architectures are going to legalize <s8 loads into at least a 1 byte
393   // load, and the MMOs can only describe memory accesses in multiples of bytes.
394   // If we try to perform extload combining on those, we can end up with
395   // %a(s8) = extload %ptr (load 1 byte from %ptr)
396   // ... which is an illegal extload instruction.
397   if (LoadValueTy.getSizeInBits() < 8)
398     return false;
399 
400   // For non power-of-2 types, they will very likely be legalized into multiple
401   // loads. Don't bother trying to match them into extending loads.
402   if (!isPowerOf2_32(LoadValueTy.getSizeInBits()))
403     return false;
404 
  // Find the preferred type aside from the any-extends (unless it's the only
  // one) and non-extending ops. We'll emit an extending load to that type and
  // emit a variant of (extend (trunc X)) for the others according to the
  // relative type sizes. At the same time, pick an extend to use based on the
  // extend involved in the chosen type.
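  // For example (a sketch with illustrative types), given
  //   %v:_(s16) = G_LOAD %ptr
  //   %e:_(s32) = G_SEXT %v(s16)
  // we would record {s32, G_SEXT} as the preferred use and later rewrite the
  // load into
  //   %e:_(s32) = G_SEXTLOAD %ptr
  // with a G_TRUNC back to s16 inserted for any remaining non-extend users.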
410   unsigned PreferredOpcode = MI.getOpcode() == TargetOpcode::G_LOAD
411                                  ? TargetOpcode::G_ANYEXT
412                                  : MI.getOpcode() == TargetOpcode::G_SEXTLOAD
413                                        ? TargetOpcode::G_SEXT
414                                        : TargetOpcode::G_ZEXT;
415   Preferred = {LLT(), PreferredOpcode, nullptr};
416   for (auto &UseMI : MRI.use_nodbg_instructions(LoadValue.getReg())) {
417     if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||
418         UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
        UseMI.getOpcode() == TargetOpcode::G_ANYEXT) {
420       // Check for legality.
421       if (LI) {
422         LegalityQuery::MemDesc MMDesc;
423         const auto &MMO = **MI.memoperands_begin();
424         MMDesc.SizeInBits = MMO.getSizeInBits();
425         MMDesc.AlignInBits = MMO.getAlign().value() * 8;
426         MMDesc.Ordering = MMO.getOrdering();
427         LLT UseTy = MRI.getType(UseMI.getOperand(0).getReg());
428         LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
429         if (LI->getAction({MI.getOpcode(), {UseTy, SrcTy}, {MMDesc}}).Action !=
430             LegalizeActions::Legal)
431           continue;
432       }
433       Preferred = ChoosePreferredUse(Preferred,
434                                      MRI.getType(UseMI.getOperand(0).getReg()),
435                                      UseMI.getOpcode(), &UseMI);
436     }
437   }
438 
439   // There were no extends
440   if (!Preferred.MI)
441     return false;
  // It should be impossible to choose an extend without selecting a different
  // type since by definition the result of an extend is larger.
444   assert(Preferred.Ty != LoadValueTy && "Extending to same type?");
445 
446   LLVM_DEBUG(dbgs() << "Preferred use is: " << *Preferred.MI);
447   return true;
448 }
449 
450 void CombinerHelper::applyCombineExtendingLoads(MachineInstr &MI,
451                                                 PreferredTuple &Preferred) {
452   // Rewrite the load to the chosen extending load.
453   Register ChosenDstReg = Preferred.MI->getOperand(0).getReg();
454 
455   // Inserter to insert a truncate back to the original type at a given point
456   // with some basic CSE to limit truncate duplication to one per BB.
457   DenseMap<MachineBasicBlock *, MachineInstr *> EmittedInsns;
458   auto InsertTruncAt = [&](MachineBasicBlock *InsertIntoBB,
459                            MachineBasicBlock::iterator InsertBefore,
460                            MachineOperand &UseMO) {
461     MachineInstr *PreviouslyEmitted = EmittedInsns.lookup(InsertIntoBB);
462     if (PreviouslyEmitted) {
463       Observer.changingInstr(*UseMO.getParent());
464       UseMO.setReg(PreviouslyEmitted->getOperand(0).getReg());
465       Observer.changedInstr(*UseMO.getParent());
466       return;
467     }
468 
469     Builder.setInsertPt(*InsertIntoBB, InsertBefore);
470     Register NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg());
471     MachineInstr *NewMI = Builder.buildTrunc(NewDstReg, ChosenDstReg);
472     EmittedInsns[InsertIntoBB] = NewMI;
473     replaceRegOpWith(MRI, UseMO, NewDstReg);
474   };
475 
476   Observer.changingInstr(MI);
477   MI.setDesc(
478       Builder.getTII().get(Preferred.ExtendOpcode == TargetOpcode::G_SEXT
479                                ? TargetOpcode::G_SEXTLOAD
480                                : Preferred.ExtendOpcode == TargetOpcode::G_ZEXT
481                                      ? TargetOpcode::G_ZEXTLOAD
482                                      : TargetOpcode::G_LOAD));
483 
484   // Rewrite all the uses to fix up the types.
485   auto &LoadValue = MI.getOperand(0);
486   SmallVector<MachineOperand *, 4> Uses;
487   for (auto &UseMO : MRI.use_operands(LoadValue.getReg()))
488     Uses.push_back(&UseMO);
489 
490   for (auto *UseMO : Uses) {
491     MachineInstr *UseMI = UseMO->getParent();
492 
493     // If the extend is compatible with the preferred extend then we should fix
494     // up the type and extend so that it uses the preferred use.
495     if (UseMI->getOpcode() == Preferred.ExtendOpcode ||
496         UseMI->getOpcode() == TargetOpcode::G_ANYEXT) {
497       Register UseDstReg = UseMI->getOperand(0).getReg();
498       MachineOperand &UseSrcMO = UseMI->getOperand(1);
499       const LLT UseDstTy = MRI.getType(UseDstReg);
500       if (UseDstReg != ChosenDstReg) {
501         if (Preferred.Ty == UseDstTy) {
502           // If the use has the same type as the preferred use, then merge
503           // the vregs and erase the extend. For example:
504           //    %1:_(s8) = G_LOAD ...
505           //    %2:_(s32) = G_SEXT %1(s8)
506           //    %3:_(s32) = G_ANYEXT %1(s8)
507           //    ... = ... %3(s32)
508           // rewrites to:
509           //    %2:_(s32) = G_SEXTLOAD ...
510           //    ... = ... %2(s32)
511           replaceRegWith(MRI, UseDstReg, ChosenDstReg);
512           Observer.erasingInstr(*UseMO->getParent());
513           UseMO->getParent()->eraseFromParent();
514         } else if (Preferred.Ty.getSizeInBits() < UseDstTy.getSizeInBits()) {
515           // If the preferred size is smaller, then keep the extend but extend
516           // from the result of the extending load. For example:
517           //    %1:_(s8) = G_LOAD ...
518           //    %2:_(s32) = G_SEXT %1(s8)
519           //    %3:_(s64) = G_ANYEXT %1(s8)
520           //    ... = ... %3(s64)
          // rewrites to:
522           //    %2:_(s32) = G_SEXTLOAD ...
523           //    %3:_(s64) = G_ANYEXT %2:_(s32)
524           //    ... = ... %3(s64)
525           replaceRegOpWith(MRI, UseSrcMO, ChosenDstReg);
526         } else {
          // If the preferred size is larger, then insert a truncate. For
          // example:
          //    %1:_(s8) = G_LOAD ...
          //    %2:_(s64) = G_SEXT %1(s8)
          //    %3:_(s32) = G_ZEXT %1(s8)
          //    ... = ... %3(s32)
          // rewrites to:
          //    %2:_(s64) = G_SEXTLOAD ...
          //    %4:_(s8) = G_TRUNC %2:_(s64)
          //    %3:_(s32) = G_ZEXT %4:_(s8)
          //    ... = ... %3(s32)
538           InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO,
539                                                  InsertTruncAt);
540         }
541         continue;
542       }
543       // The use is (one of) the uses of the preferred use we chose earlier.
544       // We're going to update the load to def this value later so just erase
545       // the old extend.
546       Observer.erasingInstr(*UseMO->getParent());
547       UseMO->getParent()->eraseFromParent();
548       continue;
549     }
550 
551     // The use isn't an extend. Truncate back to the type we originally loaded.
552     // This is free on many targets.
553     InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO, InsertTruncAt);
554   }
555 
556   MI.getOperand(0).setReg(ChosenDstReg);
557   Observer.changedInstr(MI);
558 }
559 
560 bool CombinerHelper::isPredecessor(const MachineInstr &DefMI,
561                                    const MachineInstr &UseMI) {
562   assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
563          "shouldn't consider debug uses");
564   assert(DefMI.getParent() == UseMI.getParent());
565   if (&DefMI == &UseMI)
566     return false;
567 
  // Loop through the basic block until we find one of the instructions;
  // whichever one appears first determines the answer.
  for (const MachineInstr &Cur : *DefMI.getParent())
    if (&Cur == &DefMI || &Cur == &UseMI)
      return &Cur == &DefMI;

  llvm_unreachable("Block must contain instructions");
574 }
575 
576 bool CombinerHelper::dominates(const MachineInstr &DefMI,
577                                const MachineInstr &UseMI) {
578   assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
579          "shouldn't consider debug uses");
580   if (MDT)
581     return MDT->dominates(&DefMI, &UseMI);
582   else if (DefMI.getParent() != UseMI.getParent())
583     return false;
584 
585   return isPredecessor(DefMI, UseMI);
586 }
587 
588 bool CombinerHelper::matchSextTruncSextLoad(MachineInstr &MI) {
589   assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
590   Register SrcReg = MI.getOperand(1).getReg();
591   Register LoadUser = SrcReg;
592 
593   if (MRI.getType(SrcReg).isVector())
594     return false;
595 
596   Register TruncSrc;
597   if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc))))
598     LoadUser = TruncSrc;
599 
600   uint64_t SizeInBits = MI.getOperand(2).getImm();
601   // If the source is a G_SEXTLOAD from the same bit width, then we don't
602   // need any extend at all, just a truncate.
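  // For example (a sketch with illustrative types):
  //   %ld:_(s32) = G_SEXTLOAD %ptr (load 1)
  //   %x:_(s32) = G_SEXT_INREG %ld, 8
  // already has its top bits sign-extended by the load, so the G_SEXT_INREG
  // can be replaced with a plain copy of %ld.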
603   if (auto *LoadMI = getOpcodeDef(TargetOpcode::G_SEXTLOAD, LoadUser, MRI)) {
604     const auto &MMO = **LoadMI->memoperands_begin();
605     // If truncating more than the original extended value, abort.
606     if (TruncSrc && MRI.getType(TruncSrc).getSizeInBits() < MMO.getSizeInBits())
607       return false;
608     if (MMO.getSizeInBits() == SizeInBits)
609       return true;
610   }
611   return false;
612 }
613 
614 bool CombinerHelper::applySextTruncSextLoad(MachineInstr &MI) {
615   assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
616   Builder.setInstrAndDebugLoc(MI);
617   Builder.buildCopy(MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
618   MI.eraseFromParent();
619   return true;
620 }
621 
622 bool CombinerHelper::matchSextInRegOfLoad(
623     MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
624   assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
625 
626   // Only supports scalars for now.
627   if (MRI.getType(MI.getOperand(0).getReg()).isVector())
628     return false;
629 
630   Register SrcReg = MI.getOperand(1).getReg();
631   MachineInstr *LoadDef = getOpcodeDef(TargetOpcode::G_LOAD, SrcReg, MRI);
632   if (!LoadDef || !MRI.hasOneNonDBGUse(LoadDef->getOperand(0).getReg()))
633     return false;
634 
635   // If the sign extend extends from a narrower width than the load's width,
636   // then we can narrow the load width when we combine to a G_SEXTLOAD.
637   auto &MMO = **LoadDef->memoperands_begin();
638   // Don't do this for non-simple loads.
639   if (MMO.isAtomic() || MMO.isVolatile())
640     return false;
641 
642   // Avoid widening the load at all.
643   unsigned NewSizeBits =
644       std::min((uint64_t)MI.getOperand(2).getImm(), MMO.getSizeInBits());
645 
  // Don't generate G_SEXTLOADs with a width of less than 1 byte.
647   if (NewSizeBits < 8)
648     return false;
  // Don't bother creating a non-power-of-2 sextload, as it will likely be
  // broken up anyway for most targets.
651   if (!isPowerOf2_32(NewSizeBits))
652     return false;
653   MatchInfo = std::make_tuple(LoadDef->getOperand(0).getReg(), NewSizeBits);
654   return true;
655 }
656 
657 bool CombinerHelper::applySextInRegOfLoad(
658     MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
659   assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
660   Register LoadReg;
661   unsigned ScalarSizeBits;
662   std::tie(LoadReg, ScalarSizeBits) = MatchInfo;
663   auto *LoadDef = MRI.getVRegDef(LoadReg);
664   assert(LoadDef && "Expected a load reg");
665 
666   // If we have the following:
667   // %ld = G_LOAD %ptr, (load 2)
668   // %ext = G_SEXT_INREG %ld, 8
669   //    ==>
670   // %ld = G_SEXTLOAD %ptr (load 1)
671 
672   auto &MMO = **LoadDef->memoperands_begin();
673   Builder.setInstrAndDebugLoc(MI);
674   auto &MF = Builder.getMF();
675   auto PtrInfo = MMO.getPointerInfo();
676   auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, ScalarSizeBits / 8);
677   Builder.buildLoadInstr(TargetOpcode::G_SEXTLOAD, MI.getOperand(0).getReg(),
678                          LoadDef->getOperand(1).getReg(), *NewMMO);
679   MI.eraseFromParent();
680   return true;
681 }
682 
683 bool CombinerHelper::findPostIndexCandidate(MachineInstr &MI, Register &Addr,
684                                             Register &Base, Register &Offset) {
685   auto &MF = *MI.getParent()->getParent();
686   const auto &TLI = *MF.getSubtarget().getTargetLowering();
687 
688 #ifndef NDEBUG
689   unsigned Opcode = MI.getOpcode();
690   assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
691          Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
692 #endif
693 
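  // The candidate pattern is, roughly (an illustrative sketch):
  //   %val:_(s32) = G_LOAD %base(p0)
  //   %addr:_(p0) = G_PTR_ADD %base, %offset
  // which a target with post-indexed addressing can fold into a single
  // G_INDEXED_LOAD defining both %val and the updated %addr.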
694   Base = MI.getOperand(1).getReg();
695   MachineInstr *BaseDef = MRI.getUniqueVRegDef(Base);
696   if (BaseDef && BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
697     return false;
698 
699   LLVM_DEBUG(dbgs() << "Searching for post-indexing opportunity for: " << MI);
700 
701   for (auto &Use : MRI.use_nodbg_instructions(Base)) {
702     if (Use.getOpcode() != TargetOpcode::G_PTR_ADD)
703       continue;
704 
705     Offset = Use.getOperand(2).getReg();
706     if (!ForceLegalIndexing &&
707         !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ false, MRI)) {
708       LLVM_DEBUG(dbgs() << "    Ignoring candidate with illegal addrmode: "
709                         << Use);
710       continue;
711     }
712 
713     // Make sure the offset calculation is before the potentially indexed op.
714     // FIXME: we really care about dependency here. The offset calculation might
715     // be movable.
716     MachineInstr *OffsetDef = MRI.getUniqueVRegDef(Offset);
717     if (!OffsetDef || !dominates(*OffsetDef, MI)) {
718       LLVM_DEBUG(dbgs() << "    Ignoring candidate with offset after mem-op: "
719                         << Use);
720       continue;
721     }
722 
723     // FIXME: check whether all uses of Base are load/store with foldable
724     // addressing modes. If so, using the normal addr-modes is better than
725     // forming an indexed one.
726 
727     bool MemOpDominatesAddrUses = true;
728     for (auto &PtrAddUse :
729          MRI.use_nodbg_instructions(Use.getOperand(0).getReg())) {
730       if (!dominates(MI, PtrAddUse)) {
731         MemOpDominatesAddrUses = false;
732         break;
733       }
734     }
735 
736     if (!MemOpDominatesAddrUses) {
737       LLVM_DEBUG(
738           dbgs() << "    Ignoring candidate as memop does not dominate uses: "
739                  << Use);
740       continue;
741     }
742 
743     LLVM_DEBUG(dbgs() << "    Found match: " << Use);
744     Addr = Use.getOperand(0).getReg();
745     return true;
746   }
747 
748   return false;
749 }
750 
751 bool CombinerHelper::findPreIndexCandidate(MachineInstr &MI, Register &Addr,
752                                            Register &Base, Register &Offset) {
753   auto &MF = *MI.getParent()->getParent();
754   const auto &TLI = *MF.getSubtarget().getTargetLowering();
755 
756 #ifndef NDEBUG
757   unsigned Opcode = MI.getOpcode();
758   assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
759          Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
760 #endif
761 
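  // Here the candidate pattern is, roughly (an illustrative sketch):
  //   %addr:_(p0) = G_PTR_ADD %base, %offset
  //   %val:_(s32) = G_LOAD %addr(p0)
  //   ... further uses of %addr ...
  // which can become a pre-indexed G_INDEXED_LOAD defining both %val and
  // %addr, provided the memory op dominates the remaining uses of %addr.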
762   Addr = MI.getOperand(1).getReg();
763   MachineInstr *AddrDef = getOpcodeDef(TargetOpcode::G_PTR_ADD, Addr, MRI);
764   if (!AddrDef || MRI.hasOneNonDBGUse(Addr))
765     return false;
766 
767   Base = AddrDef->getOperand(1).getReg();
768   Offset = AddrDef->getOperand(2).getReg();
769 
770   LLVM_DEBUG(dbgs() << "Found potential pre-indexed load_store: " << MI);
771 
772   if (!ForceLegalIndexing &&
773       !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ true, MRI)) {
774     LLVM_DEBUG(dbgs() << "    Skipping, not legal for target");
775     return false;
776   }
777 
778   MachineInstr *BaseDef = getDefIgnoringCopies(Base, MRI);
779   if (BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
780     LLVM_DEBUG(dbgs() << "    Skipping, frame index would need copy anyway.");
781     return false;
782   }
783 
784   if (MI.getOpcode() == TargetOpcode::G_STORE) {
785     // Would require a copy.
786     if (Base == MI.getOperand(0).getReg()) {
787       LLVM_DEBUG(dbgs() << "    Skipping, storing base so need copy anyway.");
788       return false;
789     }
790 
791     // We're expecting one use of Addr in MI, but it could also be the
792     // value stored, which isn't actually dominated by the instruction.
793     if (MI.getOperand(0).getReg() == Addr) {
794       LLVM_DEBUG(dbgs() << "    Skipping, does not dominate all addr uses");
795       return false;
796     }
797   }
798 
799   // FIXME: check whether all uses of the base pointer are constant PtrAdds.
800   // That might allow us to end base's liveness here by adjusting the constant.
801 
802   for (auto &UseMI : MRI.use_nodbg_instructions(Addr)) {
803     if (!dominates(MI, UseMI)) {
804       LLVM_DEBUG(dbgs() << "    Skipping, does not dominate all addr uses.");
805       return false;
806     }
807   }
808 
809   return true;
810 }
811 
812 bool CombinerHelper::tryCombineIndexedLoadStore(MachineInstr &MI) {
813   IndexedLoadStoreMatchInfo MatchInfo;
814   if (matchCombineIndexedLoadStore(MI, MatchInfo)) {
815     applyCombineIndexedLoadStore(MI, MatchInfo);
816     return true;
817   }
818   return false;
819 }
820 
bool CombinerHelper::matchCombineIndexedLoadStore(
    MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) {
822   unsigned Opcode = MI.getOpcode();
823   if (Opcode != TargetOpcode::G_LOAD && Opcode != TargetOpcode::G_SEXTLOAD &&
824       Opcode != TargetOpcode::G_ZEXTLOAD && Opcode != TargetOpcode::G_STORE)
825     return false;
826 
827   MatchInfo.IsPre = findPreIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base,
828                                           MatchInfo.Offset);
829   if (!MatchInfo.IsPre &&
830       !findPostIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base,
831                               MatchInfo.Offset))
832     return false;
833 
834   return true;
835 }
836 
837 void CombinerHelper::applyCombineIndexedLoadStore(
838     MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) {
839   MachineInstr &AddrDef = *MRI.getUniqueVRegDef(MatchInfo.Addr);
840   MachineIRBuilder MIRBuilder(MI);
841   unsigned Opcode = MI.getOpcode();
842   bool IsStore = Opcode == TargetOpcode::G_STORE;
843   unsigned NewOpcode;
844   switch (Opcode) {
845   case TargetOpcode::G_LOAD:
846     NewOpcode = TargetOpcode::G_INDEXED_LOAD;
847     break;
848   case TargetOpcode::G_SEXTLOAD:
849     NewOpcode = TargetOpcode::G_INDEXED_SEXTLOAD;
850     break;
851   case TargetOpcode::G_ZEXTLOAD:
852     NewOpcode = TargetOpcode::G_INDEXED_ZEXTLOAD;
853     break;
854   case TargetOpcode::G_STORE:
855     NewOpcode = TargetOpcode::G_INDEXED_STORE;
856     break;
857   default:
858     llvm_unreachable("Unknown load/store opcode");
859   }
860 
861   auto MIB = MIRBuilder.buildInstr(NewOpcode);
862   if (IsStore) {
863     MIB.addDef(MatchInfo.Addr);
864     MIB.addUse(MI.getOperand(0).getReg());
865   } else {
866     MIB.addDef(MI.getOperand(0).getReg());
867     MIB.addDef(MatchInfo.Addr);
868   }
869 
870   MIB.addUse(MatchInfo.Base);
871   MIB.addUse(MatchInfo.Offset);
872   MIB.addImm(MatchInfo.IsPre);
873   MI.eraseFromParent();
874   AddrDef.eraseFromParent();
875 
  LLVM_DEBUG(dbgs() << "    Combined to indexed operation");
877 }
878 
879 bool CombinerHelper::matchElideBrByInvertingCond(MachineInstr &MI) {
880   if (MI.getOpcode() != TargetOpcode::G_BR)
881     return false;
882 
883   // Try to match the following:
884   // bb1:
885   //   %c(s32) = G_ICMP pred, %a, %b
886   //   %c1(s1) = G_TRUNC %c(s32)
887   //   G_BRCOND %c1, %bb2
888   //   G_BR %bb3
889   // bb2:
890   // ...
891   // bb3:
892 
  // The above pattern does not have a fallthrough to the successor bb2, so it
  // always results in a branch no matter which path is taken. Here we try to
  // find that pattern and replace it with a conditional branch to bb3 and a
  // fallthrough to bb2 otherwise.
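  // The rewritten form is then, roughly (a sketch):
  // bb1:
  //   %c(s32) = G_ICMP <inverted pred>, %a, %b
  //   %c1(s1) = G_TRUNC %c(s32)
  //   G_BRCOND %c1, %bb3
  // bb2:
  // ...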
897 
898   MachineBasicBlock *MBB = MI.getParent();
899   MachineBasicBlock::iterator BrIt(MI);
900   if (BrIt == MBB->begin())
901     return false;
902   assert(std::next(BrIt) == MBB->end() && "expected G_BR to be a terminator");
903 
904   MachineInstr *BrCond = &*std::prev(BrIt);
905   if (BrCond->getOpcode() != TargetOpcode::G_BRCOND)
906     return false;
907 
908   // Check that the next block is the conditional branch target.
909   if (!MBB->isLayoutSuccessor(BrCond->getOperand(1).getMBB()))
910     return false;
911 
912   MachineInstr *CmpMI = MRI.getVRegDef(BrCond->getOperand(0).getReg());
913   if (!CmpMI || CmpMI->getOpcode() != TargetOpcode::G_ICMP ||
914       !MRI.hasOneNonDBGUse(CmpMI->getOperand(0).getReg()))
915     return false;
916   return true;
917 }
918 
919 bool CombinerHelper::tryElideBrByInvertingCond(MachineInstr &MI) {
920   if (!matchElideBrByInvertingCond(MI))
921     return false;
922   applyElideBrByInvertingCond(MI);
923   return true;
924 }
925 
926 void CombinerHelper::applyElideBrByInvertingCond(MachineInstr &MI) {
927   MachineBasicBlock *BrTarget = MI.getOperand(0).getMBB();
928   MachineBasicBlock::iterator BrIt(MI);
929   MachineInstr *BrCond = &*std::prev(BrIt);
930   MachineInstr *CmpMI = MRI.getVRegDef(BrCond->getOperand(0).getReg());
931 
932   CmpInst::Predicate InversePred = CmpInst::getInversePredicate(
933       (CmpInst::Predicate)CmpMI->getOperand(1).getPredicate());
934 
935   // Invert the G_ICMP condition.
936   Observer.changingInstr(*CmpMI);
937   CmpMI->getOperand(1).setPredicate(InversePred);
938   Observer.changedInstr(*CmpMI);
939 
940   // Change the conditional branch target.
941   Observer.changingInstr(*BrCond);
942   BrCond->getOperand(1).setMBB(BrTarget);
943   Observer.changedInstr(*BrCond);
944   MI.eraseFromParent();
945 }
946 
947 static bool shouldLowerMemFuncForSize(const MachineFunction &MF) {
948   // On Darwin, -Os means optimize for size without hurting performance, so
949   // only really optimize for size when -Oz (MinSize) is used.
950   if (MF.getTarget().getTargetTriple().isOSDarwin())
951     return MF.getFunction().hasMinSize();
952   return MF.getFunction().hasOptSize();
953 }
954 
955 // Returns a list of types to use for memory op lowering in MemOps. A partial
956 // port of findOptimalMemOpLowering in TargetLowering.
957 static bool findGISelOptimalMemOpLowering(std::vector<LLT> &MemOps,
958                                           unsigned Limit, const MemOp &Op,
959                                           unsigned DstAS, unsigned SrcAS,
960                                           const AttributeList &FuncAttributes,
961                                           const TargetLowering &TLI) {
962   if (Op.isMemcpyWithFixedDstAlign() && Op.getSrcAlign() < Op.getDstAlign())
963     return false;
964 
965   LLT Ty = TLI.getOptimalMemOpLLT(Op, FuncAttributes);
966 
967   if (Ty == LLT()) {
968     // Use the largest scalar type whose alignment constraints are satisfied.
969     // We only need to check DstAlign here as SrcAlign is always greater or
970     // equal to DstAlign (or zero).
971     Ty = LLT::scalar(64);
972     if (Op.isFixedDstAlign())
973       while (Op.getDstAlign() < Ty.getSizeInBytes() &&
974              !TLI.allowsMisalignedMemoryAccesses(Ty, DstAS, Op.getDstAlign()))
975         Ty = LLT::scalar(Ty.getSizeInBytes());
976     assert(Ty.getSizeInBits() > 0 && "Could not find valid type");
977     // FIXME: check for the largest legal type we can load/store to.
978   }
979 
980   unsigned NumMemOps = 0;
981   uint64_t Size = Op.size();
982   while (Size) {
983     unsigned TySize = Ty.getSizeInBytes();
984     while (TySize > Size) {
      // For now, only use non-vector loads/stores for the left-over pieces.
986       LLT NewTy = Ty;
987       // FIXME: check for mem op safety and legality of the types. Not all of
988       // SDAGisms map cleanly to GISel concepts.
989       if (NewTy.isVector())
990         NewTy = NewTy.getSizeInBits() > 64 ? LLT::scalar(64) : LLT::scalar(32);
991       NewTy = LLT::scalar(PowerOf2Floor(NewTy.getSizeInBits() - 1));
992       unsigned NewTySize = NewTy.getSizeInBytes();
993       assert(NewTySize > 0 && "Could not find appropriate type");
994 
995       // If the new LLT cannot cover all of the remaining bits, then consider
996       // issuing a (or a pair of) unaligned and overlapping load / store.
997       bool Fast;
998       // Need to get a VT equivalent for allowMisalignedMemoryAccesses().
999       MVT VT = getMVTForLLT(Ty);
1000       if (NumMemOps && Op.allowOverlap() && NewTySize < Size &&
1001           TLI.allowsMisalignedMemoryAccesses(
1002               VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign().value() : 0,
1003               MachineMemOperand::MONone, &Fast) &&
1004           Fast)
1005         TySize = Size;
1006       else {
1007         Ty = NewTy;
1008         TySize = NewTySize;
1009       }
1010     }
1011 
1012     if (++NumMemOps > Limit)
1013       return false;
1014 
1015     MemOps.push_back(Ty);
1016     Size -= TySize;
1017   }
1018 
1019   return true;
1020 }
1021 
1022 static Type *getTypeForLLT(LLT Ty, LLVMContext &C) {
1023   if (Ty.isVector())
1024     return FixedVectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
1025                                 Ty.getNumElements());
1026   return IntegerType::get(C, Ty.getSizeInBits());
1027 }
1028 
1029 // Get a vectorized representation of the memset value operand, GISel edition.
1030 static Register getMemsetValue(Register Val, LLT Ty, MachineIRBuilder &MIB) {
1031   MachineRegisterInfo &MRI = *MIB.getMRI();
1032   unsigned NumBits = Ty.getScalarSizeInBits();
1033   auto ValVRegAndVal = getConstantVRegValWithLookThrough(Val, MRI);
1034   if (!Ty.isVector() && ValVRegAndVal) {
1035     unsigned KnownVal = ValVRegAndVal->Value;
1036     APInt Scalar = APInt(8, KnownVal);
1037     APInt SplatVal = APInt::getSplat(NumBits, Scalar);
1038     return MIB.buildConstant(Ty, SplatVal).getReg(0);
1039   }
1040 
1041   // Extend the byte value to the larger type, and then multiply by a magic
1042   // value 0x010101... in order to replicate it across every byte.
1043   // Unless it's zero, in which case just emit a larger G_CONSTANT 0.
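  // For instance, replicating the byte value 0xAB into an s32 pattern computes
  // 0x000000AB * 0x01010101 = 0xABABABAB.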
1044   if (ValVRegAndVal && ValVRegAndVal->Value == 0) {
1045     return MIB.buildConstant(Ty, 0).getReg(0);
1046   }
1047 
1048   LLT ExtType = Ty.getScalarType();
1049   auto ZExt = MIB.buildZExtOrTrunc(ExtType, Val);
1050   if (NumBits > 8) {
1051     APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
1052     auto MagicMI = MIB.buildConstant(ExtType, Magic);
1053     Val = MIB.buildMul(ExtType, ZExt, MagicMI).getReg(0);
1054   }
1055 
1056   // For vector types create a G_BUILD_VECTOR.
1057   if (Ty.isVector())
1058     Val = MIB.buildSplatVector(Ty, Val).getReg(0);
1059 
1060   return Val;
1061 }
1062 
1063 bool CombinerHelper::optimizeMemset(MachineInstr &MI, Register Dst,
1064                                     Register Val, unsigned KnownLen,
1065                                     Align Alignment, bool IsVolatile) {
1066   auto &MF = *MI.getParent()->getParent();
1067   const auto &TLI = *MF.getSubtarget().getTargetLowering();
1068   auto &DL = MF.getDataLayout();
1069   LLVMContext &C = MF.getFunction().getContext();
1070 
1071   assert(KnownLen != 0 && "Have a zero length memset length!");
1072 
1073   bool DstAlignCanChange = false;
1074   MachineFrameInfo &MFI = MF.getFrameInfo();
1075   bool OptSize = shouldLowerMemFuncForSize(MF);
1076 
1077   MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
1078   if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
1079     DstAlignCanChange = true;
1080 
1081   unsigned Limit = TLI.getMaxStoresPerMemset(OptSize);
1082   std::vector<LLT> MemOps;
1083 
1084   const auto &DstMMO = **MI.memoperands_begin();
1085   MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();
1086 
1087   auto ValVRegAndVal = getConstantVRegValWithLookThrough(Val, MRI);
1088   bool IsZeroVal = ValVRegAndVal && ValVRegAndVal->Value == 0;
1089 
1090   if (!findGISelOptimalMemOpLowering(MemOps, Limit,
1091                                      MemOp::Set(KnownLen, DstAlignCanChange,
1092                                                 Alignment,
1093                                                 /*IsZeroMemset=*/IsZeroVal,
1094                                                 /*IsVolatile=*/IsVolatile),
1095                                      DstPtrInfo.getAddrSpace(), ~0u,
1096                                      MF.getFunction().getAttributes(), TLI))
1097     return false;
1098 
1099   if (DstAlignCanChange) {
1100     // Get an estimate of the type from the LLT.
1101     Type *IRTy = getTypeForLLT(MemOps[0], C);
1102     Align NewAlign = DL.getABITypeAlign(IRTy);
1103     if (NewAlign > Alignment) {
1104       Alignment = NewAlign;
1105       unsigned FI = FIDef->getOperand(1).getIndex();
1106       // Give the stack frame object a larger alignment if needed.
1107       if (MFI.getObjectAlign(FI) < Alignment)
1108         MFI.setObjectAlignment(FI, Alignment);
1109     }
1110   }
1111 
1112   MachineIRBuilder MIB(MI);
1113   // Find the largest store and generate the bit pattern for it.
1114   LLT LargestTy = MemOps[0];
1115   for (unsigned i = 1; i < MemOps.size(); i++)
1116     if (MemOps[i].getSizeInBits() > LargestTy.getSizeInBits())
1117       LargestTy = MemOps[i];
1118 
1119   // The memset stored value is always defined as an s8, so in order to make it
1120   // work with larger store types we need to repeat the bit pattern across the
1121   // wider type.
1122   Register MemSetValue = getMemsetValue(Val, LargestTy, MIB);
1123 
1124   if (!MemSetValue)
1125     return false;
1126 
1127   // Generate the stores. For each store type in the list, we generate the
1128   // matching store of that type to the destination address.
1129   LLT PtrTy = MRI.getType(Dst);
1130   unsigned DstOff = 0;
1131   unsigned Size = KnownLen;
1132   for (unsigned I = 0; I < MemOps.size(); I++) {
1133     LLT Ty = MemOps[I];
1134     unsigned TySize = Ty.getSizeInBytes();
1135     if (TySize > Size) {
1136       // Issuing an unaligned load / store pair that overlaps with the previous
1137       // pair. Adjust the offset accordingly.
1138       assert(I == MemOps.size() - 1 && I != 0);
1139       DstOff -= TySize - Size;
1140     }
1141 
1142     // If this store is smaller than the largest store see whether we can get
1143     // the smaller value for free with a truncate.
1144     Register Value = MemSetValue;
1145     if (Ty.getSizeInBits() < LargestTy.getSizeInBits()) {
1146       MVT VT = getMVTForLLT(Ty);
1147       MVT LargestVT = getMVTForLLT(LargestTy);
1148       if (!LargestTy.isVector() && !Ty.isVector() &&
1149           TLI.isTruncateFree(LargestVT, VT))
1150         Value = MIB.buildTrunc(Ty, MemSetValue).getReg(0);
1151       else
1152         Value = getMemsetValue(Val, Ty, MIB);
1153       if (!Value)
1154         return false;
1155     }
1156 
1157     auto *StoreMMO =
1158         MF.getMachineMemOperand(&DstMMO, DstOff, Ty.getSizeInBytes());
1159 
1160     Register Ptr = Dst;
1161     if (DstOff != 0) {
1162       auto Offset =
1163           MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), DstOff);
1164       Ptr = MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0);
1165     }
1166 
1167     MIB.buildStore(Value, Ptr, *StoreMMO);
1168     DstOff += Ty.getSizeInBytes();
1169     Size -= TySize;
1170   }
1171 
1172   MI.eraseFromParent();
1173   return true;
1174 }
1175 
1176 bool CombinerHelper::optimizeMemcpy(MachineInstr &MI, Register Dst,
1177                                     Register Src, unsigned KnownLen,
1178                                     Align DstAlign, Align SrcAlign,
1179                                     bool IsVolatile) {
1180   auto &MF = *MI.getParent()->getParent();
1181   const auto &TLI = *MF.getSubtarget().getTargetLowering();
1182   auto &DL = MF.getDataLayout();
1183   LLVMContext &C = MF.getFunction().getContext();
1184 
1185   assert(KnownLen != 0 && "Have a zero length memcpy length!");
1186 
1187   bool DstAlignCanChange = false;
1188   MachineFrameInfo &MFI = MF.getFrameInfo();
1189   bool OptSize = shouldLowerMemFuncForSize(MF);
1190   Align Alignment = commonAlignment(DstAlign, SrcAlign);
1191 
1192   MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
1193   if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
1194     DstAlignCanChange = true;
1195 
1196   // FIXME: infer better src pointer alignment like SelectionDAG does here.
1197   // FIXME: also use the equivalent of isMemSrcFromConstant and alwaysinlining
1198   // if the memcpy is in a tail call position.
1199 
1200   unsigned Limit = TLI.getMaxStoresPerMemcpy(OptSize);
1201   std::vector<LLT> MemOps;
1202 
1203   const auto &DstMMO = **MI.memoperands_begin();
1204   const auto &SrcMMO = **std::next(MI.memoperands_begin());
1205   MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();
1206   MachinePointerInfo SrcPtrInfo = SrcMMO.getPointerInfo();
1207 
1208   if (!findGISelOptimalMemOpLowering(
1209           MemOps, Limit,
1210           MemOp::Copy(KnownLen, DstAlignCanChange, Alignment, SrcAlign,
1211                       IsVolatile),
1212           DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
1213           MF.getFunction().getAttributes(), TLI))
1214     return false;
1215 
1216   if (DstAlignCanChange) {
1217     // Get an estimate of the type from the LLT.
1218     Type *IRTy = getTypeForLLT(MemOps[0], C);
1219     Align NewAlign = DL.getABITypeAlign(IRTy);
1220 
1221     // Don't promote to an alignment that would require dynamic stack
1222     // realignment.
1223     const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
1224     if (!TRI->needsStackRealignment(MF))
1225       while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign))
1226         NewAlign = NewAlign / 2;
1227 
1228     if (NewAlign > Alignment) {
1229       Alignment = NewAlign;
1230       unsigned FI = FIDef->getOperand(1).getIndex();
1231       // Give the stack frame object a larger alignment if needed.
1232       if (MFI.getObjectAlign(FI) < Alignment)
1233         MFI.setObjectAlignment(FI, Alignment);
1234     }
1235   }
1236 
1237   LLVM_DEBUG(dbgs() << "Inlining memcpy: " << MI << " into loads & stores\n");
1238 
1239   MachineIRBuilder MIB(MI);
  // Now we need to emit a pair of load and store instructions for each of the
  // types we've collected. I.e. for each type, generate a load of that width
  // from the source pointer, and then generate a corresponding store of the
  // loaded value to the dest buffer. This can result in a sequence of loads
  // and stores of mixed types, depending on what the target specifies as good
  // types to use.
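  // For example (a sketch; the actual types depend on the target's answers
  // above), an 8-byte copy that prefers s64 becomes a single s64 load/store
  // pair, while a 7-byte copy might use an s32 pair at offset 0 plus an
  // overlapping s32 pair at offset 3.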
1245   unsigned CurrOffset = 0;
1246   LLT PtrTy = MRI.getType(Src);
1247   unsigned Size = KnownLen;
1248   for (auto CopyTy : MemOps) {
    // Issuing an unaligned load / store pair that overlaps with the previous
1250     // pair. Adjust the offset accordingly.
1251     if (CopyTy.getSizeInBytes() > Size)
1252       CurrOffset -= CopyTy.getSizeInBytes() - Size;
1253 
1254     // Construct MMOs for the accesses.
1255     auto *LoadMMO =
1256         MF.getMachineMemOperand(&SrcMMO, CurrOffset, CopyTy.getSizeInBytes());
1257     auto *StoreMMO =
1258         MF.getMachineMemOperand(&DstMMO, CurrOffset, CopyTy.getSizeInBytes());
1259 
1260     // Create the load.
1261     Register LoadPtr = Src;
1262     Register Offset;
1263     if (CurrOffset != 0) {
1264       Offset = MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset)
1265                    .getReg(0);
1266       LoadPtr = MIB.buildPtrAdd(PtrTy, Src, Offset).getReg(0);
1267     }
1268     auto LdVal = MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO);
1269 
1270     // Create the store.
1271     Register StorePtr =
1272         CurrOffset == 0 ? Dst : MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0);
1273     MIB.buildStore(LdVal, StorePtr, *StoreMMO);
1274     CurrOffset += CopyTy.getSizeInBytes();
1275     Size -= CopyTy.getSizeInBytes();
1276   }
1277 
1278   MI.eraseFromParent();
1279   return true;
1280 }
1281 
1282 bool CombinerHelper::optimizeMemmove(MachineInstr &MI, Register Dst,
1283                                      Register Src, unsigned KnownLen,
1284                                      Align DstAlign, Align SrcAlign,
1285                                      bool IsVolatile) {
1286   auto &MF = *MI.getParent()->getParent();
1287   const auto &TLI = *MF.getSubtarget().getTargetLowering();
1288   auto &DL = MF.getDataLayout();
1289   LLVMContext &C = MF.getFunction().getContext();
1290 
1291   assert(KnownLen != 0 && "Have a zero length memmove length!");
1292 
1293   bool DstAlignCanChange = false;
1294   MachineFrameInfo &MFI = MF.getFrameInfo();
1295   bool OptSize = shouldLowerMemFuncForSize(MF);
1296   Align Alignment = commonAlignment(DstAlign, SrcAlign);
1297 
1298   MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
1299   if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
1300     DstAlignCanChange = true;
1301 
1302   unsigned Limit = TLI.getMaxStoresPerMemmove(OptSize);
1303   std::vector<LLT> MemOps;
1304 
1305   const auto &DstMMO = **MI.memoperands_begin();
1306   const auto &SrcMMO = **std::next(MI.memoperands_begin());
1307   MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();
1308   MachinePointerInfo SrcPtrInfo = SrcMMO.getPointerInfo();
1309 
  // FIXME: SelectionDAG always passes false for 'AllowOverlap', apparently due
  // to a bug in its findOptimalMemOpLowering implementation. For now do the
  // same thing here.
1313   if (!findGISelOptimalMemOpLowering(
1314           MemOps, Limit,
1315           MemOp::Copy(KnownLen, DstAlignCanChange, Alignment, SrcAlign,
1316                       /*IsVolatile*/ true),
1317           DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
1318           MF.getFunction().getAttributes(), TLI))
1319     return false;
1320 
1321   if (DstAlignCanChange) {
1322     // Get an estimate of the type from the LLT.
1323     Type *IRTy = getTypeForLLT(MemOps[0], C);
1324     Align NewAlign = DL.getABITypeAlign(IRTy);
1325 
1326     // Don't promote to an alignment that would require dynamic stack
1327     // realignment.
1328     const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
1329     if (!TRI->needsStackRealignment(MF))
1330       while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign))
1331         NewAlign = NewAlign / 2;
1332 
1333     if (NewAlign > Alignment) {
1334       Alignment = NewAlign;
1335       unsigned FI = FIDef->getOperand(1).getIndex();
1336       // Give the stack frame object a larger alignment if needed.
1337       if (MFI.getObjectAlign(FI) < Alignment)
1338         MFI.setObjectAlignment(FI, Alignment);
1339     }
1340   }
1341 
1342   LLVM_DEBUG(dbgs() << "Inlining memmove: " << MI << " into loads & stores\n");
1343 
1344   MachineIRBuilder MIB(MI);
1345   // Memmove requires that we perform the loads first before issuing the stores.
  // Apart from that, the loops below do much the same thing as the memcpy
  // lowering above.
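  // As a sketch, a 16-byte memmove lowered with two s64 chunks would emit
  // MIR along these lines (illustrative only; register names are invented):
  //   %v0:_(s64) = G_LOAD %src(p0) :: (load 8)
  //   %off:_(s64) = G_CONSTANT i64 8
  //   %srcp:_(p0) = G_PTR_ADD %src, %off(s64)
  //   %v1:_(s64) = G_LOAD %srcp(p0) :: (load 8)
  //   G_STORE %v0(s64), %dst(p0) :: (store 8)
  //   %dstp:_(p0) = G_PTR_ADD %dst, %off(s64)
  //   G_STORE %v1(s64), %dstp(p0) :: (store 8)
  // Both loads are emitted before either store so the stores cannot clobber
  // source bytes that have not been read yet.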
1348   unsigned CurrOffset = 0;
1349   LLT PtrTy = MRI.getType(Src);
1350   SmallVector<Register, 16> LoadVals;
1351   for (auto CopyTy : MemOps) {
1352     // Construct MMO for the load.
1353     auto *LoadMMO =
1354         MF.getMachineMemOperand(&SrcMMO, CurrOffset, CopyTy.getSizeInBytes());
1355 
1356     // Create the load.
1357     Register LoadPtr = Src;
1358     if (CurrOffset != 0) {
1359       auto Offset =
1360           MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset);
1361       LoadPtr = MIB.buildPtrAdd(PtrTy, Src, Offset).getReg(0);
1362     }
1363     LoadVals.push_back(MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO).getReg(0));
1364     CurrOffset += CopyTy.getSizeInBytes();
1365   }
1366 
1367   CurrOffset = 0;
1368   for (unsigned I = 0; I < MemOps.size(); ++I) {
1369     LLT CopyTy = MemOps[I];
1370     // Now store the values loaded.
1371     auto *StoreMMO =
1372         MF.getMachineMemOperand(&DstMMO, CurrOffset, CopyTy.getSizeInBytes());
1373 
1374     Register StorePtr = Dst;
1375     if (CurrOffset != 0) {
1376       auto Offset =
1377           MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset);
1378       StorePtr = MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0);
1379     }
1380     MIB.buildStore(LoadVals[I], StorePtr, *StoreMMO);
1381     CurrOffset += CopyTy.getSizeInBytes();
1382   }
1383   MI.eraseFromParent();
1384   return true;
1385 }
1386 
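// Shared entry point for inlining G_MEMCPY, G_MEMMOVE and G_MEMSET. The code
// below reads the destination, the source (or set value) and the length from
// operands 0, 1 and 2, and the alignments from the attached MMOs.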
1387 bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) {
1388   const unsigned Opc = MI.getOpcode();
1389   // This combine is fairly complex so it's not written with a separate
1390   // matcher function.
1391   assert((Opc == TargetOpcode::G_MEMCPY || Opc == TargetOpcode::G_MEMMOVE ||
          Opc == TargetOpcode::G_MEMSET) &&
         "Expected a memcpy-like instruction");
1393 
1394   auto MMOIt = MI.memoperands_begin();
1395   const MachineMemOperand *MemOp = *MMOIt;
1396   bool IsVolatile = MemOp->isVolatile();
1397   // Don't try to optimize volatile.
1398   if (IsVolatile)
1399     return false;
1400 
1401   Align DstAlign = MemOp->getBaseAlign();
1402   Align SrcAlign;
1403   Register Dst = MI.getOperand(0).getReg();
1404   Register Src = MI.getOperand(1).getReg();
1405   Register Len = MI.getOperand(2).getReg();
1406 
1407   if (Opc != TargetOpcode::G_MEMSET) {
1408     assert(MMOIt != MI.memoperands_end() && "Expected a second MMO on MI");
1409     MemOp = *(++MMOIt);
1410     SrcAlign = MemOp->getBaseAlign();
1411   }
1412 
  // See if this is a constant-length copy.
1414   auto LenVRegAndVal = getConstantVRegValWithLookThrough(Len, MRI);
1415   if (!LenVRegAndVal)
1416     return false; // Leave it to the legalizer to lower it to a libcall.
1417   unsigned KnownLen = LenVRegAndVal->Value;
1418 
1419   if (KnownLen == 0) {
1420     MI.eraseFromParent();
1421     return true;
1422   }
1423 
1424   if (MaxLen && KnownLen > MaxLen)
1425     return false;
1426 
  if (Opc == TargetOpcode::G_MEMCPY)
    return optimizeMemcpy(MI, Dst, Src, KnownLen, DstAlign, SrcAlign,
                          IsVolatile);
  if (Opc == TargetOpcode::G_MEMMOVE)
    return optimizeMemmove(MI, Dst, Src, KnownLen, DstAlign, SrcAlign,
                           IsVolatile);
1431   if (Opc == TargetOpcode::G_MEMSET)
1432     return optimizeMemset(MI, Dst, Src, KnownLen, DstAlign, IsVolatile);
1433   return false;
1434 }
1435 
1436 bool CombinerHelper::matchPtrAddImmedChain(MachineInstr &MI,
1437                                            PtrAddChain &MatchInfo) {
1438   // We're trying to match the following pattern:
1439   //   %t1 = G_PTR_ADD %base, G_CONSTANT imm1
1440   //   %root = G_PTR_ADD %t1, G_CONSTANT imm2
1441   // -->
1442   //   %root = G_PTR_ADD %base, G_CONSTANT (imm1 + imm2)
1443 
1444   if (MI.getOpcode() != TargetOpcode::G_PTR_ADD)
1445     return false;
1446 
1447   Register Add2 = MI.getOperand(1).getReg();
1448   Register Imm1 = MI.getOperand(2).getReg();
1449   auto MaybeImmVal = getConstantVRegValWithLookThrough(Imm1, MRI);
1450   if (!MaybeImmVal)
1451     return false;
1452 
1453   MachineInstr *Add2Def = MRI.getUniqueVRegDef(Add2);
1454   if (!Add2Def || Add2Def->getOpcode() != TargetOpcode::G_PTR_ADD)
1455     return false;
1456 
1457   Register Base = Add2Def->getOperand(1).getReg();
1458   Register Imm2 = Add2Def->getOperand(2).getReg();
1459   auto MaybeImm2Val = getConstantVRegValWithLookThrough(Imm2, MRI);
1460   if (!MaybeImm2Val)
1461     return false;
1462 
1463   // Pass the combined immediate to the apply function.
1464   MatchInfo.Imm = MaybeImmVal->Value + MaybeImm2Val->Value;
1465   MatchInfo.Base = Base;
1466   return true;
1467 }
1468 
1469 bool CombinerHelper::applyPtrAddImmedChain(MachineInstr &MI,
1470                                            PtrAddChain &MatchInfo) {
1471   assert(MI.getOpcode() == TargetOpcode::G_PTR_ADD && "Expected G_PTR_ADD");
1472   MachineIRBuilder MIB(MI);
1473   LLT OffsetTy = MRI.getType(MI.getOperand(2).getReg());
1474   auto NewOffset = MIB.buildConstant(OffsetTy, MatchInfo.Imm);
1475   Observer.changingInstr(MI);
1476   MI.getOperand(1).setReg(MatchInfo.Base);
1477   MI.getOperand(2).setReg(NewOffset.getReg(0));
1478   Observer.changedInstr(MI);
1479   return true;
1480 }
1481 
1482 bool CombinerHelper::matchCombineMulToShl(MachineInstr &MI,
1483                                           unsigned &ShiftVal) {
1484   assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
1485   auto MaybeImmVal =
1486       getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
1487   if (!MaybeImmVal || !isPowerOf2_64(MaybeImmVal->Value))
1488     return false;
1489   ShiftVal = Log2_64(MaybeImmVal->Value);
1490   return true;
1491 }
1492 
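// A concrete instance of the G_MUL -> G_SHL rewrite performed below
// (an illustrative MIR sketch; the register names are invented):
//   %c:_(s32) = G_CONSTANT i32 8
//   %d:_(s32) = G_MUL %x, %c
// -->
//   %s:_(s32) = G_CONSTANT i32 3
//   %d:_(s32) = G_SHL %x, %s(s32)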
1493 bool CombinerHelper::applyCombineMulToShl(MachineInstr &MI,
1494                                           unsigned &ShiftVal) {
1495   assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
1496   MachineIRBuilder MIB(MI);
1497   LLT ShiftTy = MRI.getType(MI.getOperand(0).getReg());
1498   auto ShiftCst = MIB.buildConstant(ShiftTy, ShiftVal);
1499   Observer.changingInstr(MI);
1500   MI.setDesc(MIB.getTII().get(TargetOpcode::G_SHL));
1501   MI.getOperand(2).setReg(ShiftCst.getReg(0));
1502   Observer.changedInstr(MI);
1503   return true;
1504 }
1505 
1506 // shl ([sza]ext x), y => zext (shl x, y), if shift does not overflow source
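// For example (an illustrative MIR sketch; the register names are invented):
//   %amt:_(s64) = G_CONSTANT i64 3
//   %ext:_(s64) = G_ZEXT %x(s32)
//   %dst:_(s64) = G_SHL %ext, %amt(s64)
// -->
//   %namt:_(s32) = G_CONSTANT i32 3
//   %nshl:_(s32) = G_SHL %x, %namt(s32)
//   %dst:_(s64) = G_ZEXT %nshl(s32)
// This is only valid when %x is known to have at least 3 leading zero bits,
// so the narrow shift cannot drop any set bits.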
1507 bool CombinerHelper::matchCombineShlOfExtend(MachineInstr &MI,
1508                                              RegisterImmPair &MatchData) {
1509   assert(MI.getOpcode() == TargetOpcode::G_SHL && KB);
1510 
1511   Register LHS = MI.getOperand(1).getReg();
1512 
1513   Register ExtSrc;
1514   if (!mi_match(LHS, MRI, m_GAnyExt(m_Reg(ExtSrc))) &&
1515       !mi_match(LHS, MRI, m_GZExt(m_Reg(ExtSrc))) &&
1516       !mi_match(LHS, MRI, m_GSExt(m_Reg(ExtSrc))))
1517     return false;
1518 
1519   // TODO: Should handle vector splat.
1520   Register RHS = MI.getOperand(2).getReg();
1521   auto MaybeShiftAmtVal = getConstantVRegValWithLookThrough(RHS, MRI);
1522   if (!MaybeShiftAmtVal)
1523     return false;
1524 
1525   if (LI) {
1526     LLT SrcTy = MRI.getType(ExtSrc);
1527 
    // We only really care about the legality of the shifted value. We can
    // pick any type for the constant shift amount, so ask the target what to
    // use. Otherwise we would have to guess and hope it is reported as legal.
1531     LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(SrcTy);
1532     if (!isLegalOrBeforeLegalizer({TargetOpcode::G_SHL, {SrcTy, ShiftAmtTy}}))
1533       return false;
1534   }
1535 
1536   int64_t ShiftAmt = MaybeShiftAmtVal->Value;
1537   MatchData.Reg = ExtSrc;
1538   MatchData.Imm = ShiftAmt;
1539 
1540   unsigned MinLeadingZeros = KB->getKnownZeroes(ExtSrc).countLeadingOnes();
1541   return MinLeadingZeros >= ShiftAmt;
1542 }
1543 
1544 bool CombinerHelper::applyCombineShlOfExtend(MachineInstr &MI,
1545                                              const RegisterImmPair &MatchData) {
1546   Register ExtSrcReg = MatchData.Reg;
1547   int64_t ShiftAmtVal = MatchData.Imm;
1548 
1549   LLT ExtSrcTy = MRI.getType(ExtSrcReg);
1550   Builder.setInstrAndDebugLoc(MI);
1551   auto ShiftAmt = Builder.buildConstant(ExtSrcTy, ShiftAmtVal);
1552   auto NarrowShift =
1553       Builder.buildShl(ExtSrcTy, ExtSrcReg, ShiftAmt, MI.getFlags());
1554   Builder.buildZExt(MI.getOperand(0), NarrowShift);
1555   MI.eraseFromParent();
1556   return true;
1557 }
1558 
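// Match a shift of a wide scalar by a constant of at least half the scalar's
// width. Such shifts can be rewritten on the two halves produced by
// G_UNMERGE_VALUES; see the MIR sketches in applyCombineShiftToUnmerge below.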
1559 bool CombinerHelper::matchCombineShiftToUnmerge(MachineInstr &MI,
1560                                                 unsigned TargetShiftSize,
1561                                                 unsigned &ShiftVal) {
1562   assert((MI.getOpcode() == TargetOpcode::G_SHL ||
1563           MI.getOpcode() == TargetOpcode::G_LSHR ||
1564           MI.getOpcode() == TargetOpcode::G_ASHR) && "Expected a shift");
1565 
1566   LLT Ty = MRI.getType(MI.getOperand(0).getReg());
  if (Ty.isVector()) // TODO: Handle vector shifts.
1568     return false;
1569 
1570   // Don't narrow further than the requested size.
1571   unsigned Size = Ty.getSizeInBits();
1572   if (Size <= TargetShiftSize)
1573     return false;
1574 
1575   auto MaybeImmVal =
1576     getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
1577   if (!MaybeImmVal)
1578     return false;
1579 
1580   ShiftVal = MaybeImmVal->Value;
1581   return ShiftVal >= Size / 2 && ShiftVal < Size;
1582 }
1583 
1584 bool CombinerHelper::applyCombineShiftToUnmerge(MachineInstr &MI,
1585                                                 const unsigned &ShiftVal) {
1586   Register DstReg = MI.getOperand(0).getReg();
1587   Register SrcReg = MI.getOperand(1).getReg();
1588   LLT Ty = MRI.getType(SrcReg);
1589   unsigned Size = Ty.getSizeInBits();
1590   unsigned HalfSize = Size / 2;
1591   assert(ShiftVal >= HalfSize);
1592 
1593   LLT HalfTy = LLT::scalar(HalfSize);
1594 
1595   Builder.setInstr(MI);
1596   auto Unmerge = Builder.buildUnmerge(HalfTy, SrcReg);
1597   unsigned NarrowShiftAmt = ShiftVal - HalfSize;
1598 
1599   if (MI.getOpcode() == TargetOpcode::G_LSHR) {
1600     Register Narrowed = Unmerge.getReg(1);
1601 
1602     //  dst = G_LSHR s64:x, C for C >= 32
1603     // =>
1604     //   lo, hi = G_UNMERGE_VALUES x
1605     //   dst = G_MERGE_VALUES (G_LSHR hi, C - 32), 0
1606 
1607     if (NarrowShiftAmt != 0) {
1608       Narrowed = Builder.buildLShr(HalfTy, Narrowed,
1609         Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0);
1610     }
1611 
1612     auto Zero = Builder.buildConstant(HalfTy, 0);
1613     Builder.buildMerge(DstReg, { Narrowed, Zero });
1614   } else if (MI.getOpcode() == TargetOpcode::G_SHL) {
1615     Register Narrowed = Unmerge.getReg(0);
1616     //  dst = G_SHL s64:x, C for C >= 32
1617     // =>
1618     //   lo, hi = G_UNMERGE_VALUES x
    //   dst = G_MERGE_VALUES 0, (G_SHL lo, C - 32)
1620     if (NarrowShiftAmt != 0) {
1621       Narrowed = Builder.buildShl(HalfTy, Narrowed,
1622         Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0);
1623     }
1624 
1625     auto Zero = Builder.buildConstant(HalfTy, 0);
1626     Builder.buildMerge(DstReg, { Zero, Narrowed });
1627   } else {
1628     assert(MI.getOpcode() == TargetOpcode::G_ASHR);
1629     auto Hi = Builder.buildAShr(
1630       HalfTy, Unmerge.getReg(1),
1631       Builder.buildConstant(HalfTy, HalfSize - 1));
1632 
1633     if (ShiftVal == HalfSize) {
1634       // (G_ASHR i64:x, 32) ->
1635       //   G_MERGE_VALUES hi_32(x), (G_ASHR hi_32(x), 31)
1636       Builder.buildMerge(DstReg, { Unmerge.getReg(1), Hi });
1637     } else if (ShiftVal == Size - 1) {
1638       // Don't need a second shift.
1639       // (G_ASHR i64:x, 63) ->
1640       //   %narrowed = (G_ASHR hi_32(x), 31)
1641       //   G_MERGE_VALUES %narrowed, %narrowed
1642       Builder.buildMerge(DstReg, { Hi, Hi });
1643     } else {
1644       auto Lo = Builder.buildAShr(
1645         HalfTy, Unmerge.getReg(1),
1646         Builder.buildConstant(HalfTy, ShiftVal - HalfSize));
1647 
      // (G_ASHR i64:x, C) for C >= 32 ->
1649       //   G_MERGE_VALUES (G_ASHR hi_32(x), C - 32), (G_ASHR hi_32(x), 31)
1650       Builder.buildMerge(DstReg, { Lo, Hi });
1651     }
1652   }
1653 
1654   MI.eraseFromParent();
1655   return true;
1656 }
1657 
1658 bool CombinerHelper::tryCombineShiftToUnmerge(MachineInstr &MI,
1659                                               unsigned TargetShiftAmount) {
1660   unsigned ShiftAmt;
1661   if (matchCombineShiftToUnmerge(MI, TargetShiftAmount, ShiftAmt)) {
1662     applyCombineShiftToUnmerge(MI, ShiftAmt);
1663     return true;
1664   }
1665 
1666   return false;
1667 }
1668 
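// Fold G_INTTOPTR (G_PTRTOINT x) back to x when x already has the destination
// pointer type, e.g. (an illustrative MIR sketch; names invented):
//   %i:_(s64) = G_PTRTOINT %p(p0)
//   %q:_(p0) = G_INTTOPTR %i(s64)
// -->
//   %q:_(p0) = COPY %p(p0)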
1669 bool CombinerHelper::matchCombineI2PToP2I(MachineInstr &MI, Register &Reg) {
1670   assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
1671   Register DstReg = MI.getOperand(0).getReg();
1672   LLT DstTy = MRI.getType(DstReg);
1673   Register SrcReg = MI.getOperand(1).getReg();
1674   return mi_match(SrcReg, MRI,
1675                   m_GPtrToInt(m_all_of(m_SpecificType(DstTy), m_Reg(Reg))));
1676 }
1677 
1678 bool CombinerHelper::applyCombineI2PToP2I(MachineInstr &MI, Register &Reg) {
1679   assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
1680   Register DstReg = MI.getOperand(0).getReg();
1681   Builder.setInstr(MI);
1682   Builder.buildCopy(DstReg, Reg);
1683   MI.eraseFromParent();
1684   return true;
1685 }
1686 
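// Fold G_PTRTOINT (G_INTTOPTR x) back to the original integer, zero-extending
// or truncating if the integer types differ, e.g. (an illustrative MIR
// sketch; names invented):
//   %p:_(p0) = G_INTTOPTR %x(s64)
//   %i:_(s32) = G_PTRTOINT %p(p0)
// -->
//   %i:_(s32) = G_TRUNC %x(s64)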
1687 bool CombinerHelper::matchCombineP2IToI2P(MachineInstr &MI, Register &Reg) {
1688   assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT");
1689   Register SrcReg = MI.getOperand(1).getReg();
1690   return mi_match(SrcReg, MRI, m_GIntToPtr(m_Reg(Reg)));
1691 }
1692 
1693 bool CombinerHelper::applyCombineP2IToI2P(MachineInstr &MI, Register &Reg) {
1694   assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT");
1695   Register DstReg = MI.getOperand(0).getReg();
1696   Builder.setInstr(MI);
1697   Builder.buildZExtOrTrunc(DstReg, Reg);
1698   MI.eraseFromParent();
1699   return true;
1700 }
1701 
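// Reassociate an integer add of a G_PTRTOINT back into pointer arithmetic,
// e.g. (an illustrative MIR sketch; names invented):
//   %i:_(s64) = G_PTRTOINT %p(p0)
//   %a:_(s64) = G_ADD %i, %off
// -->
//   %pa:_(p0) = G_PTR_ADD %p, %off(s64)
//   %a:_(s64) = G_PTRTOINT %pa(p0)
// This only fires when the pointer and integer widths match, as checked below.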
1702 bool CombinerHelper::matchCombineAddP2IToPtrAdd(
1703     MachineInstr &MI, std::pair<Register, bool> &PtrReg) {
1704   assert(MI.getOpcode() == TargetOpcode::G_ADD);
1705   Register LHS = MI.getOperand(1).getReg();
1706   Register RHS = MI.getOperand(2).getReg();
1707   LLT IntTy = MRI.getType(LHS);
1708 
1709   // G_PTR_ADD always has the pointer in the LHS, so we may need to commute the
1710   // instruction.
1711   PtrReg.second = false;
1712   for (Register SrcReg : {LHS, RHS}) {
1713     if (mi_match(SrcReg, MRI, m_GPtrToInt(m_Reg(PtrReg.first)))) {
1714       // Don't handle cases where the integer is implicitly converted to the
1715       // pointer width.
1716       LLT PtrTy = MRI.getType(PtrReg.first);
1717       if (PtrTy.getScalarSizeInBits() == IntTy.getScalarSizeInBits())
1718         return true;
1719     }
1720 
1721     PtrReg.second = true;
1722   }
1723 
1724   return false;
1725 }
1726 
1727 bool CombinerHelper::applyCombineAddP2IToPtrAdd(
1728     MachineInstr &MI, std::pair<Register, bool> &PtrReg) {
1729   Register Dst = MI.getOperand(0).getReg();
1730   Register LHS = MI.getOperand(1).getReg();
1731   Register RHS = MI.getOperand(2).getReg();
1732 
1733   const bool DoCommute = PtrReg.second;
1734   if (DoCommute)
1735     std::swap(LHS, RHS);
1736   LHS = PtrReg.first;
1737 
1738   LLT PtrTy = MRI.getType(LHS);
1739 
1740   Builder.setInstrAndDebugLoc(MI);
1741   auto PtrAdd = Builder.buildPtrAdd(PtrTy, LHS, RHS);
1742   Builder.buildPtrToInt(Dst, PtrAdd);
1743   MI.eraseFromParent();
1744   return true;
1745 }
1746 
1747 bool CombinerHelper::matchAnyExplicitUseIsUndef(MachineInstr &MI) {
1748   return any_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
1749     return MO.isReg() &&
1750            getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
1751   });
1752 }
1753 
1754 bool CombinerHelper::matchAllExplicitUsesAreUndef(MachineInstr &MI) {
1755   return all_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
1756     return !MO.isReg() ||
1757            getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
1758   });
1759 }
1760 
1761 bool CombinerHelper::matchUndefShuffleVectorMask(MachineInstr &MI) {
1762   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
1763   ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
1764   return all_of(Mask, [](int Elt) { return Elt < 0; });
1765 }
1766 
1767 bool CombinerHelper::matchUndefStore(MachineInstr &MI) {
1768   assert(MI.getOpcode() == TargetOpcode::G_STORE);
1769   return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(0).getReg(),
1770                       MRI);
1771 }
1772 
1773 bool CombinerHelper::matchUndefSelectCmp(MachineInstr &MI) {
1774   assert(MI.getOpcode() == TargetOpcode::G_SELECT);
1775   return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(1).getReg(),
1776                       MRI);
1777 }
1778 
1779 bool CombinerHelper::matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx) {
1780   assert(MI.getOpcode() == TargetOpcode::G_SELECT);
1781   if (auto MaybeCstCmp =
1782           getConstantVRegValWithLookThrough(MI.getOperand(1).getReg(), MRI)) {
1783     OpIdx = MaybeCstCmp->Value ? 2 : 3;
1784     return true;
1785   }
1786   return false;
1787 }
1788 
1789 bool CombinerHelper::eraseInst(MachineInstr &MI) {
1790   MI.eraseFromParent();
1791   return true;
1792 }
1793 
1794 bool CombinerHelper::matchEqualDefs(const MachineOperand &MOP1,
1795                                     const MachineOperand &MOP2) {
1796   if (!MOP1.isReg() || !MOP2.isReg())
1797     return false;
1798   MachineInstr *I1 = getDefIgnoringCopies(MOP1.getReg(), MRI);
1799   if (!I1)
1800     return false;
1801   MachineInstr *I2 = getDefIgnoringCopies(MOP2.getReg(), MRI);
1802   if (!I2)
1803     return false;
1804 
1805   // Handle a case like this:
1806   //
1807   // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<2 x s64>)
1808   //
1809   // Even though %0 and %1 are produced by the same instruction they are not
1810   // the same values.
1811   if (I1 == I2)
1812     return MOP1.getReg() == MOP2.getReg();
1813 
1814   // If we have an instruction which loads or stores, we can't guarantee that
1815   // it is identical.
1816   //
1817   // For example, we may have
1818   //
1819   // %x1 = G_LOAD %addr (load N from @somewhere)
1820   // ...
1821   // call @foo
1822   // ...
1823   // %x2 = G_LOAD %addr (load N from @somewhere)
1824   // ...
1825   // %or = G_OR %x1, %x2
1826   //
1827   // It's possible that @foo will modify whatever lives at the address we're
1828   // loading from. To be safe, let's just assume that all loads and stores
1829   // are different (unless we have something which is guaranteed to not
1830   // change.)
1831   if (I1->mayLoadOrStore() && !I1->isDereferenceableInvariantLoad(nullptr))
1832     return false;
1833 
1834   // Check for physical registers on the instructions first to avoid cases
1835   // like this:
1836   //
1837   // %a = COPY $physreg
1838   // ...
1839   // SOMETHING implicit-def $physreg
1840   // ...
1841   // %b = COPY $physreg
1842   //
1843   // These copies are not equivalent.
1844   if (any_of(I1->uses(), [](const MachineOperand &MO) {
1845         return MO.isReg() && MO.getReg().isPhysical();
1846       })) {
1847     // Check if we have a case like this:
1848     //
1849     // %a = COPY $physreg
1850     // %b = COPY %a
1851     //
1852     // In this case, I1 and I2 will both be equal to %a = COPY $physreg.
1853     // From that, we know that they must have the same value, since they must
1854     // have come from the same COPY.
1855     return I1->isIdenticalTo(*I2);
1856   }
1857 
1858   // We don't have any physical registers, so we don't necessarily need the
1859   // same vreg defs.
1860   //
1861   // On the off-chance that there's some target instruction feeding into the
1862   // instruction, let's use produceSameValue instead of isIdenticalTo.
1863   return Builder.getTII().produceSameValue(*I1, *I2, &MRI);
1864 }
1865 
1866 bool CombinerHelper::matchConstantOp(const MachineOperand &MOP, int64_t C) {
1867   if (!MOP.isReg())
1868     return false;
1869   // MIPatternMatch doesn't let us look through G_ZEXT etc.
1870   auto ValAndVReg = getConstantVRegValWithLookThrough(MOP.getReg(), MRI);
1871   return ValAndVReg && ValAndVReg->Value == C;
1872 }
1873 
1874 bool CombinerHelper::replaceSingleDefInstWithOperand(MachineInstr &MI,
1875                                                      unsigned OpIdx) {
1876   assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
1877   Register OldReg = MI.getOperand(0).getReg();
1878   Register Replacement = MI.getOperand(OpIdx).getReg();
1879   assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?");
1880   MI.eraseFromParent();
1881   replaceRegWith(MRI, OldReg, Replacement);
1882   return true;
1883 }
1884 
1885 bool CombinerHelper::replaceSingleDefInstWithReg(MachineInstr &MI,
1886                                                  Register Replacement) {
1887   assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
1888   Register OldReg = MI.getOperand(0).getReg();
1889   assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?");
1890   MI.eraseFromParent();
1891   replaceRegWith(MRI, OldReg, Replacement);
1892   return true;
1893 }
1894 
1895 bool CombinerHelper::matchSelectSameVal(MachineInstr &MI) {
1896   assert(MI.getOpcode() == TargetOpcode::G_SELECT);
1897   // Match (cond ? x : x)
1898   return matchEqualDefs(MI.getOperand(2), MI.getOperand(3)) &&
1899          canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(2).getReg(),
1900                        MRI);
1901 }
1902 
1903 bool CombinerHelper::matchBinOpSameVal(MachineInstr &MI) {
1904   return matchEqualDefs(MI.getOperand(1), MI.getOperand(2)) &&
1905          canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(1).getReg(),
1906                        MRI);
1907 }
1908 
1909 bool CombinerHelper::matchOperandIsZero(MachineInstr &MI, unsigned OpIdx) {
1910   return matchConstantOp(MI.getOperand(OpIdx), 0) &&
1911          canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(OpIdx).getReg(),
1912                        MRI);
1913 }
1914 
1915 bool CombinerHelper::replaceInstWithFConstant(MachineInstr &MI, double C) {
1916   assert(MI.getNumDefs() == 1 && "Expected only one def?");
1917   Builder.setInstr(MI);
1918   Builder.buildFConstant(MI.getOperand(0), C);
1919   MI.eraseFromParent();
1920   return true;
1921 }
1922 
1923 bool CombinerHelper::replaceInstWithConstant(MachineInstr &MI, int64_t C) {
1924   assert(MI.getNumDefs() == 1 && "Expected only one def?");
1925   Builder.setInstr(MI);
1926   Builder.buildConstant(MI.getOperand(0), C);
1927   MI.eraseFromParent();
1928   return true;
1929 }
1930 
1931 bool CombinerHelper::replaceInstWithUndef(MachineInstr &MI) {
1932   assert(MI.getNumDefs() == 1 && "Expected only one def?");
1933   Builder.setInstr(MI);
1934   Builder.buildUndef(MI.getOperand(0));
1935   MI.eraseFromParent();
1936   return true;
1937 }
1938 
1939 bool CombinerHelper::matchSimplifyAddToSub(
1940     MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) {
1941   Register LHS = MI.getOperand(1).getReg();
1942   Register RHS = MI.getOperand(2).getReg();
1943   Register &NewLHS = std::get<0>(MatchInfo);
1944   Register &NewRHS = std::get<1>(MatchInfo);
1945 
1946   // Helper lambda to check for opportunities for
1947   // ((0-A) + B) -> B - A
1948   // (A + (0-B)) -> A - B
1949   auto CheckFold = [&](Register &MaybeSub, Register &MaybeNewLHS) {
1950     int64_t Cst;
1951     if (!mi_match(MaybeSub, MRI, m_GSub(m_ICst(Cst), m_Reg(NewRHS))) ||
1952         Cst != 0)
1953       return false;
1954     NewLHS = MaybeNewLHS;
1955     return true;
1956   };
1957 
1958   return CheckFold(LHS, RHS) || CheckFold(RHS, LHS);
1959 }
1960 
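// In MIR terms the first form matched above looks like (an illustrative
// sketch; names invented):
//   %zero:_(s32) = G_CONSTANT i32 0
//   %neg:_(s32) = G_SUB %zero, %a
//   %add:_(s32) = G_ADD %neg, %b
// -->
//   %add:_(s32) = G_SUB %b, %a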
1961 bool CombinerHelper::applySimplifyAddToSub(
1962     MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) {
1963   Builder.setInstr(MI);
1964   Register SubLHS, SubRHS;
1965   std::tie(SubLHS, SubRHS) = MatchInfo;
1966   Builder.buildSub(MI.getOperand(0).getReg(), SubLHS, SubRHS);
1967   MI.eraseFromParent();
1968   return true;
1969 }
1970 
1971 bool CombinerHelper::matchHoistLogicOpWithSameOpcodeHands(
1972     MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) {
1973   // Matches: logic (hand x, ...), (hand y, ...) -> hand (logic x, y), ...
1974   //
1975   // Creates the new hand + logic instruction (but does not insert them.)
1976   //
1977   // On success, MatchInfo is populated with the new instructions. These are
1978   // inserted in applyHoistLogicOpWithSameOpcodeHands.
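  //
  // For example, with G_ZEXT hands (an illustrative MIR sketch; the register
  // names are invented):
  //   %lx:_(s64) = G_ZEXT %x(s32)
  //   %ly:_(s64) = G_ZEXT %y(s32)
  //   %d:_(s64) = G_OR %lx, %ly
  // -->
  //   %o:_(s32) = G_OR %x, %y
  //   %d:_(s64) = G_ZEXT %o(s32)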
1979   unsigned LogicOpcode = MI.getOpcode();
1980   assert(LogicOpcode == TargetOpcode::G_AND ||
1981          LogicOpcode == TargetOpcode::G_OR ||
1982          LogicOpcode == TargetOpcode::G_XOR);
1983   MachineIRBuilder MIB(MI);
1984   Register Dst = MI.getOperand(0).getReg();
1985   Register LHSReg = MI.getOperand(1).getReg();
1986   Register RHSReg = MI.getOperand(2).getReg();
1987 
1988   // Don't recompute anything.
1989   if (!MRI.hasOneNonDBGUse(LHSReg) || !MRI.hasOneNonDBGUse(RHSReg))
1990     return false;
1991 
1992   // Make sure we have (hand x, ...), (hand y, ...)
1993   MachineInstr *LeftHandInst = getDefIgnoringCopies(LHSReg, MRI);
1994   MachineInstr *RightHandInst = getDefIgnoringCopies(RHSReg, MRI);
1995   if (!LeftHandInst || !RightHandInst)
1996     return false;
1997   unsigned HandOpcode = LeftHandInst->getOpcode();
1998   if (HandOpcode != RightHandInst->getOpcode())
1999     return false;
2000   if (!LeftHandInst->getOperand(1).isReg() ||
2001       !RightHandInst->getOperand(1).isReg())
2002     return false;
2003 
2004   // Make sure the types match up, and if we're doing this post-legalization,
2005   // we end up with legal types.
2006   Register X = LeftHandInst->getOperand(1).getReg();
2007   Register Y = RightHandInst->getOperand(1).getReg();
2008   LLT XTy = MRI.getType(X);
2009   LLT YTy = MRI.getType(Y);
2010   if (XTy != YTy)
2011     return false;
2012   if (!isLegalOrBeforeLegalizer({LogicOpcode, {XTy, YTy}}))
2013     return false;
2014 
2015   // Optional extra source register.
2016   Register ExtraHandOpSrcReg;
2017   switch (HandOpcode) {
2018   default:
2019     return false;
2020   case TargetOpcode::G_ANYEXT:
2021   case TargetOpcode::G_SEXT:
2022   case TargetOpcode::G_ZEXT: {
2023     // Match: logic (ext X), (ext Y) --> ext (logic X, Y)
2024     break;
2025   }
2026   case TargetOpcode::G_AND:
2027   case TargetOpcode::G_ASHR:
2028   case TargetOpcode::G_LSHR:
2029   case TargetOpcode::G_SHL: {
2030     // Match: logic (binop x, z), (binop y, z) -> binop (logic x, y), z
2031     MachineOperand &ZOp = LeftHandInst->getOperand(2);
2032     if (!matchEqualDefs(ZOp, RightHandInst->getOperand(2)))
2033       return false;
2034     ExtraHandOpSrcReg = ZOp.getReg();
2035     break;
2036   }
2037   }
2038 
2039   // Record the steps to build the new instructions.
2040   //
2041   // Steps to build (logic x, y)
2042   auto NewLogicDst = MRI.createGenericVirtualRegister(XTy);
2043   OperandBuildSteps LogicBuildSteps = {
2044       [=](MachineInstrBuilder &MIB) { MIB.addDef(NewLogicDst); },
2045       [=](MachineInstrBuilder &MIB) { MIB.addReg(X); },
2046       [=](MachineInstrBuilder &MIB) { MIB.addReg(Y); }};
2047   InstructionBuildSteps LogicSteps(LogicOpcode, LogicBuildSteps);
2048 
2049   // Steps to build hand (logic x, y), ...z
2050   OperandBuildSteps HandBuildSteps = {
2051       [=](MachineInstrBuilder &MIB) { MIB.addDef(Dst); },
2052       [=](MachineInstrBuilder &MIB) { MIB.addReg(NewLogicDst); }};
2053   if (ExtraHandOpSrcReg.isValid())
2054     HandBuildSteps.push_back(
2055         [=](MachineInstrBuilder &MIB) { MIB.addReg(ExtraHandOpSrcReg); });
2056   InstructionBuildSteps HandSteps(HandOpcode, HandBuildSteps);
2057 
2058   MatchInfo = InstructionStepsMatchInfo({LogicSteps, HandSteps});
2059   return true;
2060 }
2061 
2062 bool CombinerHelper::applyBuildInstructionSteps(
2063     MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) {
2064   assert(MatchInfo.InstrsToBuild.size() &&
2065          "Expected at least one instr to build?");
2066   Builder.setInstr(MI);
2067   for (auto &InstrToBuild : MatchInfo.InstrsToBuild) {
2068     assert(InstrToBuild.Opcode && "Expected a valid opcode?");
2069     assert(InstrToBuild.OperandFns.size() && "Expected at least one operand?");
2070     MachineInstrBuilder Instr = Builder.buildInstr(InstrToBuild.Opcode);
2071     for (auto &OperandFn : InstrToBuild.OperandFns)
2072       OperandFn(Instr);
2073   }
2074   MI.eraseFromParent();
2075   return true;
2076 }
2077 
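// Fold an ashr/shl pair by the same constant into G_SEXT_INREG, e.g.
// (an illustrative MIR sketch; names invented):
//   %c:_(s32) = G_CONSTANT i32 24
//   %shl:_(s32) = G_SHL %x, %c
//   %dst:_(s32) = G_ASHR %shl, %c
// -->
//   %dst:_(s32) = G_SEXT_INREG %x, 8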
2078 bool CombinerHelper::matchAshrShlToSextInreg(
2079     MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) {
2080   assert(MI.getOpcode() == TargetOpcode::G_ASHR);
2081   int64_t ShlCst, AshrCst;
2082   Register Src;
2083   // FIXME: detect splat constant vectors.
2084   if (!mi_match(MI.getOperand(0).getReg(), MRI,
2085                 m_GAShr(m_GShl(m_Reg(Src), m_ICst(ShlCst)), m_ICst(AshrCst))))
2086     return false;
2087   if (ShlCst != AshrCst)
2088     return false;
2089   if (!isLegalOrBeforeLegalizer(
2090           {TargetOpcode::G_SEXT_INREG, {MRI.getType(Src)}}))
2091     return false;
2092   MatchInfo = std::make_tuple(Src, ShlCst);
2093   return true;
}

2095 bool CombinerHelper::applyAshShlToSextInreg(
2096     MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) {
2097   assert(MI.getOpcode() == TargetOpcode::G_ASHR);
2098   Register Src;
2099   int64_t ShiftAmt;
2100   std::tie(Src, ShiftAmt) = MatchInfo;
2101   unsigned Size = MRI.getType(Src).getScalarSizeInBits();
2102   Builder.setInstrAndDebugLoc(MI);
2103   Builder.buildSExtInReg(MI.getOperand(0).getReg(), Src, Size - ShiftAmt);
2104   MI.eraseFromParent();
2105   return true;
2106 }
2107 
2108 bool CombinerHelper::matchAndWithTrivialMask(MachineInstr &MI,
2109                                              Register &Replacement) {
2110   // Given
2111   //
2112   // %mask:_(sN) = G_CONSTANT iN 000...0111...1
2113   // %x:_(sN) = G_SOMETHING
2114   // %y:_(sN) = G_AND %x, %mask
2115   //
2116   // Eliminate the G_AND when it is known that x & mask == x.
2117   //
2118   // Patterns like this can appear as a result of legalization. E.g.
2119   //
2120   // %cmp:_(s32) = G_ICMP intpred(pred), %x(s32), %y
2121   // %one:_(s32) = G_CONSTANT i32 1
2122   // %and:_(s32) = G_AND %cmp, %one
2123   //
2124   // In this case, G_ICMP only produces a single bit, so x & 1 == x.
2125   assert(MI.getOpcode() == TargetOpcode::G_AND);
2126   if (!KB)
2127     return false;
2128 
2129   // Replacement = %x, AndDst = %y. Check that we can replace AndDst with the
2130   // LHS of the G_AND.
2131   Replacement = MI.getOperand(1).getReg();
2132   Register AndDst = MI.getOperand(0).getReg();
2133   LLT DstTy = MRI.getType(AndDst);
2134 
2135   // FIXME: This should be removed once GISelKnownBits supports vectors.
2136   if (DstTy.isVector())
2137     return false;
2138   if (!canReplaceReg(AndDst, Replacement, MRI))
2139     return false;
2140 
2141   // Check that we have a constant on the RHS of the G_AND, which is of the form
2142   // 000...0111...1.
2143   int64_t Cst;
2144   if (!mi_match(MI.getOperand(2).getReg(), MRI, m_ICst(Cst)))
2145     return false;
2146   APInt Mask(DstTy.getSizeInBits(), Cst);
2147   if (!Mask.isMask())
2148     return false;
2149 
2150   // Now, let's check that x & Mask == x. If this is true, then x & ~Mask == 0.
2151   return KB->maskedValueIsZero(Replacement, ~Mask);
2152 }
2153 
2154 bool CombinerHelper::tryCombine(MachineInstr &MI) {
2155   if (tryCombineCopy(MI))
2156     return true;
2157   if (tryCombineExtendingLoads(MI))
2158     return true;
2159   if (tryCombineIndexedLoadStore(MI))
2160     return true;
2161   return false;
2162 }
2163