//===-- lib/CodeGen/GlobalISel/CombinerHelper.cpp --------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 #include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
9 #include "llvm/CodeGen/GlobalISel/Combiner.h"
10 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
11 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
12 #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
13 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
14 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
15 #include "llvm/CodeGen/GlobalISel/Utils.h"
16 #include "llvm/CodeGen/MachineDominators.h"
17 #include "llvm/CodeGen/MachineFrameInfo.h"
18 #include "llvm/CodeGen/MachineInstr.h"
19 #include "llvm/CodeGen/MachineMemOperand.h"
20 #include "llvm/CodeGen/MachineRegisterInfo.h"
21 #include "llvm/CodeGen/TargetInstrInfo.h"
22 #include "llvm/CodeGen/TargetLowering.h"
23 #include "llvm/Support/MathExtras.h"
24 #include "llvm/Target/TargetMachine.h"
25 
26 #define DEBUG_TYPE "gi-combiner"
27 
28 using namespace llvm;
29 using namespace MIPatternMatch;
30 
31 // Option to allow testing of the combiner while no targets know about indexed
32 // addressing.
33 static cl::opt<bool>
34     ForceLegalIndexing("force-legal-indexing", cl::Hidden, cl::init(false),
35                        cl::desc("Force all indexed operations to be "
36                                 "legal for the GlobalISel combiner"));
37 
38 CombinerHelper::CombinerHelper(GISelChangeObserver &Observer,
39                                MachineIRBuilder &B, GISelKnownBits *KB,
40                                MachineDominatorTree *MDT,
41                                const LegalizerInfo *LI)
42     : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer),
43       KB(KB), MDT(MDT), LI(LI) {
44   (void)this->KB;
45 }
46 
47 const TargetLowering &CombinerHelper::getTargetLowering() const {
48   return *Builder.getMF().getSubtarget().getTargetLowering();
49 }
50 
51 bool CombinerHelper::isLegalOrBeforeLegalizer(
52     const LegalityQuery &Query) const {
53   return !LI || LI->getAction(Query).Action == LegalizeActions::Legal;
54 }
55 
56 void CombinerHelper::replaceRegWith(MachineRegisterInfo &MRI, Register FromReg,
57                                     Register ToReg) const {
58   Observer.changingAllUsesOfReg(MRI, FromReg);
59 
60   if (MRI.constrainRegAttrs(ToReg, FromReg))
61     MRI.replaceRegWith(FromReg, ToReg);
62   else
63     Builder.buildCopy(ToReg, FromReg);
64 
65   Observer.finishedChangingAllUsesOfReg();
66 }
67 
68 void CombinerHelper::replaceRegOpWith(MachineRegisterInfo &MRI,
69                                       MachineOperand &FromRegOp,
70                                       Register ToReg) const {
71   assert(FromRegOp.getParent() && "Expected an operand in an MI");
72   Observer.changingInstr(*FromRegOp.getParent());
73 
74   FromRegOp.setReg(ToReg);
75 
76   Observer.changedInstr(*FromRegOp.getParent());
77 }
78 
79 bool CombinerHelper::tryCombineCopy(MachineInstr &MI) {
80   if (matchCombineCopy(MI)) {
81     applyCombineCopy(MI);
82     return true;
83   }
84   return false;
85 }
86 bool CombinerHelper::matchCombineCopy(MachineInstr &MI) {
87   if (MI.getOpcode() != TargetOpcode::COPY)
88     return false;
89   Register DstReg = MI.getOperand(0).getReg();
90   Register SrcReg = MI.getOperand(1).getReg();
91   return canReplaceReg(DstReg, SrcReg, MRI);
92 }
93 void CombinerHelper::applyCombineCopy(MachineInstr &MI) {
94   Register DstReg = MI.getOperand(0).getReg();
95   Register SrcReg = MI.getOperand(1).getReg();
96   MI.eraseFromParent();
97   replaceRegWith(MRI, DstReg, SrcReg);
98 }
99 
100 bool CombinerHelper::tryCombineConcatVectors(MachineInstr &MI) {
101   bool IsUndef = false;
102   SmallVector<Register, 4> Ops;
103   if (matchCombineConcatVectors(MI, IsUndef, Ops)) {
104     applyCombineConcatVectors(MI, IsUndef, Ops);
105     return true;
106   }
107   return false;
108 }
109 
110 bool CombinerHelper::matchCombineConcatVectors(MachineInstr &MI, bool &IsUndef,
111                                                SmallVectorImpl<Register> &Ops) {
112   assert(MI.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
113          "Invalid instruction");
114   IsUndef = true;
115   MachineInstr *Undef = nullptr;
116 
117   // Walk over all the operands of concat vectors and check if they are
118   // build_vector themselves or undef.
119   // Then collect their operands in Ops.
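  // For example (illustrative MIR, not taken from a specific test):
  //   %a:_(<2 x s32>) = G_BUILD_VECTOR %x(s32), %y(s32)
  //   %b:_(<2 x s32>) = G_IMPLICIT_DEF
  //   %c:_(<4 x s32>) = G_CONCAT_VECTORS %a(<2 x s32>), %b(<2 x s32>)
  // collects Ops = {%x, %y, undef, undef}, where the undefs are the scalar
  // G_IMPLICIT_DEF built below, so the concat can later be rebuilt as a single
  // flattened G_BUILD_VECTOR.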
120   for (const MachineOperand &MO : MI.uses()) {
121     Register Reg = MO.getReg();
122     MachineInstr *Def = MRI.getVRegDef(Reg);
123     assert(Def && "Operand not defined");
124     switch (Def->getOpcode()) {
125     case TargetOpcode::G_BUILD_VECTOR:
126       IsUndef = false;
127       // Remember the operands of the build_vector to fold
128       // them into the yet-to-build flattened concat vectors.
129       for (const MachineOperand &BuildVecMO : Def->uses())
130         Ops.push_back(BuildVecMO.getReg());
131       break;
132     case TargetOpcode::G_IMPLICIT_DEF: {
133       LLT OpType = MRI.getType(Reg);
134       // Keep one undef value for all the undef operands.
135       if (!Undef) {
136         Builder.setInsertPt(*MI.getParent(), MI);
137         Undef = Builder.buildUndef(OpType.getScalarType());
138       }
139       assert(MRI.getType(Undef->getOperand(0).getReg()) ==
140                  OpType.getScalarType() &&
141              "All undefs should have the same type");
      // Break the undef vector into as many scalar elements as needed
      // for the flattening.
144       for (unsigned EltIdx = 0, EltEnd = OpType.getNumElements();
145            EltIdx != EltEnd; ++EltIdx)
146         Ops.push_back(Undef->getOperand(0).getReg());
147       break;
148     }
149     default:
150       return false;
151     }
152   }
153   return true;
154 }
155 void CombinerHelper::applyCombineConcatVectors(
156     MachineInstr &MI, bool IsUndef, const ArrayRef<Register> Ops) {
  // We determined that the concat_vectors can be flattened.
158   // Generate the flattened build_vector.
159   Register DstReg = MI.getOperand(0).getReg();
160   Builder.setInsertPt(*MI.getParent(), MI);
161   Register NewDstReg = MRI.cloneVirtualRegister(DstReg);
162 
  // Note: IsUndef is somewhat redundant. We could have determined it by
  // checking that all Ops are undef.  Alternatively, we could have
  // generated a build_vector of undefs and relied on another combine to
  // clean that up.  For now, given that we already gather this information
  // in tryCombineConcatVectors, just save compile time and issue the
  // right thing directly.
169   if (IsUndef)
170     Builder.buildUndef(NewDstReg);
171   else
172     Builder.buildBuildVector(NewDstReg, Ops);
173   MI.eraseFromParent();
174   replaceRegWith(MRI, DstReg, NewDstReg);
175 }
176 
177 bool CombinerHelper::tryCombineShuffleVector(MachineInstr &MI) {
178   SmallVector<Register, 4> Ops;
179   if (matchCombineShuffleVector(MI, Ops)) {
180     applyCombineShuffleVector(MI, Ops);
181     return true;
182   }
183   return false;
184 }
185 
186 bool CombinerHelper::matchCombineShuffleVector(MachineInstr &MI,
187                                                SmallVectorImpl<Register> &Ops) {
188   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
189          "Invalid instruction kind");
190   LLT DstType = MRI.getType(MI.getOperand(0).getReg());
191   Register Src1 = MI.getOperand(1).getReg();
192   LLT SrcType = MRI.getType(Src1);
  // As bizarre as it may look, a shuffle vector can actually produce a
  // scalar! This is because at the IR level a <1 x ty> shuffle
  // vector is perfectly valid.
196   unsigned DstNumElts = DstType.isVector() ? DstType.getNumElements() : 1;
197   unsigned SrcNumElts = SrcType.isVector() ? SrcType.getNumElements() : 1;
198 
  // If the resulting vector is smaller than the size of the source
  // vectors being concatenated, we won't be able to replace the
  // shuffle vector with a concat_vectors.
  //
  // Note: We may still be able to produce a concat_vectors fed by
  //       extract_vector_elt and so on. It is less clear that would
  //       be better though, so don't bother for now.
  //
  // If the destination is a scalar, the size of the sources doesn't
  // matter: we will lower the shuffle to a plain copy. This will
  // work only if the source and destination have the same size. But
  // that's covered by the next condition.
  //
  // TODO: If the source and destination sizes don't match, we could
  //       still emit an extract vector element in that case.
214   if (DstNumElts < 2 * SrcNumElts && DstNumElts != 1)
215     return false;
216 
217   // Check that the shuffle mask can be broken evenly between the
218   // different sources.
219   if (DstNumElts % SrcNumElts != 0)
220     return false;
221 
222   // Mask length is a multiple of the source vector length.
223   // Check if the shuffle is some kind of concatenation of the input
224   // vectors.
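  // For example (illustrative): with two <2 x s32> sources, the mask
  // <0, 1, 2, 3> simply lays the sources end to end, so
  //   %d:_(<4 x s32>) = G_SHUFFLE_VECTOR %s1(<2 x s32>), %s2(<2 x s32>),
  //                                      shufflemask(0, 1, 2, 3)
  // can later be rebuilt as
  //   %d:_(<4 x s32>) = G_CONCAT_VECTORS %s1(<2 x s32>), %s2(<2 x s32>)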
225   unsigned NumConcat = DstNumElts / SrcNumElts;
226   SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
227   ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
228   for (unsigned i = 0; i != DstNumElts; ++i) {
229     int Idx = Mask[i];
230     // Undef value.
231     if (Idx < 0)
232       continue;
233     // Ensure the indices in each SrcType sized piece are sequential and that
234     // the same source is used for the whole piece.
235     if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
236         (ConcatSrcs[i / SrcNumElts] >= 0 &&
237          ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts)))
238       return false;
239     // Remember which source this index came from.
240     ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
241   }
242 
243   // The shuffle is concatenating multiple vectors together.
244   // Collect the different operands for that.
245   Register UndefReg;
246   Register Src2 = MI.getOperand(2).getReg();
247   for (auto Src : ConcatSrcs) {
248     if (Src < 0) {
249       if (!UndefReg) {
250         Builder.setInsertPt(*MI.getParent(), MI);
251         UndefReg = Builder.buildUndef(SrcType).getReg(0);
252       }
253       Ops.push_back(UndefReg);
254     } else if (Src == 0)
255       Ops.push_back(Src1);
256     else
257       Ops.push_back(Src2);
258   }
259   return true;
260 }
261 
262 void CombinerHelper::applyCombineShuffleVector(MachineInstr &MI,
263                                                const ArrayRef<Register> Ops) {
264   Register DstReg = MI.getOperand(0).getReg();
265   Builder.setInsertPt(*MI.getParent(), MI);
266   Register NewDstReg = MRI.cloneVirtualRegister(DstReg);
267 
268   if (Ops.size() == 1)
269     Builder.buildCopy(NewDstReg, Ops[0]);
270   else
271     Builder.buildMerge(NewDstReg, Ops);
272 
273   MI.eraseFromParent();
274   replaceRegWith(MRI, DstReg, NewDstReg);
275 }
276 
277 namespace {
278 
279 /// Select a preference between two uses. CurrentUse is the current preference
/// while the *ForCandidate arguments describe the candidate under
/// consideration.
281 PreferredTuple ChoosePreferredUse(PreferredTuple &CurrentUse,
282                                   const LLT TyForCandidate,
283                                   unsigned OpcodeForCandidate,
284                                   MachineInstr *MIForCandidate) {
285   if (!CurrentUse.Ty.isValid()) {
286     if (CurrentUse.ExtendOpcode == OpcodeForCandidate ||
287         CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT)
288       return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
289     return CurrentUse;
290   }
291 
292   // We permit the extend to hoist through basic blocks but this is only
293   // sensible if the target has extending loads. If you end up lowering back
  // into a load and extend during legalization, then the end result is
295   // hoisting the extend up to the load.
296 
297   // Prefer defined extensions to undefined extensions as these are more
298   // likely to reduce the number of instructions.
299   if (OpcodeForCandidate == TargetOpcode::G_ANYEXT &&
300       CurrentUse.ExtendOpcode != TargetOpcode::G_ANYEXT)
301     return CurrentUse;
302   else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT &&
303            OpcodeForCandidate != TargetOpcode::G_ANYEXT)
304     return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
305 
  // Prefer sign extensions to zero extensions as sign-extensions tend to be
  // more expensive, so folding the costlier extend into the load is the
  // bigger win.
308   if (CurrentUse.Ty == TyForCandidate) {
309     if (CurrentUse.ExtendOpcode == TargetOpcode::G_SEXT &&
310         OpcodeForCandidate == TargetOpcode::G_ZEXT)
311       return CurrentUse;
312     else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT &&
313              OpcodeForCandidate == TargetOpcode::G_SEXT)
314       return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
315   }
316 
317   // This is potentially target specific. We've chosen the largest type
318   // because G_TRUNC is usually free. One potential catch with this is that
  // some targets have fewer large registers than small ones, and this choice
  // potentially increases the live-range of the larger value.
322   if (TyForCandidate.getSizeInBits() > CurrentUse.Ty.getSizeInBits()) {
323     return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
324   }
325   return CurrentUse;
326 }
327 
328 /// Find a suitable place to insert some instructions and insert them. This
329 /// function accounts for special cases like inserting before a PHI node.
/// The current strategy for inserting before PHIs is to duplicate the
331 /// instructions for each predecessor. However, while that's ok for G_TRUNC
332 /// on most targets since it generally requires no code, other targets/cases may
333 /// want to try harder to find a dominating block.
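///
/// For example (illustrative): if the use operand is %v in
///   %phi:_(s32) = G_PHI %v(s32), %bb.1, %w(s32), %bb.2
/// then the insertion block chosen is %bb.1, the predecessor paired with %v,
/// rather than the block containing the G_PHI.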
334 static void InsertInsnsWithoutSideEffectsBeforeUse(
335     MachineIRBuilder &Builder, MachineInstr &DefMI, MachineOperand &UseMO,
336     std::function<void(MachineBasicBlock *, MachineBasicBlock::iterator,
337                        MachineOperand &UseMO)>
338         Inserter) {
339   MachineInstr &UseMI = *UseMO.getParent();
340 
341   MachineBasicBlock *InsertBB = UseMI.getParent();
342 
343   // If the use is a PHI then we want the predecessor block instead.
344   if (UseMI.isPHI()) {
345     MachineOperand *PredBB = std::next(&UseMO);
346     InsertBB = PredBB->getMBB();
347   }
348 
349   // If the block is the same block as the def then we want to insert just after
350   // the def instead of at the start of the block.
351   if (InsertBB == DefMI.getParent()) {
352     MachineBasicBlock::iterator InsertPt = &DefMI;
353     Inserter(InsertBB, std::next(InsertPt), UseMO);
354     return;
355   }
356 
357   // Otherwise we want the start of the BB
358   Inserter(InsertBB, InsertBB->getFirstNonPHI(), UseMO);
359 }
360 } // end anonymous namespace
361 
362 bool CombinerHelper::tryCombineExtendingLoads(MachineInstr &MI) {
363   PreferredTuple Preferred;
364   if (matchCombineExtendingLoads(MI, Preferred)) {
365     applyCombineExtendingLoads(MI, Preferred);
366     return true;
367   }
368   return false;
369 }
370 
371 bool CombinerHelper::matchCombineExtendingLoads(MachineInstr &MI,
372                                                 PreferredTuple &Preferred) {
373   // We match the loads and follow the uses to the extend instead of matching
374   // the extends and following the def to the load. This is because the load
375   // must remain in the same position for correctness (unless we also add code
376   // to find a safe place to sink it) whereas the extend is freely movable.
  // It also avoids duplicating the load, which would be incorrect for volatile
  // accesses and merely wasteful otherwise.
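  //
  // The overall transform this enables looks like (illustrative):
  //   %v:_(s16) = G_LOAD %ptr(p0) :: (load 2)
  //   %e:_(s32) = G_SEXT %v(s16)
  // becoming
  //   %e:_(s32) = G_SEXTLOAD %ptr(p0) :: (load 2)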
379 
380   if (MI.getOpcode() != TargetOpcode::G_LOAD &&
381       MI.getOpcode() != TargetOpcode::G_SEXTLOAD &&
382       MI.getOpcode() != TargetOpcode::G_ZEXTLOAD)
383     return false;
384 
385   auto &LoadValue = MI.getOperand(0);
386   assert(LoadValue.isReg() && "Result wasn't a register?");
387 
388   LLT LoadValueTy = MRI.getType(LoadValue.getReg());
389   if (!LoadValueTy.isScalar())
390     return false;
391 
392   // Most architectures are going to legalize <s8 loads into at least a 1 byte
393   // load, and the MMOs can only describe memory accesses in multiples of bytes.
394   // If we try to perform extload combining on those, we can end up with
395   // %a(s8) = extload %ptr (load 1 byte from %ptr)
396   // ... which is an illegal extload instruction.
397   if (LoadValueTy.getSizeInBits() < 8)
398     return false;
399 
  // Non power-of-2 types will very likely be legalized into multiple loads
  // anyway. Don't bother trying to match them into extending loads.
402   if (!isPowerOf2_32(LoadValueTy.getSizeInBits()))
403     return false;
404 
405   // Find the preferred type aside from the any-extends (unless it's the only
  // one) and non-extending ops. We'll emit an extending load to that type
  // and emit a variant of (extend (trunc X)) for the others according to the
408   // relative type sizes. At the same time, pick an extend to use based on the
409   // extend involved in the chosen type.
410   unsigned PreferredOpcode = MI.getOpcode() == TargetOpcode::G_LOAD
411                                  ? TargetOpcode::G_ANYEXT
412                                  : MI.getOpcode() == TargetOpcode::G_SEXTLOAD
413                                        ? TargetOpcode::G_SEXT
414                                        : TargetOpcode::G_ZEXT;
415   Preferred = {LLT(), PreferredOpcode, nullptr};
416   for (auto &UseMI : MRI.use_nodbg_instructions(LoadValue.getReg())) {
417     if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||
418         UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
419         (UseMI.getOpcode() == TargetOpcode::G_ANYEXT)) {
420       // Check for legality.
421       if (LI) {
422         LegalityQuery::MemDesc MMDesc;
423         const auto &MMO = **MI.memoperands_begin();
424         MMDesc.SizeInBits = MMO.getSizeInBits();
425         MMDesc.AlignInBits = MMO.getAlign().value() * 8;
426         MMDesc.Ordering = MMO.getOrdering();
427         LLT UseTy = MRI.getType(UseMI.getOperand(0).getReg());
428         LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
429         if (LI->getAction({MI.getOpcode(), {UseTy, SrcTy}, {MMDesc}}).Action !=
430             LegalizeActions::Legal)
431           continue;
432       }
433       Preferred = ChoosePreferredUse(Preferred,
434                                      MRI.getType(UseMI.getOperand(0).getReg()),
435                                      UseMI.getOpcode(), &UseMI);
436     }
437   }
438 
439   // There were no extends
440   if (!Preferred.MI)
441     return false;
  // It should be impossible to choose an extend without selecting a different
443   // type since by definition the result of an extend is larger.
444   assert(Preferred.Ty != LoadValueTy && "Extending to same type?");
445 
446   LLVM_DEBUG(dbgs() << "Preferred use is: " << *Preferred.MI);
447   return true;
448 }
449 
450 void CombinerHelper::applyCombineExtendingLoads(MachineInstr &MI,
451                                                 PreferredTuple &Preferred) {
452   // Rewrite the load to the chosen extending load.
453   Register ChosenDstReg = Preferred.MI->getOperand(0).getReg();
454 
455   // Inserter to insert a truncate back to the original type at a given point
456   // with some basic CSE to limit truncate duplication to one per BB.
457   DenseMap<MachineBasicBlock *, MachineInstr *> EmittedInsns;
458   auto InsertTruncAt = [&](MachineBasicBlock *InsertIntoBB,
459                            MachineBasicBlock::iterator InsertBefore,
460                            MachineOperand &UseMO) {
461     MachineInstr *PreviouslyEmitted = EmittedInsns.lookup(InsertIntoBB);
462     if (PreviouslyEmitted) {
463       Observer.changingInstr(*UseMO.getParent());
464       UseMO.setReg(PreviouslyEmitted->getOperand(0).getReg());
465       Observer.changedInstr(*UseMO.getParent());
466       return;
467     }
468 
469     Builder.setInsertPt(*InsertIntoBB, InsertBefore);
470     Register NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg());
471     MachineInstr *NewMI = Builder.buildTrunc(NewDstReg, ChosenDstReg);
472     EmittedInsns[InsertIntoBB] = NewMI;
473     replaceRegOpWith(MRI, UseMO, NewDstReg);
474   };
475 
476   Observer.changingInstr(MI);
477   MI.setDesc(
478       Builder.getTII().get(Preferred.ExtendOpcode == TargetOpcode::G_SEXT
479                                ? TargetOpcode::G_SEXTLOAD
480                                : Preferred.ExtendOpcode == TargetOpcode::G_ZEXT
481                                      ? TargetOpcode::G_ZEXTLOAD
482                                      : TargetOpcode::G_LOAD));
483 
484   // Rewrite all the uses to fix up the types.
485   auto &LoadValue = MI.getOperand(0);
486   SmallVector<MachineOperand *, 4> Uses;
487   for (auto &UseMO : MRI.use_operands(LoadValue.getReg()))
488     Uses.push_back(&UseMO);
489 
490   for (auto *UseMO : Uses) {
491     MachineInstr *UseMI = UseMO->getParent();
492 
493     // If the extend is compatible with the preferred extend then we should fix
494     // up the type and extend so that it uses the preferred use.
495     if (UseMI->getOpcode() == Preferred.ExtendOpcode ||
496         UseMI->getOpcode() == TargetOpcode::G_ANYEXT) {
497       Register UseDstReg = UseMI->getOperand(0).getReg();
498       MachineOperand &UseSrcMO = UseMI->getOperand(1);
499       const LLT UseDstTy = MRI.getType(UseDstReg);
500       if (UseDstReg != ChosenDstReg) {
501         if (Preferred.Ty == UseDstTy) {
502           // If the use has the same type as the preferred use, then merge
503           // the vregs and erase the extend. For example:
504           //    %1:_(s8) = G_LOAD ...
505           //    %2:_(s32) = G_SEXT %1(s8)
506           //    %3:_(s32) = G_ANYEXT %1(s8)
507           //    ... = ... %3(s32)
508           // rewrites to:
509           //    %2:_(s32) = G_SEXTLOAD ...
510           //    ... = ... %2(s32)
511           replaceRegWith(MRI, UseDstReg, ChosenDstReg);
512           Observer.erasingInstr(*UseMO->getParent());
513           UseMO->getParent()->eraseFromParent();
514         } else if (Preferred.Ty.getSizeInBits() < UseDstTy.getSizeInBits()) {
515           // If the preferred size is smaller, then keep the extend but extend
516           // from the result of the extending load. For example:
517           //    %1:_(s8) = G_LOAD ...
518           //    %2:_(s32) = G_SEXT %1(s8)
519           //    %3:_(s64) = G_ANYEXT %1(s8)
520           //    ... = ... %3(s64)
          // rewrites to:
522           //    %2:_(s32) = G_SEXTLOAD ...
523           //    %3:_(s64) = G_ANYEXT %2:_(s32)
524           //    ... = ... %3(s64)
525           replaceRegOpWith(MRI, UseSrcMO, ChosenDstReg);
526         } else {
          // If the preferred size is larger, then insert a truncate. For
          // example:
          //    %1:_(s8) = G_LOAD ...
          //    %2:_(s64) = G_SEXT %1(s8)
          //    %3:_(s32) = G_ZEXT %1(s8)
          //    ... = ... %3(s32)
          // rewrites to:
          //    %2:_(s64) = G_SEXTLOAD ...
          //    %4:_(s8) = G_TRUNC %2:_(s64)
          //    %3:_(s32) = G_ZEXT %4:_(s8)
          //    ... = ... %3(s32)
538           InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO,
539                                                  InsertTruncAt);
540         }
541         continue;
542       }
      // The use is the preferred extend we chose earlier. We're going to
      // update the load to def this value later so just erase the old
      // extend.
546       Observer.erasingInstr(*UseMO->getParent());
547       UseMO->getParent()->eraseFromParent();
548       continue;
549     }
550 
551     // The use isn't an extend. Truncate back to the type we originally loaded.
552     // This is free on many targets.
553     InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO, InsertTruncAt);
554   }
555 
556   MI.getOperand(0).setReg(ChosenDstReg);
557   Observer.changedInstr(MI);
558 }
559 
560 bool CombinerHelper::isPredecessor(const MachineInstr &DefMI,
561                                    const MachineInstr &UseMI) {
562   assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
563          "shouldn't consider debug uses");
564   assert(DefMI.getParent() == UseMI.getParent());
565   if (&DefMI == &UseMI)
566     return false;
567 
  // Loop through the basic block until we find one of the instructions:
  // whichever of DefMI and UseMI appears first determines the answer.
  for (const MachineInstr &CurMI : *DefMI.getParent()) {
    if (&CurMI == &DefMI)
      return true;
    if (&CurMI == &UseMI)
      return false;
  }

  llvm_unreachable("Block must contain instructions");
574 }
575 
576 bool CombinerHelper::dominates(const MachineInstr &DefMI,
577                                const MachineInstr &UseMI) {
578   assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
579          "shouldn't consider debug uses");
580   if (MDT)
581     return MDT->dominates(&DefMI, &UseMI);
582   else if (DefMI.getParent() != UseMI.getParent())
583     return false;
584 
585   return isPredecessor(DefMI, UseMI);
586 }
587 
588 bool CombinerHelper::matchSextTruncSextLoad(MachineInstr &MI) {
589   assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
590   Register SrcReg = MI.getOperand(1).getReg();
591   Register LoadUser = SrcReg;
592 
593   if (MRI.getType(SrcReg).isVector())
594     return false;
595 
596   Register TruncSrc;
597   if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc))))
598     LoadUser = TruncSrc;
599 
600   uint64_t SizeInBits = MI.getOperand(2).getImm();
601   // If the source is a G_SEXTLOAD from the same bit width, then we don't
602   // need any extend at all, just a truncate.
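  // For example (illustrative):
  //   %ld:_(s32) = G_SEXTLOAD %ptr(p0) :: (load 1)
  //   %in:_(s32) = G_SEXT_INREG %ld, 8
  // Here the load already sign-extended from 8 bits, so the G_SEXT_INREG is
  // redundant and can be replaced with a plain copy of %ld.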
603   if (auto *LoadMI = getOpcodeDef(TargetOpcode::G_SEXTLOAD, LoadUser, MRI)) {
604     const auto &MMO = **LoadMI->memoperands_begin();
605     // If truncating more than the original extended value, abort.
606     if (TruncSrc && MRI.getType(TruncSrc).getSizeInBits() < MMO.getSizeInBits())
607       return false;
608     if (MMO.getSizeInBits() == SizeInBits)
609       return true;
610   }
611   return false;
612 }
613 
614 bool CombinerHelper::applySextTruncSextLoad(MachineInstr &MI) {
615   assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
616   Builder.setInstrAndDebugLoc(MI);
617   Builder.buildCopy(MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
618   MI.eraseFromParent();
619   return true;
620 }
621 
622 bool CombinerHelper::matchSextInRegOfLoad(
623     MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
624   assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
625 
626   // Only supports scalars for now.
627   if (MRI.getType(MI.getOperand(0).getReg()).isVector())
628     return false;
629 
630   Register SrcReg = MI.getOperand(1).getReg();
631   MachineInstr *LoadDef = getOpcodeDef(TargetOpcode::G_LOAD, SrcReg, MRI);
632   if (!LoadDef || !MRI.hasOneNonDBGUse(LoadDef->getOperand(0).getReg()))
633     return false;
634 
635   // If the sign extend extends from a narrower width than the load's width,
636   // then we can narrow the load width when we combine to a G_SEXTLOAD.
637   auto &MMO = **LoadDef->memoperands_begin();
638   // Don't do this for non-simple loads.
639   if (MMO.isAtomic() || MMO.isVolatile())
640     return false;
641 
642   // Avoid widening the load at all.
643   unsigned NewSizeBits =
644       std::min((uint64_t)MI.getOperand(2).getImm(), MMO.getSizeInBits());
645 
646   // Don't generate G_SEXTLOADs with a < 1 byte width.
647   if (NewSizeBits < 8)
648     return false;
  // Don't bother creating a non-power-of-2 sextload, it will likely be broken
  // up anyway for most targets.
651   if (!isPowerOf2_32(NewSizeBits))
652     return false;
653   MatchInfo = std::make_tuple(LoadDef->getOperand(0).getReg(), NewSizeBits);
654   return true;
655 }
656 
657 bool CombinerHelper::applySextInRegOfLoad(
658     MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
659   assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
660   Register LoadReg;
661   unsigned ScalarSizeBits;
662   std::tie(LoadReg, ScalarSizeBits) = MatchInfo;
663   auto *LoadDef = MRI.getVRegDef(LoadReg);
664   assert(LoadDef && "Expected a load reg");
665 
666   // If we have the following:
667   // %ld = G_LOAD %ptr, (load 2)
668   // %ext = G_SEXT_INREG %ld, 8
669   //    ==>
670   // %ld = G_SEXTLOAD %ptr (load 1)
671 
672   auto &MMO = **LoadDef->memoperands_begin();
673   Builder.setInstrAndDebugLoc(MI);
674   auto &MF = Builder.getMF();
675   auto PtrInfo = MMO.getPointerInfo();
676   auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, ScalarSizeBits / 8);
677   Builder.buildLoadInstr(TargetOpcode::G_SEXTLOAD, MI.getOperand(0).getReg(),
678                          LoadDef->getOperand(1).getReg(), *NewMMO);
679   MI.eraseFromParent();
680   return true;
681 }
682 
683 bool CombinerHelper::findPostIndexCandidate(MachineInstr &MI, Register &Addr,
684                                             Register &Base, Register &Offset) {
685   auto &MF = *MI.getParent()->getParent();
686   const auto &TLI = *MF.getSubtarget().getTargetLowering();
687 
688 #ifndef NDEBUG
689   unsigned Opcode = MI.getOpcode();
690   assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
691          Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
692 #endif
693 
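  // We are looking for a pattern like this (illustrative):
  //   G_STORE %val(s32), %base(p0)           ; MI
  //   ...
  //   %addr:_(p0) = G_PTR_ADD %base, %offset
  // i.e. a pointer add based on the same base register that happens after the
  // memory operation, which a post-indexed access could fold by producing
  // %base + %offset as its pointer result.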
694   Base = MI.getOperand(1).getReg();
695   MachineInstr *BaseDef = MRI.getUniqueVRegDef(Base);
696   if (BaseDef && BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
697     return false;
698 
699   LLVM_DEBUG(dbgs() << "Searching for post-indexing opportunity for: " << MI);
  // FIXME: The following use traversal needs a bail out for pathological cases.
701   for (auto &Use : MRI.use_nodbg_instructions(Base)) {
702     if (Use.getOpcode() != TargetOpcode::G_PTR_ADD)
703       continue;
704 
705     Offset = Use.getOperand(2).getReg();
706     if (!ForceLegalIndexing &&
707         !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ false, MRI)) {
708       LLVM_DEBUG(dbgs() << "    Ignoring candidate with illegal addrmode: "
709                         << Use);
710       continue;
711     }
712 
713     // Make sure the offset calculation is before the potentially indexed op.
714     // FIXME: we really care about dependency here. The offset calculation might
715     // be movable.
716     MachineInstr *OffsetDef = MRI.getUniqueVRegDef(Offset);
717     if (!OffsetDef || !dominates(*OffsetDef, MI)) {
718       LLVM_DEBUG(dbgs() << "    Ignoring candidate with offset after mem-op: "
719                         << Use);
720       continue;
721     }
722 
723     // FIXME: check whether all uses of Base are load/store with foldable
724     // addressing modes. If so, using the normal addr-modes is better than
725     // forming an indexed one.
726 
727     bool MemOpDominatesAddrUses = true;
728     for (auto &PtrAddUse :
729          MRI.use_nodbg_instructions(Use.getOperand(0).getReg())) {
730       if (!dominates(MI, PtrAddUse)) {
731         MemOpDominatesAddrUses = false;
732         break;
733       }
734     }
735 
736     if (!MemOpDominatesAddrUses) {
737       LLVM_DEBUG(
738           dbgs() << "    Ignoring candidate as memop does not dominate uses: "
739                  << Use);
740       continue;
741     }
742 
743     LLVM_DEBUG(dbgs() << "    Found match: " << Use);
744     Addr = Use.getOperand(0).getReg();
745     return true;
746   }
747 
748   return false;
749 }
750 
751 bool CombinerHelper::findPreIndexCandidate(MachineInstr &MI, Register &Addr,
752                                            Register &Base, Register &Offset) {
753   auto &MF = *MI.getParent()->getParent();
754   const auto &TLI = *MF.getSubtarget().getTargetLowering();
755 
756 #ifndef NDEBUG
757   unsigned Opcode = MI.getOpcode();
758   assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
759          Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
760 #endif
761 
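  // We are looking for a pattern like this (illustrative):
  //   %addr:_(p0) = G_PTR_ADD %base, %offset
  //   ...
  //   G_STORE %val(s32), %addr(p0)           ; MI
  // i.e. the memory operation already addresses %base + %offset, so a
  // pre-indexed access could compute that pointer as part of the access
  // itself.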
762   Addr = MI.getOperand(1).getReg();
763   MachineInstr *AddrDef = getOpcodeDef(TargetOpcode::G_PTR_ADD, Addr, MRI);
764   if (!AddrDef || MRI.hasOneNonDBGUse(Addr))
765     return false;
766 
767   Base = AddrDef->getOperand(1).getReg();
768   Offset = AddrDef->getOperand(2).getReg();
769 
770   LLVM_DEBUG(dbgs() << "Found potential pre-indexed load_store: " << MI);
771 
772   if (!ForceLegalIndexing &&
773       !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ true, MRI)) {
774     LLVM_DEBUG(dbgs() << "    Skipping, not legal for target");
775     return false;
776   }
777 
778   MachineInstr *BaseDef = getDefIgnoringCopies(Base, MRI);
779   if (BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
780     LLVM_DEBUG(dbgs() << "    Skipping, frame index would need copy anyway.");
781     return false;
782   }
783 
784   if (MI.getOpcode() == TargetOpcode::G_STORE) {
785     // Would require a copy.
786     if (Base == MI.getOperand(0).getReg()) {
787       LLVM_DEBUG(dbgs() << "    Skipping, storing base so need copy anyway.");
788       return false;
789     }
790 
791     // We're expecting one use of Addr in MI, but it could also be the
792     // value stored, which isn't actually dominated by the instruction.
793     if (MI.getOperand(0).getReg() == Addr) {
794       LLVM_DEBUG(dbgs() << "    Skipping, does not dominate all addr uses");
795       return false;
796     }
797   }
798 
799   // FIXME: check whether all uses of the base pointer are constant PtrAdds.
800   // That might allow us to end base's liveness here by adjusting the constant.
801 
802   for (auto &UseMI : MRI.use_nodbg_instructions(Addr)) {
803     if (!dominates(MI, UseMI)) {
804       LLVM_DEBUG(dbgs() << "    Skipping, does not dominate all addr uses.");
805       return false;
806     }
807   }
808 
809   return true;
810 }
811 
812 bool CombinerHelper::tryCombineIndexedLoadStore(MachineInstr &MI) {
813   IndexedLoadStoreMatchInfo MatchInfo;
814   if (matchCombineIndexedLoadStore(MI, MatchInfo)) {
815     applyCombineIndexedLoadStore(MI, MatchInfo);
816     return true;
817   }
818   return false;
819 }
820 
821 bool CombinerHelper::matchCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) {
822   unsigned Opcode = MI.getOpcode();
823   if (Opcode != TargetOpcode::G_LOAD && Opcode != TargetOpcode::G_SEXTLOAD &&
824       Opcode != TargetOpcode::G_ZEXTLOAD && Opcode != TargetOpcode::G_STORE)
825     return false;
826 
827   // For now, no targets actually support these opcodes so don't waste time
828   // running these unless we're forced to for testing.
829   if (!ForceLegalIndexing)
830     return false;
831 
832   MatchInfo.IsPre = findPreIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base,
833                                           MatchInfo.Offset);
834   if (!MatchInfo.IsPre &&
835       !findPostIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base,
836                               MatchInfo.Offset))
837     return false;
838 
839   return true;
840 }
841 
842 void CombinerHelper::applyCombineIndexedLoadStore(
843     MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) {
844   MachineInstr &AddrDef = *MRI.getUniqueVRegDef(MatchInfo.Addr);
845   MachineIRBuilder MIRBuilder(MI);
846   unsigned Opcode = MI.getOpcode();
847   bool IsStore = Opcode == TargetOpcode::G_STORE;
848   unsigned NewOpcode;
849   switch (Opcode) {
850   case TargetOpcode::G_LOAD:
851     NewOpcode = TargetOpcode::G_INDEXED_LOAD;
852     break;
853   case TargetOpcode::G_SEXTLOAD:
854     NewOpcode = TargetOpcode::G_INDEXED_SEXTLOAD;
855     break;
856   case TargetOpcode::G_ZEXTLOAD:
857     NewOpcode = TargetOpcode::G_INDEXED_ZEXTLOAD;
858     break;
859   case TargetOpcode::G_STORE:
860     NewOpcode = TargetOpcode::G_INDEXED_STORE;
861     break;
862   default:
863     llvm_unreachable("Unknown load/store opcode");
864   }
865 
866   auto MIB = MIRBuilder.buildInstr(NewOpcode);
867   if (IsStore) {
868     MIB.addDef(MatchInfo.Addr);
869     MIB.addUse(MI.getOperand(0).getReg());
870   } else {
871     MIB.addDef(MI.getOperand(0).getReg());
872     MIB.addDef(MatchInfo.Addr);
873   }
874 
875   MIB.addUse(MatchInfo.Base);
876   MIB.addUse(MatchInfo.Offset);
877   MIB.addImm(MatchInfo.IsPre);
878   MI.eraseFromParent();
879   AddrDef.eraseFromParent();
880 
  LLVM_DEBUG(dbgs() << "    Combined to indexed operation");
882 }
883 
884 bool CombinerHelper::matchOptBrCondByInvertingCond(MachineInstr &MI) {
885   if (MI.getOpcode() != TargetOpcode::G_BR)
886     return false;
887 
888   // Try to match the following:
889   // bb1:
890   //   G_BRCOND %c1, %bb2
891   //   G_BR %bb3
892   // bb2:
893   // ...
894   // bb3:
895 
  // The above pattern does not have a fall-through to the successor bb2, always
  // resulting in a branch no matter which path is taken. Here we try to find
  // and replace that pattern with a conditional branch to bb3 and a
  // fall-through to bb2. This is generally better for branch predictors.
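  //
  // After the rewrite (illustrative), %c1 is inverted and the branch targets
  // are swapped:
  // bb1:
  //   G_BRCOND %c1_inverted, %bb3
  //   G_BR %bb2
  // bb2:
  // ...
  // bb3: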
900 
901   MachineBasicBlock *MBB = MI.getParent();
902   MachineBasicBlock::iterator BrIt(MI);
903   if (BrIt == MBB->begin())
904     return false;
905   assert(std::next(BrIt) == MBB->end() && "expected G_BR to be a terminator");
906 
907   MachineInstr *BrCond = &*std::prev(BrIt);
908   if (BrCond->getOpcode() != TargetOpcode::G_BRCOND)
909     return false;
910 
911   // Check that the next block is the conditional branch target.
912   if (!MBB->isLayoutSuccessor(BrCond->getOperand(1).getMBB()))
913     return false;
914   return true;
915 }
916 
917 void CombinerHelper::applyOptBrCondByInvertingCond(MachineInstr &MI) {
918   MachineBasicBlock *BrTarget = MI.getOperand(0).getMBB();
919   MachineBasicBlock::iterator BrIt(MI);
920   MachineInstr *BrCond = &*std::prev(BrIt);
921 
922   Builder.setInstrAndDebugLoc(*BrCond);
923   LLT Ty = MRI.getType(BrCond->getOperand(0).getReg());
924   // FIXME: Does int/fp matter for this? If so, we might need to restrict
925   // this to i1 only since we might not know for sure what kind of
926   // compare generated the condition value.
927   auto True = Builder.buildConstant(
928       Ty, getICmpTrueVal(getTargetLowering(), false, false));
929   auto Xor = Builder.buildXor(Ty, BrCond->getOperand(0), True);
930 
931   auto *FallthroughBB = BrCond->getOperand(1).getMBB();
932   Observer.changingInstr(MI);
933   MI.getOperand(0).setMBB(FallthroughBB);
934   Observer.changedInstr(MI);
935 
936   // Change the conditional branch to use the inverted condition and
937   // new target block.
938   Observer.changingInstr(*BrCond);
939   BrCond->getOperand(0).setReg(Xor.getReg(0));
940   BrCond->getOperand(1).setMBB(BrTarget);
941   Observer.changedInstr(*BrCond);
942 }
943 
944 static bool shouldLowerMemFuncForSize(const MachineFunction &MF) {
945   // On Darwin, -Os means optimize for size without hurting performance, so
946   // only really optimize for size when -Oz (MinSize) is used.
947   if (MF.getTarget().getTargetTriple().isOSDarwin())
948     return MF.getFunction().hasMinSize();
949   return MF.getFunction().hasOptSize();
950 }
951 
952 // Returns a list of types to use for memory op lowering in MemOps. A partial
953 // port of findOptimalMemOpLowering in TargetLowering.
954 static bool findGISelOptimalMemOpLowering(std::vector<LLT> &MemOps,
955                                           unsigned Limit, const MemOp &Op,
956                                           unsigned DstAS, unsigned SrcAS,
957                                           const AttributeList &FuncAttributes,
958                                           const TargetLowering &TLI) {
959   if (Op.isMemcpyWithFixedDstAlign() && Op.getSrcAlign() < Op.getDstAlign())
960     return false;
961 
962   LLT Ty = TLI.getOptimalMemOpLLT(Op, FuncAttributes);
963 
964   if (Ty == LLT()) {
965     // Use the largest scalar type whose alignment constraints are satisfied.
966     // We only need to check DstAlign here as SrcAlign is always greater or
967     // equal to DstAlign (or zero).
968     Ty = LLT::scalar(64);
969     if (Op.isFixedDstAlign())
970       while (Op.getDstAlign() < Ty.getSizeInBytes() &&
971              !TLI.allowsMisalignedMemoryAccesses(Ty, DstAS, Op.getDstAlign()))
972         Ty = LLT::scalar(Ty.getSizeInBytes());
973     assert(Ty.getSizeInBits() > 0 && "Could not find valid type");
974     // FIXME: check for the largest legal type we can load/store to.
975   }
976 
977   unsigned NumMemOps = 0;
978   uint64_t Size = Op.size();
979   while (Size) {
980     unsigned TySize = Ty.getSizeInBytes();
981     while (TySize > Size) {
      // For now, only use non-vector loads / stores for the left-over pieces.
983       LLT NewTy = Ty;
      // FIXME: check for mem op safety and legality of the types. Not all of
      // the SDAG-isms map cleanly to GISel concepts.
986       if (NewTy.isVector())
987         NewTy = NewTy.getSizeInBits() > 64 ? LLT::scalar(64) : LLT::scalar(32);
988       NewTy = LLT::scalar(PowerOf2Floor(NewTy.getSizeInBits() - 1));
989       unsigned NewTySize = NewTy.getSizeInBytes();
990       assert(NewTySize > 0 && "Could not find appropriate type");
991 
992       // If the new LLT cannot cover all of the remaining bits, then consider
993       // issuing a (or a pair of) unaligned and overlapping load / store.
994       bool Fast;
995       // Need to get a VT equivalent for allowMisalignedMemoryAccesses().
996       MVT VT = getMVTForLLT(Ty);
997       if (NumMemOps && Op.allowOverlap() && NewTySize < Size &&
998           TLI.allowsMisalignedMemoryAccesses(
999               VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign().value() : 0,
1000               MachineMemOperand::MONone, &Fast) &&
1001           Fast)
1002         TySize = Size;
1003       else {
1004         Ty = NewTy;
1005         TySize = NewTySize;
1006       }
1007     }
1008 
1009     if (++NumMemOps > Limit)
1010       return false;
1011 
1012     MemOps.push_back(Ty);
1013     Size -= TySize;
1014   }
1015 
1016   return true;
1017 }
1018 
1019 static Type *getTypeForLLT(LLT Ty, LLVMContext &C) {
1020   if (Ty.isVector())
1021     return FixedVectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
1022                                 Ty.getNumElements());
1023   return IntegerType::get(C, Ty.getSizeInBits());
1024 }
1025 
1026 // Get a vectorized representation of the memset value operand, GISel edition.
1027 static Register getMemsetValue(Register Val, LLT Ty, MachineIRBuilder &MIB) {
1028   MachineRegisterInfo &MRI = *MIB.getMRI();
1029   unsigned NumBits = Ty.getScalarSizeInBits();
1030   auto ValVRegAndVal = getConstantVRegValWithLookThrough(Val, MRI);
1031   if (!Ty.isVector() && ValVRegAndVal) {
1032     unsigned KnownVal = ValVRegAndVal->Value;
1033     APInt Scalar = APInt(8, KnownVal);
1034     APInt SplatVal = APInt::getSplat(NumBits, Scalar);
1035     return MIB.buildConstant(Ty, SplatVal).getReg(0);
1036   }
1037 
1038   // Extend the byte value to the larger type, and then multiply by a magic
1039   // value 0x010101... in order to replicate it across every byte.
1040   // Unless it's zero, in which case just emit a larger G_CONSTANT 0.
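  // For example, replicating the byte value 0xAB into an s32:
  //   0x000000AB * 0x01010101 = 0xABABABAB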
1041   if (ValVRegAndVal && ValVRegAndVal->Value == 0) {
1042     return MIB.buildConstant(Ty, 0).getReg(0);
1043   }
1044 
1045   LLT ExtType = Ty.getScalarType();
1046   auto ZExt = MIB.buildZExtOrTrunc(ExtType, Val);
1047   if (NumBits > 8) {
1048     APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
1049     auto MagicMI = MIB.buildConstant(ExtType, Magic);
1050     Val = MIB.buildMul(ExtType, ZExt, MagicMI).getReg(0);
1051   }
1052 
1053   // For vector types create a G_BUILD_VECTOR.
1054   if (Ty.isVector())
1055     Val = MIB.buildSplatVector(Ty, Val).getReg(0);
1056 
1057   return Val;
1058 }
1059 
1060 bool CombinerHelper::optimizeMemset(MachineInstr &MI, Register Dst,
1061                                     Register Val, unsigned KnownLen,
1062                                     Align Alignment, bool IsVolatile) {
1063   auto &MF = *MI.getParent()->getParent();
1064   const auto &TLI = *MF.getSubtarget().getTargetLowering();
1065   auto &DL = MF.getDataLayout();
1066   LLVMContext &C = MF.getFunction().getContext();
1067 
1068   assert(KnownLen != 0 && "Have a zero length memset length!");
1069 
1070   bool DstAlignCanChange = false;
1071   MachineFrameInfo &MFI = MF.getFrameInfo();
1072   bool OptSize = shouldLowerMemFuncForSize(MF);
1073 
1074   MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
1075   if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
1076     DstAlignCanChange = true;
1077 
1078   unsigned Limit = TLI.getMaxStoresPerMemset(OptSize);
1079   std::vector<LLT> MemOps;
1080 
1081   const auto &DstMMO = **MI.memoperands_begin();
1082   MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();
1083 
1084   auto ValVRegAndVal = getConstantVRegValWithLookThrough(Val, MRI);
1085   bool IsZeroVal = ValVRegAndVal && ValVRegAndVal->Value == 0;
1086 
1087   if (!findGISelOptimalMemOpLowering(MemOps, Limit,
1088                                      MemOp::Set(KnownLen, DstAlignCanChange,
1089                                                 Alignment,
1090                                                 /*IsZeroMemset=*/IsZeroVal,
1091                                                 /*IsVolatile=*/IsVolatile),
1092                                      DstPtrInfo.getAddrSpace(), ~0u,
1093                                      MF.getFunction().getAttributes(), TLI))
1094     return false;
1095 
1096   if (DstAlignCanChange) {
1097     // Get an estimate of the type from the LLT.
1098     Type *IRTy = getTypeForLLT(MemOps[0], C);
1099     Align NewAlign = DL.getABITypeAlign(IRTy);
1100     if (NewAlign > Alignment) {
1101       Alignment = NewAlign;
1102       unsigned FI = FIDef->getOperand(1).getIndex();
1103       // Give the stack frame object a larger alignment if needed.
1104       if (MFI.getObjectAlign(FI) < Alignment)
1105         MFI.setObjectAlignment(FI, Alignment);
1106     }
1107   }
1108 
1109   MachineIRBuilder MIB(MI);
1110   // Find the largest store and generate the bit pattern for it.
1111   LLT LargestTy = MemOps[0];
1112   for (unsigned i = 1; i < MemOps.size(); i++)
1113     if (MemOps[i].getSizeInBits() > LargestTy.getSizeInBits())
1114       LargestTy = MemOps[i];
1115 
1116   // The memset stored value is always defined as an s8, so in order to make it
1117   // work with larger store types we need to repeat the bit pattern across the
1118   // wider type.
1119   Register MemSetValue = getMemsetValue(Val, LargestTy, MIB);
1120 
1121   if (!MemSetValue)
1122     return false;
1123 
1124   // Generate the stores. For each store type in the list, we generate the
1125   // matching store of that type to the destination address.
1126   LLT PtrTy = MRI.getType(Dst);
1127   unsigned DstOff = 0;
1128   unsigned Size = KnownLen;
1129   for (unsigned I = 0; I < MemOps.size(); I++) {
1130     LLT Ty = MemOps[I];
1131     unsigned TySize = Ty.getSizeInBytes();
1132     if (TySize > Size) {
1133       // Issuing an unaligned load / store pair that overlaps with the previous
1134       // pair. Adjust the offset accordingly.
1135       assert(I == MemOps.size() - 1 && I != 0);
1136       DstOff -= TySize - Size;
1137     }
1138 
    // If this store is smaller than the largest store, see whether we can get
    // the smaller value for free with a truncate.
1141     Register Value = MemSetValue;
1142     if (Ty.getSizeInBits() < LargestTy.getSizeInBits()) {
1143       MVT VT = getMVTForLLT(Ty);
1144       MVT LargestVT = getMVTForLLT(LargestTy);
1145       if (!LargestTy.isVector() && !Ty.isVector() &&
1146           TLI.isTruncateFree(LargestVT, VT))
1147         Value = MIB.buildTrunc(Ty, MemSetValue).getReg(0);
1148       else
1149         Value = getMemsetValue(Val, Ty, MIB);
1150       if (!Value)
1151         return false;
1152     }
1153 
1154     auto *StoreMMO =
1155         MF.getMachineMemOperand(&DstMMO, DstOff, Ty.getSizeInBytes());
1156 
1157     Register Ptr = Dst;
1158     if (DstOff != 0) {
1159       auto Offset =
1160           MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), DstOff);
1161       Ptr = MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0);
1162     }
1163 
1164     MIB.buildStore(Value, Ptr, *StoreMMO);
1165     DstOff += Ty.getSizeInBytes();
1166     Size -= TySize;
1167   }
1168 
1169   MI.eraseFromParent();
1170   return true;
1171 }
1172 
1173 bool CombinerHelper::optimizeMemcpy(MachineInstr &MI, Register Dst,
1174                                     Register Src, unsigned KnownLen,
1175                                     Align DstAlign, Align SrcAlign,
1176                                     bool IsVolatile) {
1177   auto &MF = *MI.getParent()->getParent();
1178   const auto &TLI = *MF.getSubtarget().getTargetLowering();
1179   auto &DL = MF.getDataLayout();
1180   LLVMContext &C = MF.getFunction().getContext();
1181 
1182   assert(KnownLen != 0 && "Have a zero length memcpy length!");
1183 
1184   bool DstAlignCanChange = false;
1185   MachineFrameInfo &MFI = MF.getFrameInfo();
1186   bool OptSize = shouldLowerMemFuncForSize(MF);
1187   Align Alignment = commonAlignment(DstAlign, SrcAlign);
1188 
1189   MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
1190   if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
1191     DstAlignCanChange = true;
1192 
1193   // FIXME: infer better src pointer alignment like SelectionDAG does here.
1194   // FIXME: also use the equivalent of isMemSrcFromConstant and alwaysinlining
1195   // if the memcpy is in a tail call position.
1196 
1197   unsigned Limit = TLI.getMaxStoresPerMemcpy(OptSize);
1198   std::vector<LLT> MemOps;
1199 
1200   const auto &DstMMO = **MI.memoperands_begin();
1201   const auto &SrcMMO = **std::next(MI.memoperands_begin());
1202   MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();
1203   MachinePointerInfo SrcPtrInfo = SrcMMO.getPointerInfo();
1204 
1205   if (!findGISelOptimalMemOpLowering(
1206           MemOps, Limit,
1207           MemOp::Copy(KnownLen, DstAlignCanChange, Alignment, SrcAlign,
1208                       IsVolatile),
1209           DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
1210           MF.getFunction().getAttributes(), TLI))
1211     return false;
1212 
1213   if (DstAlignCanChange) {
1214     // Get an estimate of the type from the LLT.
1215     Type *IRTy = getTypeForLLT(MemOps[0], C);
1216     Align NewAlign = DL.getABITypeAlign(IRTy);
1217 
1218     // Don't promote to an alignment that would require dynamic stack
1219     // realignment.
1220     const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
1221     if (!TRI->needsStackRealignment(MF))
1222       while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign))
1223         NewAlign = NewAlign / 2;
1224 
1225     if (NewAlign > Alignment) {
1226       Alignment = NewAlign;
1227       unsigned FI = FIDef->getOperand(1).getIndex();
1228       // Give the stack frame object a larger alignment if needed.
1229       if (MFI.getObjectAlign(FI) < Alignment)
1230         MFI.setObjectAlignment(FI, Alignment);
1231     }
1232   }
1233 
1234   LLVM_DEBUG(dbgs() << "Inlining memcpy: " << MI << " into loads & stores\n");
1235 
1236   MachineIRBuilder MIB(MI);
1237   // Now we need to emit a pair of load and stores for each of the types we've
1238   // collected. I.e. for each type, generate a load from the source pointer of
1239   // that type width, and then generate a corresponding store to the dest buffer
  // of the value loaded. This can result in a sequence of loads and stores of
  // mixed types, depending on what the target specifies as good types to use.
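  //
  // For example (illustrative, assuming 64-bit pointers), a 12-byte copy for
  // which the target picks {s64, s32} expands to roughly:
  //   %v0:_(s64) = G_LOAD %src(p0) :: (load 8)
  //   G_STORE %v0(s64), %dst(p0) :: (store 8)
  //   %off:_(s64) = G_CONSTANT i64 8
  //   %sp:_(p0) = G_PTR_ADD %src, %off
  //   %v1:_(s32) = G_LOAD %sp(p0) :: (load 4)
  //   %dp:_(p0) = G_PTR_ADD %dst, %off
  //   G_STORE %v1(s32), %dp(p0) :: (store 4)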
1242   unsigned CurrOffset = 0;
1243   LLT PtrTy = MRI.getType(Src);
1244   unsigned Size = KnownLen;
1245   for (auto CopyTy : MemOps) {
    // Issuing an unaligned load / store pair that overlaps with the previous
    // pair. Adjust the offset accordingly.
1248     if (CopyTy.getSizeInBytes() > Size)
1249       CurrOffset -= CopyTy.getSizeInBytes() - Size;
1250 
1251     // Construct MMOs for the accesses.
1252     auto *LoadMMO =
1253         MF.getMachineMemOperand(&SrcMMO, CurrOffset, CopyTy.getSizeInBytes());
1254     auto *StoreMMO =
1255         MF.getMachineMemOperand(&DstMMO, CurrOffset, CopyTy.getSizeInBytes());
1256 
1257     // Create the load.
1258     Register LoadPtr = Src;
1259     Register Offset;
1260     if (CurrOffset != 0) {
1261       Offset = MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset)
1262                    .getReg(0);
1263       LoadPtr = MIB.buildPtrAdd(PtrTy, Src, Offset).getReg(0);
1264     }
1265     auto LdVal = MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO);
1266 
1267     // Create the store.
1268     Register StorePtr =
1269         CurrOffset == 0 ? Dst : MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0);
1270     MIB.buildStore(LdVal, StorePtr, *StoreMMO);
1271     CurrOffset += CopyTy.getSizeInBytes();
1272     Size -= CopyTy.getSizeInBytes();
1273   }
1274 
1275   MI.eraseFromParent();
1276   return true;
1277 }
1278 
1279 bool CombinerHelper::optimizeMemmove(MachineInstr &MI, Register Dst,
1280                                      Register Src, unsigned KnownLen,
1281                                      Align DstAlign, Align SrcAlign,
1282                                      bool IsVolatile) {
1283   auto &MF = *MI.getParent()->getParent();
1284   const auto &TLI = *MF.getSubtarget().getTargetLowering();
1285   auto &DL = MF.getDataLayout();
1286   LLVMContext &C = MF.getFunction().getContext();
1287 
1288   assert(KnownLen != 0 && "Have a zero length memmove length!");
1289 
1290   bool DstAlignCanChange = false;
1291   MachineFrameInfo &MFI = MF.getFrameInfo();
1292   bool OptSize = shouldLowerMemFuncForSize(MF);
1293   Align Alignment = commonAlignment(DstAlign, SrcAlign);
1294 
1295   MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
1296   if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
1297     DstAlignCanChange = true;
1298 
1299   unsigned Limit = TLI.getMaxStoresPerMemmove(OptSize);
1300   std::vector<LLT> MemOps;
1301 
1302   const auto &DstMMO = **MI.memoperands_begin();
1303   const auto &SrcMMO = **std::next(MI.memoperands_begin());
1304   MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();
1305   MachinePointerInfo SrcPtrInfo = SrcMMO.getPointerInfo();
1306 
1307   // FIXME: SelectionDAG always passes false for 'AllowOverlap', apparently due
  // to a bug in its findOptimalMemOpLowering implementation. For now do the
1309   // same thing here.
1310   if (!findGISelOptimalMemOpLowering(
1311           MemOps, Limit,
1312           MemOp::Copy(KnownLen, DstAlignCanChange, Alignment, SrcAlign,
1313                       /*IsVolatile*/ true),
1314           DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
1315           MF.getFunction().getAttributes(), TLI))
1316     return false;
1317 
1318   if (DstAlignCanChange) {
1319     // Get an estimate of the type from the LLT.
1320     Type *IRTy = getTypeForLLT(MemOps[0], C);
1321     Align NewAlign = DL.getABITypeAlign(IRTy);
1322 
1323     // Don't promote to an alignment that would require dynamic stack
1324     // realignment.
1325     const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
1326     if (!TRI->needsStackRealignment(MF))
1327       while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign))
1328         NewAlign = NewAlign / 2;
1329 
1330     if (NewAlign > Alignment) {
1331       Alignment = NewAlign;
1332       unsigned FI = FIDef->getOperand(1).getIndex();
1333       // Give the stack frame object a larger alignment if needed.
1334       if (MFI.getObjectAlign(FI) < Alignment)
1335         MFI.setObjectAlignment(FI, Alignment);
1336     }
1337   }
1338 
1339   LLVM_DEBUG(dbgs() << "Inlining memmove: " << MI << " into loads & stores\n");
1340 
1341   MachineIRBuilder MIB(MI);
1342   // Memmove requires that all loads are performed before issuing any stores.
1343   // Apart from that, this loop is pretty much doing the same thing as the
1344   // memcpy codegen function.
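       // For example, a two-chunk inline memmove is emitted (schematically) as:
       //   %v0 = G_LOAD %src
       //   %v1 = G_LOAD (G_PTR_ADD %src, off)
       //   G_STORE %v0, %dst
       //   G_STORE %v1, (G_PTR_ADD %dst, off)
       // so that no store can clobber bytes that still need to be loaded.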
1345   unsigned CurrOffset = 0;
1346   LLT PtrTy = MRI.getType(Src);
1347   SmallVector<Register, 16> LoadVals;
1348   for (auto CopyTy : MemOps) {
1349     // Construct MMO for the load.
1350     auto *LoadMMO =
1351         MF.getMachineMemOperand(&SrcMMO, CurrOffset, CopyTy.getSizeInBytes());
1352 
1353     // Create the load.
1354     Register LoadPtr = Src;
1355     if (CurrOffset != 0) {
1356       auto Offset =
1357           MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset);
1358       LoadPtr = MIB.buildPtrAdd(PtrTy, Src, Offset).getReg(0);
1359     }
1360     LoadVals.push_back(MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO).getReg(0));
1361     CurrOffset += CopyTy.getSizeInBytes();
1362   }
1363 
1364   CurrOffset = 0;
1365   for (unsigned I = 0; I < MemOps.size(); ++I) {
1366     LLT CopyTy = MemOps[I];
1367     // Now store the values loaded.
1368     auto *StoreMMO =
1369         MF.getMachineMemOperand(&DstMMO, CurrOffset, CopyTy.getSizeInBytes());
1370 
1371     Register StorePtr = Dst;
1372     if (CurrOffset != 0) {
1373       auto Offset =
1374           MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset);
1375       StorePtr = MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0);
1376     }
1377     MIB.buildStore(LoadVals[I], StorePtr, *StoreMMO);
1378     CurrOffset += CopyTy.getSizeInBytes();
1379   }
1380   MI.eraseFromParent();
1381   return true;
1382 }
1383 
1384 bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) {
1385   const unsigned Opc = MI.getOpcode();
1386   // This combine is fairly complex, so it's not written with a separate
1387   // matcher function.
1388   assert((Opc == TargetOpcode::G_MEMCPY || Opc == TargetOpcode::G_MEMMOVE ||
1389           Opc == TargetOpcode::G_MEMSET) && "Expected a memcpy-like instruction");
1390 
1391   auto MMOIt = MI.memoperands_begin();
1392   const MachineMemOperand *MemOp = *MMOIt;
1393   bool IsVolatile = MemOp->isVolatile();
1394   // Don't try to optimize volatile.
1395   if (IsVolatile)
1396     return false;
1397 
1398   Align DstAlign = MemOp->getBaseAlign();
1399   Align SrcAlign;
1400   Register Dst = MI.getOperand(0).getReg();
1401   Register Src = MI.getOperand(1).getReg();
1402   Register Len = MI.getOperand(2).getReg();
1403 
1404   if (Opc != TargetOpcode::G_MEMSET) {
1405     assert(std::next(MMOIt) != MI.memoperands_end() && "Expected a second MMO");
1406     MemOp = *(++MMOIt);
1407     SrcAlign = MemOp->getBaseAlign();
1408   }
1409 
1410   // See if this is a constant-length copy.
1411   auto LenVRegAndVal = getConstantVRegValWithLookThrough(Len, MRI);
1412   if (!LenVRegAndVal)
1413     return false; // Leave it to the legalizer to lower it to a libcall.
1414   unsigned KnownLen = LenVRegAndVal->Value;
1415 
1416   if (KnownLen == 0) {
1417     MI.eraseFromParent();
1418     return true;
1419   }
1420 
1421   if (MaxLen && KnownLen > MaxLen)
1422     return false;
1423 
1424   if (Opc == TargetOpcode::G_MEMCPY)
1425     return optimizeMemcpy(MI, Dst, Src, KnownLen, DstAlign, SrcAlign, IsVolatile);
1426   if (Opc == TargetOpcode::G_MEMMOVE)
1427     return optimizeMemmove(MI, Dst, Src, KnownLen, DstAlign, SrcAlign, IsVolatile);
1428   if (Opc == TargetOpcode::G_MEMSET)
1429     return optimizeMemset(MI, Dst, Src, KnownLen, DstAlign, IsVolatile);
1430   return false;
1431 }
1432 
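     // Constant fold a unary floating-point opcode applied to Op. Returns None
     // when Op is not defined by a G_FCONSTANT.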
1433 static Optional<APFloat> constantFoldFpUnary(unsigned Opcode, LLT DstTy,
1434                                              const Register Op,
1435                                              const MachineRegisterInfo &MRI) {
1436   const ConstantFP *MaybeCst = getConstantFPVRegVal(Op, MRI);
1437   if (!MaybeCst)
1438     return None;
1439 
1440   APFloat V = MaybeCst->getValueAPF();
1441   switch (Opcode) {
1442   default:
1443     llvm_unreachable("Unexpected opcode!");
1444   case TargetOpcode::G_FNEG: {
1445     V.changeSign();
1446     return V;
1447   }
1448   case TargetOpcode::G_FABS: {
1449     V.clearSign();
1450     return V;
1451   }
1452   case TargetOpcode::G_FPTRUNC:
1453     break;
1454   case TargetOpcode::G_FSQRT: {
1455     bool Unused;
1456     V.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &Unused);
1457     V = APFloat(sqrt(V.convertToDouble()));
1458     break;
1459   }
1460   case TargetOpcode::G_FLOG2: {
1461     bool Unused;
1462     V.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &Unused);
1463     V = APFloat(log2(V.convertToDouble()));
1464     break;
1465   }
1466   }
1467   // Convert the `APFloat` to the IEEE type appropriate for `DstTy`, since
1468   // `buildFConstant` asserts on a size mismatch. Only `G_FPTRUNC`, `G_FSQRT`,
1469   // and `G_FLOG2` reach here.
1470   bool Unused;
1471   V.convert(getFltSemanticForLLT(DstTy), APFloat::rmNearestTiesToEven, &Unused);
1472   return V;
1473 }
1474 
1475 bool CombinerHelper::matchCombineConstantFoldFpUnary(MachineInstr &MI,
1476                                                      Optional<APFloat> &Cst) {
1477   Register DstReg = MI.getOperand(0).getReg();
1478   Register SrcReg = MI.getOperand(1).getReg();
1479   LLT DstTy = MRI.getType(DstReg);
1480   Cst = constantFoldFpUnary(MI.getOpcode(), DstTy, SrcReg, MRI);
1481   return Cst.hasValue();
1482 }
1483 
1484 bool CombinerHelper::applyCombineConstantFoldFpUnary(MachineInstr &MI,
1485                                                      Optional<APFloat> &Cst) {
1486   assert(Cst.hasValue() && "Optional is unexpectedly empty!");
1487   Builder.setInstrAndDebugLoc(MI);
1488   MachineFunction &MF = Builder.getMF();
1489   auto *FPVal = ConstantFP::get(MF.getFunction().getContext(), *Cst);
1490   Register DstReg = MI.getOperand(0).getReg();
1491   Builder.buildFConstant(DstReg, *FPVal);
1492   MI.eraseFromParent();
1493   return true;
1494 }
1495 
1496 bool CombinerHelper::matchPtrAddImmedChain(MachineInstr &MI,
1497                                            PtrAddChain &MatchInfo) {
1498   // We're trying to match the following pattern:
1499   //   %t1 = G_PTR_ADD %base, G_CONSTANT imm1
1500   //   %root = G_PTR_ADD %t1, G_CONSTANT imm2
1501   // -->
1502   //   %root = G_PTR_ADD %base, G_CONSTANT (imm1 + imm2)
1503 
1504   if (MI.getOpcode() != TargetOpcode::G_PTR_ADD)
1505     return false;
1506 
1507   Register Add2 = MI.getOperand(1).getReg();
1508   Register Imm1 = MI.getOperand(2).getReg();
1509   auto MaybeImmVal = getConstantVRegValWithLookThrough(Imm1, MRI);
1510   if (!MaybeImmVal)
1511     return false;
1512 
1513   MachineInstr *Add2Def = MRI.getUniqueVRegDef(Add2);
1514   if (!Add2Def || Add2Def->getOpcode() != TargetOpcode::G_PTR_ADD)
1515     return false;
1516 
1517   Register Base = Add2Def->getOperand(1).getReg();
1518   Register Imm2 = Add2Def->getOperand(2).getReg();
1519   auto MaybeImm2Val = getConstantVRegValWithLookThrough(Imm2, MRI);
1520   if (!MaybeImm2Val)
1521     return false;
1522 
1523   // Pass the combined immediate to the apply function.
1524   MatchInfo.Imm = MaybeImmVal->Value + MaybeImm2Val->Value;
1525   MatchInfo.Base = Base;
1526   return true;
1527 }
1528 
1529 bool CombinerHelper::applyPtrAddImmedChain(MachineInstr &MI,
1530                                            PtrAddChain &MatchInfo) {
1531   assert(MI.getOpcode() == TargetOpcode::G_PTR_ADD && "Expected G_PTR_ADD");
1532   MachineIRBuilder MIB(MI);
1533   LLT OffsetTy = MRI.getType(MI.getOperand(2).getReg());
1534   auto NewOffset = MIB.buildConstant(OffsetTy, MatchInfo.Imm);
1535   Observer.changingInstr(MI);
1536   MI.getOperand(1).setReg(MatchInfo.Base);
1537   MI.getOperand(2).setReg(NewOffset.getReg(0));
1538   Observer.changedInstr(MI);
1539   return true;
1540 }
1541 
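     // Fold a multiply by a power-of-two constant into a left shift:
     //   G_MUL x, (1 << N)  -->  G_SHL x, N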
1542 bool CombinerHelper::matchCombineMulToShl(MachineInstr &MI,
1543                                           unsigned &ShiftVal) {
1544   assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
1545   auto MaybeImmVal =
1546       getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
1547   if (!MaybeImmVal || !isPowerOf2_64(MaybeImmVal->Value))
1548     return false;
1549   ShiftVal = Log2_64(MaybeImmVal->Value);
1550   return true;
1551 }
1552 
1553 bool CombinerHelper::applyCombineMulToShl(MachineInstr &MI,
1554                                           unsigned &ShiftVal) {
1555   assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
1556   MachineIRBuilder MIB(MI);
1557   LLT ShiftTy = MRI.getType(MI.getOperand(0).getReg());
1558   auto ShiftCst = MIB.buildConstant(ShiftTy, ShiftVal);
1559   Observer.changingInstr(MI);
1560   MI.setDesc(MIB.getTII().get(TargetOpcode::G_SHL));
1561   MI.getOperand(2).setReg(ShiftCst.getReg(0));
1562   Observer.changedInstr(MI);
1563   return true;
1564 }
1565 
1566 // shl ([sza]ext x), y => zext (shl x, y), if shift does not overflow source
1567 bool CombinerHelper::matchCombineShlOfExtend(MachineInstr &MI,
1568                                              RegisterImmPair &MatchData) {
1569   assert(MI.getOpcode() == TargetOpcode::G_SHL && KB);
1570 
1571   Register LHS = MI.getOperand(1).getReg();
1572 
1573   Register ExtSrc;
1574   if (!mi_match(LHS, MRI, m_GAnyExt(m_Reg(ExtSrc))) &&
1575       !mi_match(LHS, MRI, m_GZExt(m_Reg(ExtSrc))) &&
1576       !mi_match(LHS, MRI, m_GSExt(m_Reg(ExtSrc))))
1577     return false;
1578 
1579   // TODO: Should handle vector splat.
1580   Register RHS = MI.getOperand(2).getReg();
1581   auto MaybeShiftAmtVal = getConstantVRegValWithLookThrough(RHS, MRI);
1582   if (!MaybeShiftAmtVal)
1583     return false;
1584 
1585   if (LI) {
1586     LLT SrcTy = MRI.getType(ExtSrc);
1587 
1588     // We only really care about the legality of the shifted value. We can
1589     // pick any type for the constant shift amount, so ask the target what to
1590     // use. Otherwise we would have to guess and hope it is reported as legal.
1591     LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(SrcTy);
1592     if (!isLegalOrBeforeLegalizer({TargetOpcode::G_SHL, {SrcTy, ShiftAmtTy}}))
1593       return false;
1594   }
1595 
1596   int64_t ShiftAmt = MaybeShiftAmtVal->Value;
1597   MatchData.Reg = ExtSrc;
1598   MatchData.Imm = ShiftAmt;
1599 
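       // The narrowed shift is only equivalent if it cannot shift set bits out
       // of the unextended source, i.e. if the top ShiftAmt bits of ExtSrc are
       // known to be zero.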
1600   unsigned MinLeadingZeros = KB->getKnownZeroes(ExtSrc).countLeadingOnes();
1601   return MinLeadingZeros >= ShiftAmt;
1602 }
1603 
1604 bool CombinerHelper::applyCombineShlOfExtend(MachineInstr &MI,
1605                                              const RegisterImmPair &MatchData) {
1606   Register ExtSrcReg = MatchData.Reg;
1607   int64_t ShiftAmtVal = MatchData.Imm;
1608 
1609   LLT ExtSrcTy = MRI.getType(ExtSrcReg);
1610   Builder.setInstrAndDebugLoc(MI);
1611   auto ShiftAmt = Builder.buildConstant(ExtSrcTy, ShiftAmtVal);
1612   auto NarrowShift =
1613       Builder.buildShl(ExtSrcTy, ExtSrcReg, ShiftAmt, MI.getFlags());
1614   Builder.buildZExt(MI.getOperand(0), NarrowShift);
1615   MI.eraseFromParent();
1616   return true;
1617 }
1618 
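     // Look through any chain of G_BITCASTs and return the underlying register.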
1619 static Register peekThroughBitcast(Register Reg,
1620                                    const MachineRegisterInfo &MRI) {
1621   while (mi_match(Reg, MRI, m_GBitcast(m_Reg(Reg))))
1622     ;
1623 
1624   return Reg;
1625 }
1626 
1627 bool CombinerHelper::matchCombineUnmergeMergeToPlainValues(
1628     MachineInstr &MI, SmallVectorImpl<Register> &Operands) {
1629   assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1630          "Expected an unmerge");
1631   Register SrcReg =
1632       peekThroughBitcast(MI.getOperand(MI.getNumOperands() - 1).getReg(), MRI);
1633 
1634   MachineInstr *SrcInstr = MRI.getVRegDef(SrcReg);
1635   if (SrcInstr->getOpcode() != TargetOpcode::G_MERGE_VALUES &&
1636       SrcInstr->getOpcode() != TargetOpcode::G_BUILD_VECTOR &&
1637       SrcInstr->getOpcode() != TargetOpcode::G_CONCAT_VECTORS)
1638     return false;
1639 
1640   // Check the source type of the merge.
1641   LLT SrcMergeTy = MRI.getType(SrcInstr->getOperand(1).getReg());
1642   LLT Dst0Ty = MRI.getType(MI.getOperand(0).getReg());
1643   bool SameSize = Dst0Ty.getSizeInBits() == SrcMergeTy.getSizeInBits();
1644   if (SrcMergeTy != Dst0Ty && !SameSize)
1645     return false;
1646   // They are the same now (modulo a bitcast).
1647   // We can collect all the src registers.
1648   for (unsigned Idx = 1, EndIdx = SrcInstr->getNumOperands(); Idx != EndIdx;
1649        ++Idx)
1650     Operands.push_back(SrcInstr->getOperand(Idx).getReg());
1651   return true;
1652 }
1653 
1654 bool CombinerHelper::applyCombineUnmergeMergeToPlainValues(
1655     MachineInstr &MI, SmallVectorImpl<Register> &Operands) {
1656   assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1657          "Expected an unmerge");
1658   assert((MI.getNumOperands() - 1 == Operands.size()) &&
1659          "Not enough operands to replace all defs");
1660   unsigned NumElems = MI.getNumOperands() - 1;
1661 
1662   LLT SrcTy = MRI.getType(Operands[0]);
1663   LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
1664   bool CanReuseInputDirectly = DstTy == SrcTy;
1665   Builder.setInstrAndDebugLoc(MI);
1666   for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
1667     Register DstReg = MI.getOperand(Idx).getReg();
1668     Register SrcReg = Operands[Idx];
1669     if (CanReuseInputDirectly)
1670       replaceRegWith(MRI, DstReg, SrcReg);
1671     else
1672       Builder.buildCast(DstReg, SrcReg);
1673   }
1674   MI.eraseFromParent();
1675   return true;
1676 }
1677 
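     // Match a G_UNMERGE_VALUES whose source is a G_CONSTANT or G_FCONSTANT,
     // e.g.
     //   %lo:_(s32), %hi:_(s32) = G_UNMERGE_VALUES %cst:_(s64)
     // and split the constant into one APInt per destination, least
     // significant piece first.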
1678 bool CombinerHelper::matchCombineUnmergeConstant(MachineInstr &MI,
1679                                                  SmallVectorImpl<APInt> &Csts) {
1680   unsigned SrcIdx = MI.getNumOperands() - 1;
1681   Register SrcReg = MI.getOperand(SrcIdx).getReg();
1682   MachineInstr *SrcInstr = MRI.getVRegDef(SrcReg);
1683   if (SrcInstr->getOpcode() != TargetOpcode::G_CONSTANT &&
1684       SrcInstr->getOpcode() != TargetOpcode::G_FCONSTANT)
1685     return false;
1686   // Break down the big constant into smaller ones.
1687   const MachineOperand &CstVal = SrcInstr->getOperand(1);
1688   APInt Val = SrcInstr->getOpcode() == TargetOpcode::G_CONSTANT
1689                   ? CstVal.getCImm()->getValue()
1690                   : CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
1691 
1692   LLT Dst0Ty = MRI.getType(MI.getOperand(0).getReg());
1693   unsigned ShiftAmt = Dst0Ty.getSizeInBits();
1694   // Unmerge a constant.
1695   for (unsigned Idx = 0; Idx != SrcIdx; ++Idx) {
1696     Csts.emplace_back(Val.trunc(ShiftAmt));
1697     Val = Val.lshr(ShiftAmt);
1698   }
1699 
1700   return true;
1701 }
1702 
1703 bool CombinerHelper::applyCombineUnmergeConstant(MachineInstr &MI,
1704                                                  SmallVectorImpl<APInt> &Csts) {
1705   assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1706          "Expected an unmerge");
1707   assert((MI.getNumOperands() - 1 == Csts.size()) &&
1708          "Not enough operands to replace all defs");
1709   unsigned NumElems = MI.getNumOperands() - 1;
1710   Builder.setInstrAndDebugLoc(MI);
1711   for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
1712     Register DstReg = MI.getOperand(Idx).getReg();
1713     Builder.buildConstant(DstReg, Csts[Idx]);
1714   }
1715 
1716   MI.eraseFromParent();
1717   return true;
1718 }
1719 
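     // If only the first result of a G_UNMERGE_VALUES is actually used, the
     // unmerge can be replaced with a G_TRUNC of the source to that result's
     // type.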
1720 bool CombinerHelper::matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) {
1721   assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1722          "Expected an unmerge");
1723   // Check that all the lanes are dead except the first one.
1724   for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {
1725     if (!MRI.use_nodbg_empty(MI.getOperand(Idx).getReg()))
1726       return false;
1727   }
1728   return true;
1729 }
1730 
1731 bool CombinerHelper::applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) {
1732   Builder.setInstrAndDebugLoc(MI);
1733   Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();
1734   // Truncating a vector is going to truncate every single lane,
1735   // whereas we want the full low bits.
1736   // Do the operation on a scalar instead.
1737   LLT SrcTy = MRI.getType(SrcReg);
1738   if (SrcTy.isVector())
1739     SrcReg =
1740         Builder.buildCast(LLT::scalar(SrcTy.getSizeInBits()), SrcReg).getReg(0);
1741 
1742   Register Dst0Reg = MI.getOperand(0).getReg();
1743   LLT Dst0Ty = MRI.getType(Dst0Reg);
1744   if (Dst0Ty.isVector()) {
1745     auto MIB = Builder.buildTrunc(LLT::scalar(Dst0Ty.getSizeInBits()), SrcReg);
1746     Builder.buildCast(Dst0Reg, MIB);
1747   } else
1748     Builder.buildTrunc(Dst0Reg, SrcReg);
1749   MI.eraseFromParent();
1750   return true;
1751 }
1752 
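     // Match an unmerge of a G_ZEXT whose source fits entirely in the first
     // destination: the first result becomes a zext (or copy) of that source
     // and every other result is known to be zero.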
1753 bool CombinerHelper::matchCombineUnmergeZExtToZExt(MachineInstr &MI) {
1754   assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1755          "Expected an unmerge");
1756   Register Dst0Reg = MI.getOperand(0).getReg();
1757   LLT Dst0Ty = MRI.getType(Dst0Reg);
1758   // G_ZEXT on a vector applies to each lane, so it will
1759   // affect all destinations. Therefore we won't be able
1760   // to simplify the unmerge to just the first definition.
1761   if (Dst0Ty.isVector())
1762     return false;
1763   Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();
1764   LLT SrcTy = MRI.getType(SrcReg);
1765   if (SrcTy.isVector())
1766     return false;
1767 
1768   Register ZExtSrcReg;
1769   if (!mi_match(SrcReg, MRI, m_GZExt(m_Reg(ZExtSrcReg))))
1770     return false;
1771 
1772   // Finally we can replace the first definition with
1773   // a zext of the source if the definition is big enough to hold
1774   // all of ZExtSrc's bits.
1775   LLT ZExtSrcTy = MRI.getType(ZExtSrcReg);
1776   return ZExtSrcTy.getSizeInBits() <= Dst0Ty.getSizeInBits();
1777 }
1778 
1779 bool CombinerHelper::applyCombineUnmergeZExtToZExt(MachineInstr &MI) {
1780   assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1781          "Expected an unmerge");
1782 
1783   Register Dst0Reg = MI.getOperand(0).getReg();
1784 
1785   MachineInstr *ZExtInstr =
1786       MRI.getVRegDef(MI.getOperand(MI.getNumDefs()).getReg());
1787   assert(ZExtInstr && ZExtInstr->getOpcode() == TargetOpcode::G_ZEXT &&
1788          "Expecting a G_ZEXT");
1789 
1790   Register ZExtSrcReg = ZExtInstr->getOperand(1).getReg();
1791   LLT Dst0Ty = MRI.getType(Dst0Reg);
1792   LLT ZExtSrcTy = MRI.getType(ZExtSrcReg);
1793 
1794   Builder.setInstrAndDebugLoc(MI);
1795 
1796   if (Dst0Ty.getSizeInBits() > ZExtSrcTy.getSizeInBits()) {
1797     Builder.buildZExt(Dst0Reg, ZExtSrcReg);
1798   } else {
1799     assert(Dst0Ty.getSizeInBits() == ZExtSrcTy.getSizeInBits() &&
1800            "ZExt src doesn't fit in destination");
1801     replaceRegWith(MRI, Dst0Reg, ZExtSrcReg);
1802   }
1803 
1804   Register ZeroReg;
1805   for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {
1806     if (!ZeroReg)
1807       ZeroReg = Builder.buildConstant(Dst0Ty, 0).getReg(0);
1808     replaceRegWith(MRI, MI.getOperand(Idx).getReg(), ZeroReg);
1809   }
1810   MI.eraseFromParent();
1811   return true;
1812 }
1813 
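     // Match a scalar shift by a constant amount of at least half the bit
     // width; such a shift only depends on one half of the input, so it can be
     // rewritten on the unmerged halves (see applyCombineShiftToUnmerge).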
1814 bool CombinerHelper::matchCombineShiftToUnmerge(MachineInstr &MI,
1815                                                 unsigned TargetShiftSize,
1816                                                 unsigned &ShiftVal) {
1817   assert((MI.getOpcode() == TargetOpcode::G_SHL ||
1818           MI.getOpcode() == TargetOpcode::G_LSHR ||
1819           MI.getOpcode() == TargetOpcode::G_ASHR) && "Expected a shift");
1820 
1821   LLT Ty = MRI.getType(MI.getOperand(0).getReg());
1822   if (Ty.isVector()) // TODO: Handle vector types.
1823     return false;
1824 
1825   // Don't narrow further than the requested size.
1826   unsigned Size = Ty.getSizeInBits();
1827   if (Size <= TargetShiftSize)
1828     return false;
1829 
1830   auto MaybeImmVal =
1831     getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
1832   if (!MaybeImmVal)
1833     return false;
1834 
1835   ShiftVal = MaybeImmVal->Value;
1836   return ShiftVal >= Size / 2 && ShiftVal < Size;
1837 }
1838 
1839 bool CombinerHelper::applyCombineShiftToUnmerge(MachineInstr &MI,
1840                                                 const unsigned &ShiftVal) {
1841   Register DstReg = MI.getOperand(0).getReg();
1842   Register SrcReg = MI.getOperand(1).getReg();
1843   LLT Ty = MRI.getType(SrcReg);
1844   unsigned Size = Ty.getSizeInBits();
1845   unsigned HalfSize = Size / 2;
1846   assert(ShiftVal >= HalfSize);
1847 
1848   LLT HalfTy = LLT::scalar(HalfSize);
1849 
1850   Builder.setInstr(MI);
1851   auto Unmerge = Builder.buildUnmerge(HalfTy, SrcReg);
1852   unsigned NarrowShiftAmt = ShiftVal - HalfSize;
1853 
1854   if (MI.getOpcode() == TargetOpcode::G_LSHR) {
1855     Register Narrowed = Unmerge.getReg(1);
1856 
1857     //  dst = G_LSHR s64:x, C for C >= 32
1858     // =>
1859     //   lo, hi = G_UNMERGE_VALUES x
1860     //   dst = G_MERGE_VALUES (G_LSHR hi, C - 32), 0
1861 
1862     if (NarrowShiftAmt != 0) {
1863       Narrowed = Builder.buildLShr(HalfTy, Narrowed,
1864         Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0);
1865     }
1866 
1867     auto Zero = Builder.buildConstant(HalfTy, 0);
1868     Builder.buildMerge(DstReg, { Narrowed, Zero });
1869   } else if (MI.getOpcode() == TargetOpcode::G_SHL) {
1870     Register Narrowed = Unmerge.getReg(0);
1871     //  dst = G_SHL s64:x, C for C >= 32
1872     // =>
1873     //   lo, hi = G_UNMERGE_VALUES x
1874     //   dst = G_MERGE_VALUES 0, (G_SHL hi, C - 32)
1875     if (NarrowShiftAmt != 0) {
1876       Narrowed = Builder.buildShl(HalfTy, Narrowed,
1877         Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0);
1878     }
1879 
1880     auto Zero = Builder.buildConstant(HalfTy, 0);
1881     Builder.buildMerge(DstReg, { Zero, Narrowed });
1882   } else {
1883     assert(MI.getOpcode() == TargetOpcode::G_ASHR);
1884     auto Hi = Builder.buildAShr(
1885       HalfTy, Unmerge.getReg(1),
1886       Builder.buildConstant(HalfTy, HalfSize - 1));
1887 
1888     if (ShiftVal == HalfSize) {
1889       // (G_ASHR i64:x, 32) ->
1890       //   G_MERGE_VALUES hi_32(x), (G_ASHR hi_32(x), 31)
1891       Builder.buildMerge(DstReg, { Unmerge.getReg(1), Hi });
1892     } else if (ShiftVal == Size - 1) {
1893       // Don't need a second shift.
1894       // (G_ASHR i64:x, 63) ->
1895       //   %narrowed = (G_ASHR hi_32(x), 31)
1896       //   G_MERGE_VALUES %narrowed, %narrowed
1897       Builder.buildMerge(DstReg, { Hi, Hi });
1898     } else {
1899       auto Lo = Builder.buildAShr(
1900         HalfTy, Unmerge.getReg(1),
1901         Builder.buildConstant(HalfTy, ShiftVal - HalfSize));
1902 
1903       // (G_ASHR i64:x, C) -> for C >= 32
1904       //   G_MERGE_VALUES (G_ASHR hi_32(x), C - 32), (G_ASHR hi_32(x), 31)
1905       Builder.buildMerge(DstReg, { Lo, Hi });
1906     }
1907   }
1908 
1909   MI.eraseFromParent();
1910   return true;
1911 }
1912 
1913 bool CombinerHelper::tryCombineShiftToUnmerge(MachineInstr &MI,
1914                                               unsigned TargetShiftAmount) {
1915   unsigned ShiftAmt;
1916   if (matchCombineShiftToUnmerge(MI, TargetShiftAmount, ShiftAmt)) {
1917     applyCombineShiftToUnmerge(MI, ShiftAmt);
1918     return true;
1919   }
1920 
1921   return false;
1922 }
1923 
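     // Fold G_INTTOPTR (G_PTRTOINT x) -> x when x already has the destination
     // pointer type.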
1924 bool CombinerHelper::matchCombineI2PToP2I(MachineInstr &MI, Register &Reg) {
1925   assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
1926   Register DstReg = MI.getOperand(0).getReg();
1927   LLT DstTy = MRI.getType(DstReg);
1928   Register SrcReg = MI.getOperand(1).getReg();
1929   return mi_match(SrcReg, MRI,
1930                   m_GPtrToInt(m_all_of(m_SpecificType(DstTy), m_Reg(Reg))));
1931 }
1932 
1933 bool CombinerHelper::applyCombineI2PToP2I(MachineInstr &MI, Register &Reg) {
1934   assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
1935   Register DstReg = MI.getOperand(0).getReg();
1936   Builder.setInstr(MI);
1937   Builder.buildCopy(DstReg, Reg);
1938   MI.eraseFromParent();
1939   return true;
1940 }
1941 
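     // Fold G_PTRTOINT (G_INTTOPTR x) -> x, zero-extending or truncating x if
     // the integer widths differ.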
1942 bool CombinerHelper::matchCombineP2IToI2P(MachineInstr &MI, Register &Reg) {
1943   assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT");
1944   Register SrcReg = MI.getOperand(1).getReg();
1945   return mi_match(SrcReg, MRI, m_GIntToPtr(m_Reg(Reg)));
1946 }
1947 
1948 bool CombinerHelper::applyCombineP2IToI2P(MachineInstr &MI, Register &Reg) {
1949   assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT");
1950   Register DstReg = MI.getOperand(0).getReg();
1951   Builder.setInstr(MI);
1952   Builder.buildZExtOrTrunc(DstReg, Reg);
1953   MI.eraseFromParent();
1954   return true;
1955 }
1956 
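     // Reassociate pointer arithmetic performed in the integer domain:
     //   G_ADD (G_PTRTOINT %p), %x  -->  G_PTRTOINT (G_PTR_ADD %p, %x)
     // The bool in PtrReg records whether the pointer was found on the RHS, in
     // which case the apply step commutes the operands first.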
1957 bool CombinerHelper::matchCombineAddP2IToPtrAdd(
1958     MachineInstr &MI, std::pair<Register, bool> &PtrReg) {
1959   assert(MI.getOpcode() == TargetOpcode::G_ADD);
1960   Register LHS = MI.getOperand(1).getReg();
1961   Register RHS = MI.getOperand(2).getReg();
1962   LLT IntTy = MRI.getType(LHS);
1963 
1964   // G_PTR_ADD always has the pointer in the LHS, so we may need to commute the
1965   // instruction.
1966   PtrReg.second = false;
1967   for (Register SrcReg : {LHS, RHS}) {
1968     if (mi_match(SrcReg, MRI, m_GPtrToInt(m_Reg(PtrReg.first)))) {
1969       // Don't handle cases where the integer is implicitly converted to the
1970       // pointer width.
1971       LLT PtrTy = MRI.getType(PtrReg.first);
1972       if (PtrTy.getScalarSizeInBits() == IntTy.getScalarSizeInBits())
1973         return true;
1974     }
1975 
1976     PtrReg.second = true;
1977   }
1978 
1979   return false;
1980 }
1981 
1982 bool CombinerHelper::applyCombineAddP2IToPtrAdd(
1983     MachineInstr &MI, std::pair<Register, bool> &PtrReg) {
1984   Register Dst = MI.getOperand(0).getReg();
1985   Register LHS = MI.getOperand(1).getReg();
1986   Register RHS = MI.getOperand(2).getReg();
1987 
1988   const bool DoCommute = PtrReg.second;
1989   if (DoCommute)
1990     std::swap(LHS, RHS);
1991   LHS = PtrReg.first;
1992 
1993   LLT PtrTy = MRI.getType(LHS);
1994 
1995   Builder.setInstrAndDebugLoc(MI);
1996   auto PtrAdd = Builder.buildPtrAdd(PtrTy, LHS, RHS);
1997   Builder.buildPtrToInt(Dst, PtrAdd);
1998   MI.eraseFromParent();
1999   return true;
2000 }
2001 
2002 bool CombinerHelper::matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg) {
2003   assert(MI.getOpcode() == TargetOpcode::G_ANYEXT && "Expected a G_ANYEXT");
2004   Register DstReg = MI.getOperand(0).getReg();
2005   Register SrcReg = MI.getOperand(1).getReg();
2006   LLT DstTy = MRI.getType(DstReg);
2007   return mi_match(SrcReg, MRI,
2008                   m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy))));
2009 }
2010 
2011 bool CombinerHelper::applyCombineAnyExtTrunc(MachineInstr &MI, Register &Reg) {
2012   assert(MI.getOpcode() == TargetOpcode::G_ANYEXT && "Expected a G_ANYEXT");
2013   Register DstReg = MI.getOperand(0).getReg();
2014   MI.eraseFromParent();
2015   replaceRegWith(MRI, DstReg, Reg);
2016   return true;
2017 }
2018 
2019 bool CombinerHelper::matchCombineExtOfExt(
2020     MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
2021   assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
2022           MI.getOpcode() == TargetOpcode::G_SEXT ||
2023           MI.getOpcode() == TargetOpcode::G_ZEXT) &&
2024          "Expected a G_[ASZ]EXT");
2025   Register SrcReg = MI.getOperand(1).getReg();
2026   MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
2027   // Match exts with the same opcode, anyext([sz]ext) and sext(zext).
2028   unsigned Opc = MI.getOpcode();
2029   unsigned SrcOpc = SrcMI->getOpcode();
2030   if (Opc == SrcOpc ||
2031       (Opc == TargetOpcode::G_ANYEXT &&
2032        (SrcOpc == TargetOpcode::G_SEXT || SrcOpc == TargetOpcode::G_ZEXT)) ||
2033       (Opc == TargetOpcode::G_SEXT && SrcOpc == TargetOpcode::G_ZEXT)) {
2034     MatchInfo = std::make_tuple(SrcMI->getOperand(1).getReg(), SrcOpc);
2035     return true;
2036   }
2037   return false;
2038 }
2039 
2040 bool CombinerHelper::applyCombineExtOfExt(
2041     MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
2042   assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
2043           MI.getOpcode() == TargetOpcode::G_SEXT ||
2044           MI.getOpcode() == TargetOpcode::G_ZEXT) &&
2045          "Expected a G_[ASZ]EXT");
2046 
2047   Register Reg = std::get<0>(MatchInfo);
2048   unsigned SrcExtOp = std::get<1>(MatchInfo);
2049 
2050   // Combine exts with the same opcode.
2051   if (MI.getOpcode() == SrcExtOp) {
2052     Observer.changingInstr(MI);
2053     MI.getOperand(1).setReg(Reg);
2054     Observer.changedInstr(MI);
2055     return true;
2056   }
2057 
2058   // Combine:
2059   // - anyext([sz]ext x) to [sz]ext x
2060   // - sext(zext x) to zext x
2061   if (MI.getOpcode() == TargetOpcode::G_ANYEXT ||
2062       (MI.getOpcode() == TargetOpcode::G_SEXT &&
2063        SrcExtOp == TargetOpcode::G_ZEXT)) {
2064     Register DstReg = MI.getOperand(0).getReg();
2065     Builder.setInstrAndDebugLoc(MI);
2066     Builder.buildInstr(SrcExtOp, {DstReg}, {Reg});
2067     MI.eraseFromParent();
2068     return true;
2069   }
2070 
2071   return false;
2072 }
2073 
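     // Rewrite a multiply by -1 as a subtraction from zero: x * -1 --> 0 - x.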
2074 bool CombinerHelper::applyCombineMulByNegativeOne(MachineInstr &MI) {
2075   assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
2076   Register DstReg = MI.getOperand(0).getReg();
2077   Register SrcReg = MI.getOperand(1).getReg();
2078   LLT DstTy = MRI.getType(DstReg);
2079 
2080   Builder.setInstrAndDebugLoc(MI);
2081   Builder.buildSub(DstReg, Builder.buildConstant(DstTy, 0), SrcReg,
2082                    MI.getFlags());
2083   MI.eraseFromParent();
2084   return true;
2085 }
2086 
2087 bool CombinerHelper::matchCombineFNegOfFNeg(MachineInstr &MI, Register &Reg) {
2088   assert(MI.getOpcode() == TargetOpcode::G_FNEG && "Expected a G_FNEG");
2089   Register SrcReg = MI.getOperand(1).getReg();
2090   return mi_match(SrcReg, MRI, m_GFNeg(m_Reg(Reg)));
2091 }
2092 
2093 bool CombinerHelper::matchCombineFAbsOfFAbs(MachineInstr &MI, Register &Src) {
2094   assert(MI.getOpcode() == TargetOpcode::G_FABS && "Expected a G_FABS");
2095   Src = MI.getOperand(1).getReg();
2096   Register AbsSrc;
2097   return mi_match(Src, MRI, m_GFabs(m_Reg(AbsSrc)));
2098 }
2099 
2100 bool CombinerHelper::applyCombineFAbsOfFAbs(MachineInstr &MI, Register &Src) {
2101   assert(MI.getOpcode() == TargetOpcode::G_FABS && "Expected a G_FABS");
2102   Register Dst = MI.getOperand(0).getReg();
2103   MI.eraseFromParent();
2104   replaceRegWith(MRI, Dst, Src);
2105   return true;
2106 }
2107 
2108 bool CombinerHelper::matchCombineTruncOfExt(
2109     MachineInstr &MI, std::pair<Register, unsigned> &MatchInfo) {
2110   assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2111   Register SrcReg = MI.getOperand(1).getReg();
2112   MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
2113   unsigned SrcOpc = SrcMI->getOpcode();
2114   if (SrcOpc == TargetOpcode::G_ANYEXT || SrcOpc == TargetOpcode::G_SEXT ||
2115       SrcOpc == TargetOpcode::G_ZEXT) {
2116     MatchInfo = std::make_pair(SrcMI->getOperand(1).getReg(), SrcOpc);
2117     return true;
2118   }
2119   return false;
2120 }
2121 
2122 bool CombinerHelper::applyCombineTruncOfExt(
2123     MachineInstr &MI, std::pair<Register, unsigned> &MatchInfo) {
2124   assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2125   Register SrcReg = MatchInfo.first;
2126   unsigned SrcExtOp = MatchInfo.second;
2127   Register DstReg = MI.getOperand(0).getReg();
2128   LLT SrcTy = MRI.getType(SrcReg);
2129   LLT DstTy = MRI.getType(DstReg);
2130   if (SrcTy == DstTy) {
2131     MI.eraseFromParent();
2132     replaceRegWith(MRI, DstReg, SrcReg);
2133     return true;
2134   }
2135   Builder.setInstrAndDebugLoc(MI);
2136   if (SrcTy.getSizeInBits() < DstTy.getSizeInBits())
2137     Builder.buildInstr(SrcExtOp, {DstReg}, {SrcReg});
2138   else
2139     Builder.buildTrunc(DstReg, SrcReg);
2140   MI.eraseFromParent();
2141   return true;
2142 }
2143 
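     // Match a truncate of a left shift where the shift amount is known to be
     // small enough for the shift to be done directly in the narrower type:
     //   G_TRUNC (G_SHL x, amt)  -->  G_SHL (G_TRUNC x), (G_TRUNC amt)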
2144 bool CombinerHelper::matchCombineTruncOfShl(
2145     MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
2146   assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2147   Register DstReg = MI.getOperand(0).getReg();
2148   Register SrcReg = MI.getOperand(1).getReg();
2149   LLT DstTy = MRI.getType(DstReg);
2150   Register ShiftSrc;
2151   Register ShiftAmt;
2152 
2153   if (MRI.hasOneNonDBGUse(SrcReg) &&
2154       mi_match(SrcReg, MRI, m_GShl(m_Reg(ShiftSrc), m_Reg(ShiftAmt))) &&
2155       isLegalOrBeforeLegalizer(
2156           {TargetOpcode::G_SHL,
2157            {DstTy, getTargetLowering().getPreferredShiftAmountTy(DstTy)}})) {
2158     KnownBits Known = KB->getKnownBits(ShiftAmt);
2159     unsigned Size = DstTy.getSizeInBits();
2160     if (Known.getBitWidth() - Known.countMinLeadingZeros() <= Log2_32(Size)) {
2161       MatchInfo = std::make_pair(ShiftSrc, ShiftAmt);
2162       return true;
2163     }
2164   }
2165   return false;
2166 }
2167 
2168 bool CombinerHelper::applyCombineTruncOfShl(
2169     MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
2170   assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2171   Register DstReg = MI.getOperand(0).getReg();
2172   Register SrcReg = MI.getOperand(1).getReg();
2173   LLT DstTy = MRI.getType(DstReg);
2174   MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
2175 
2176   Register ShiftSrc = MatchInfo.first;
2177   Register ShiftAmt = MatchInfo.second;
2178   Builder.setInstrAndDebugLoc(MI);
2179   auto TruncShiftSrc = Builder.buildTrunc(DstTy, ShiftSrc);
2180   auto TruncShiftAmt = Builder.buildTrunc(DstTy, ShiftAmt);
2181   Builder.buildShl(DstReg, TruncShiftSrc, TruncShiftAmt, SrcMI->getFlags());
2182   MI.eraseFromParent();
2183   return true;
2184 }
2185 
2186 bool CombinerHelper::matchAnyExplicitUseIsUndef(MachineInstr &MI) {
2187   return any_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
2188     return MO.isReg() &&
2189            getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
2190   });
2191 }
2192 
2193 bool CombinerHelper::matchAllExplicitUsesAreUndef(MachineInstr &MI) {
2194   return all_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
2195     return !MO.isReg() ||
2196            getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
2197   });
2198 }
2199 
2200 bool CombinerHelper::matchUndefShuffleVectorMask(MachineInstr &MI) {
2201   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
2202   ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
2203   return all_of(Mask, [](int Elt) { return Elt < 0; });
2204 }
2205 
2206 bool CombinerHelper::matchUndefStore(MachineInstr &MI) {
2207   assert(MI.getOpcode() == TargetOpcode::G_STORE);
2208   return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(0).getReg(),
2209                       MRI);
2210 }
2211 
2212 bool CombinerHelper::matchUndefSelectCmp(MachineInstr &MI) {
2213   assert(MI.getOpcode() == TargetOpcode::G_SELECT);
2214   return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(1).getReg(),
2215                       MRI);
2216 }
2217 
2218 bool CombinerHelper::matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx) {
2219   assert(MI.getOpcode() == TargetOpcode::G_SELECT);
2220   if (auto MaybeCstCmp =
2221           getConstantVRegValWithLookThrough(MI.getOperand(1).getReg(), MRI)) {
2222     OpIdx = MaybeCstCmp->Value ? 2 : 3;
2223     return true;
2224   }
2225   return false;
2226 }
2227 
2228 bool CombinerHelper::eraseInst(MachineInstr &MI) {
2229   MI.eraseFromParent();
2230   return true;
2231 }
2232 
2233 bool CombinerHelper::matchEqualDefs(const MachineOperand &MOP1,
2234                                     const MachineOperand &MOP2) {
2235   if (!MOP1.isReg() || !MOP2.isReg())
2236     return false;
2237   MachineInstr *I1 = getDefIgnoringCopies(MOP1.getReg(), MRI);
2238   if (!I1)
2239     return false;
2240   MachineInstr *I2 = getDefIgnoringCopies(MOP2.getReg(), MRI);
2241   if (!I2)
2242     return false;
2243 
2244   // Handle a case like this:
2245   //
2246   // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<2 x s64>)
2247   //
2248   // Even though %0 and %1 are produced by the same instruction they are not
2249   // the same values.
2250   if (I1 == I2)
2251     return MOP1.getReg() == MOP2.getReg();
2252 
2253   // If we have an instruction which loads or stores, we can't guarantee that
2254   // it is identical.
2255   //
2256   // For example, we may have
2257   //
2258   // %x1 = G_LOAD %addr (load N from @somewhere)
2259   // ...
2260   // call @foo
2261   // ...
2262   // %x2 = G_LOAD %addr (load N from @somewhere)
2263   // ...
2264   // %or = G_OR %x1, %x2
2265   //
2266   // It's possible that @foo will modify whatever lives at the address we're
2267   // loading from. To be safe, let's just assume that all loads and stores
2268   // are different (unless we have something which is guaranteed to not
2269   // change.)
2270   if (I1->mayLoadOrStore() && !I1->isDereferenceableInvariantLoad(nullptr))
2271     return false;
2272 
2273   // Check for physical registers on the instructions first to avoid cases
2274   // like this:
2275   //
2276   // %a = COPY $physreg
2277   // ...
2278   // SOMETHING implicit-def $physreg
2279   // ...
2280   // %b = COPY $physreg
2281   //
2282   // These copies are not equivalent.
2283   if (any_of(I1->uses(), [](const MachineOperand &MO) {
2284         return MO.isReg() && MO.getReg().isPhysical();
2285       })) {
2286     // Check if we have a case like this:
2287     //
2288     // %a = COPY $physreg
2289     // %b = COPY %a
2290     //
2291     // In this case, I1 and I2 will both be equal to %a = COPY $physreg.
2292     // From that, we know that they must have the same value, since they must
2293     // have come from the same COPY.
2294     return I1->isIdenticalTo(*I2);
2295   }
2296 
2297   // We don't have any physical registers, so we don't necessarily need the
2298   // same vreg defs.
2299   //
2300   // On the off-chance that there's some target instruction feeding into the
2301   // instruction, let's use produceSameValue instead of isIdenticalTo.
2302   return Builder.getTII().produceSameValue(*I1, *I2, &MRI);
2303 }
2304 
2305 bool CombinerHelper::matchConstantOp(const MachineOperand &MOP, int64_t C) {
2306   if (!MOP.isReg())
2307     return false;
2308   // MIPatternMatch doesn't let us look through G_ZEXT etc.
2309   auto ValAndVReg = getConstantVRegValWithLookThrough(MOP.getReg(), MRI);
2310   return ValAndVReg && ValAndVReg->Value == C;
2311 }
2312 
2313 bool CombinerHelper::replaceSingleDefInstWithOperand(MachineInstr &MI,
2314                                                      unsigned OpIdx) {
2315   assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
2316   Register OldReg = MI.getOperand(0).getReg();
2317   Register Replacement = MI.getOperand(OpIdx).getReg();
2318   assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?");
2319   MI.eraseFromParent();
2320   replaceRegWith(MRI, OldReg, Replacement);
2321   return true;
2322 }
2323 
2324 bool CombinerHelper::replaceSingleDefInstWithReg(MachineInstr &MI,
2325                                                  Register Replacement) {
2326   assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
2327   Register OldReg = MI.getOperand(0).getReg();
2328   assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?");
2329   MI.eraseFromParent();
2330   replaceRegWith(MRI, OldReg, Replacement);
2331   return true;
2332 }
2333 
2334 bool CombinerHelper::matchSelectSameVal(MachineInstr &MI) {
2335   assert(MI.getOpcode() == TargetOpcode::G_SELECT);
2336   // Match (cond ? x : x)
2337   return matchEqualDefs(MI.getOperand(2), MI.getOperand(3)) &&
2338          canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(2).getReg(),
2339                        MRI);
2340 }
2341 
2342 bool CombinerHelper::matchBinOpSameVal(MachineInstr &MI) {
2343   return matchEqualDefs(MI.getOperand(1), MI.getOperand(2)) &&
2344          canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(1).getReg(),
2345                        MRI);
2346 }
2347 
2348 bool CombinerHelper::matchOperandIsZero(MachineInstr &MI, unsigned OpIdx) {
2349   return matchConstantOp(MI.getOperand(OpIdx), 0) &&
2350          canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(OpIdx).getReg(),
2351                        MRI);
2352 }
2353 
2354 bool CombinerHelper::matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx) {
2355   MachineOperand &MO = MI.getOperand(OpIdx);
2356   return MO.isReg() &&
2357          getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
2358 }
2359 
2360 bool CombinerHelper::replaceInstWithFConstant(MachineInstr &MI, double C) {
2361   assert(MI.getNumDefs() == 1 && "Expected only one def?");
2362   Builder.setInstr(MI);
2363   Builder.buildFConstant(MI.getOperand(0), C);
2364   MI.eraseFromParent();
2365   return true;
2366 }
2367 
2368 bool CombinerHelper::replaceInstWithConstant(MachineInstr &MI, int64_t C) {
2369   assert(MI.getNumDefs() == 1 && "Expected only one def?");
2370   Builder.setInstr(MI);
2371   Builder.buildConstant(MI.getOperand(0), C);
2372   MI.eraseFromParent();
2373   return true;
2374 }
2375 
2376 bool CombinerHelper::replaceInstWithUndef(MachineInstr &MI) {
2377   assert(MI.getNumDefs() == 1 && "Expected only one def?");
2378   Builder.setInstr(MI);
2379   Builder.buildUndef(MI.getOperand(0));
2380   MI.eraseFromParent();
2381   return true;
2382 }
2383 
2384 bool CombinerHelper::matchSimplifyAddToSub(
2385     MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) {
2386   Register LHS = MI.getOperand(1).getReg();
2387   Register RHS = MI.getOperand(2).getReg();
2388   Register &NewLHS = std::get<0>(MatchInfo);
2389   Register &NewRHS = std::get<1>(MatchInfo);
2390 
2391   // Helper lambda to check for opportunities for
2392   // ((0-A) + B) -> B - A
2393   // (A + (0-B)) -> A - B
2394   auto CheckFold = [&](Register &MaybeSub, Register &MaybeNewLHS) {
2395     int64_t Cst;
2396     if (!mi_match(MaybeSub, MRI, m_GSub(m_ICst(Cst), m_Reg(NewRHS))) ||
2397         Cst != 0)
2398       return false;
2399     NewLHS = MaybeNewLHS;
2400     return true;
2401   };
2402 
2403   return CheckFold(LHS, RHS) || CheckFold(RHS, LHS);
2404 }
2405 
2406 bool CombinerHelper::applySimplifyAddToSub(
2407     MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) {
2408   Builder.setInstr(MI);
2409   Register SubLHS, SubRHS;
2410   std::tie(SubLHS, SubRHS) = MatchInfo;
2411   Builder.buildSub(MI.getOperand(0).getReg(), SubLHS, SubRHS);
2412   MI.eraseFromParent();
2413   return true;
2414 }
2415 
2416 bool CombinerHelper::matchHoistLogicOpWithSameOpcodeHands(
2417     MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) {
2418   // Matches: logic (hand x, ...), (hand y, ...) -> hand (logic x, y), ...
2419   //
2420   // Creates the new hand + logic instructions (but does not insert them).
2421   //
2422   // On success, MatchInfo is populated with the new instructions. These are
2423   // inserted in applyHoistLogicOpWithSameOpcodeHands.
2424   unsigned LogicOpcode = MI.getOpcode();
2425   assert(LogicOpcode == TargetOpcode::G_AND ||
2426          LogicOpcode == TargetOpcode::G_OR ||
2427          LogicOpcode == TargetOpcode::G_XOR);
2428   MachineIRBuilder MIB(MI);
2429   Register Dst = MI.getOperand(0).getReg();
2430   Register LHSReg = MI.getOperand(1).getReg();
2431   Register RHSReg = MI.getOperand(2).getReg();
2432 
2433   // Don't recompute anything.
2434   if (!MRI.hasOneNonDBGUse(LHSReg) || !MRI.hasOneNonDBGUse(RHSReg))
2435     return false;
2436 
2437   // Make sure we have (hand x, ...), (hand y, ...)
2438   MachineInstr *LeftHandInst = getDefIgnoringCopies(LHSReg, MRI);
2439   MachineInstr *RightHandInst = getDefIgnoringCopies(RHSReg, MRI);
2440   if (!LeftHandInst || !RightHandInst)
2441     return false;
2442   unsigned HandOpcode = LeftHandInst->getOpcode();
2443   if (HandOpcode != RightHandInst->getOpcode())
2444     return false;
2445   if (!LeftHandInst->getOperand(1).isReg() ||
2446       !RightHandInst->getOperand(1).isReg())
2447     return false;
2448 
2449   // Make sure the types match up, and if we're doing this post-legalization,
2450   // we end up with legal types.
2451   Register X = LeftHandInst->getOperand(1).getReg();
2452   Register Y = RightHandInst->getOperand(1).getReg();
2453   LLT XTy = MRI.getType(X);
2454   LLT YTy = MRI.getType(Y);
2455   if (XTy != YTy)
2456     return false;
2457   if (!isLegalOrBeforeLegalizer({LogicOpcode, {XTy, YTy}}))
2458     return false;
2459 
2460   // Optional extra source register.
2461   Register ExtraHandOpSrcReg;
2462   switch (HandOpcode) {
2463   default:
2464     return false;
2465   case TargetOpcode::G_ANYEXT:
2466   case TargetOpcode::G_SEXT:
2467   case TargetOpcode::G_ZEXT: {
2468     // Match: logic (ext X), (ext Y) --> ext (logic X, Y)
2469     break;
2470   }
2471   case TargetOpcode::G_AND:
2472   case TargetOpcode::G_ASHR:
2473   case TargetOpcode::G_LSHR:
2474   case TargetOpcode::G_SHL: {
2475     // Match: logic (binop x, z), (binop y, z) -> binop (logic x, y), z
2476     MachineOperand &ZOp = LeftHandInst->getOperand(2);
2477     if (!matchEqualDefs(ZOp, RightHandInst->getOperand(2)))
2478       return false;
2479     ExtraHandOpSrcReg = ZOp.getReg();
2480     break;
2481   }
2482   }
2483 
2484   // Record the steps to build the new instructions.
2485   //
2486   // Steps to build (logic x, y)
2487   auto NewLogicDst = MRI.createGenericVirtualRegister(XTy);
2488   OperandBuildSteps LogicBuildSteps = {
2489       [=](MachineInstrBuilder &MIB) { MIB.addDef(NewLogicDst); },
2490       [=](MachineInstrBuilder &MIB) { MIB.addReg(X); },
2491       [=](MachineInstrBuilder &MIB) { MIB.addReg(Y); }};
2492   InstructionBuildSteps LogicSteps(LogicOpcode, LogicBuildSteps);
2493 
2494   // Steps to build hand (logic x, y), ...z
2495   OperandBuildSteps HandBuildSteps = {
2496       [=](MachineInstrBuilder &MIB) { MIB.addDef(Dst); },
2497       [=](MachineInstrBuilder &MIB) { MIB.addReg(NewLogicDst); }};
2498   if (ExtraHandOpSrcReg.isValid())
2499     HandBuildSteps.push_back(
2500         [=](MachineInstrBuilder &MIB) { MIB.addReg(ExtraHandOpSrcReg); });
2501   InstructionBuildSteps HandSteps(HandOpcode, HandBuildSteps);
2502 
2503   MatchInfo = InstructionStepsMatchInfo({LogicSteps, HandSteps});
2504   return true;
2505 }
2506 
2507 bool CombinerHelper::applyBuildInstructionSteps(
2508     MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) {
2509   assert(MatchInfo.InstrsToBuild.size() &&
2510          "Expected at least one instr to build?");
2511   Builder.setInstr(MI);
2512   for (auto &InstrToBuild : MatchInfo.InstrsToBuild) {
2513     assert(InstrToBuild.Opcode && "Expected a valid opcode?");
2514     assert(InstrToBuild.OperandFns.size() && "Expected at least one operand?");
2515     MachineInstrBuilder Instr = Builder.buildInstr(InstrToBuild.Opcode);
2516     for (auto &OperandFn : InstrToBuild.OperandFns)
2517       OperandFn(Instr);
2518   }
2519   MI.eraseFromParent();
2520   return true;
2521 }
2522 
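     // Fold an arithmetic-shift-right of a left shift by the same constant
     // amount into a sign extension of the low bits:
     //   G_ASHR (G_SHL x, C), C  -->  G_SEXT_INREG x, Width - C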
2523 bool CombinerHelper::matchAshrShlToSextInreg(
2524     MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) {
2525   assert(MI.getOpcode() == TargetOpcode::G_ASHR);
2526   int64_t ShlCst, AshrCst;
2527   Register Src;
2528   // FIXME: detect splat constant vectors.
2529   if (!mi_match(MI.getOperand(0).getReg(), MRI,
2530                 m_GAShr(m_GShl(m_Reg(Src), m_ICst(ShlCst)), m_ICst(AshrCst))))
2531     return false;
2532   if (ShlCst != AshrCst)
2533     return false;
2534   if (!isLegalOrBeforeLegalizer(
2535           {TargetOpcode::G_SEXT_INREG, {MRI.getType(Src)}}))
2536     return false;
2537   MatchInfo = std::make_tuple(Src, ShlCst);
2538   return true;
2539 }
2540 bool CombinerHelper::applyAshShlToSextInreg(
2541     MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) {
2542   assert(MI.getOpcode() == TargetOpcode::G_ASHR);
2543   Register Src;
2544   int64_t ShiftAmt;
2545   std::tie(Src, ShiftAmt) = MatchInfo;
2546   unsigned Size = MRI.getType(Src).getScalarSizeInBits();
2547   Builder.setInstrAndDebugLoc(MI);
2548   Builder.buildSExtInReg(MI.getOperand(0).getReg(), Src, Size - ShiftAmt);
2549   MI.eraseFromParent();
2550   return true;
2551 }
2552 
2553 bool CombinerHelper::matchAndWithTrivialMask(MachineInstr &MI,
2554                                              Register &Replacement) {
2555   // Given
2556   //
2557   // %mask:_(sN) = G_CONSTANT iN 000...0111...1
2558   // %x:_(sN) = G_SOMETHING
2559   // %y:_(sN) = G_AND %x, %mask
2560   //
2561   // Eliminate the G_AND when it is known that x & mask == x.
2562   //
2563   // Patterns like this can appear as a result of legalization. E.g.
2564   //
2565   // %cmp:_(s32) = G_ICMP intpred(pred), %x(s32), %y
2566   // %one:_(s32) = G_CONSTANT i32 1
2567   // %and:_(s32) = G_AND %cmp, %one
2568   //
2569   // In this case, G_ICMP only produces a single bit, so x & 1 == x.
2570   assert(MI.getOpcode() == TargetOpcode::G_AND);
2571   if (!KB)
2572     return false;
2573 
2574   // Replacement = %x, AndDst = %y. Check that we can replace AndDst with the
2575   // LHS of the G_AND.
2576   Replacement = MI.getOperand(1).getReg();
2577   Register AndDst = MI.getOperand(0).getReg();
2578   LLT DstTy = MRI.getType(AndDst);
2579 
2580   // FIXME: This should be removed once GISelKnownBits supports vectors.
2581   if (DstTy.isVector())
2582     return false;
2583   if (!canReplaceReg(AndDst, Replacement, MRI))
2584     return false;
2585 
2586   // Check that we have a constant on the RHS of the G_AND, which is of the form
2587   // 000...0111...1.
2588   int64_t Cst;
2589   if (!mi_match(MI.getOperand(2).getReg(), MRI, m_ICst(Cst)))
2590     return false;
2591   APInt Mask(DstTy.getSizeInBits(), Cst);
2592   if (!Mask.isMask())
2593     return false;
2594 
2595   // Now, let's check that x & Mask == x. If this is true, then x & ~Mask == 0.
2596   return KB->maskedValueIsZero(Replacement, ~Mask);
2597 }
2598 
2599 bool CombinerHelper::matchRedundantSExtInReg(MachineInstr &MI) {
2600   // If the input is already sign extended, just drop the extension.
2601   Register Src = MI.getOperand(1).getReg();
2602   unsigned ExtBits = MI.getOperand(2).getImm();
2603   unsigned TypeSize = MRI.getType(Src).getScalarSizeInBits();
2604   return KB->computeNumSignBits(Src) >= (TypeSize - ExtBits + 1);
2605 }
2606 
2607 static bool isConstValidTrue(const TargetLowering &TLI, unsigned ScalarSizeBits,
2608                              int64_t Cst, bool IsVector, bool IsFP) {
2609   // For i1, Cst will always be -1 regardless of boolean contents.
2610   return (ScalarSizeBits == 1 && Cst == -1) ||
2611          isConstTrueVal(TLI, Cst, IsVector, IsFP);
2612 }
2613 
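     // Match a G_XOR of a tree of compares (combined with G_AND/G_OR) against
     // "true": the xor can be removed by inverting every compare and swapping
     // the ANDs and ORs (De Morgan), which is done in applyNotCmp.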
2614 bool CombinerHelper::matchNotCmp(MachineInstr &MI,
2615                                  SmallVectorImpl<Register> &RegsToNegate) {
2616   assert(MI.getOpcode() == TargetOpcode::G_XOR);
2617   LLT Ty = MRI.getType(MI.getOperand(0).getReg());
2618   const auto &TLI = *Builder.getMF().getSubtarget().getTargetLowering();
2619   Register XorSrc;
2620   Register CstReg;
2621   // We match xor(src, true) here.
2622   if (!mi_match(MI.getOperand(0).getReg(), MRI,
2623                 m_GXor(m_Reg(XorSrc), m_Reg(CstReg))))
2624     return false;
2625 
2626   if (!MRI.hasOneNonDBGUse(XorSrc))
2627     return false;
2628 
2629   // Check that XorSrc is the root of a tree of comparisons combined with ANDs
2630   // and ORs. The suffix of RegsToNegate starting from index I is used as a
2631   // work list of tree nodes to visit.
2632   RegsToNegate.push_back(XorSrc);
2633   // Remember whether the comparisons are all integer or all floating point.
2634   bool IsInt = false;
2635   bool IsFP = false;
2636   for (unsigned I = 0; I < RegsToNegate.size(); ++I) {
2637     Register Reg = RegsToNegate[I];
2638     if (!MRI.hasOneNonDBGUse(Reg))
2639       return false;
2640     MachineInstr *Def = MRI.getVRegDef(Reg);
2641     switch (Def->getOpcode()) {
2642     default:
2643       // Don't match if the tree contains anything other than ANDs, ORs and
2644       // comparisons.
2645       return false;
2646     case TargetOpcode::G_ICMP:
2647       if (IsFP)
2648         return false;
2649       IsInt = true;
2650       // When we apply the combine we will invert the predicate.
2651       break;
2652     case TargetOpcode::G_FCMP:
2653       if (IsInt)
2654         return false;
2655       IsFP = true;
2656       // When we apply the combine we will invert the predicate.
2657       break;
2658     case TargetOpcode::G_AND:
2659     case TargetOpcode::G_OR:
2660       // Implement De Morgan's laws:
2661       // ~(x & y) -> ~x | ~y
2662       // ~(x | y) -> ~x & ~y
2663       // When we apply the combine we will change the opcode and recursively
2664       // negate the operands.
2665       RegsToNegate.push_back(Def->getOperand(1).getReg());
2666       RegsToNegate.push_back(Def->getOperand(2).getReg());
2667       break;
2668     }
2669   }
2670 
2671   // Now we know whether the comparisons are integer or floating point, check
2672   // the constant in the xor.
2673   int64_t Cst;
2674   if (Ty.isVector()) {
2675     MachineInstr *CstDef = MRI.getVRegDef(CstReg);
2676     auto MaybeCst = getBuildVectorConstantSplat(*CstDef, MRI);
2677     if (!MaybeCst)
2678       return false;
2679     if (!isConstValidTrue(TLI, Ty.getScalarSizeInBits(), *MaybeCst, true, IsFP))
2680       return false;
2681   } else {
2682     if (!mi_match(CstReg, MRI, m_ICst(Cst)))
2683       return false;
2684     if (!isConstValidTrue(TLI, Ty.getSizeInBits(), Cst, false, IsFP))
2685       return false;
2686   }
2687 
2688   return true;
2689 }
2690 
2691 bool CombinerHelper::applyNotCmp(MachineInstr &MI,
2692                                  SmallVectorImpl<Register> &RegsToNegate) {
2693   for (Register Reg : RegsToNegate) {
2694     MachineInstr *Def = MRI.getVRegDef(Reg);
2695     Observer.changingInstr(*Def);
2696     // For each comparison, invert the opcode. For each AND and OR, change the
2697     // opcode.
2698     switch (Def->getOpcode()) {
2699     default:
2700       llvm_unreachable("Unexpected opcode");
2701     case TargetOpcode::G_ICMP:
2702     case TargetOpcode::G_FCMP: {
2703       MachineOperand &PredOp = Def->getOperand(1);
2704       CmpInst::Predicate NewP = CmpInst::getInversePredicate(
2705           (CmpInst::Predicate)PredOp.getPredicate());
2706       PredOp.setPredicate(NewP);
2707       break;
2708     }
2709     case TargetOpcode::G_AND:
2710       Def->setDesc(Builder.getTII().get(TargetOpcode::G_OR));
2711       break;
2712     case TargetOpcode::G_OR:
2713       Def->setDesc(Builder.getTII().get(TargetOpcode::G_AND));
2714       break;
2715     }
2716     Observer.changedInstr(*Def);
2717   }
2718 
2719   replaceRegWith(MRI, MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
2720   MI.eraseFromParent();
2721   return true;
2722 }
2723 
2724 bool CombinerHelper::tryCombine(MachineInstr &MI) {
2725   if (tryCombineCopy(MI))
2726     return true;
2727   if (tryCombineExtendingLoads(MI))
2728     return true;
2729   if (tryCombineIndexedLoadStore(MI))
2730     return true;
2731   return false;
2732 }
2733