//===-- lib/CodeGen/GlobalISel/CombinerHelper.cpp -------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 #include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
9 #include "llvm/ADT/SetVector.h"
10 #include "llvm/ADT/SmallBitVector.h"
11 #include "llvm/CodeGen/GlobalISel/Combiner.h"
12 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
13 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
14 #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
15 #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
16 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
17 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
18 #include "llvm/CodeGen/GlobalISel/Utils.h"
19 #include "llvm/CodeGen/LowLevelType.h"
20 #include "llvm/CodeGen/MachineBasicBlock.h"
21 #include "llvm/CodeGen/MachineDominators.h"
22 #include "llvm/CodeGen/MachineFrameInfo.h"
23 #include "llvm/CodeGen/MachineInstr.h"
24 #include "llvm/CodeGen/MachineMemOperand.h"
25 #include "llvm/CodeGen/MachineRegisterInfo.h"
26 #include "llvm/CodeGen/TargetInstrInfo.h"
27 #include "llvm/CodeGen/TargetLowering.h"
28 #include "llvm/CodeGen/TargetOpcodes.h"
29 #include "llvm/Support/MathExtras.h"
30 #include "llvm/Target/TargetMachine.h"
31 #include <tuple>
32 
33 #define DEBUG_TYPE "gi-combiner"
34 
35 using namespace llvm;
36 using namespace MIPatternMatch;
37 
38 // Option to allow testing of the combiner while no targets know about indexed
39 // addressing.
40 static cl::opt<bool>
41     ForceLegalIndexing("force-legal-indexing", cl::Hidden, cl::init(false),
42                        cl::desc("Force all indexed operations to be "
43                                 "legal for the GlobalISel combiner"));
44 
45 CombinerHelper::CombinerHelper(GISelChangeObserver &Observer,
46                                MachineIRBuilder &B, GISelKnownBits *KB,
47                                MachineDominatorTree *MDT,
48                                const LegalizerInfo *LI)
49     : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer),
50       KB(KB), MDT(MDT), LI(LI) {
51   (void)this->KB;
52 }
53 
54 const TargetLowering &CombinerHelper::getTargetLowering() const {
55   return *Builder.getMF().getSubtarget().getTargetLowering();
56 }
57 
58 /// \returns The little endian in-memory byte position of byte \p I in a
59 /// \p ByteWidth bytes wide type.
60 ///
61 /// E.g. Given a 4-byte type x, x[0] -> byte 0
62 static unsigned littleEndianByteAt(const unsigned ByteWidth, const unsigned I) {
63   assert(I < ByteWidth && "I must be in [0, ByteWidth)");
64   return I;
65 }
66 
67 /// \returns The big endian in-memory byte position of byte \p I in a
68 /// \p ByteWidth bytes wide type.
69 ///
70 /// E.g. Given a 4-byte type x, x[0] -> byte 3
71 static unsigned bigEndianByteAt(const unsigned ByteWidth, const unsigned I) {
72   assert(I < ByteWidth && "I must be in [0, ByteWidth)");
73   return ByteWidth - I - 1;
74 }
75 
76 /// Given a map from byte offsets in memory to indices in a load/store,
77 /// determine if that map corresponds to a little or big endian byte pattern.
78 ///
/// \param MemOffset2Idx maps memory offsets to indices in the load/store.
80 /// \param LowestIdx is the lowest index in \p MemOffset2Idx.
81 ///
82 /// \returns true if the map corresponds to a big endian byte pattern, false
83 /// if it corresponds to a little endian byte pattern, and None otherwise.
84 ///
85 /// E.g. given a 32-bit type x, and x[AddrOffset], the in-memory byte patterns
86 /// are as follows:
87 ///
88 /// AddrOffset   Little endian    Big endian
89 /// 0            0                3
90 /// 1            1                2
91 /// 2            2                1
92 /// 3            3                0
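/// E.g. a map of {0 -> 0, 1 -> 1, 2 -> 2, 3 -> 3} (with \p LowestIdx = 0)
/// corresponds to a little endian pattern and yields false, while
/// {0 -> 3, 1 -> 2, 2 -> 1, 3 -> 0} corresponds to a big endian pattern and
/// yields true.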
93 static Optional<bool>
94 isBigEndian(const SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
95             int64_t LowestIdx) {
96   // Need at least two byte positions to decide on endianness.
97   unsigned Width = MemOffset2Idx.size();
98   if (Width < 2)
99     return None;
100   bool BigEndian = true, LittleEndian = true;
  for (unsigned MemOffset = 0; MemOffset < Width; ++MemOffset) {
102     auto MemOffsetAndIdx = MemOffset2Idx.find(MemOffset);
103     if (MemOffsetAndIdx == MemOffset2Idx.end())
104       return None;
105     const int64_t Idx = MemOffsetAndIdx->second - LowestIdx;
106     assert(Idx >= 0 && "Expected non-negative byte offset?");
107     LittleEndian &= Idx == littleEndianByteAt(Width, MemOffset);
108     BigEndian &= Idx == bigEndianByteAt(Width, MemOffset);
109     if (!BigEndian && !LittleEndian)
110       return None;
111   }
112 
113   assert((BigEndian != LittleEndian) &&
114          "Pattern cannot be both big and little endian!");
115   return BigEndian;
116 }
117 
118 bool CombinerHelper::isLegalOrBeforeLegalizer(
119     const LegalityQuery &Query) const {
120   return !LI || LI->getAction(Query).Action == LegalizeActions::Legal;
121 }
122 
123 void CombinerHelper::replaceRegWith(MachineRegisterInfo &MRI, Register FromReg,
124                                     Register ToReg) const {
125   Observer.changingAllUsesOfReg(MRI, FromReg);
126 
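  // If ToReg's attributes (register class, bank, type) can be constrained to
  // also satisfy FromReg's, replace all occurrences of FromReg directly;
  // otherwise materialize ToReg as a COPY of FromReg.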
127   if (MRI.constrainRegAttrs(ToReg, FromReg))
128     MRI.replaceRegWith(FromReg, ToReg);
129   else
130     Builder.buildCopy(ToReg, FromReg);
131 
132   Observer.finishedChangingAllUsesOfReg();
133 }
134 
135 void CombinerHelper::replaceRegOpWith(MachineRegisterInfo &MRI,
136                                       MachineOperand &FromRegOp,
137                                       Register ToReg) const {
138   assert(FromRegOp.getParent() && "Expected an operand in an MI");
139   Observer.changingInstr(*FromRegOp.getParent());
140 
141   FromRegOp.setReg(ToReg);
142 
143   Observer.changedInstr(*FromRegOp.getParent());
144 }
145 
146 bool CombinerHelper::tryCombineCopy(MachineInstr &MI) {
147   if (matchCombineCopy(MI)) {
148     applyCombineCopy(MI);
149     return true;
150   }
151   return false;
152 }
153 bool CombinerHelper::matchCombineCopy(MachineInstr &MI) {
154   if (MI.getOpcode() != TargetOpcode::COPY)
155     return false;
156   Register DstReg = MI.getOperand(0).getReg();
157   Register SrcReg = MI.getOperand(1).getReg();
158   return canReplaceReg(DstReg, SrcReg, MRI);
159 }
160 void CombinerHelper::applyCombineCopy(MachineInstr &MI) {
161   Register DstReg = MI.getOperand(0).getReg();
162   Register SrcReg = MI.getOperand(1).getReg();
163   MI.eraseFromParent();
164   replaceRegWith(MRI, DstReg, SrcReg);
165 }
166 
167 bool CombinerHelper::tryCombineConcatVectors(MachineInstr &MI) {
168   bool IsUndef = false;
169   SmallVector<Register, 4> Ops;
170   if (matchCombineConcatVectors(MI, IsUndef, Ops)) {
171     applyCombineConcatVectors(MI, IsUndef, Ops);
172     return true;
173   }
174   return false;
175 }
176 
177 bool CombinerHelper::matchCombineConcatVectors(MachineInstr &MI, bool &IsUndef,
178                                                SmallVectorImpl<Register> &Ops) {
179   assert(MI.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
180          "Invalid instruction");
181   IsUndef = true;
182   MachineInstr *Undef = nullptr;
183 
184   // Walk over all the operands of concat vectors and check if they are
185   // build_vector themselves or undef.
186   // Then collect their operands in Ops.
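  // E.g. (illustrative):
  //   %v0:_(<2 x s32>) = G_BUILD_VECTOR %a:_(s32), %b:_(s32)
  //   %v1:_(<2 x s32>) = G_IMPLICIT_DEF
  //   %cat:_(<4 x s32>) = G_CONCAT_VECTORS %v0(<2 x s32>), %v1(<2 x s32>)
  // collects Ops = {%a, %b, %undef, %undef}, where %undef is a single scalar
  // G_IMPLICIT_DEF shared by all undef elements.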
187   for (const MachineOperand &MO : MI.uses()) {
188     Register Reg = MO.getReg();
189     MachineInstr *Def = MRI.getVRegDef(Reg);
190     assert(Def && "Operand not defined");
191     switch (Def->getOpcode()) {
192     case TargetOpcode::G_BUILD_VECTOR:
193       IsUndef = false;
194       // Remember the operands of the build_vector to fold
195       // them into the yet-to-build flattened concat vectors.
196       for (const MachineOperand &BuildVecMO : Def->uses())
197         Ops.push_back(BuildVecMO.getReg());
198       break;
199     case TargetOpcode::G_IMPLICIT_DEF: {
200       LLT OpType = MRI.getType(Reg);
201       // Keep one undef value for all the undef operands.
202       if (!Undef) {
203         Builder.setInsertPt(*MI.getParent(), MI);
204         Undef = Builder.buildUndef(OpType.getScalarType());
205       }
206       assert(MRI.getType(Undef->getOperand(0).getReg()) ==
207                  OpType.getScalarType() &&
208              "All undefs should have the same type");
209       // Break the undef vector in as many scalar elements as needed
210       // for the flattening.
211       for (unsigned EltIdx = 0, EltEnd = OpType.getNumElements();
212            EltIdx != EltEnd; ++EltIdx)
213         Ops.push_back(Undef->getOperand(0).getReg());
214       break;
215     }
216     default:
217       return false;
218     }
219   }
220   return true;
221 }
222 void CombinerHelper::applyCombineConcatVectors(
223     MachineInstr &MI, bool IsUndef, const ArrayRef<Register> Ops) {
  // We determined that the concat_vectors can be flattened.
225   // Generate the flattened build_vector.
226   Register DstReg = MI.getOperand(0).getReg();
227   Builder.setInsertPt(*MI.getParent(), MI);
228   Register NewDstReg = MRI.cloneVirtualRegister(DstReg);
229 
  // Note: IsUndef is sort of redundant. We could have determined it by
  // checking that all Ops are undef.  Alternatively, we could have
  // generated a build_vector of undefs and relied on another combine to
  // clean that up.  For now, given we already gather this information
  // in tryCombineConcatVectors, just save compile time and issue the
  // right thing.
236   if (IsUndef)
237     Builder.buildUndef(NewDstReg);
238   else
239     Builder.buildBuildVector(NewDstReg, Ops);
240   MI.eraseFromParent();
241   replaceRegWith(MRI, DstReg, NewDstReg);
242 }
243 
244 bool CombinerHelper::tryCombineShuffleVector(MachineInstr &MI) {
245   SmallVector<Register, 4> Ops;
246   if (matchCombineShuffleVector(MI, Ops)) {
247     applyCombineShuffleVector(MI, Ops);
248     return true;
249   }
250   return false;
251 }
252 
253 bool CombinerHelper::matchCombineShuffleVector(MachineInstr &MI,
254                                                SmallVectorImpl<Register> &Ops) {
255   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
256          "Invalid instruction kind");
257   LLT DstType = MRI.getType(MI.getOperand(0).getReg());
258   Register Src1 = MI.getOperand(1).getReg();
259   LLT SrcType = MRI.getType(Src1);
  // As bizarre as it may look, shuffle vector can actually produce a
  // scalar! This is because at the IR level a <1 x ty> shuffle
  // vector is perfectly valid.
263   unsigned DstNumElts = DstType.isVector() ? DstType.getNumElements() : 1;
264   unsigned SrcNumElts = SrcType.isVector() ? SrcType.getNumElements() : 1;
265 
  // If the resulting vector is smaller than the size of the source
  // vectors being concatenated, we won't be able to replace the
  // shuffle vector with a concat_vectors.
269   //
270   // Note: We may still be able to produce a concat_vectors fed by
271   //       extract_vector_elt and so on. It is less clear that would
272   //       be better though, so don't bother for now.
273   //
  // If the destination is a scalar, the size of the sources doesn't
  // matter; we will lower the shuffle to a plain copy. This will
  // work only if the source and destination have the same size. But
  // that's covered by the next condition.
278   //
  // TODO: If the sizes of the source and destination don't match, we could
  //       still emit an extract_vector_elt in that case.
281   if (DstNumElts < 2 * SrcNumElts && DstNumElts != 1)
282     return false;
283 
284   // Check that the shuffle mask can be broken evenly between the
285   // different sources.
286   if (DstNumElts % SrcNumElts != 0)
287     return false;
288 
289   // Mask length is a multiple of the source vector length.
290   // Check if the shuffle is some kind of concatenation of the input
291   // vectors.
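  // E.g. with <2 x s32> sources, a mask of <0, 1, 2, 3> concatenates %src1
  // and %src2, <2, 3, 0, 1> concatenates them in the opposite order, and
  // <-1, -1, 0, 1> concatenates an undef vector with %src1.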
292   unsigned NumConcat = DstNumElts / SrcNumElts;
293   SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
294   ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
295   for (unsigned i = 0; i != DstNumElts; ++i) {
296     int Idx = Mask[i];
297     // Undef value.
298     if (Idx < 0)
299       continue;
300     // Ensure the indices in each SrcType sized piece are sequential and that
301     // the same source is used for the whole piece.
302     if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
303         (ConcatSrcs[i / SrcNumElts] >= 0 &&
304          ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts)))
305       return false;
306     // Remember which source this index came from.
307     ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
308   }
309 
310   // The shuffle is concatenating multiple vectors together.
311   // Collect the different operands for that.
312   Register UndefReg;
313   Register Src2 = MI.getOperand(2).getReg();
314   for (auto Src : ConcatSrcs) {
315     if (Src < 0) {
316       if (!UndefReg) {
317         Builder.setInsertPt(*MI.getParent(), MI);
318         UndefReg = Builder.buildUndef(SrcType).getReg(0);
319       }
320       Ops.push_back(UndefReg);
321     } else if (Src == 0)
322       Ops.push_back(Src1);
323     else
324       Ops.push_back(Src2);
325   }
326   return true;
327 }
328 
329 void CombinerHelper::applyCombineShuffleVector(MachineInstr &MI,
330                                                const ArrayRef<Register> Ops) {
331   Register DstReg = MI.getOperand(0).getReg();
332   Builder.setInsertPt(*MI.getParent(), MI);
333   Register NewDstReg = MRI.cloneVirtualRegister(DstReg);
334 
335   if (Ops.size() == 1)
336     Builder.buildCopy(NewDstReg, Ops[0]);
337   else
338     Builder.buildMerge(NewDstReg, Ops);
339 
340   MI.eraseFromParent();
341   replaceRegWith(MRI, DstReg, NewDstReg);
342 }
343 
344 namespace {
345 
/// Select a preference between two uses. CurrentUse is the current preference
/// while *ForCandidate are the attributes of the candidate being considered.
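/// E.g. at equal width a candidate G_SEXT use replaces a current G_ZEXT
/// preference, any defined extension replaces a G_ANYEXT preference, and
/// otherwise the wider candidate type wins.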
348 PreferredTuple ChoosePreferredUse(PreferredTuple &CurrentUse,
349                                   const LLT TyForCandidate,
350                                   unsigned OpcodeForCandidate,
351                                   MachineInstr *MIForCandidate) {
352   if (!CurrentUse.Ty.isValid()) {
353     if (CurrentUse.ExtendOpcode == OpcodeForCandidate ||
354         CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT)
355       return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
356     return CurrentUse;
357   }
358 
359   // We permit the extend to hoist through basic blocks but this is only
360   // sensible if the target has extending loads. If you end up lowering back
361   // into a load and extend during the legalizer then the end result is
362   // hoisting the extend up to the load.
363 
364   // Prefer defined extensions to undefined extensions as these are more
365   // likely to reduce the number of instructions.
366   if (OpcodeForCandidate == TargetOpcode::G_ANYEXT &&
367       CurrentUse.ExtendOpcode != TargetOpcode::G_ANYEXT)
368     return CurrentUse;
369   else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT &&
370            OpcodeForCandidate != TargetOpcode::G_ANYEXT)
371     return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
372 
  // Prefer sign extensions to zero extensions as sign-extensions tend to be
  // more expensive, so folding the sign extension into the load saves more
  // than folding a zero extension would.
375   if (CurrentUse.Ty == TyForCandidate) {
376     if (CurrentUse.ExtendOpcode == TargetOpcode::G_SEXT &&
377         OpcodeForCandidate == TargetOpcode::G_ZEXT)
378       return CurrentUse;
379     else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT &&
380              OpcodeForCandidate == TargetOpcode::G_SEXT)
381       return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
382   }
383 
384   // This is potentially target specific. We've chosen the largest type
385   // because G_TRUNC is usually free. One potential catch with this is that
386   // some targets have a reduced number of larger registers than smaller
387   // registers and this choice potentially increases the live-range for the
388   // larger value.
389   if (TyForCandidate.getSizeInBits() > CurrentUse.Ty.getSizeInBits()) {
390     return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
391   }
392   return CurrentUse;
393 }
394 
395 /// Find a suitable place to insert some instructions and insert them. This
396 /// function accounts for special cases like inserting before a PHI node.
397 /// The current strategy for inserting before PHI's is to duplicate the
398 /// instructions for each predecessor. However, while that's ok for G_TRUNC
399 /// on most targets since it generally requires no code, other targets/cases may
400 /// want to try harder to find a dominating block.
401 static void InsertInsnsWithoutSideEffectsBeforeUse(
402     MachineIRBuilder &Builder, MachineInstr &DefMI, MachineOperand &UseMO,
403     std::function<void(MachineBasicBlock *, MachineBasicBlock::iterator,
404                        MachineOperand &UseMO)>
405         Inserter) {
406   MachineInstr &UseMI = *UseMO.getParent();
407 
408   MachineBasicBlock *InsertBB = UseMI.getParent();
409 
410   // If the use is a PHI then we want the predecessor block instead.
411   if (UseMI.isPHI()) {
412     MachineOperand *PredBB = std::next(&UseMO);
413     InsertBB = PredBB->getMBB();
414   }
415 
416   // If the block is the same block as the def then we want to insert just after
417   // the def instead of at the start of the block.
418   if (InsertBB == DefMI.getParent()) {
419     MachineBasicBlock::iterator InsertPt = &DefMI;
420     Inserter(InsertBB, std::next(InsertPt), UseMO);
421     return;
422   }
423 
424   // Otherwise we want the start of the BB
425   Inserter(InsertBB, InsertBB->getFirstNonPHI(), UseMO);
426 }
427 } // end anonymous namespace
428 
429 bool CombinerHelper::tryCombineExtendingLoads(MachineInstr &MI) {
430   PreferredTuple Preferred;
431   if (matchCombineExtendingLoads(MI, Preferred)) {
432     applyCombineExtendingLoads(MI, Preferred);
433     return true;
434   }
435   return false;
436 }
437 
438 bool CombinerHelper::matchCombineExtendingLoads(MachineInstr &MI,
439                                                 PreferredTuple &Preferred) {
440   // We match the loads and follow the uses to the extend instead of matching
441   // the extends and following the def to the load. This is because the load
442   // must remain in the same position for correctness (unless we also add code
443   // to find a safe place to sink it) whereas the extend is freely movable.
444   // It also prevents us from duplicating the load for the volatile case or just
445   // for performance.
446   GAnyLoad *LoadMI = dyn_cast<GAnyLoad>(&MI);
447   if (!LoadMI)
448     return false;
449 
450   Register LoadReg = LoadMI->getDstReg();
451 
452   LLT LoadValueTy = MRI.getType(LoadReg);
453   if (!LoadValueTy.isScalar())
454     return false;
455 
  // Most architectures are going to legalize loads narrower than s8 into at
  // least a 1-byte load, and the MMOs can only describe memory accesses in
  // multiples of bytes.
458   // If we try to perform extload combining on those, we can end up with
459   // %a(s8) = extload %ptr (load 1 byte from %ptr)
460   // ... which is an illegal extload instruction.
461   if (LoadValueTy.getSizeInBits() < 8)
462     return false;
463 
464   // For non power-of-2 types, they will very likely be legalized into multiple
465   // loads. Don't bother trying to match them into extending loads.
466   if (!isPowerOf2_32(LoadValueTy.getSizeInBits()))
467     return false;
468 
469   // Find the preferred type aside from the any-extends (unless it's the only
470   // one) and non-extending ops. We'll emit an extending load to that type and
  // emit a variant of (extend (trunc X)) for the others according to the
472   // relative type sizes. At the same time, pick an extend to use based on the
473   // extend involved in the chosen type.
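  // E.g. if an s8 load is used by both a G_SEXT to s32 and a G_ZEXT to s64,
  // the chosen candidate decides whether the load becomes a G_SEXTLOAD or a
  // G_ZEXTLOAD and which width its result gets; the remaining users are then
  // rewritten in terms of that result.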
474   unsigned PreferredOpcode =
475       isa<GLoad>(&MI)
476           ? TargetOpcode::G_ANYEXT
477           : isa<GSExtLoad>(&MI) ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT;
478   Preferred = {LLT(), PreferredOpcode, nullptr};
479   for (auto &UseMI : MRI.use_nodbg_instructions(LoadReg)) {
480     if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||
481         UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
482         (UseMI.getOpcode() == TargetOpcode::G_ANYEXT)) {
483       const auto &MMO = LoadMI->getMMO();
484       // For atomics, only form anyextending loads.
485       if (MMO.isAtomic() && UseMI.getOpcode() != TargetOpcode::G_ANYEXT)
486         continue;
487       // Check for legality.
488       if (LI) {
489         LegalityQuery::MemDesc MMDesc;
490         MMDesc.MemoryTy = MMO.getMemoryType();
491         MMDesc.AlignInBits = MMO.getAlign().value() * 8;
492         MMDesc.Ordering = MMO.getSuccessOrdering();
493         LLT UseTy = MRI.getType(UseMI.getOperand(0).getReg());
494         LLT SrcTy = MRI.getType(LoadMI->getPointerReg());
495         if (LI->getAction({LoadMI->getOpcode(), {UseTy, SrcTy}, {MMDesc}})
496                 .Action != LegalizeActions::Legal)
497           continue;
498       }
499       Preferred = ChoosePreferredUse(Preferred,
500                                      MRI.getType(UseMI.getOperand(0).getReg()),
501                                      UseMI.getOpcode(), &UseMI);
502     }
503   }
504 
505   // There were no extends
506   if (!Preferred.MI)
507     return false;
  // It should be impossible to choose an extend without selecting a different
509   // type since by definition the result of an extend is larger.
510   assert(Preferred.Ty != LoadValueTy && "Extending to same type?");
511 
512   LLVM_DEBUG(dbgs() << "Preferred use is: " << *Preferred.MI);
513   return true;
514 }
515 
516 void CombinerHelper::applyCombineExtendingLoads(MachineInstr &MI,
517                                                 PreferredTuple &Preferred) {
518   // Rewrite the load to the chosen extending load.
519   Register ChosenDstReg = Preferred.MI->getOperand(0).getReg();
520 
521   // Inserter to insert a truncate back to the original type at a given point
522   // with some basic CSE to limit truncate duplication to one per BB.
523   DenseMap<MachineBasicBlock *, MachineInstr *> EmittedInsns;
524   auto InsertTruncAt = [&](MachineBasicBlock *InsertIntoBB,
525                            MachineBasicBlock::iterator InsertBefore,
526                            MachineOperand &UseMO) {
527     MachineInstr *PreviouslyEmitted = EmittedInsns.lookup(InsertIntoBB);
528     if (PreviouslyEmitted) {
529       Observer.changingInstr(*UseMO.getParent());
530       UseMO.setReg(PreviouslyEmitted->getOperand(0).getReg());
531       Observer.changedInstr(*UseMO.getParent());
532       return;
533     }
534 
535     Builder.setInsertPt(*InsertIntoBB, InsertBefore);
536     Register NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg());
537     MachineInstr *NewMI = Builder.buildTrunc(NewDstReg, ChosenDstReg);
538     EmittedInsns[InsertIntoBB] = NewMI;
539     replaceRegOpWith(MRI, UseMO, NewDstReg);
540   };
541 
542   Observer.changingInstr(MI);
543   MI.setDesc(
544       Builder.getTII().get(Preferred.ExtendOpcode == TargetOpcode::G_SEXT
545                                ? TargetOpcode::G_SEXTLOAD
546                                : Preferred.ExtendOpcode == TargetOpcode::G_ZEXT
547                                      ? TargetOpcode::G_ZEXTLOAD
548                                      : TargetOpcode::G_LOAD));
549 
550   // Rewrite all the uses to fix up the types.
551   auto &LoadValue = MI.getOperand(0);
552   SmallVector<MachineOperand *, 4> Uses;
553   for (auto &UseMO : MRI.use_operands(LoadValue.getReg()))
554     Uses.push_back(&UseMO);
555 
556   for (auto *UseMO : Uses) {
557     MachineInstr *UseMI = UseMO->getParent();
558 
559     // If the extend is compatible with the preferred extend then we should fix
560     // up the type and extend so that it uses the preferred use.
561     if (UseMI->getOpcode() == Preferred.ExtendOpcode ||
562         UseMI->getOpcode() == TargetOpcode::G_ANYEXT) {
563       Register UseDstReg = UseMI->getOperand(0).getReg();
564       MachineOperand &UseSrcMO = UseMI->getOperand(1);
565       const LLT UseDstTy = MRI.getType(UseDstReg);
566       if (UseDstReg != ChosenDstReg) {
567         if (Preferred.Ty == UseDstTy) {
568           // If the use has the same type as the preferred use, then merge
569           // the vregs and erase the extend. For example:
570           //    %1:_(s8) = G_LOAD ...
571           //    %2:_(s32) = G_SEXT %1(s8)
572           //    %3:_(s32) = G_ANYEXT %1(s8)
573           //    ... = ... %3(s32)
574           // rewrites to:
575           //    %2:_(s32) = G_SEXTLOAD ...
576           //    ... = ... %2(s32)
577           replaceRegWith(MRI, UseDstReg, ChosenDstReg);
578           Observer.erasingInstr(*UseMO->getParent());
579           UseMO->getParent()->eraseFromParent();
580         } else if (Preferred.Ty.getSizeInBits() < UseDstTy.getSizeInBits()) {
581           // If the preferred size is smaller, then keep the extend but extend
582           // from the result of the extending load. For example:
583           //    %1:_(s8) = G_LOAD ...
584           //    %2:_(s32) = G_SEXT %1(s8)
585           //    %3:_(s64) = G_ANYEXT %1(s8)
586           //    ... = ... %3(s64)
          // rewrites to:
588           //    %2:_(s32) = G_SEXTLOAD ...
589           //    %3:_(s64) = G_ANYEXT %2:_(s32)
590           //    ... = ... %3(s64)
591           replaceRegOpWith(MRI, UseSrcMO, ChosenDstReg);
592         } else {
          // If the preferred size is larger, then insert a truncate. For
          // example:
          //    %1:_(s8) = G_LOAD ...
          //    %2:_(s64) = G_SEXT %1(s8)
          //    %3:_(s32) = G_ZEXT %1(s8)
          //    ... = ... %3(s32)
          // rewrites to:
          //    %2:_(s64) = G_SEXTLOAD ...
          //    %4:_(s8) = G_TRUNC %2:_(s64)
          //    %3:_(s32) = G_ZEXT %4:_(s8)
          //    ... = ... %3(s32)
604           InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO,
605                                                  InsertTruncAt);
606         }
607         continue;
608       }
      // This use is the preferred extend we chose earlier. We're going to
      // update the load to def this value later so just erase the old extend.
612       Observer.erasingInstr(*UseMO->getParent());
613       UseMO->getParent()->eraseFromParent();
614       continue;
615     }
616 
617     // The use isn't an extend. Truncate back to the type we originally loaded.
618     // This is free on many targets.
619     InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO, InsertTruncAt);
620   }
621 
622   MI.getOperand(0).setReg(ChosenDstReg);
623   Observer.changedInstr(MI);
624 }
625 
626 bool CombinerHelper::isPredecessor(const MachineInstr &DefMI,
627                                    const MachineInstr &UseMI) {
628   assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
629          "shouldn't consider debug uses");
630   assert(DefMI.getParent() == UseMI.getParent());
631   if (&DefMI == &UseMI)
632     return false;
633   const MachineBasicBlock &MBB = *DefMI.getParent();
634   auto DefOrUse = find_if(MBB, [&DefMI, &UseMI](const MachineInstr &MI) {
635     return &MI == &DefMI || &MI == &UseMI;
636   });
637   if (DefOrUse == MBB.end())
638     llvm_unreachable("Block must contain both DefMI and UseMI!");
639   return &*DefOrUse == &DefMI;
640 }
641 
642 bool CombinerHelper::dominates(const MachineInstr &DefMI,
643                                const MachineInstr &UseMI) {
644   assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
645          "shouldn't consider debug uses");
646   if (MDT)
647     return MDT->dominates(&DefMI, &UseMI);
648   else if (DefMI.getParent() != UseMI.getParent())
649     return false;
650 
651   return isPredecessor(DefMI, UseMI);
652 }
653 
654 bool CombinerHelper::matchSextTruncSextLoad(MachineInstr &MI) {
655   assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
656   Register SrcReg = MI.getOperand(1).getReg();
657   Register LoadUser = SrcReg;
658 
659   if (MRI.getType(SrcReg).isVector())
660     return false;
661 
662   Register TruncSrc;
663   if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc))))
664     LoadUser = TruncSrc;
665 
666   uint64_t SizeInBits = MI.getOperand(2).getImm();
667   // If the source is a G_SEXTLOAD from the same bit width, then we don't
668   // need any extend at all, just a truncate.
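  // E.g. (illustrative):
  //   %ld:_(s32) = G_SEXTLOAD %ptr (load 2)
  //   %ext:_(s32) = G_SEXT_INREG %ld, 16
  // Here the G_SEXT_INREG is redundant, and the apply step replaces it with a
  // plain copy of %ld.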
669   if (auto *LoadMI = getOpcodeDef<GSExtLoad>(LoadUser, MRI)) {
670     // If truncating more than the original extended value, abort.
671     auto LoadSizeBits = LoadMI->getMemSizeInBits();
672     if (TruncSrc && MRI.getType(TruncSrc).getSizeInBits() < LoadSizeBits)
673       return false;
674     if (LoadSizeBits == SizeInBits)
675       return true;
676   }
677   return false;
678 }
679 
680 void CombinerHelper::applySextTruncSextLoad(MachineInstr &MI) {
681   assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
682   Builder.setInstrAndDebugLoc(MI);
683   Builder.buildCopy(MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
684   MI.eraseFromParent();
685 }
686 
687 bool CombinerHelper::matchSextInRegOfLoad(
688     MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
689   assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
690 
691   // Only supports scalars for now.
692   if (MRI.getType(MI.getOperand(0).getReg()).isVector())
693     return false;
694 
695   Register SrcReg = MI.getOperand(1).getReg();
696   auto *LoadDef = getOpcodeDef<GLoad>(SrcReg, MRI);
697   if (!LoadDef || !MRI.hasOneNonDBGUse(LoadDef->getOperand(0).getReg()) ||
698       !LoadDef->isSimple())
699     return false;
700 
701   // If the sign extend extends from a narrower width than the load's width,
702   // then we can narrow the load width when we combine to a G_SEXTLOAD.
703   // Avoid widening the load at all.
704   unsigned NewSizeBits = std::min((uint64_t)MI.getOperand(2).getImm(),
705                                   LoadDef->getMemSizeInBits());
706 
707   // Don't generate G_SEXTLOADs with a < 1 byte width.
708   if (NewSizeBits < 8)
709     return false;
  // Don't bother creating a non-power-of-2 sextload; it will likely be broken
  // up anyway for most targets.
712   if (!isPowerOf2_32(NewSizeBits))
713     return false;
714   MatchInfo = std::make_tuple(LoadDef->getDstReg(), NewSizeBits);
715   return true;
716 }
717 
718 void CombinerHelper::applySextInRegOfLoad(
719     MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
720   assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
721   Register LoadReg;
722   unsigned ScalarSizeBits;
723   std::tie(LoadReg, ScalarSizeBits) = MatchInfo;
724   GLoad *LoadDef = cast<GLoad>(MRI.getVRegDef(LoadReg));
725 
726   // If we have the following:
727   // %ld = G_LOAD %ptr, (load 2)
728   // %ext = G_SEXT_INREG %ld, 8
729   //    ==>
730   // %ld = G_SEXTLOAD %ptr (load 1)
731 
732   auto &MMO = LoadDef->getMMO();
733   Builder.setInstrAndDebugLoc(*LoadDef);
734   auto &MF = Builder.getMF();
735   auto PtrInfo = MMO.getPointerInfo();
736   auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, ScalarSizeBits / 8);
737   Builder.buildLoadInstr(TargetOpcode::G_SEXTLOAD, MI.getOperand(0).getReg(),
738                          LoadDef->getPointerReg(), *NewMMO);
739   MI.eraseFromParent();
740 }
741 
742 bool CombinerHelper::findPostIndexCandidate(MachineInstr &MI, Register &Addr,
743                                             Register &Base, Register &Offset) {
744   auto &MF = *MI.getParent()->getParent();
745   const auto &TLI = *MF.getSubtarget().getTargetLowering();
746 
747 #ifndef NDEBUG
748   unsigned Opcode = MI.getOpcode();
749   assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
750          Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
751 #endif
752 
753   Base = MI.getOperand(1).getReg();
754   MachineInstr *BaseDef = MRI.getUniqueVRegDef(Base);
755   if (BaseDef && BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
756     return false;
757 
758   LLVM_DEBUG(dbgs() << "Searching for post-indexing opportunity for: " << MI);
  // FIXME: The following use traversal needs a bail out for pathological cases.
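  // We are looking for a pattern like (a sketch):
  //   %val:_(s32) = G_LOAD %base          ; MI
  //   %addr:_(p0) = G_PTR_ADD %base, %offset
  // where the G_PTR_ADD can be folded into a post-indexed operation that also
  // produces %addr.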
760   for (auto &Use : MRI.use_nodbg_instructions(Base)) {
761     if (Use.getOpcode() != TargetOpcode::G_PTR_ADD)
762       continue;
763 
764     Offset = Use.getOperand(2).getReg();
765     if (!ForceLegalIndexing &&
766         !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ false, MRI)) {
767       LLVM_DEBUG(dbgs() << "    Ignoring candidate with illegal addrmode: "
768                         << Use);
769       continue;
770     }
771 
772     // Make sure the offset calculation is before the potentially indexed op.
773     // FIXME: we really care about dependency here. The offset calculation might
774     // be movable.
775     MachineInstr *OffsetDef = MRI.getUniqueVRegDef(Offset);
776     if (!OffsetDef || !dominates(*OffsetDef, MI)) {
777       LLVM_DEBUG(dbgs() << "    Ignoring candidate with offset after mem-op: "
778                         << Use);
779       continue;
780     }
781 
782     // FIXME: check whether all uses of Base are load/store with foldable
783     // addressing modes. If so, using the normal addr-modes is better than
784     // forming an indexed one.
785 
786     bool MemOpDominatesAddrUses = true;
787     for (auto &PtrAddUse :
788          MRI.use_nodbg_instructions(Use.getOperand(0).getReg())) {
789       if (!dominates(MI, PtrAddUse)) {
790         MemOpDominatesAddrUses = false;
791         break;
792       }
793     }
794 
795     if (!MemOpDominatesAddrUses) {
796       LLVM_DEBUG(
797           dbgs() << "    Ignoring candidate as memop does not dominate uses: "
798                  << Use);
799       continue;
800     }
801 
802     LLVM_DEBUG(dbgs() << "    Found match: " << Use);
803     Addr = Use.getOperand(0).getReg();
804     return true;
805   }
806 
807   return false;
808 }
809 
810 bool CombinerHelper::findPreIndexCandidate(MachineInstr &MI, Register &Addr,
811                                            Register &Base, Register &Offset) {
812   auto &MF = *MI.getParent()->getParent();
813   const auto &TLI = *MF.getSubtarget().getTargetLowering();
814 
815 #ifndef NDEBUG
816   unsigned Opcode = MI.getOpcode();
817   assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
818          Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
819 #endif
820 
821   Addr = MI.getOperand(1).getReg();
822   MachineInstr *AddrDef = getOpcodeDef(TargetOpcode::G_PTR_ADD, Addr, MRI);
823   if (!AddrDef || MRI.hasOneNonDBGUse(Addr))
824     return false;
825 
826   Base = AddrDef->getOperand(1).getReg();
827   Offset = AddrDef->getOperand(2).getReg();
828 
829   LLVM_DEBUG(dbgs() << "Found potential pre-indexed load_store: " << MI);
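  // The candidate pattern is (a sketch):
  //   %addr:_(p0) = G_PTR_ADD %base, %offset
  //   %val:_(s32) = G_LOAD %addr          ; MI
  // which can become a pre-indexed operation that computes %addr and accesses
  // memory through it in one go, provided the checks below pass.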
830 
831   if (!ForceLegalIndexing &&
832       !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ true, MRI)) {
833     LLVM_DEBUG(dbgs() << "    Skipping, not legal for target");
834     return false;
835   }
836 
837   MachineInstr *BaseDef = getDefIgnoringCopies(Base, MRI);
838   if (BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
839     LLVM_DEBUG(dbgs() << "    Skipping, frame index would need copy anyway.");
840     return false;
841   }
842 
843   if (MI.getOpcode() == TargetOpcode::G_STORE) {
844     // Would require a copy.
845     if (Base == MI.getOperand(0).getReg()) {
846       LLVM_DEBUG(dbgs() << "    Skipping, storing base so need copy anyway.");
847       return false;
848     }
849 
850     // We're expecting one use of Addr in MI, but it could also be the
851     // value stored, which isn't actually dominated by the instruction.
852     if (MI.getOperand(0).getReg() == Addr) {
853       LLVM_DEBUG(dbgs() << "    Skipping, does not dominate all addr uses");
854       return false;
855     }
856   }
857 
858   // FIXME: check whether all uses of the base pointer are constant PtrAdds.
859   // That might allow us to end base's liveness here by adjusting the constant.
860 
861   for (auto &UseMI : MRI.use_nodbg_instructions(Addr)) {
862     if (!dominates(MI, UseMI)) {
863       LLVM_DEBUG(dbgs() << "    Skipping, does not dominate all addr uses.");
864       return false;
865     }
866   }
867 
868   return true;
869 }
870 
871 bool CombinerHelper::tryCombineIndexedLoadStore(MachineInstr &MI) {
872   IndexedLoadStoreMatchInfo MatchInfo;
873   if (matchCombineIndexedLoadStore(MI, MatchInfo)) {
874     applyCombineIndexedLoadStore(MI, MatchInfo);
875     return true;
876   }
877   return false;
878 }
879 
bool CombinerHelper::matchCombineIndexedLoadStore(
    MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) {
881   unsigned Opcode = MI.getOpcode();
882   if (Opcode != TargetOpcode::G_LOAD && Opcode != TargetOpcode::G_SEXTLOAD &&
883       Opcode != TargetOpcode::G_ZEXTLOAD && Opcode != TargetOpcode::G_STORE)
884     return false;
885 
886   // For now, no targets actually support these opcodes so don't waste time
887   // running these unless we're forced to for testing.
888   if (!ForceLegalIndexing)
889     return false;
890 
891   MatchInfo.IsPre = findPreIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base,
892                                           MatchInfo.Offset);
893   if (!MatchInfo.IsPre &&
894       !findPostIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base,
895                               MatchInfo.Offset))
896     return false;
897 
898   return true;
899 }
900 
901 void CombinerHelper::applyCombineIndexedLoadStore(
902     MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) {
903   MachineInstr &AddrDef = *MRI.getUniqueVRegDef(MatchInfo.Addr);
904   MachineIRBuilder MIRBuilder(MI);
905   unsigned Opcode = MI.getOpcode();
906   bool IsStore = Opcode == TargetOpcode::G_STORE;
907   unsigned NewOpcode;
908   switch (Opcode) {
909   case TargetOpcode::G_LOAD:
910     NewOpcode = TargetOpcode::G_INDEXED_LOAD;
911     break;
912   case TargetOpcode::G_SEXTLOAD:
913     NewOpcode = TargetOpcode::G_INDEXED_SEXTLOAD;
914     break;
915   case TargetOpcode::G_ZEXTLOAD:
916     NewOpcode = TargetOpcode::G_INDEXED_ZEXTLOAD;
917     break;
918   case TargetOpcode::G_STORE:
919     NewOpcode = TargetOpcode::G_INDEXED_STORE;
920     break;
921   default:
922     llvm_unreachable("Unknown load/store opcode");
923   }
924 
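  // The indexed forms take the base and offset and additionally define the
  // written-back address, e.g. for a pre-indexed load (illustrative):
  //   %val:_(s32), %writeback:_(p0) = G_INDEXED_LOAD %base, %offset, 1
  // where the trailing immediate is 1 for pre-indexing and 0 for
  // post-indexing; stores define only the written-back address and take the
  // stored value as an extra use.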
925   auto MIB = MIRBuilder.buildInstr(NewOpcode);
926   if (IsStore) {
927     MIB.addDef(MatchInfo.Addr);
928     MIB.addUse(MI.getOperand(0).getReg());
929   } else {
930     MIB.addDef(MI.getOperand(0).getReg());
931     MIB.addDef(MatchInfo.Addr);
932   }
933 
934   MIB.addUse(MatchInfo.Base);
935   MIB.addUse(MatchInfo.Offset);
936   MIB.addImm(MatchInfo.IsPre);
937   MI.eraseFromParent();
938   AddrDef.eraseFromParent();
939 
  LLVM_DEBUG(dbgs() << "    Combined to indexed operation");
941 }
942 
943 bool CombinerHelper::matchCombineDivRem(MachineInstr &MI,
944                                         MachineInstr *&OtherMI) {
945   unsigned Opcode = MI.getOpcode();
946   bool IsDiv, IsSigned;
947 
948   switch (Opcode) {
949   default:
950     llvm_unreachable("Unexpected opcode!");
951   case TargetOpcode::G_SDIV:
952   case TargetOpcode::G_UDIV: {
953     IsDiv = true;
954     IsSigned = Opcode == TargetOpcode::G_SDIV;
955     break;
956   }
957   case TargetOpcode::G_SREM:
958   case TargetOpcode::G_UREM: {
959     IsDiv = false;
960     IsSigned = Opcode == TargetOpcode::G_SREM;
961     break;
962   }
963   }
964 
965   Register Src1 = MI.getOperand(1).getReg();
966   unsigned DivOpcode, RemOpcode, DivremOpcode;
967   if (IsSigned) {
968     DivOpcode = TargetOpcode::G_SDIV;
969     RemOpcode = TargetOpcode::G_SREM;
970     DivremOpcode = TargetOpcode::G_SDIVREM;
971   } else {
972     DivOpcode = TargetOpcode::G_UDIV;
973     RemOpcode = TargetOpcode::G_UREM;
974     DivremOpcode = TargetOpcode::G_UDIVREM;
975   }
976 
977   if (!isLegalOrBeforeLegalizer({DivremOpcode, {MRI.getType(Src1)}}))
978     return false;
979 
980   // Combine:
981   //   %div:_ = G_[SU]DIV %src1:_, %src2:_
982   //   %rem:_ = G_[SU]REM %src1:_, %src2:_
983   // into:
984   //  %div:_, %rem:_ = G_[SU]DIVREM %src1:_, %src2:_
985 
986   // Combine:
987   //   %rem:_ = G_[SU]REM %src1:_, %src2:_
988   //   %div:_ = G_[SU]DIV %src1:_, %src2:_
989   // into:
990   //  %div:_, %rem:_ = G_[SU]DIVREM %src1:_, %src2:_
991 
992   for (auto &UseMI : MRI.use_nodbg_instructions(Src1)) {
993     if (MI.getParent() == UseMI.getParent() &&
994         ((IsDiv && UseMI.getOpcode() == RemOpcode) ||
995          (!IsDiv && UseMI.getOpcode() == DivOpcode)) &&
996         matchEqualDefs(MI.getOperand(2), UseMI.getOperand(2))) {
997       OtherMI = &UseMI;
998       return true;
999     }
1000   }
1001 
1002   return false;
1003 }
1004 
1005 void CombinerHelper::applyCombineDivRem(MachineInstr &MI,
1006                                         MachineInstr *&OtherMI) {
1007   unsigned Opcode = MI.getOpcode();
  assert(OtherMI && "OtherMI shouldn't be null.");
1009 
1010   Register DestDivReg, DestRemReg;
1011   if (Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_UDIV) {
1012     DestDivReg = MI.getOperand(0).getReg();
1013     DestRemReg = OtherMI->getOperand(0).getReg();
1014   } else {
1015     DestDivReg = OtherMI->getOperand(0).getReg();
1016     DestRemReg = MI.getOperand(0).getReg();
1017   }
1018 
1019   bool IsSigned =
1020       Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_SREM;
1021 
1022   // Check which instruction is first in the block so we don't break def-use
1023   // deps by "moving" the instruction incorrectly.
1024   if (dominates(MI, *OtherMI))
1025     Builder.setInstrAndDebugLoc(MI);
1026   else
1027     Builder.setInstrAndDebugLoc(*OtherMI);
1028 
1029   Builder.buildInstr(IsSigned ? TargetOpcode::G_SDIVREM
1030                               : TargetOpcode::G_UDIVREM,
1031                      {DestDivReg, DestRemReg},
1032                      {MI.getOperand(1).getReg(), MI.getOperand(2).getReg()});
1033   MI.eraseFromParent();
1034   OtherMI->eraseFromParent();
1035 }
1036 
1037 bool CombinerHelper::matchOptBrCondByInvertingCond(MachineInstr &MI,
1038                                                    MachineInstr *&BrCond) {
1039   assert(MI.getOpcode() == TargetOpcode::G_BR);
1040 
1041   // Try to match the following:
1042   // bb1:
1043   //   G_BRCOND %c1, %bb2
1044   //   G_BR %bb3
1045   // bb2:
1046   // ...
1047   // bb3:
1048 
1049   // The above pattern does not have a fall through to the successor bb2, always
1050   // resulting in a branch no matter which path is taken. Here we try to find
  // and replace that pattern with a conditional branch to bb3, falling
  // through to bb2 otherwise. This is generally better for branch predictors.
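  // I.e. the rewritten form is roughly:
  // bb1:
  //   %true:_(s1) = G_CONSTANT i1 1   ; the target's ICmp "true" value
  //   %inv:_(s1) = G_XOR %c1, %true
  //   G_BRCOND %inv, %bb3
  //   G_BR %bb2
  // where the unconditional branch to the layout successor bb2 can later be
  // turned into a fallthrough.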
1053 
1054   MachineBasicBlock *MBB = MI.getParent();
1055   MachineBasicBlock::iterator BrIt(MI);
1056   if (BrIt == MBB->begin())
1057     return false;
1058   assert(std::next(BrIt) == MBB->end() && "expected G_BR to be a terminator");
1059 
1060   BrCond = &*std::prev(BrIt);
1061   if (BrCond->getOpcode() != TargetOpcode::G_BRCOND)
1062     return false;
1063 
1064   // Check that the next block is the conditional branch target. Also make sure
1065   // that it isn't the same as the G_BR's target (otherwise, this will loop.)
1066   MachineBasicBlock *BrCondTarget = BrCond->getOperand(1).getMBB();
1067   return BrCondTarget != MI.getOperand(0).getMBB() &&
1068          MBB->isLayoutSuccessor(BrCondTarget);
1069 }
1070 
1071 void CombinerHelper::applyOptBrCondByInvertingCond(MachineInstr &MI,
1072                                                    MachineInstr *&BrCond) {
1073   MachineBasicBlock *BrTarget = MI.getOperand(0).getMBB();
1074   Builder.setInstrAndDebugLoc(*BrCond);
1075   LLT Ty = MRI.getType(BrCond->getOperand(0).getReg());
1076   // FIXME: Does int/fp matter for this? If so, we might need to restrict
1077   // this to i1 only since we might not know for sure what kind of
1078   // compare generated the condition value.
1079   auto True = Builder.buildConstant(
1080       Ty, getICmpTrueVal(getTargetLowering(), false, false));
1081   auto Xor = Builder.buildXor(Ty, BrCond->getOperand(0), True);
1082 
1083   auto *FallthroughBB = BrCond->getOperand(1).getMBB();
1084   Observer.changingInstr(MI);
1085   MI.getOperand(0).setMBB(FallthroughBB);
1086   Observer.changedInstr(MI);
1087 
1088   // Change the conditional branch to use the inverted condition and
1089   // new target block.
1090   Observer.changingInstr(*BrCond);
1091   BrCond->getOperand(0).setReg(Xor.getReg(0));
1092   BrCond->getOperand(1).setMBB(BrTarget);
1093   Observer.changedInstr(*BrCond);
1094 }
1095 
1096 static bool shouldLowerMemFuncForSize(const MachineFunction &MF) {
1097   // On Darwin, -Os means optimize for size without hurting performance, so
1098   // only really optimize for size when -Oz (MinSize) is used.
1099   if (MF.getTarget().getTargetTriple().isOSDarwin())
1100     return MF.getFunction().hasMinSize();
1101   return MF.getFunction().hasOptSize();
1102 }
1103 
1104 // Returns a list of types to use for memory op lowering in MemOps. A partial
1105 // port of findOptimalMemOpLowering in TargetLowering.
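// E.g. a 13-byte fixed-size operation whose preferred type is s64 might come
// out as { s64, s32, s8 } (8 + 4 + 1 bytes), or the tail might instead be
// covered by a wider, overlapping unaligned access if the target reports that
// as fast.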
1106 static bool findGISelOptimalMemOpLowering(std::vector<LLT> &MemOps,
1107                                           unsigned Limit, const MemOp &Op,
1108                                           unsigned DstAS, unsigned SrcAS,
1109                                           const AttributeList &FuncAttributes,
1110                                           const TargetLowering &TLI) {
1111   if (Op.isMemcpyWithFixedDstAlign() && Op.getSrcAlign() < Op.getDstAlign())
1112     return false;
1113 
1114   LLT Ty = TLI.getOptimalMemOpLLT(Op, FuncAttributes);
1115 
1116   if (Ty == LLT()) {
1117     // Use the largest scalar type whose alignment constraints are satisfied.
1118     // We only need to check DstAlign here as SrcAlign is always greater or
1119     // equal to DstAlign (or zero).
1120     Ty = LLT::scalar(64);
1121     if (Op.isFixedDstAlign())
1122       while (Op.getDstAlign() < Ty.getSizeInBytes() &&
1123              !TLI.allowsMisalignedMemoryAccesses(Ty, DstAS, Op.getDstAlign()))
1124         Ty = LLT::scalar(Ty.getSizeInBytes());
1125     assert(Ty.getSizeInBits() > 0 && "Could not find valid type");
1126     // FIXME: check for the largest legal type we can load/store to.
1127   }
1128 
1129   unsigned NumMemOps = 0;
1130   uint64_t Size = Op.size();
1131   while (Size) {
1132     unsigned TySize = Ty.getSizeInBytes();
1133     while (TySize > Size) {
      // For now, only use non-vector loads / stores for the left-over pieces.
1135       LLT NewTy = Ty;
1136       // FIXME: check for mem op safety and legality of the types. Not all of
1137       // SDAGisms map cleanly to GISel concepts.
1138       if (NewTy.isVector())
1139         NewTy = NewTy.getSizeInBits() > 64 ? LLT::scalar(64) : LLT::scalar(32);
1140       NewTy = LLT::scalar(PowerOf2Floor(NewTy.getSizeInBits() - 1));
1141       unsigned NewTySize = NewTy.getSizeInBytes();
1142       assert(NewTySize > 0 && "Could not find appropriate type");
1143 
1144       // If the new LLT cannot cover all of the remaining bits, then consider
1145       // issuing a (or a pair of) unaligned and overlapping load / store.
1146       bool Fast;
      // Need to get a VT equivalent for allowsMisalignedMemoryAccesses().
1148       MVT VT = getMVTForLLT(Ty);
1149       if (NumMemOps && Op.allowOverlap() && NewTySize < Size &&
1150           TLI.allowsMisalignedMemoryAccesses(
1151               VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign() : Align(1),
1152               MachineMemOperand::MONone, &Fast) &&
1153           Fast)
1154         TySize = Size;
1155       else {
1156         Ty = NewTy;
1157         TySize = NewTySize;
1158       }
1159     }
1160 
1161     if (++NumMemOps > Limit)
1162       return false;
1163 
1164     MemOps.push_back(Ty);
1165     Size -= TySize;
1166   }
1167 
1168   return true;
1169 }
1170 
1171 static Type *getTypeForLLT(LLT Ty, LLVMContext &C) {
1172   if (Ty.isVector())
1173     return FixedVectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
1174                                 Ty.getNumElements());
1175   return IntegerType::get(C, Ty.getSizeInBits());
1176 }
1177 
1178 // Get a vectorized representation of the memset value operand, GISel edition.
1179 static Register getMemsetValue(Register Val, LLT Ty, MachineIRBuilder &MIB) {
1180   MachineRegisterInfo &MRI = *MIB.getMRI();
1181   unsigned NumBits = Ty.getScalarSizeInBits();
1182   auto ValVRegAndVal = getConstantVRegValWithLookThrough(Val, MRI);
1183   if (!Ty.isVector() && ValVRegAndVal) {
1184     APInt Scalar = ValVRegAndVal->Value.truncOrSelf(8);
1185     APInt SplatVal = APInt::getSplat(NumBits, Scalar);
1186     return MIB.buildConstant(Ty, SplatVal).getReg(0);
1187   }
1188 
1189   // Extend the byte value to the larger type, and then multiply by a magic
1190   // value 0x010101... in order to replicate it across every byte.
1191   // Unless it's zero, in which case just emit a larger G_CONSTANT 0.
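  // E.g. an s32 memset with a non-constant byte value %v is built roughly as:
  //   %ext:_(s32) = G_ZEXT %v(s8)
  //   %magic:_(s32) = G_CONSTANT i32 0x01010101
  //   %splat:_(s32) = G_MUL %ext, %magic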
1192   if (ValVRegAndVal && ValVRegAndVal->Value == 0) {
1193     return MIB.buildConstant(Ty, 0).getReg(0);
1194   }
1195 
1196   LLT ExtType = Ty.getScalarType();
1197   auto ZExt = MIB.buildZExtOrTrunc(ExtType, Val);
1198   if (NumBits > 8) {
1199     APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
1200     auto MagicMI = MIB.buildConstant(ExtType, Magic);
1201     Val = MIB.buildMul(ExtType, ZExt, MagicMI).getReg(0);
1202   }
1203 
1204   // For vector types create a G_BUILD_VECTOR.
1205   if (Ty.isVector())
1206     Val = MIB.buildSplatVector(Ty, Val).getReg(0);
1207 
1208   return Val;
1209 }
1210 
1211 bool CombinerHelper::optimizeMemset(MachineInstr &MI, Register Dst,
1212                                     Register Val, uint64_t KnownLen,
1213                                     Align Alignment, bool IsVolatile) {
1214   auto &MF = *MI.getParent()->getParent();
1215   const auto &TLI = *MF.getSubtarget().getTargetLowering();
1216   auto &DL = MF.getDataLayout();
1217   LLVMContext &C = MF.getFunction().getContext();
1218 
  assert(KnownLen != 0 && "Have a zero length memset!");
1220 
1221   bool DstAlignCanChange = false;
1222   MachineFrameInfo &MFI = MF.getFrameInfo();
1223   bool OptSize = shouldLowerMemFuncForSize(MF);
1224 
1225   MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
1226   if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
1227     DstAlignCanChange = true;
1228 
1229   unsigned Limit = TLI.getMaxStoresPerMemset(OptSize);
1230   std::vector<LLT> MemOps;
1231 
1232   const auto &DstMMO = **MI.memoperands_begin();
1233   MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();
1234 
1235   auto ValVRegAndVal = getConstantVRegValWithLookThrough(Val, MRI);
1236   bool IsZeroVal = ValVRegAndVal && ValVRegAndVal->Value == 0;
1237 
1238   if (!findGISelOptimalMemOpLowering(MemOps, Limit,
1239                                      MemOp::Set(KnownLen, DstAlignCanChange,
1240                                                 Alignment,
1241                                                 /*IsZeroMemset=*/IsZeroVal,
1242                                                 /*IsVolatile=*/IsVolatile),
1243                                      DstPtrInfo.getAddrSpace(), ~0u,
1244                                      MF.getFunction().getAttributes(), TLI))
1245     return false;
1246 
1247   if (DstAlignCanChange) {
1248     // Get an estimate of the type from the LLT.
1249     Type *IRTy = getTypeForLLT(MemOps[0], C);
1250     Align NewAlign = DL.getABITypeAlign(IRTy);
1251     if (NewAlign > Alignment) {
1252       Alignment = NewAlign;
1253       unsigned FI = FIDef->getOperand(1).getIndex();
1254       // Give the stack frame object a larger alignment if needed.
1255       if (MFI.getObjectAlign(FI) < Alignment)
1256         MFI.setObjectAlignment(FI, Alignment);
1257     }
1258   }
1259 
1260   MachineIRBuilder MIB(MI);
1261   // Find the largest store and generate the bit pattern for it.
1262   LLT LargestTy = MemOps[0];
1263   for (unsigned i = 1; i < MemOps.size(); i++)
1264     if (MemOps[i].getSizeInBits() > LargestTy.getSizeInBits())
1265       LargestTy = MemOps[i];
1266 
1267   // The memset stored value is always defined as an s8, so in order to make it
1268   // work with larger store types we need to repeat the bit pattern across the
1269   // wider type.
1270   Register MemSetValue = getMemsetValue(Val, LargestTy, MIB);
1271 
1272   if (!MemSetValue)
1273     return false;
1274 
1275   // Generate the stores. For each store type in the list, we generate the
1276   // matching store of that type to the destination address.
1277   LLT PtrTy = MRI.getType(Dst);
1278   unsigned DstOff = 0;
1279   unsigned Size = KnownLen;
1280   for (unsigned I = 0; I < MemOps.size(); I++) {
1281     LLT Ty = MemOps[I];
1282     unsigned TySize = Ty.getSizeInBytes();
1283     if (TySize > Size) {
1284       // Issuing an unaligned load / store pair that overlaps with the previous
1285       // pair. Adjust the offset accordingly.
1286       assert(I == MemOps.size() - 1 && I != 0);
1287       DstOff -= TySize - Size;
1288     }
1289 
    // If this store is smaller than the largest store, see whether we can get
1291     // the smaller value for free with a truncate.
1292     Register Value = MemSetValue;
1293     if (Ty.getSizeInBits() < LargestTy.getSizeInBits()) {
1294       MVT VT = getMVTForLLT(Ty);
1295       MVT LargestVT = getMVTForLLT(LargestTy);
1296       if (!LargestTy.isVector() && !Ty.isVector() &&
1297           TLI.isTruncateFree(LargestVT, VT))
1298         Value = MIB.buildTrunc(Ty, MemSetValue).getReg(0);
1299       else
1300         Value = getMemsetValue(Val, Ty, MIB);
1301       if (!Value)
1302         return false;
1303     }
1304 
1305     auto *StoreMMO =
1306         MF.getMachineMemOperand(&DstMMO, DstOff, Ty);
1307 
1308     Register Ptr = Dst;
1309     if (DstOff != 0) {
1310       auto Offset =
1311           MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), DstOff);
1312       Ptr = MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0);
1313     }
1314 
1315     MIB.buildStore(Value, Ptr, *StoreMMO);
1316     DstOff += Ty.getSizeInBytes();
1317     Size -= TySize;
1318   }
1319 
1320   MI.eraseFromParent();
1321   return true;
1322 }
1323 
1324 bool CombinerHelper::tryEmitMemcpyInline(MachineInstr &MI) {
1325   assert(MI.getOpcode() == TargetOpcode::G_MEMCPY_INLINE);
1326 
1327   Register Dst = MI.getOperand(0).getReg();
1328   Register Src = MI.getOperand(1).getReg();
1329   Register Len = MI.getOperand(2).getReg();
1330 
1331   const auto *MMOIt = MI.memoperands_begin();
1332   const MachineMemOperand *MemOp = *MMOIt;
1333   bool IsVolatile = MemOp->isVolatile();
1334 
1335   // See if this is a constant length copy
1336   auto LenVRegAndVal = getConstantVRegValWithLookThrough(Len, MRI);
1337   // FIXME: support dynamically sized G_MEMCPY_INLINE
1338   assert(LenVRegAndVal.hasValue() &&
1339          "inline memcpy with dynamic size is not yet supported");
1340   uint64_t KnownLen = LenVRegAndVal->Value.getZExtValue();
1341   if (KnownLen == 0) {
1342     MI.eraseFromParent();
1343     return true;
1344   }
1345 
1346   const auto &DstMMO = **MI.memoperands_begin();
1347   const auto &SrcMMO = **std::next(MI.memoperands_begin());
1348   Align DstAlign = DstMMO.getBaseAlign();
1349   Align SrcAlign = SrcMMO.getBaseAlign();
1350 
1351   return tryEmitMemcpyInline(MI, Dst, Src, KnownLen, DstAlign, SrcAlign,
1352                              IsVolatile);
1353 }
1354 
1355 bool CombinerHelper::tryEmitMemcpyInline(MachineInstr &MI, Register Dst,
1356                                          Register Src, uint64_t KnownLen,
1357                                          Align DstAlign, Align SrcAlign,
1358                                          bool IsVolatile) {
1359   assert(MI.getOpcode() == TargetOpcode::G_MEMCPY_INLINE);
1360   return optimizeMemcpy(MI, Dst, Src, KnownLen,
1361                         std::numeric_limits<uint64_t>::max(), DstAlign,
1362                         SrcAlign, IsVolatile);
1363 }
1364 
1365 bool CombinerHelper::optimizeMemcpy(MachineInstr &MI, Register Dst,
1366                                     Register Src, uint64_t KnownLen,
1367                                     uint64_t Limit, Align DstAlign,
1368                                     Align SrcAlign, bool IsVolatile) {
1369   auto &MF = *MI.getParent()->getParent();
1370   const auto &TLI = *MF.getSubtarget().getTargetLowering();
1371   auto &DL = MF.getDataLayout();
1372   LLVMContext &C = MF.getFunction().getContext();
1373 
1374   assert(KnownLen != 0 && "Have a zero length memcpy length!");
1375 
1376   bool DstAlignCanChange = false;
1377   MachineFrameInfo &MFI = MF.getFrameInfo();
1378   Align Alignment = commonAlignment(DstAlign, SrcAlign);
1379 
1380   MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
1381   if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
1382     DstAlignCanChange = true;
1383 
1384   // FIXME: infer better src pointer alignment like SelectionDAG does here.
1385   // FIXME: also use the equivalent of isMemSrcFromConstant and alwaysinlining
1386   // if the memcpy is in a tail call position.
1387 
1388   std::vector<LLT> MemOps;
1389 
1390   const auto &DstMMO = **MI.memoperands_begin();
1391   const auto &SrcMMO = **std::next(MI.memoperands_begin());
1392   MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();
1393   MachinePointerInfo SrcPtrInfo = SrcMMO.getPointerInfo();
1394 
1395   if (!findGISelOptimalMemOpLowering(
1396           MemOps, Limit,
1397           MemOp::Copy(KnownLen, DstAlignCanChange, Alignment, SrcAlign,
1398                       IsVolatile),
1399           DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
1400           MF.getFunction().getAttributes(), TLI))
1401     return false;
1402 
1403   if (DstAlignCanChange) {
1404     // Get an estimate of the type from the LLT.
1405     Type *IRTy = getTypeForLLT(MemOps[0], C);
1406     Align NewAlign = DL.getABITypeAlign(IRTy);
1407 
1408     // Don't promote to an alignment that would require dynamic stack
1409     // realignment.
1410     const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
1411     if (!TRI->hasStackRealignment(MF))
1412       while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign))
1413         NewAlign = NewAlign / 2;
1414 
1415     if (NewAlign > Alignment) {
1416       Alignment = NewAlign;
1417       unsigned FI = FIDef->getOperand(1).getIndex();
1418       // Give the stack frame object a larger alignment if needed.
1419       if (MFI.getObjectAlign(FI) < Alignment)
1420         MFI.setObjectAlignment(FI, Alignment);
1421     }
1422   }
1423 
1424   LLVM_DEBUG(dbgs() << "Inlining memcpy: " << MI << " into loads & stores\n");
1425 
1426   MachineIRBuilder MIB(MI);
1427   // Now we need to emit a load/store pair for each of the types we've
1428   // collected. I.e. for each type, generate a load of that width from the
1429   // source pointer, then a corresponding store of the loaded value to the dest
1430   // buffer. This can result in a sequence of loads and stores of mixed types,
1431   // depending on what the target specifies as good types to use.
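  // E.g. a 12-byte copy for which the target returns {s64, s32} becomes an s64
  // load/store at offset 0 followed by an s32 load/store at offset 8.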
1432   unsigned CurrOffset = 0;
1433   LLT PtrTy = MRI.getType(Src);
1434   unsigned Size = KnownLen;
1435   for (auto CopyTy : MemOps) {
1436     // Issuing an unaligned load / store pair that overlaps with the previous
1437     // pair. Adjust the offset accordingly.
1438     if (CopyTy.getSizeInBytes() > Size)
1439       CurrOffset -= CopyTy.getSizeInBytes() - Size;
1440 
1441     // Construct MMOs for the accesses.
1442     auto *LoadMMO =
1443         MF.getMachineMemOperand(&SrcMMO, CurrOffset, CopyTy.getSizeInBytes());
1444     auto *StoreMMO =
1445         MF.getMachineMemOperand(&DstMMO, CurrOffset, CopyTy.getSizeInBytes());
1446 
1447     // Create the load.
1448     Register LoadPtr = Src;
1449     Register Offset;
1450     if (CurrOffset != 0) {
1451       Offset = MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset)
1452                    .getReg(0);
1453       LoadPtr = MIB.buildPtrAdd(PtrTy, Src, Offset).getReg(0);
1454     }
1455     auto LdVal = MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO);
1456 
1457     // Create the store.
1458     Register StorePtr =
1459         CurrOffset == 0 ? Dst : MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0);
1460     MIB.buildStore(LdVal, StorePtr, *StoreMMO);
1461     CurrOffset += CopyTy.getSizeInBytes();
1462     Size -= CopyTy.getSizeInBytes();
1463   }
1464 
1465   MI.eraseFromParent();
1466   return true;
1467 }
1468 
1469 bool CombinerHelper::optimizeMemmove(MachineInstr &MI, Register Dst,
1470                                      Register Src, uint64_t KnownLen,
1471                                      Align DstAlign, Align SrcAlign,
1472                                      bool IsVolatile) {
1473   auto &MF = *MI.getParent()->getParent();
1474   const auto &TLI = *MF.getSubtarget().getTargetLowering();
1475   auto &DL = MF.getDataLayout();
1476   LLVMContext &C = MF.getFunction().getContext();
1477 
1478   assert(KnownLen != 0 && "Have a zero length memmove length!");
1479 
1480   bool DstAlignCanChange = false;
1481   MachineFrameInfo &MFI = MF.getFrameInfo();
1482   bool OptSize = shouldLowerMemFuncForSize(MF);
1483   Align Alignment = commonAlignment(DstAlign, SrcAlign);
1484 
1485   MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
1486   if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
1487     DstAlignCanChange = true;
1488 
1489   unsigned Limit = TLI.getMaxStoresPerMemmove(OptSize);
1490   std::vector<LLT> MemOps;
1491 
1492   const auto &DstMMO = **MI.memoperands_begin();
1493   const auto &SrcMMO = **std::next(MI.memoperands_begin());
1494   MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();
1495   MachinePointerInfo SrcPtrInfo = SrcMMO.getPointerInfo();
1496 
1497   // FIXME: SelectionDAG always passes false for 'AllowOverlap', apparently due
1498   // to a bug in its findOptimalMemOpLowering implementation. For now do the
1499   // same thing here.
1500   if (!findGISelOptimalMemOpLowering(
1501           MemOps, Limit,
1502           MemOp::Copy(KnownLen, DstAlignCanChange, Alignment, SrcAlign,
1503                       /*IsVolatile*/ true),
1504           DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
1505           MF.getFunction().getAttributes(), TLI))
1506     return false;
1507 
1508   if (DstAlignCanChange) {
1509     // Get an estimate of the type from the LLT.
1510     Type *IRTy = getTypeForLLT(MemOps[0], C);
1511     Align NewAlign = DL.getABITypeAlign(IRTy);
1512 
1513     // Don't promote to an alignment that would require dynamic stack
1514     // realignment.
1515     const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
1516     if (!TRI->hasStackRealignment(MF))
1517       while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign))
1518         NewAlign = NewAlign / 2;
1519 
1520     if (NewAlign > Alignment) {
1521       Alignment = NewAlign;
1522       unsigned FI = FIDef->getOperand(1).getIndex();
1523       // Give the stack frame object a larger alignment if needed.
1524       if (MFI.getObjectAlign(FI) < Alignment)
1525         MFI.setObjectAlignment(FI, Alignment);
1526     }
1527   }
1528 
1529   LLVM_DEBUG(dbgs() << "Inlining memmove: " << MI << " into loads & stores\n");
1530 
1531   MachineIRBuilder MIB(MI);
1532   // Memmove requires that we perform all the loads before issuing the stores:
1533   // the ranges may overlap, so an early store could clobber bytes that a later
1534   // load still needs. Otherwise this loop mirrors the memcpy codegen above.
1535   unsigned CurrOffset = 0;
1536   LLT PtrTy = MRI.getType(Src);
1537   SmallVector<Register, 16> LoadVals;
1538   for (auto CopyTy : MemOps) {
1539     // Construct MMO for the load.
1540     auto *LoadMMO =
1541         MF.getMachineMemOperand(&SrcMMO, CurrOffset, CopyTy.getSizeInBytes());
1542 
1543     // Create the load.
1544     Register LoadPtr = Src;
1545     if (CurrOffset != 0) {
1546       auto Offset =
1547           MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset);
1548       LoadPtr = MIB.buildPtrAdd(PtrTy, Src, Offset).getReg(0);
1549     }
1550     LoadVals.push_back(MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO).getReg(0));
1551     CurrOffset += CopyTy.getSizeInBytes();
1552   }
1553 
1554   CurrOffset = 0;
1555   for (unsigned I = 0; I < MemOps.size(); ++I) {
1556     LLT CopyTy = MemOps[I];
1557     // Now store the values loaded.
1558     auto *StoreMMO =
1559         MF.getMachineMemOperand(&DstMMO, CurrOffset, CopyTy.getSizeInBytes());
1560 
1561     Register StorePtr = Dst;
1562     if (CurrOffset != 0) {
1563       auto Offset =
1564           MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset);
1565       StorePtr = MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0);
1566     }
1567     MIB.buildStore(LoadVals[I], StorePtr, *StoreMMO);
1568     CurrOffset += CopyTy.getSizeInBytes();
1569   }
1570   MI.eraseFromParent();
1571   return true;
1572 }
1573 
1574 bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) {
1575   const unsigned Opc = MI.getOpcode();
1576   // This combine is fairly complex so it's not written with a separate
1577   // matcher function.
1578   assert((Opc == TargetOpcode::G_MEMCPY || Opc == TargetOpcode::G_MEMMOVE ||
1579           Opc == TargetOpcode::G_MEMSET) && "Expected memcpy like instruction");
1580 
1581   auto MMOIt = MI.memoperands_begin();
1582   const MachineMemOperand *MemOp = *MMOIt;
1583 
1584   Align DstAlign = MemOp->getBaseAlign();
1585   Align SrcAlign;
1586   Register Dst = MI.getOperand(0).getReg();
1587   Register Src = MI.getOperand(1).getReg();
1588   Register Len = MI.getOperand(2).getReg();
1589 
1590   if (Opc != TargetOpcode::G_MEMSET) {
1591     assert(MMOIt != MI.memoperands_end() && "Expected a second MMO on MI");
1592     MemOp = *(++MMOIt);
1593     SrcAlign = MemOp->getBaseAlign();
1594   }
1595 
1596   // See if this is a constant length copy
1597   auto LenVRegAndVal = getConstantVRegValWithLookThrough(Len, MRI);
1598   if (!LenVRegAndVal)
1599     return false; // Leave it to the legalizer to lower it to a libcall.
1600   uint64_t KnownLen = LenVRegAndVal->Value.getZExtValue();
1601 
1602   if (KnownLen == 0) {
1603     MI.eraseFromParent();
1604     return true;
1605   }
1606 
1607   bool IsVolatile = MemOp->isVolatile();
1608   if (Opc == TargetOpcode::G_MEMCPY_INLINE)
1609     return tryEmitMemcpyInline(MI, Dst, Src, KnownLen, DstAlign, SrcAlign,
1610                                IsVolatile);
1611 
1612   // Don't try to optimize volatile.
1613   if (IsVolatile)
1614     return false;
1615 
1616   if (MaxLen && KnownLen > MaxLen)
1617     return false;
1618 
1619   if (Opc == TargetOpcode::G_MEMCPY) {
1620     auto &MF = *MI.getParent()->getParent();
1621     const auto &TLI = *MF.getSubtarget().getTargetLowering();
1622     bool OptSize = shouldLowerMemFuncForSize(MF);
1623     uint64_t Limit = TLI.getMaxStoresPerMemcpy(OptSize);
1624     return optimizeMemcpy(MI, Dst, Src, KnownLen, Limit, DstAlign, SrcAlign,
1625                           IsVolatile);
1626   }
1627   if (Opc == TargetOpcode::G_MEMMOVE)
1628     return optimizeMemmove(MI, Dst, Src, KnownLen, DstAlign, SrcAlign, IsVolatile);
1629   if (Opc == TargetOpcode::G_MEMSET)
1630     return optimizeMemset(MI, Dst, Src, KnownLen, DstAlign, IsVolatile);
1631   return false;
1632 }
1633 
1634 static Optional<APFloat> constantFoldFpUnary(unsigned Opcode, LLT DstTy,
1635                                              const Register Op,
1636                                              const MachineRegisterInfo &MRI) {
1637   const ConstantFP *MaybeCst = getConstantFPVRegVal(Op, MRI);
1638   if (!MaybeCst)
1639     return None;
1640 
1641   APFloat V = MaybeCst->getValueAPF();
1642   switch (Opcode) {
1643   default:
1644     llvm_unreachable("Unexpected opcode!");
1645   case TargetOpcode::G_FNEG: {
1646     V.changeSign();
1647     return V;
1648   }
1649   case TargetOpcode::G_FABS: {
1650     V.clearSign();
1651     return V;
1652   }
1653   case TargetOpcode::G_FPTRUNC:
1654     break;
1655   case TargetOpcode::G_FSQRT: {
1656     bool Unused;
1657     V.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &Unused);
1658     V = APFloat(sqrt(V.convertToDouble()));
1659     break;
1660   }
1661   case TargetOpcode::G_FLOG2: {
1662     bool Unused;
1663     V.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &Unused);
1664     V = APFloat(log2(V.convertToDouble()));
1665     break;
1666   }
1667   }
1668   // Convert `APFloat` to appropriate IEEE type depending on `DstTy`. Otherwise,
1669   // `buildFConstant` will assert on size mismatch. Only `G_FPTRUNC`, `G_FSQRT`,
1670   // and `G_FLOG2` reach here.
1671   bool Unused;
1672   V.convert(getFltSemanticForLLT(DstTy), APFloat::rmNearestTiesToEven, &Unused);
1673   return V;
1674 }
1675 
1676 bool CombinerHelper::matchCombineConstantFoldFpUnary(MachineInstr &MI,
1677                                                      Optional<APFloat> &Cst) {
1678   Register DstReg = MI.getOperand(0).getReg();
1679   Register SrcReg = MI.getOperand(1).getReg();
1680   LLT DstTy = MRI.getType(DstReg);
1681   Cst = constantFoldFpUnary(MI.getOpcode(), DstTy, SrcReg, MRI);
1682   return Cst.hasValue();
1683 }
1684 
1685 void CombinerHelper::applyCombineConstantFoldFpUnary(MachineInstr &MI,
1686                                                      Optional<APFloat> &Cst) {
1687   assert(Cst.hasValue() && "Optional is unexpectedly empty!");
1688   Builder.setInstrAndDebugLoc(MI);
1689   MachineFunction &MF = Builder.getMF();
1690   auto *FPVal = ConstantFP::get(MF.getFunction().getContext(), *Cst);
1691   Register DstReg = MI.getOperand(0).getReg();
1692   Builder.buildFConstant(DstReg, *FPVal);
1693   MI.eraseFromParent();
1694 }
1695 
1696 bool CombinerHelper::matchPtrAddImmedChain(MachineInstr &MI,
1697                                            PtrAddChain &MatchInfo) {
1698   // We're trying to match the following pattern:
1699   //   %t1 = G_PTR_ADD %base, G_CONSTANT imm1
1700   //   %root = G_PTR_ADD %t1, G_CONSTANT imm2
1701   // -->
1702   //   %root = G_PTR_ADD %base, G_CONSTANT (imm1 + imm2)
1703 
1704   if (MI.getOpcode() != TargetOpcode::G_PTR_ADD)
1705     return false;
1706 
1707   Register Add2 = MI.getOperand(1).getReg();
1708   Register Imm1 = MI.getOperand(2).getReg();
1709   auto MaybeImmVal = getConstantVRegValWithLookThrough(Imm1, MRI);
1710   if (!MaybeImmVal)
1711     return false;
1712 
1713   MachineInstr *Add2Def = MRI.getUniqueVRegDef(Add2);
1714   if (!Add2Def || Add2Def->getOpcode() != TargetOpcode::G_PTR_ADD)
1715     return false;
1716 
1717   Register Base = Add2Def->getOperand(1).getReg();
1718   Register Imm2 = Add2Def->getOperand(2).getReg();
1719   auto MaybeImm2Val = getConstantVRegValWithLookThrough(Imm2, MRI);
1720   if (!MaybeImm2Val)
1721     return false;
1722 
1723   // Check if the new combined immediate forms an illegal addressing mode.
1724   // Do not combine if it was legal before but would get illegal.
1725   // To do so, we need to find a load/store user of the pointer to get
1726   // the access type.
1727   Type *AccessTy = nullptr;
1728   auto &MF = *MI.getMF();
1729   for (auto &UseMI : MRI.use_nodbg_instructions(MI.getOperand(0).getReg())) {
1730     if (auto *LdSt = dyn_cast<GLoadStore>(&UseMI)) {
1731       AccessTy = getTypeForLLT(MRI.getType(LdSt->getReg(0)),
1732                                MF.getFunction().getContext());
1733       break;
1734     }
1735   }
1736   TargetLoweringBase::AddrMode AMNew;
1737   APInt CombinedImm = MaybeImmVal->Value + MaybeImm2Val->Value;
1738   AMNew.BaseOffs = CombinedImm.getSExtValue();
1739   if (AccessTy) {
1740     AMNew.HasBaseReg = true;
1741     TargetLoweringBase::AddrMode AMOld;
1742     AMOld.BaseOffs = MaybeImm2Val->Value.getSExtValue();
1743     AMOld.HasBaseReg = true;
1744     unsigned AS = MRI.getType(Add2).getAddressSpace();
1745     const auto &TLI = *MF.getSubtarget().getTargetLowering();
1746     if (TLI.isLegalAddressingMode(MF.getDataLayout(), AMOld, AccessTy, AS) &&
1747         !TLI.isLegalAddressingMode(MF.getDataLayout(), AMNew, AccessTy, AS))
1748       return false;
1749   }
1750 
1751   // Pass the combined immediate to the apply function.
1752   MatchInfo.Imm = AMNew.BaseOffs;
1753   MatchInfo.Base = Base;
1754   return true;
1755 }
1756 
1757 void CombinerHelper::applyPtrAddImmedChain(MachineInstr &MI,
1758                                            PtrAddChain &MatchInfo) {
1759   assert(MI.getOpcode() == TargetOpcode::G_PTR_ADD && "Expected G_PTR_ADD");
1760   MachineIRBuilder MIB(MI);
1761   LLT OffsetTy = MRI.getType(MI.getOperand(2).getReg());
1762   auto NewOffset = MIB.buildConstant(OffsetTy, MatchInfo.Imm);
1763   Observer.changingInstr(MI);
1764   MI.getOperand(1).setReg(MatchInfo.Base);
1765   MI.getOperand(2).setReg(NewOffset.getReg(0));
1766   Observer.changedInstr(MI);
1767 }
1768 
1769 bool CombinerHelper::matchShiftImmedChain(MachineInstr &MI,
1770                                           RegisterImmPair &MatchInfo) {
1771   // We're trying to match the following pattern with any of
1772   // G_SHL/G_ASHR/G_LSHR/G_SSHLSAT/G_USHLSAT shift instructions:
1773   //   %t1 = SHIFT %base, G_CONSTANT imm1
1774   //   %root = SHIFT %t1, G_CONSTANT imm2
1775   // -->
1776   //   %root = SHIFT %base, G_CONSTANT (imm1 + imm2)
1777 
1778   unsigned Opcode = MI.getOpcode();
1779   assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
1780           Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
1781           Opcode == TargetOpcode::G_USHLSAT) &&
1782          "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");
1783 
1784   Register Shl2 = MI.getOperand(1).getReg();
1785   Register Imm1 = MI.getOperand(2).getReg();
1786   auto MaybeImmVal = getConstantVRegValWithLookThrough(Imm1, MRI);
1787   if (!MaybeImmVal)
1788     return false;
1789 
1790   MachineInstr *Shl2Def = MRI.getUniqueVRegDef(Shl2);
1791   if (Shl2Def->getOpcode() != Opcode)
1792     return false;
1793 
1794   Register Base = Shl2Def->getOperand(1).getReg();
1795   Register Imm2 = Shl2Def->getOperand(2).getReg();
1796   auto MaybeImm2Val = getConstantVRegValWithLookThrough(Imm2, MRI);
1797   if (!MaybeImm2Val)
1798     return false;
1799 
1800   // Pass the combined immediate to the apply function.
1801   MatchInfo.Imm =
1802       (MaybeImmVal->Value.getSExtValue() + MaybeImm2Val->Value).getSExtValue();
1803   MatchInfo.Reg = Base;
1804 
1805   // There is no simple replacement for a saturating unsigned left shift that
1806   // exceeds the scalar size.
1807   if (Opcode == TargetOpcode::G_USHLSAT &&
1808       MatchInfo.Imm >= MRI.getType(Shl2).getScalarSizeInBits())
1809     return false;
1810 
1811   return true;
1812 }
1813 
1814 void CombinerHelper::applyShiftImmedChain(MachineInstr &MI,
1815                                           RegisterImmPair &MatchInfo) {
1816   unsigned Opcode = MI.getOpcode();
1817   assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
1818           Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
1819           Opcode == TargetOpcode::G_USHLSAT) &&
1820          "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");
1821 
1822   Builder.setInstrAndDebugLoc(MI);
1823   LLT Ty = MRI.getType(MI.getOperand(1).getReg());
1824   unsigned const ScalarSizeInBits = Ty.getScalarSizeInBits();
1825   auto Imm = MatchInfo.Imm;
1826 
1827   if (Imm >= ScalarSizeInBits) {
1828     // Any logical shift that exceeds scalar size will produce zero.
1829     if (Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_LSHR) {
1830       Builder.buildConstant(MI.getOperand(0), 0);
1831       MI.eraseFromParent();
1832       return;
1833     }
1834     // Arithmetic shift and saturating signed left shift have no effect beyond
1835     // scalar size.
1836     Imm = ScalarSizeInBits - 1;
1837   }
1838 
1839   LLT ImmTy = MRI.getType(MI.getOperand(2).getReg());
1840   Register NewImm = Builder.buildConstant(ImmTy, Imm).getReg(0);
1841   Observer.changingInstr(MI);
1842   MI.getOperand(1).setReg(MatchInfo.Reg);
1843   MI.getOperand(2).setReg(NewImm);
1844   Observer.changedInstr(MI);
1845 }
1846 
1847 bool CombinerHelper::matchShiftOfShiftedLogic(MachineInstr &MI,
1848                                               ShiftOfShiftedLogic &MatchInfo) {
1849   // We're trying to match the following pattern with any of
1850   // G_SHL/G_ASHR/G_LSHR/G_USHLSAT/G_SSHLSAT shift instructions in combination
1851   // with any of G_AND/G_OR/G_XOR logic instructions.
1852   //   %t1 = SHIFT %X, G_CONSTANT C0
1853   //   %t2 = LOGIC %t1, %Y
1854   //   %root = SHIFT %t2, G_CONSTANT C1
1855   // -->
1856   //   %t3 = SHIFT %X, G_CONSTANT (C0+C1)
1857   //   %t4 = SHIFT %Y, G_CONSTANT C1
1858   //   %root = LOGIC %t3, %t4
1859   unsigned ShiftOpcode = MI.getOpcode();
1860   assert((ShiftOpcode == TargetOpcode::G_SHL ||
1861           ShiftOpcode == TargetOpcode::G_ASHR ||
1862           ShiftOpcode == TargetOpcode::G_LSHR ||
1863           ShiftOpcode == TargetOpcode::G_USHLSAT ||
1864           ShiftOpcode == TargetOpcode::G_SSHLSAT) &&
1865          "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT or G_SSHLSAT");
1866 
1867   // Match a one-use bitwise logic op.
1868   Register LogicDest = MI.getOperand(1).getReg();
1869   if (!MRI.hasOneNonDBGUse(LogicDest))
1870     return false;
1871 
1872   MachineInstr *LogicMI = MRI.getUniqueVRegDef(LogicDest);
1873   unsigned LogicOpcode = LogicMI->getOpcode();
1874   if (LogicOpcode != TargetOpcode::G_AND && LogicOpcode != TargetOpcode::G_OR &&
1875       LogicOpcode != TargetOpcode::G_XOR)
1876     return false;
1877 
1878   // Find a matching one-use shift by constant.
1879   const Register C1 = MI.getOperand(2).getReg();
1880   auto MaybeImmVal = getConstantVRegValWithLookThrough(C1, MRI);
1881   if (!MaybeImmVal)
1882     return false;
1883 
1884   const uint64_t C1Val = MaybeImmVal->Value.getZExtValue();
1885 
1886   auto matchFirstShift = [&](const MachineInstr *MI, uint64_t &ShiftVal) {
1887     // The shift should match the previous one and have a single use.
1888     if (MI->getOpcode() != ShiftOpcode ||
1889         !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
1890       return false;
1891 
1892     // Must be a constant.
1893     auto MaybeImmVal =
1894         getConstantVRegValWithLookThrough(MI->getOperand(2).getReg(), MRI);
1895     if (!MaybeImmVal)
1896       return false;
1897 
1898     ShiftVal = MaybeImmVal->Value.getSExtValue();
1899     return true;
1900   };
1901 
1902   // Logic ops are commutative, so check each operand for a match.
1903   Register LogicMIReg1 = LogicMI->getOperand(1).getReg();
1904   MachineInstr *LogicMIOp1 = MRI.getUniqueVRegDef(LogicMIReg1);
1905   Register LogicMIReg2 = LogicMI->getOperand(2).getReg();
1906   MachineInstr *LogicMIOp2 = MRI.getUniqueVRegDef(LogicMIReg2);
1907   uint64_t C0Val;
1908 
1909   if (matchFirstShift(LogicMIOp1, C0Val)) {
1910     MatchInfo.LogicNonShiftReg = LogicMIReg2;
1911     MatchInfo.Shift2 = LogicMIOp1;
1912   } else if (matchFirstShift(LogicMIOp2, C0Val)) {
1913     MatchInfo.LogicNonShiftReg = LogicMIReg1;
1914     MatchInfo.Shift2 = LogicMIOp2;
1915   } else
1916     return false;
1917 
1918   MatchInfo.ValSum = C0Val + C1Val;
1919 
1920   // The fold is not valid if the sum of the shift values exceeds bitwidth.
1921   if (MatchInfo.ValSum >= MRI.getType(LogicDest).getScalarSizeInBits())
1922     return false;
1923 
1924   MatchInfo.Logic = LogicMI;
1925   return true;
1926 }
1927 
1928 void CombinerHelper::applyShiftOfShiftedLogic(MachineInstr &MI,
1929                                               ShiftOfShiftedLogic &MatchInfo) {
1930   unsigned Opcode = MI.getOpcode();
1931   assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
1932           Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_USHLSAT ||
1933           Opcode == TargetOpcode::G_SSHLSAT) &&
1934          "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT or G_SSHLSAT");
1935 
1936   LLT ShlType = MRI.getType(MI.getOperand(2).getReg());
1937   LLT DestType = MRI.getType(MI.getOperand(0).getReg());
1938   Builder.setInstrAndDebugLoc(MI);
1939 
1940   Register Const = Builder.buildConstant(ShlType, MatchInfo.ValSum).getReg(0);
1941 
1942   Register Shift1Base = MatchInfo.Shift2->getOperand(1).getReg();
1943   Register Shift1 =
1944       Builder.buildInstr(Opcode, {DestType}, {Shift1Base, Const}).getReg(0);
1945 
1946   Register Shift2Const = MI.getOperand(2).getReg();
1947   Register Shift2 = Builder
1948                         .buildInstr(Opcode, {DestType},
1949                                     {MatchInfo.LogicNonShiftReg, Shift2Const})
1950                         .getReg(0);
1951 
1952   Register Dest = MI.getOperand(0).getReg();
1953   Builder.buildInstr(MatchInfo.Logic->getOpcode(), {Dest}, {Shift1, Shift2});
1954 
1955   // These were one use so it's safe to remove them.
1956   MatchInfo.Shift2->eraseFromParentAndMarkDBGValuesForRemoval();
1957   MatchInfo.Logic->eraseFromParentAndMarkDBGValuesForRemoval();
1958 
1959   MI.eraseFromParent();
1960 }
1961 
1962 bool CombinerHelper::matchCombineMulToShl(MachineInstr &MI,
1963                                           unsigned &ShiftVal) {
1964   assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
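  // Fold a multiplication by a power of two into a shift, e.g.:
  //   %root = G_MUL %x, G_CONSTANT 8
  // -->
  //   %root = G_SHL %x, G_CONSTANT 3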
1965   auto MaybeImmVal =
1966       getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
1967   if (!MaybeImmVal)
1968     return false;
1969 
1970   ShiftVal = MaybeImmVal->Value.exactLogBase2();
1971   return (static_cast<int32_t>(ShiftVal) != -1);
1972 }
1973 
1974 void CombinerHelper::applyCombineMulToShl(MachineInstr &MI,
1975                                           unsigned &ShiftVal) {
1976   assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
1977   MachineIRBuilder MIB(MI);
1978   LLT ShiftTy = MRI.getType(MI.getOperand(0).getReg());
1979   auto ShiftCst = MIB.buildConstant(ShiftTy, ShiftVal);
1980   Observer.changingInstr(MI);
1981   MI.setDesc(MIB.getTII().get(TargetOpcode::G_SHL));
1982   MI.getOperand(2).setReg(ShiftCst.getReg(0));
1983   Observer.changedInstr(MI);
1984 }
1985 
1986 // shl ([sza]ext x), y => zext (shl x, y), if shift does not overflow source
1987 bool CombinerHelper::matchCombineShlOfExtend(MachineInstr &MI,
1988                                              RegisterImmPair &MatchData) {
1989   assert(MI.getOpcode() == TargetOpcode::G_SHL && KB);
1990 
1991   Register LHS = MI.getOperand(1).getReg();
1992 
1993   Register ExtSrc;
1994   if (!mi_match(LHS, MRI, m_GAnyExt(m_Reg(ExtSrc))) &&
1995       !mi_match(LHS, MRI, m_GZExt(m_Reg(ExtSrc))) &&
1996       !mi_match(LHS, MRI, m_GSExt(m_Reg(ExtSrc))))
1997     return false;
1998 
1999   // TODO: Should handle vector splat.
2000   Register RHS = MI.getOperand(2).getReg();
2001   auto MaybeShiftAmtVal = getConstantVRegValWithLookThrough(RHS, MRI);
2002   if (!MaybeShiftAmtVal)
2003     return false;
2004 
2005   if (LI) {
2006     LLT SrcTy = MRI.getType(ExtSrc);
2007 
2008     // We only really care about the legality with the shifted value. We can
2009     // pick any type for the constant shift amount, so ask the target what to
2010     // use. Otherwise we would have to guess and hope it is reported as legal.
2011     LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(SrcTy);
2012     if (!isLegalOrBeforeLegalizer({TargetOpcode::G_SHL, {SrcTy, ShiftAmtTy}}))
2013       return false;
2014   }
2015 
2016   int64_t ShiftAmt = MaybeShiftAmtVal->Value.getSExtValue();
2017   MatchData.Reg = ExtSrc;
2018   MatchData.Imm = ShiftAmt;
2019 
2020   unsigned MinLeadingZeros = KB->getKnownZeroes(ExtSrc).countLeadingOnes();
2021   return MinLeadingZeros >= ShiftAmt;
2022 }
2023 
2024 void CombinerHelper::applyCombineShlOfExtend(MachineInstr &MI,
2025                                              const RegisterImmPair &MatchData) {
2026   Register ExtSrcReg = MatchData.Reg;
2027   int64_t ShiftAmtVal = MatchData.Imm;
2028 
2029   LLT ExtSrcTy = MRI.getType(ExtSrcReg);
2030   Builder.setInstrAndDebugLoc(MI);
2031   auto ShiftAmt = Builder.buildConstant(ExtSrcTy, ShiftAmtVal);
2032   auto NarrowShift =
2033       Builder.buildShl(ExtSrcTy, ExtSrcReg, ShiftAmt, MI.getFlags());
2034   Builder.buildZExt(MI.getOperand(0), NarrowShift);
2035   MI.eraseFromParent();
2036 }
2037 
2038 bool CombinerHelper::matchCombineMergeUnmerge(MachineInstr &MI,
2039                                               Register &MatchInfo) {
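  // Match a merge that simply reassembles, in order, the results of a single
  // unmerge, e.g.:
  //   %a, %b = G_UNMERGE_VALUES %x
  //   %root = G_MERGE_VALUES %a, %b
  // so that %root can be replaced by %x.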
2040   GMerge &Merge = cast<GMerge>(MI);
2041   SmallVector<Register, 16> MergedValues;
2042   for (unsigned I = 0; I < Merge.getNumSources(); ++I)
2043     MergedValues.emplace_back(Merge.getSourceReg(I));
2044 
2045   auto *Unmerge = getOpcodeDef<GUnmerge>(MergedValues[0], MRI);
2046   if (!Unmerge || Unmerge->getNumDefs() != Merge.getNumSources())
2047     return false;
2048 
2049   for (unsigned I = 0; I < MergedValues.size(); ++I)
2050     if (MergedValues[I] != Unmerge->getReg(I))
2051       return false;
2052 
2053   MatchInfo = Unmerge->getSourceReg();
2054   return true;
2055 }
2056 
2057 static Register peekThroughBitcast(Register Reg,
2058                                    const MachineRegisterInfo &MRI) {
2059   while (mi_match(Reg, MRI, m_GBitcast(m_Reg(Reg))))
2060     ;
2061 
2062   return Reg;
2063 }
2064 
2065 bool CombinerHelper::matchCombineUnmergeMergeToPlainValues(
2066     MachineInstr &MI, SmallVectorImpl<Register> &Operands) {
2067   assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
2068          "Expected an unmerge");
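  // Match an unmerge whose source is a merge-like instruction (looking through
  // bitcasts), e.g.:
  //   %m:_(s64) = G_MERGE_VALUES %x:_(s32), %y:_(s32)
  //   %a:_(s32), %b:_(s32) = G_UNMERGE_VALUES %m
  // The unmerged defs can then be rewritten directly in terms of %x and %y.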
2069   auto &Unmerge = cast<GUnmerge>(MI);
2070   Register SrcReg = peekThroughBitcast(Unmerge.getSourceReg(), MRI);
2071 
2072   auto *SrcInstr = getOpcodeDef<GMergeLikeOp>(SrcReg, MRI);
2073   if (!SrcInstr)
2074     return false;
2075 
2076   // Check the source type of the merge.
2077   LLT SrcMergeTy = MRI.getType(SrcInstr->getSourceReg(0));
2078   LLT Dst0Ty = MRI.getType(Unmerge.getReg(0));
2079   bool SameSize = Dst0Ty.getSizeInBits() == SrcMergeTy.getSizeInBits();
2080   if (SrcMergeTy != Dst0Ty && !SameSize)
2081     return false;
2082   // The types match, or at least have the same size (modulo a bitcast).
2083   // We can collect all the src registers.
2084   for (unsigned Idx = 0; Idx < SrcInstr->getNumSources(); ++Idx)
2085     Operands.push_back(SrcInstr->getSourceReg(Idx));
2086   return true;
2087 }
2088 
2089 void CombinerHelper::applyCombineUnmergeMergeToPlainValues(
2090     MachineInstr &MI, SmallVectorImpl<Register> &Operands) {
2091   assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
2092          "Expected an unmerge");
2093   assert((MI.getNumOperands() - 1 == Operands.size()) &&
2094          "Not enough operands to replace all defs");
2095   unsigned NumElems = MI.getNumOperands() - 1;
2096 
2097   LLT SrcTy = MRI.getType(Operands[0]);
2098   LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
2099   bool CanReuseInputDirectly = DstTy == SrcTy;
2100   Builder.setInstrAndDebugLoc(MI);
2101   for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
2102     Register DstReg = MI.getOperand(Idx).getReg();
2103     Register SrcReg = Operands[Idx];
2104     if (CanReuseInputDirectly)
2105       replaceRegWith(MRI, DstReg, SrcReg);
2106     else
2107       Builder.buildCast(DstReg, SrcReg);
2108   }
2109   MI.eraseFromParent();
2110 }
2111 
2112 bool CombinerHelper::matchCombineUnmergeConstant(MachineInstr &MI,
2113                                                  SmallVectorImpl<APInt> &Csts) {
2114   unsigned SrcIdx = MI.getNumOperands() - 1;
2115   Register SrcReg = MI.getOperand(SrcIdx).getReg();
2116   MachineInstr *SrcInstr = MRI.getVRegDef(SrcReg);
2117   if (SrcInstr->getOpcode() != TargetOpcode::G_CONSTANT &&
2118       SrcInstr->getOpcode() != TargetOpcode::G_FCONSTANT)
2119     return false;
2120   // Break down the big constant into smaller ones.
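  // E.g. unmerging an s32 G_CONSTANT 0x11223344 into four s8 defs yields the
  // constants 0x44, 0x33, 0x22 and 0x11, with the lowest bits in def 0.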
2121   const MachineOperand &CstVal = SrcInstr->getOperand(1);
2122   APInt Val = SrcInstr->getOpcode() == TargetOpcode::G_CONSTANT
2123                   ? CstVal.getCImm()->getValue()
2124                   : CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
2125 
2126   LLT Dst0Ty = MRI.getType(MI.getOperand(0).getReg());
2127   unsigned ShiftAmt = Dst0Ty.getSizeInBits();
2128   // Unmerge a constant.
2129   for (unsigned Idx = 0; Idx != SrcIdx; ++Idx) {
2130     Csts.emplace_back(Val.trunc(ShiftAmt));
2131     Val = Val.lshr(ShiftAmt);
2132   }
2133 
2134   return true;
2135 }
2136 
2137 void CombinerHelper::applyCombineUnmergeConstant(MachineInstr &MI,
2138                                                  SmallVectorImpl<APInt> &Csts) {
2139   assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
2140          "Expected an unmerge");
2141   assert((MI.getNumOperands() - 1 == Csts.size()) &&
2142          "Not enough operands to replace all defs");
2143   unsigned NumElems = MI.getNumOperands() - 1;
2144   Builder.setInstrAndDebugLoc(MI);
2145   for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
2146     Register DstReg = MI.getOperand(Idx).getReg();
2147     Builder.buildConstant(DstReg, Csts[Idx]);
2148   }
2149 
2150   MI.eraseFromParent();
2151 }
2152 
2153 bool CombinerHelper::matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) {
2154   assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
2155          "Expected an unmerge");
2156   // Check that all the lanes are dead except the first one.
2157   for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {
2158     if (!MRI.use_nodbg_empty(MI.getOperand(Idx).getReg()))
2159       return false;
2160   }
2161   return true;
2162 }
2163 
2164 void CombinerHelper::applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) {
2165   Builder.setInstrAndDebugLoc(MI);
2166   Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();
2167   // Truncating a vector is going to truncate every single lane,
2168   // whereas we want the low bits of the full value.
2169   // Do the operation on a scalar instead.
2170   LLT SrcTy = MRI.getType(SrcReg);
2171   if (SrcTy.isVector())
2172     SrcReg =
2173         Builder.buildCast(LLT::scalar(SrcTy.getSizeInBits()), SrcReg).getReg(0);
2174 
2175   Register Dst0Reg = MI.getOperand(0).getReg();
2176   LLT Dst0Ty = MRI.getType(Dst0Reg);
2177   if (Dst0Ty.isVector()) {
2178     auto MIB = Builder.buildTrunc(LLT::scalar(Dst0Ty.getSizeInBits()), SrcReg);
2179     Builder.buildCast(Dst0Reg, MIB);
2180   } else
2181     Builder.buildTrunc(Dst0Reg, SrcReg);
2182   MI.eraseFromParent();
2183 }
2184 
2185 bool CombinerHelper::matchCombineUnmergeZExtToZExt(MachineInstr &MI) {
2186   assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
2187          "Expected an unmerge");
2188   Register Dst0Reg = MI.getOperand(0).getReg();
2189   LLT Dst0Ty = MRI.getType(Dst0Reg);
2190   // G_ZEXT on vector applies to each lane, so it will
2191   // affect all destinations. Therefore we won't be able
2192   // to simplify the unmerge to just the first definition.
2193   if (Dst0Ty.isVector())
2194     return false;
2195   Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();
2196   LLT SrcTy = MRI.getType(SrcReg);
2197   if (SrcTy.isVector())
2198     return false;
2199 
2200   Register ZExtSrcReg;
2201   if (!mi_match(SrcReg, MRI, m_GZExt(m_Reg(ZExtSrcReg))))
2202     return false;
2203 
2204   // Finally we can replace the first definition with
2205   // a zext of the source if the definition is big enough to hold
2206   // all of ZExtSrc's bits.
2207   LLT ZExtSrcTy = MRI.getType(ZExtSrcReg);
2208   return ZExtSrcTy.getSizeInBits() <= Dst0Ty.getSizeInBits();
2209 }
2210 
2211 void CombinerHelper::applyCombineUnmergeZExtToZExt(MachineInstr &MI) {
2212   assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
2213          "Expected an unmerge");
2214 
2215   Register Dst0Reg = MI.getOperand(0).getReg();
2216 
2217   MachineInstr *ZExtInstr =
2218       MRI.getVRegDef(MI.getOperand(MI.getNumDefs()).getReg());
2219   assert(ZExtInstr && ZExtInstr->getOpcode() == TargetOpcode::G_ZEXT &&
2220          "Expecting a G_ZEXT");
2221 
2222   Register ZExtSrcReg = ZExtInstr->getOperand(1).getReg();
2223   LLT Dst0Ty = MRI.getType(Dst0Reg);
2224   LLT ZExtSrcTy = MRI.getType(ZExtSrcReg);
2225 
2226   Builder.setInstrAndDebugLoc(MI);
2227 
2228   if (Dst0Ty.getSizeInBits() > ZExtSrcTy.getSizeInBits()) {
2229     Builder.buildZExt(Dst0Reg, ZExtSrcReg);
2230   } else {
2231     assert(Dst0Ty.getSizeInBits() == ZExtSrcTy.getSizeInBits() &&
2232            "ZExt src doesn't fit in destination");
2233     replaceRegWith(MRI, Dst0Reg, ZExtSrcReg);
2234   }
2235 
2236   Register ZeroReg;
2237   for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {
2238     if (!ZeroReg)
2239       ZeroReg = Builder.buildConstant(Dst0Ty, 0).getReg(0);
2240     replaceRegWith(MRI, MI.getOperand(Idx).getReg(), ZeroReg);
2241   }
2242   MI.eraseFromParent();
2243 }
2244 
2245 bool CombinerHelper::matchCombineShiftToUnmerge(MachineInstr &MI,
2246                                                 unsigned TargetShiftSize,
2247                                                 unsigned &ShiftVal) {
2248   assert((MI.getOpcode() == TargetOpcode::G_SHL ||
2249           MI.getOpcode() == TargetOpcode::G_LSHR ||
2250           MI.getOpcode() == TargetOpcode::G_ASHR) && "Expected a shift");
2251 
2252   LLT Ty = MRI.getType(MI.getOperand(0).getReg());
2253   if (Ty.isVector()) // TODO: Handle vector types.
2254     return false;
2255 
2256   // Don't narrow further than the requested size.
2257   unsigned Size = Ty.getSizeInBits();
2258   if (Size <= TargetShiftSize)
2259     return false;
2260 
2261   auto MaybeImmVal =
2262     getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
2263   if (!MaybeImmVal)
2264     return false;
2265 
2266   ShiftVal = MaybeImmVal->Value.getSExtValue();
2267   return ShiftVal >= Size / 2 && ShiftVal < Size;
2268 }
2269 
2270 void CombinerHelper::applyCombineShiftToUnmerge(MachineInstr &MI,
2271                                                 const unsigned &ShiftVal) {
2272   Register DstReg = MI.getOperand(0).getReg();
2273   Register SrcReg = MI.getOperand(1).getReg();
2274   LLT Ty = MRI.getType(SrcReg);
2275   unsigned Size = Ty.getSizeInBits();
2276   unsigned HalfSize = Size / 2;
2277   assert(ShiftVal >= HalfSize);
2278 
2279   LLT HalfTy = LLT::scalar(HalfSize);
2280 
2281   Builder.setInstr(MI);
2282   auto Unmerge = Builder.buildUnmerge(HalfTy, SrcReg);
2283   unsigned NarrowShiftAmt = ShiftVal - HalfSize;
2284 
2285   if (MI.getOpcode() == TargetOpcode::G_LSHR) {
2286     Register Narrowed = Unmerge.getReg(1);
2287 
2288     //  dst = G_LSHR s64:x, C for C >= 32
2289     // =>
2290     //   lo, hi = G_UNMERGE_VALUES x
2291     //   dst = G_MERGE_VALUES (G_LSHR hi, C - 32), 0
2292 
2293     if (NarrowShiftAmt != 0) {
2294       Narrowed = Builder.buildLShr(HalfTy, Narrowed,
2295         Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0);
2296     }
2297 
2298     auto Zero = Builder.buildConstant(HalfTy, 0);
2299     Builder.buildMerge(DstReg, { Narrowed, Zero });
2300   } else if (MI.getOpcode() == TargetOpcode::G_SHL) {
2301     Register Narrowed = Unmerge.getReg(0);
2302     //  dst = G_SHL s64:x, C for C >= 32
2303     // =>
2304     //   lo, hi = G_UNMERGE_VALUES x
2305     //   dst = G_MERGE_VALUES 0, (G_SHL hi, C - 32)
2306     if (NarrowShiftAmt != 0) {
2307       Narrowed = Builder.buildShl(HalfTy, Narrowed,
2308         Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0);
2309     }
2310 
2311     auto Zero = Builder.buildConstant(HalfTy, 0);
2312     Builder.buildMerge(DstReg, { Zero, Narrowed });
2313   } else {
2314     assert(MI.getOpcode() == TargetOpcode::G_ASHR);
2315     auto Hi = Builder.buildAShr(
2316       HalfTy, Unmerge.getReg(1),
2317       Builder.buildConstant(HalfTy, HalfSize - 1));
2318 
2319     if (ShiftVal == HalfSize) {
2320       // (G_ASHR i64:x, 32) ->
2321       //   G_MERGE_VALUES hi_32(x), (G_ASHR hi_32(x), 31)
2322       Builder.buildMerge(DstReg, { Unmerge.getReg(1), Hi });
2323     } else if (ShiftVal == Size - 1) {
2324       // Don't need a second shift.
2325       // (G_ASHR i64:x, 63) ->
2326       //   %narrowed = (G_ASHR hi_32(x), 31)
2327       //   G_MERGE_VALUES %narrowed, %narrowed
2328       Builder.buildMerge(DstReg, { Hi, Hi });
2329     } else {
2330       auto Lo = Builder.buildAShr(
2331         HalfTy, Unmerge.getReg(1),
2332         Builder.buildConstant(HalfTy, ShiftVal - HalfSize));
2333 
2334       // (G_ASHR i64:x, C) ->, for C >= 32
2335       //   G_MERGE_VALUES (G_ASHR hi_32(x), C - 32), (G_ASHR hi_32(x), 31)
2336       Builder.buildMerge(DstReg, { Lo, Hi });
2337     }
2338   }
2339 
2340   MI.eraseFromParent();
2341 }
2342 
2343 bool CombinerHelper::tryCombineShiftToUnmerge(MachineInstr &MI,
2344                                               unsigned TargetShiftAmount) {
2345   unsigned ShiftAmt;
2346   if (matchCombineShiftToUnmerge(MI, TargetShiftAmount, ShiftAmt)) {
2347     applyCombineShiftToUnmerge(MI, ShiftAmt);
2348     return true;
2349   }
2350 
2351   return false;
2352 }
2353 
2354 bool CombinerHelper::matchCombineI2PToP2I(MachineInstr &MI, Register &Reg) {
2355   assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
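  // Fold %root = G_INTTOPTR (G_PTRTOINT %ptr) to a copy of %ptr when %root and
  // %ptr have the same pointer type.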
2356   Register DstReg = MI.getOperand(0).getReg();
2357   LLT DstTy = MRI.getType(DstReg);
2358   Register SrcReg = MI.getOperand(1).getReg();
2359   return mi_match(SrcReg, MRI,
2360                   m_GPtrToInt(m_all_of(m_SpecificType(DstTy), m_Reg(Reg))));
2361 }
2362 
2363 void CombinerHelper::applyCombineI2PToP2I(MachineInstr &MI, Register &Reg) {
2364   assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
2365   Register DstReg = MI.getOperand(0).getReg();
2366   Builder.setInstr(MI);
2367   Builder.buildCopy(DstReg, Reg);
2368   MI.eraseFromParent();
2369 }
2370 
2371 bool CombinerHelper::matchCombineP2IToI2P(MachineInstr &MI, Register &Reg) {
2372   assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT");
2373   Register SrcReg = MI.getOperand(1).getReg();
2374   return mi_match(SrcReg, MRI, m_GIntToPtr(m_Reg(Reg)));
2375 }
2376 
2377 void CombinerHelper::applyCombineP2IToI2P(MachineInstr &MI, Register &Reg) {
2378   assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT");
2379   Register DstReg = MI.getOperand(0).getReg();
2380   Builder.setInstr(MI);
2381   Builder.buildZExtOrTrunc(DstReg, Reg);
2382   MI.eraseFromParent();
2383 }
2384 
2385 bool CombinerHelper::matchCombineAddP2IToPtrAdd(
2386     MachineInstr &MI, std::pair<Register, bool> &PtrReg) {
2387   assert(MI.getOpcode() == TargetOpcode::G_ADD);
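  // Rewrite an integer add of a ptrtoint as a ptradd, e.g.:
  //   %i = G_PTRTOINT %ptr
  //   %root = G_ADD %i, %x
  // -->
  //   %t = G_PTR_ADD %ptr, %x
  //   %root = G_PTRTOINT %t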
2388   Register LHS = MI.getOperand(1).getReg();
2389   Register RHS = MI.getOperand(2).getReg();
2390   LLT IntTy = MRI.getType(LHS);
2391 
2392   // G_PTR_ADD always has the pointer in the LHS, so we may need to commute the
2393   // instruction.
2394   PtrReg.second = false;
2395   for (Register SrcReg : {LHS, RHS}) {
2396     if (mi_match(SrcReg, MRI, m_GPtrToInt(m_Reg(PtrReg.first)))) {
2397       // Don't handle cases where the integer is implicitly converted to the
2398       // pointer width.
2399       LLT PtrTy = MRI.getType(PtrReg.first);
2400       if (PtrTy.getScalarSizeInBits() == IntTy.getScalarSizeInBits())
2401         return true;
2402     }
2403 
2404     PtrReg.second = true;
2405   }
2406 
2407   return false;
2408 }
2409 
2410 void CombinerHelper::applyCombineAddP2IToPtrAdd(
2411     MachineInstr &MI, std::pair<Register, bool> &PtrReg) {
2412   Register Dst = MI.getOperand(0).getReg();
2413   Register LHS = MI.getOperand(1).getReg();
2414   Register RHS = MI.getOperand(2).getReg();
2415 
2416   const bool DoCommute = PtrReg.second;
2417   if (DoCommute)
2418     std::swap(LHS, RHS);
2419   LHS = PtrReg.first;
2420 
2421   LLT PtrTy = MRI.getType(LHS);
2422 
2423   Builder.setInstrAndDebugLoc(MI);
2424   auto PtrAdd = Builder.buildPtrAdd(PtrTy, LHS, RHS);
2425   Builder.buildPtrToInt(Dst, PtrAdd);
2426   MI.eraseFromParent();
2427 }
2428 
2429 bool CombinerHelper::matchCombineConstPtrAddToI2P(MachineInstr &MI,
2430                                                   int64_t &NewCst) {
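  // Fold an all-constant G_PTR_ADD, e.g.:
  //   %p = G_INTTOPTR G_CONSTANT 16
  //   %root = G_PTR_ADD %p, G_CONSTANT 8
  // into a single constant (24) materialized directly into %root.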
2431   auto &PtrAdd = cast<GPtrAdd>(MI);
2432   Register LHS = PtrAdd.getBaseReg();
2433   Register RHS = PtrAdd.getOffsetReg();
2434   MachineRegisterInfo &MRI = Builder.getMF().getRegInfo();
2435 
2436   if (auto RHSCst = getConstantVRegSExtVal(RHS, MRI)) {
2437     int64_t Cst;
2438     if (mi_match(LHS, MRI, m_GIntToPtr(m_ICst(Cst)))) {
2439       NewCst = Cst + *RHSCst;
2440       return true;
2441     }
2442   }
2443 
2444   return false;
2445 }
2446 
2447 void CombinerHelper::applyCombineConstPtrAddToI2P(MachineInstr &MI,
2448                                                   int64_t &NewCst) {
2449   auto &PtrAdd = cast<GPtrAdd>(MI);
2450   Register Dst = PtrAdd.getReg(0);
2451 
2452   Builder.setInstrAndDebugLoc(MI);
2453   Builder.buildConstant(Dst, NewCst);
2454   PtrAdd.eraseFromParent();
2455 }
2456 
2457 bool CombinerHelper::matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg) {
2458   assert(MI.getOpcode() == TargetOpcode::G_ANYEXT && "Expected a G_ANYEXT");
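  // Fold G_ANYEXT (G_TRUNC %x) back to %x when the destination type matches
  // the type of %x.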
2459   Register DstReg = MI.getOperand(0).getReg();
2460   Register SrcReg = MI.getOperand(1).getReg();
2461   LLT DstTy = MRI.getType(DstReg);
2462   return mi_match(SrcReg, MRI,
2463                   m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy))));
2464 }
2465 
2466 bool CombinerHelper::matchCombineZextTrunc(MachineInstr &MI, Register &Reg) {
2467   assert(MI.getOpcode() == TargetOpcode::G_ZEXT && "Expected a G_ZEXT");
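  // Fold G_ZEXT (G_TRUNC %x) back to %x when the types match and the bits
  // above the truncated width are already known to be zero in %x.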
2468   Register DstReg = MI.getOperand(0).getReg();
2469   Register SrcReg = MI.getOperand(1).getReg();
2470   LLT DstTy = MRI.getType(DstReg);
2471   if (mi_match(SrcReg, MRI,
2472                m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy))))) {
2473     unsigned DstSize = DstTy.getScalarSizeInBits();
2474     unsigned SrcSize = MRI.getType(SrcReg).getScalarSizeInBits();
2475     return KB->getKnownBits(Reg).countMinLeadingZeros() >= DstSize - SrcSize;
2476   }
2477   return false;
2478 }
2479 
2480 bool CombinerHelper::matchCombineExtOfExt(
2481     MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
2482   assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
2483           MI.getOpcode() == TargetOpcode::G_SEXT ||
2484           MI.getOpcode() == TargetOpcode::G_ZEXT) &&
2485          "Expected a G_[ASZ]EXT");
2486   Register SrcReg = MI.getOperand(1).getReg();
2487   MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
2488   // Match exts with the same opcode, anyext([sz]ext) and sext(zext).
2489   unsigned Opc = MI.getOpcode();
2490   unsigned SrcOpc = SrcMI->getOpcode();
2491   if (Opc == SrcOpc ||
2492       (Opc == TargetOpcode::G_ANYEXT &&
2493        (SrcOpc == TargetOpcode::G_SEXT || SrcOpc == TargetOpcode::G_ZEXT)) ||
2494       (Opc == TargetOpcode::G_SEXT && SrcOpc == TargetOpcode::G_ZEXT)) {
2495     MatchInfo = std::make_tuple(SrcMI->getOperand(1).getReg(), SrcOpc);
2496     return true;
2497   }
2498   return false;
2499 }
2500 
2501 void CombinerHelper::applyCombineExtOfExt(
2502     MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
2503   assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
2504           MI.getOpcode() == TargetOpcode::G_SEXT ||
2505           MI.getOpcode() == TargetOpcode::G_ZEXT) &&
2506          "Expected a G_[ASZ]EXT");
2507 
2508   Register Reg = std::get<0>(MatchInfo);
2509   unsigned SrcExtOp = std::get<1>(MatchInfo);
2510 
2511   // Combine exts with the same opcode.
2512   if (MI.getOpcode() == SrcExtOp) {
2513     Observer.changingInstr(MI);
2514     MI.getOperand(1).setReg(Reg);
2515     Observer.changedInstr(MI);
2516     return;
2517   }
2518 
2519   // Combine:
2520   // - anyext([sz]ext x) to [sz]ext x
2521   // - sext(zext x) to zext x
2522   if (MI.getOpcode() == TargetOpcode::G_ANYEXT ||
2523       (MI.getOpcode() == TargetOpcode::G_SEXT &&
2524        SrcExtOp == TargetOpcode::G_ZEXT)) {
2525     Register DstReg = MI.getOperand(0).getReg();
2526     Builder.setInstrAndDebugLoc(MI);
2527     Builder.buildInstr(SrcExtOp, {DstReg}, {Reg});
2528     MI.eraseFromParent();
2529   }
2530 }
2531 
2532 void CombinerHelper::applyCombineMulByNegativeOne(MachineInstr &MI) {
2533   assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
2534   Register DstReg = MI.getOperand(0).getReg();
2535   Register SrcReg = MI.getOperand(1).getReg();
2536   LLT DstTy = MRI.getType(DstReg);
2537 
2538   Builder.setInstrAndDebugLoc(MI);
2539   Builder.buildSub(DstReg, Builder.buildConstant(DstTy, 0), SrcReg,
2540                    MI.getFlags());
2541   MI.eraseFromParent();
2542 }
2543 
2544 bool CombinerHelper::matchCombineFNegOfFNeg(MachineInstr &MI, Register &Reg) {
2545   assert(MI.getOpcode() == TargetOpcode::G_FNEG && "Expected a G_FNEG");
2546   Register SrcReg = MI.getOperand(1).getReg();
2547   return mi_match(SrcReg, MRI, m_GFNeg(m_Reg(Reg)));
2548 }
2549 
2550 bool CombinerHelper::matchCombineFAbsOfFAbs(MachineInstr &MI, Register &Src) {
2551   assert(MI.getOpcode() == TargetOpcode::G_FABS && "Expected a G_FABS");
2552   Src = MI.getOperand(1).getReg();
2553   Register AbsSrc;
2554   return mi_match(Src, MRI, m_GFabs(m_Reg(AbsSrc)));
2555 }
2556 
2557 bool CombinerHelper::matchCombineTruncOfExt(
2558     MachineInstr &MI, std::pair<Register, unsigned> &MatchInfo) {
2559   assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
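  // Match trunc([asz]ext %x); depending on the relative sizes, the pair folds
  // to %x itself, to a smaller extend of %x, or to a truncate of %x.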
2560   Register SrcReg = MI.getOperand(1).getReg();
2561   MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
2562   unsigned SrcOpc = SrcMI->getOpcode();
2563   if (SrcOpc == TargetOpcode::G_ANYEXT || SrcOpc == TargetOpcode::G_SEXT ||
2564       SrcOpc == TargetOpcode::G_ZEXT) {
2565     MatchInfo = std::make_pair(SrcMI->getOperand(1).getReg(), SrcOpc);
2566     return true;
2567   }
2568   return false;
2569 }
2570 
2571 void CombinerHelper::applyCombineTruncOfExt(
2572     MachineInstr &MI, std::pair<Register, unsigned> &MatchInfo) {
2573   assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2574   Register SrcReg = MatchInfo.first;
2575   unsigned SrcExtOp = MatchInfo.second;
2576   Register DstReg = MI.getOperand(0).getReg();
2577   LLT SrcTy = MRI.getType(SrcReg);
2578   LLT DstTy = MRI.getType(DstReg);
2579   if (SrcTy == DstTy) {
2580     MI.eraseFromParent();
2581     replaceRegWith(MRI, DstReg, SrcReg);
2582     return;
2583   }
2584   Builder.setInstrAndDebugLoc(MI);
2585   if (SrcTy.getSizeInBits() < DstTy.getSizeInBits())
2586     Builder.buildInstr(SrcExtOp, {DstReg}, {SrcReg});
2587   else
2588     Builder.buildTrunc(DstReg, SrcReg);
2589   MI.eraseFromParent();
2590 }
2591 
2592 bool CombinerHelper::matchCombineTruncOfShl(
2593     MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
2594   assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
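  // Fold trunc (shl %x, %amt) to shl (trunc %x), %amt when the shift amount is
  // known to be smaller than the destination bit width, so the narrow shift is
  // still well defined.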
2595   Register DstReg = MI.getOperand(0).getReg();
2596   Register SrcReg = MI.getOperand(1).getReg();
2597   LLT DstTy = MRI.getType(DstReg);
2598   Register ShiftSrc;
2599   Register ShiftAmt;
2600 
2601   if (MRI.hasOneNonDBGUse(SrcReg) &&
2602       mi_match(SrcReg, MRI, m_GShl(m_Reg(ShiftSrc), m_Reg(ShiftAmt))) &&
2603       isLegalOrBeforeLegalizer(
2604           {TargetOpcode::G_SHL,
2605            {DstTy, getTargetLowering().getPreferredShiftAmountTy(DstTy)}})) {
2606     KnownBits Known = KB->getKnownBits(ShiftAmt);
2607     unsigned Size = DstTy.getSizeInBits();
2608     if (Known.getBitWidth() - Known.countMinLeadingZeros() <= Log2_32(Size)) {
2609       MatchInfo = std::make_pair(ShiftSrc, ShiftAmt);
2610       return true;
2611     }
2612   }
2613   return false;
2614 }
2615 
2616 void CombinerHelper::applyCombineTruncOfShl(
2617     MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
2618   assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2619   Register DstReg = MI.getOperand(0).getReg();
2620   Register SrcReg = MI.getOperand(1).getReg();
2621   LLT DstTy = MRI.getType(DstReg);
2622   MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
2623 
2624   Register ShiftSrc = MatchInfo.first;
2625   Register ShiftAmt = MatchInfo.second;
2626   Builder.setInstrAndDebugLoc(MI);
2627   auto TruncShiftSrc = Builder.buildTrunc(DstTy, ShiftSrc);
2628   Builder.buildShl(DstReg, TruncShiftSrc, ShiftAmt, SrcMI->getFlags());
2629   MI.eraseFromParent();
2630 }
2631 
2632 bool CombinerHelper::matchAnyExplicitUseIsUndef(MachineInstr &MI) {
2633   return any_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
2634     return MO.isReg() &&
2635            getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
2636   });
2637 }
2638 
2639 bool CombinerHelper::matchAllExplicitUsesAreUndef(MachineInstr &MI) {
2640   return all_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
2641     return !MO.isReg() ||
2642            getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
2643   });
2644 }
2645 
2646 bool CombinerHelper::matchUndefShuffleVectorMask(MachineInstr &MI) {
2647   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
2648   ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
2649   return all_of(Mask, [](int Elt) { return Elt < 0; });
2650 }
2651 
2652 bool CombinerHelper::matchUndefStore(MachineInstr &MI) {
2653   assert(MI.getOpcode() == TargetOpcode::G_STORE);
2654   return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(0).getReg(),
2655                       MRI);
2656 }
2657 
2658 bool CombinerHelper::matchUndefSelectCmp(MachineInstr &MI) {
2659   assert(MI.getOpcode() == TargetOpcode::G_SELECT);
2660   return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(1).getReg(),
2661                       MRI);
2662 }
2663 
2664 bool CombinerHelper::matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx) {
2665   assert(MI.getOpcode() == TargetOpcode::G_SELECT);
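  // A G_SELECT with a constant condition always picks the same operand: the
  // false value (operand 3) when the condition is zero, otherwise the true
  // value (operand 2).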
2666   if (auto MaybeCstCmp =
2667           getConstantVRegValWithLookThrough(MI.getOperand(1).getReg(), MRI)) {
2668     OpIdx = MaybeCstCmp->Value.isNullValue() ? 3 : 2;
2669     return true;
2670   }
2671   return false;
2672 }
2673 
2674 bool CombinerHelper::eraseInst(MachineInstr &MI) {
2675   MI.eraseFromParent();
2676   return true;
2677 }
2678 
2679 bool CombinerHelper::matchEqualDefs(const MachineOperand &MOP1,
2680                                     const MachineOperand &MOP2) {
2681   if (!MOP1.isReg() || !MOP2.isReg())
2682     return false;
2683   auto InstAndDef1 = getDefSrcRegIgnoringCopies(MOP1.getReg(), MRI);
2684   if (!InstAndDef1)
2685     return false;
2686   auto InstAndDef2 = getDefSrcRegIgnoringCopies(MOP2.getReg(), MRI);
2687   if (!InstAndDef2)
2688     return false;
2689   MachineInstr *I1 = InstAndDef1->MI;
2690   MachineInstr *I2 = InstAndDef2->MI;
2691 
2692   // Handle a case like this:
2693   //
2694   // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<2 x s64>)
2695   //
2696   // Even though %0 and %1 are produced by the same instruction they are not
2697   // the same values.
2698   if (I1 == I2)
2699     return MOP1.getReg() == MOP2.getReg();
2700 
2701   // If we have an instruction which loads or stores, we can't guarantee that
2702   // it is identical.
2703   //
2704   // For example, we may have
2705   //
2706   // %x1 = G_LOAD %addr (load N from @somewhere)
2707   // ...
2708   // call @foo
2709   // ...
2710   // %x2 = G_LOAD %addr (load N from @somewhere)
2711   // ...
2712   // %or = G_OR %x1, %x2
2713   //
2714   // It's possible that @foo will modify whatever lives at the address we're
2715   // loading from. To be safe, let's just assume that all loads and stores
2716   // are different (unless we have something which is guaranteed to not
2717   // change.)
2718   if (I1->mayLoadOrStore() && !I1->isDereferenceableInvariantLoad(nullptr))
2719     return false;
2720 
2721   // Check for physical registers on the instructions first to avoid cases
2722   // like this:
2723   //
2724   // %a = COPY $physreg
2725   // ...
2726   // SOMETHING implicit-def $physreg
2727   // ...
2728   // %b = COPY $physreg
2729   //
2730   // These copies are not equivalent.
2731   if (any_of(I1->uses(), [](const MachineOperand &MO) {
2732         return MO.isReg() && MO.getReg().isPhysical();
2733       })) {
2734     // Check if we have a case like this:
2735     //
2736     // %a = COPY $physreg
2737     // %b = COPY %a
2738     //
2739     // In this case, I1 and I2 will both be equal to %a = COPY $physreg.
2740     // From that, we know that they must have the same value, since they must
2741     // have come from the same COPY.
2742     return I1->isIdenticalTo(*I2);
2743   }
2744 
2745   // We don't have any physical registers, so we don't necessarily need the
2746   // same vreg defs.
2747   //
2748   // On the off-chance that there's some target instruction feeding into the
2749   // instruction, let's use produceSameValue instead of isIdenticalTo.
2750   if (Builder.getTII().produceSameValue(*I1, *I2, &MRI)) {
    // Handle instructions with multiple defs that produce the same values.
    // The values are the same for operands with the same index.
    // %0:_(s8), %1:_(s8), %2:_(s8), %3:_(s8) = G_UNMERGE_VALUES %4:_(<4 x s8>)
    // %5:_(s8), %6:_(s8), %7:_(s8), %8:_(s8) = G_UNMERGE_VALUES %4:_(<4 x s8>)
    // I1 and I2 are different instructions but produce the same values, so
    // %1 and %6 are the same value, while %1 and %7 are not.
2757     return I1->findRegisterDefOperandIdx(InstAndDef1->Reg) ==
2758            I2->findRegisterDefOperandIdx(InstAndDef2->Reg);
2759   }
2760   return false;
2761 }
2762 
2763 bool CombinerHelper::matchConstantOp(const MachineOperand &MOP, int64_t C) {
2764   if (!MOP.isReg())
2765     return false;
2766   // MIPatternMatch doesn't let us look through G_ZEXT etc.
2767   auto ValAndVReg = getConstantVRegValWithLookThrough(MOP.getReg(), MRI);
2768   return ValAndVReg && ValAndVReg->Value == C;
2769 }
2770 
2771 bool CombinerHelper::replaceSingleDefInstWithOperand(MachineInstr &MI,
2772                                                      unsigned OpIdx) {
2773   assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
2774   Register OldReg = MI.getOperand(0).getReg();
2775   Register Replacement = MI.getOperand(OpIdx).getReg();
2776   assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?");
2777   MI.eraseFromParent();
2778   replaceRegWith(MRI, OldReg, Replacement);
2779   return true;
2780 }
2781 
2782 bool CombinerHelper::replaceSingleDefInstWithReg(MachineInstr &MI,
2783                                                  Register Replacement) {
2784   assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
2785   Register OldReg = MI.getOperand(0).getReg();
2786   assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?");
2787   MI.eraseFromParent();
2788   replaceRegWith(MRI, OldReg, Replacement);
2789   return true;
2790 }
2791 
2792 bool CombinerHelper::matchSelectSameVal(MachineInstr &MI) {
2793   assert(MI.getOpcode() == TargetOpcode::G_SELECT);
2794   // Match (cond ? x : x)
2795   return matchEqualDefs(MI.getOperand(2), MI.getOperand(3)) &&
2796          canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(2).getReg(),
2797                        MRI);
2798 }
2799 
2800 bool CombinerHelper::matchBinOpSameVal(MachineInstr &MI) {
2801   return matchEqualDefs(MI.getOperand(1), MI.getOperand(2)) &&
2802          canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(1).getReg(),
2803                        MRI);
2804 }
2805 
2806 bool CombinerHelper::matchOperandIsZero(MachineInstr &MI, unsigned OpIdx) {
2807   return matchConstantOp(MI.getOperand(OpIdx), 0) &&
2808          canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(OpIdx).getReg(),
2809                        MRI);
2810 }
2811 
2812 bool CombinerHelper::matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx) {
2813   MachineOperand &MO = MI.getOperand(OpIdx);
2814   return MO.isReg() &&
2815          getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
2816 }
2817 
2818 bool CombinerHelper::matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI,
2819                                                         unsigned OpIdx) {
2820   MachineOperand &MO = MI.getOperand(OpIdx);
2821   return isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB);
2822 }
2823 
2824 bool CombinerHelper::replaceInstWithFConstant(MachineInstr &MI, double C) {
2825   assert(MI.getNumDefs() == 1 && "Expected only one def?");
2826   Builder.setInstr(MI);
2827   Builder.buildFConstant(MI.getOperand(0), C);
2828   MI.eraseFromParent();
2829   return true;
2830 }
2831 
2832 bool CombinerHelper::replaceInstWithConstant(MachineInstr &MI, int64_t C) {
2833   assert(MI.getNumDefs() == 1 && "Expected only one def?");
2834   Builder.setInstr(MI);
2835   Builder.buildConstant(MI.getOperand(0), C);
2836   MI.eraseFromParent();
2837   return true;
2838 }
2839 
2840 bool CombinerHelper::replaceInstWithConstant(MachineInstr &MI, APInt C) {
2841   assert(MI.getNumDefs() == 1 && "Expected only one def?");
2842   Builder.setInstr(MI);
2843   Builder.buildConstant(MI.getOperand(0), C);
2844   MI.eraseFromParent();
2845   return true;
2846 }
2847 
2848 bool CombinerHelper::replaceInstWithUndef(MachineInstr &MI) {
2849   assert(MI.getNumDefs() == 1 && "Expected only one def?");
2850   Builder.setInstr(MI);
2851   Builder.buildUndef(MI.getOperand(0));
2852   MI.eraseFromParent();
2853   return true;
2854 }
2855 
2856 bool CombinerHelper::matchSimplifyAddToSub(
2857     MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) {
2858   Register LHS = MI.getOperand(1).getReg();
2859   Register RHS = MI.getOperand(2).getReg();
2860   Register &NewLHS = std::get<0>(MatchInfo);
2861   Register &NewRHS = std::get<1>(MatchInfo);
2862 
2863   // Helper lambda to check for opportunities for
2864   // ((0-A) + B) -> B - A
2865   // (A + (0-B)) -> A - B
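  //
  // For instance (an illustrative MIR sketch; register names are made up):
  //   %neg:_(s32) = G_SUB %zero, %a
  //   %add:_(s32) = G_ADD %neg, %b
  // is rewritten by applySimplifyAddToSub as
  //   %add:_(s32) = G_SUB %b, %a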
2866   auto CheckFold = [&](Register &MaybeSub, Register &MaybeNewLHS) {
2867     if (!mi_match(MaybeSub, MRI, m_Neg(m_Reg(NewRHS))))
2868       return false;
2869     NewLHS = MaybeNewLHS;
2870     return true;
2871   };
2872 
2873   return CheckFold(LHS, RHS) || CheckFold(RHS, LHS);
2874 }
2875 
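/// Combine a chain of G_INSERT_VECTOR_ELTs rooted at a G_IMPLICIT_DEF or a
/// G_BUILD_VECTOR into a single G_BUILD_VECTOR. As an illustrative MIR sketch
/// (register names are made up):
///   %idx0:_(s64) = G_CONSTANT i64 0
///   %idx1:_(s64) = G_CONSTANT i64 1
///   %undef:_(<2 x s32>) = G_IMPLICIT_DEF
///   %v0:_(<2 x s32>) = G_INSERT_VECTOR_ELT %undef, %a(s32), %idx0(s64)
///   %v1:_(<2 x s32>) = G_INSERT_VECTOR_ELT %v0, %b(s32), %idx1(s64)
/// can be rewritten as
///   %v1:_(<2 x s32>) = G_BUILD_VECTOR %a(s32), %b(s32)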
2876 bool CombinerHelper::matchCombineInsertVecElts(
2877     MachineInstr &MI, SmallVectorImpl<Register> &MatchInfo) {
2878   assert(MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT &&
2879          "Invalid opcode");
2880   Register DstReg = MI.getOperand(0).getReg();
2881   LLT DstTy = MRI.getType(DstReg);
2882   assert(DstTy.isVector() && "Invalid G_INSERT_VECTOR_ELT?");
2883   unsigned NumElts = DstTy.getNumElements();
2884   // If this MI is part of a sequence of insert_vec_elts, then
2885   // don't do the combine in the middle of the sequence.
2886   if (MRI.hasOneUse(DstReg) && MRI.use_instr_begin(DstReg)->getOpcode() ==
2887                                    TargetOpcode::G_INSERT_VECTOR_ELT)
2888     return false;
2889   MachineInstr *CurrInst = &MI;
2890   MachineInstr *TmpInst;
2891   int64_t IntImm;
2892   Register TmpReg;
2893   MatchInfo.resize(NumElts);
2894   while (mi_match(
2895       CurrInst->getOperand(0).getReg(), MRI,
2896       m_GInsertVecElt(m_MInstr(TmpInst), m_Reg(TmpReg), m_ICst(IntImm)))) {
    if (IntImm >= NumElts || IntImm < 0)
2898       return false;
2899     if (!MatchInfo[IntImm])
2900       MatchInfo[IntImm] = TmpReg;
2901     CurrInst = TmpInst;
2902   }
2903   // Variable index.
2904   if (CurrInst->getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT)
2905     return false;
2906   if (TmpInst->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
2907     for (unsigned I = 1; I < TmpInst->getNumOperands(); ++I) {
2908       if (!MatchInfo[I - 1].isValid())
2909         MatchInfo[I - 1] = TmpInst->getOperand(I).getReg();
2910     }
2911     return true;
2912   }
2913   // If we didn't end in a G_IMPLICIT_DEF, bail out.
2914   return TmpInst->getOpcode() == TargetOpcode::G_IMPLICIT_DEF;
2915 }
2916 
2917 void CombinerHelper::applyCombineInsertVecElts(
2918     MachineInstr &MI, SmallVectorImpl<Register> &MatchInfo) {
2919   Builder.setInstr(MI);
2920   Register UndefReg;
2921   auto GetUndef = [&]() {
2922     if (UndefReg)
2923       return UndefReg;
2924     LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
2925     UndefReg = Builder.buildUndef(DstTy.getScalarType()).getReg(0);
2926     return UndefReg;
2927   };
2928   for (unsigned I = 0; I < MatchInfo.size(); ++I) {
2929     if (!MatchInfo[I])
2930       MatchInfo[I] = GetUndef();
2931   }
2932   Builder.buildBuildVector(MI.getOperand(0).getReg(), MatchInfo);
2933   MI.eraseFromParent();
2934 }
2935 
2936 void CombinerHelper::applySimplifyAddToSub(
2937     MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) {
2938   Builder.setInstr(MI);
2939   Register SubLHS, SubRHS;
2940   std::tie(SubLHS, SubRHS) = MatchInfo;
2941   Builder.buildSub(MI.getOperand(0).getReg(), SubLHS, SubRHS);
2942   MI.eraseFromParent();
2943 }
2944 
2945 bool CombinerHelper::matchHoistLogicOpWithSameOpcodeHands(
2946     MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) {
2947   // Matches: logic (hand x, ...), (hand y, ...) -> hand (logic x, y), ...
2948   //
  // Creates the new hand and logic instructions (but does not insert them).
2950   //
2951   // On success, MatchInfo is populated with the new instructions. These are
2952   // inserted in applyHoistLogicOpWithSameOpcodeHands.
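  //
  // As an illustrative MIR sketch (register names are made up):
  //   %x32:_(s32) = G_ZEXT %x16:_(s16)
  //   %y32:_(s32) = G_ZEXT %y16:_(s16)
  //   %and:_(s32) = G_AND %x32, %y32
  // is turned into
  //   %logic:_(s16) = G_AND %x16, %y16
  //   %and:_(s32) = G_ZEXT %logic(s16)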
2953   unsigned LogicOpcode = MI.getOpcode();
2954   assert(LogicOpcode == TargetOpcode::G_AND ||
2955          LogicOpcode == TargetOpcode::G_OR ||
2956          LogicOpcode == TargetOpcode::G_XOR);
2957   MachineIRBuilder MIB(MI);
2958   Register Dst = MI.getOperand(0).getReg();
2959   Register LHSReg = MI.getOperand(1).getReg();
2960   Register RHSReg = MI.getOperand(2).getReg();
2961 
2962   // Don't recompute anything.
2963   if (!MRI.hasOneNonDBGUse(LHSReg) || !MRI.hasOneNonDBGUse(RHSReg))
2964     return false;
2965 
2966   // Make sure we have (hand x, ...), (hand y, ...)
2967   MachineInstr *LeftHandInst = getDefIgnoringCopies(LHSReg, MRI);
2968   MachineInstr *RightHandInst = getDefIgnoringCopies(RHSReg, MRI);
2969   if (!LeftHandInst || !RightHandInst)
2970     return false;
2971   unsigned HandOpcode = LeftHandInst->getOpcode();
2972   if (HandOpcode != RightHandInst->getOpcode())
2973     return false;
2974   if (!LeftHandInst->getOperand(1).isReg() ||
2975       !RightHandInst->getOperand(1).isReg())
2976     return false;
2977 
  // Make sure the types match up, and if we're doing this post-legalization,
  // that we end up with legal types.
2980   Register X = LeftHandInst->getOperand(1).getReg();
2981   Register Y = RightHandInst->getOperand(1).getReg();
2982   LLT XTy = MRI.getType(X);
2983   LLT YTy = MRI.getType(Y);
2984   if (XTy != YTy)
2985     return false;
2986   if (!isLegalOrBeforeLegalizer({LogicOpcode, {XTy, YTy}}))
2987     return false;
2988 
2989   // Optional extra source register.
2990   Register ExtraHandOpSrcReg;
2991   switch (HandOpcode) {
2992   default:
2993     return false;
2994   case TargetOpcode::G_ANYEXT:
2995   case TargetOpcode::G_SEXT:
2996   case TargetOpcode::G_ZEXT: {
2997     // Match: logic (ext X), (ext Y) --> ext (logic X, Y)
2998     break;
2999   }
3000   case TargetOpcode::G_AND:
3001   case TargetOpcode::G_ASHR:
3002   case TargetOpcode::G_LSHR:
3003   case TargetOpcode::G_SHL: {
3004     // Match: logic (binop x, z), (binop y, z) -> binop (logic x, y), z
3005     MachineOperand &ZOp = LeftHandInst->getOperand(2);
3006     if (!matchEqualDefs(ZOp, RightHandInst->getOperand(2)))
3007       return false;
3008     ExtraHandOpSrcReg = ZOp.getReg();
3009     break;
3010   }
3011   }
3012 
3013   // Record the steps to build the new instructions.
3014   //
3015   // Steps to build (logic x, y)
3016   auto NewLogicDst = MRI.createGenericVirtualRegister(XTy);
3017   OperandBuildSteps LogicBuildSteps = {
3018       [=](MachineInstrBuilder &MIB) { MIB.addDef(NewLogicDst); },
3019       [=](MachineInstrBuilder &MIB) { MIB.addReg(X); },
3020       [=](MachineInstrBuilder &MIB) { MIB.addReg(Y); }};
3021   InstructionBuildSteps LogicSteps(LogicOpcode, LogicBuildSteps);
3022 
3023   // Steps to build hand (logic x, y), ...z
3024   OperandBuildSteps HandBuildSteps = {
3025       [=](MachineInstrBuilder &MIB) { MIB.addDef(Dst); },
3026       [=](MachineInstrBuilder &MIB) { MIB.addReg(NewLogicDst); }};
3027   if (ExtraHandOpSrcReg.isValid())
3028     HandBuildSteps.push_back(
3029         [=](MachineInstrBuilder &MIB) { MIB.addReg(ExtraHandOpSrcReg); });
3030   InstructionBuildSteps HandSteps(HandOpcode, HandBuildSteps);
3031 
3032   MatchInfo = InstructionStepsMatchInfo({LogicSteps, HandSteps});
3033   return true;
3034 }
3035 
3036 void CombinerHelper::applyBuildInstructionSteps(
3037     MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) {
3038   assert(MatchInfo.InstrsToBuild.size() &&
3039          "Expected at least one instr to build?");
3040   Builder.setInstr(MI);
3041   for (auto &InstrToBuild : MatchInfo.InstrsToBuild) {
3042     assert(InstrToBuild.Opcode && "Expected a valid opcode?");
3043     assert(InstrToBuild.OperandFns.size() && "Expected at least one operand?");
3044     MachineInstrBuilder Instr = Builder.buildInstr(InstrToBuild.Opcode);
3045     for (auto &OperandFn : InstrToBuild.OperandFns)
3046       OperandFn(Instr);
3047   }
3048   MI.eraseFromParent();
3049 }
3050 
3051 bool CombinerHelper::matchAshrShlToSextInreg(
3052     MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) {
3053   assert(MI.getOpcode() == TargetOpcode::G_ASHR);
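  // As an illustrative sketch (assuming an s32 value and shift amount 24):
  //   ashr (shl %x, 24), 24
  // becomes
  //   sext_inreg %x, 8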
3054   int64_t ShlCst, AshrCst;
3055   Register Src;
3056   // FIXME: detect splat constant vectors.
3057   if (!mi_match(MI.getOperand(0).getReg(), MRI,
3058                 m_GAShr(m_GShl(m_Reg(Src), m_ICst(ShlCst)), m_ICst(AshrCst))))
3059     return false;
3060   if (ShlCst != AshrCst)
3061     return false;
3062   if (!isLegalOrBeforeLegalizer(
3063           {TargetOpcode::G_SEXT_INREG, {MRI.getType(Src)}}))
3064     return false;
3065   MatchInfo = std::make_tuple(Src, ShlCst);
3066   return true;
3067 }
3068 
3069 void CombinerHelper::applyAshShlToSextInreg(
3070     MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) {
3071   assert(MI.getOpcode() == TargetOpcode::G_ASHR);
3072   Register Src;
3073   int64_t ShiftAmt;
3074   std::tie(Src, ShiftAmt) = MatchInfo;
3075   unsigned Size = MRI.getType(Src).getScalarSizeInBits();
3076   Builder.setInstrAndDebugLoc(MI);
3077   Builder.buildSExtInReg(MI.getOperand(0).getReg(), Src, Size - ShiftAmt);
3078   MI.eraseFromParent();
3079 }
3080 
3081 /// and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
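/// E.g. (illustrative): and(and(x, 0x0ff0), 0x00ff) -> and(x, 0x00f0), and
/// and(and(x, 0xff00), 0x00ff) -> 0.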
3082 bool CombinerHelper::matchOverlappingAnd(
3083     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
3084   assert(MI.getOpcode() == TargetOpcode::G_AND);
3085 
3086   Register Dst = MI.getOperand(0).getReg();
3087   LLT Ty = MRI.getType(Dst);
3088 
3089   Register R;
3090   int64_t C1;
3091   int64_t C2;
3092   if (!mi_match(
3093           Dst, MRI,
3094           m_GAnd(m_GAnd(m_Reg(R), m_ICst(C1)), m_ICst(C2))))
3095     return false;
3096 
3097   MatchInfo = [=](MachineIRBuilder &B) {
3098     if (C1 & C2) {
3099       B.buildAnd(Dst, R, B.buildConstant(Ty, C1 & C2));
3100       return;
3101     }
3102     auto Zero = B.buildConstant(Ty, 0);
3103     replaceRegWith(MRI, Dst, Zero->getOperand(0).getReg());
3104   };
3105   return true;
3106 }
3107 
3108 bool CombinerHelper::matchRedundantAnd(MachineInstr &MI,
3109                                        Register &Replacement) {
3110   // Given
3111   //
3112   // %y:_(sN) = G_SOMETHING
3113   // %x:_(sN) = G_SOMETHING
3114   // %res:_(sN) = G_AND %x, %y
3115   //
3116   // Eliminate the G_AND when it is known that x & y == x or x & y == y.
3117   //
3118   // Patterns like this can appear as a result of legalization. E.g.
3119   //
3120   // %cmp:_(s32) = G_ICMP intpred(pred), %x(s32), %y
3121   // %one:_(s32) = G_CONSTANT i32 1
3122   // %and:_(s32) = G_AND %cmp, %one
3123   //
3124   // In this case, G_ICMP only produces a single bit, so x & 1 == x.
3125   assert(MI.getOpcode() == TargetOpcode::G_AND);
3126   if (!KB)
3127     return false;
3128 
3129   Register AndDst = MI.getOperand(0).getReg();
3130   LLT DstTy = MRI.getType(AndDst);
3131 
3132   // FIXME: This should be removed once GISelKnownBits supports vectors.
3133   if (DstTy.isVector())
3134     return false;
3135 
3136   Register LHS = MI.getOperand(1).getReg();
3137   Register RHS = MI.getOperand(2).getReg();
3138   KnownBits LHSBits = KB->getKnownBits(LHS);
3139   KnownBits RHSBits = KB->getKnownBits(RHS);
3140 
3141   // Check that x & Mask == x.
  // For each bit, x & 1 == x always, and x & 0 == x only if that bit of x is
  // also 0. So the mask has no effect if every bit is either one in Mask or
  // zero in x.
3145   //
3146   // Check if we can replace AndDst with the LHS of the G_AND
3147   if (canReplaceReg(AndDst, LHS, MRI) &&
3148       (LHSBits.Zero | RHSBits.One).isAllOnesValue()) {
3149     Replacement = LHS;
3150     return true;
3151   }
3152 
3153   // Check if we can replace AndDst with the RHS of the G_AND
3154   if (canReplaceReg(AndDst, RHS, MRI) &&
3155       (LHSBits.One | RHSBits.Zero).isAllOnesValue()) {
3156     Replacement = RHS;
3157     return true;
3158   }
3159 
3160   return false;
3161 }
3162 
3163 bool CombinerHelper::matchRedundantOr(MachineInstr &MI, Register &Replacement) {
3164   // Given
3165   //
3166   // %y:_(sN) = G_SOMETHING
3167   // %x:_(sN) = G_SOMETHING
3168   // %res:_(sN) = G_OR %x, %y
3169   //
3170   // Eliminate the G_OR when it is known that x | y == x or x | y == y.
3171   assert(MI.getOpcode() == TargetOpcode::G_OR);
3172   if (!KB)
3173     return false;
3174 
3175   Register OrDst = MI.getOperand(0).getReg();
3176   LLT DstTy = MRI.getType(OrDst);
3177 
3178   // FIXME: This should be removed once GISelKnownBits supports vectors.
3179   if (DstTy.isVector())
3180     return false;
3181 
3182   Register LHS = MI.getOperand(1).getReg();
3183   Register RHS = MI.getOperand(2).getReg();
3184   KnownBits LHSBits = KB->getKnownBits(LHS);
3185   KnownBits RHSBits = KB->getKnownBits(RHS);
3186 
3187   // Check that x | Mask == x.
  // For each bit, x | 0 == x always, and x | 1 == x only if that bit of x is
  // also 1. So the mask has no effect if every bit is either zero in Mask or
  // one in x.
3191   //
3192   // Check if we can replace OrDst with the LHS of the G_OR
3193   if (canReplaceReg(OrDst, LHS, MRI) &&
3194       (LHSBits.One | RHSBits.Zero).isAllOnesValue()) {
3195     Replacement = LHS;
3196     return true;
3197   }
3198 
3199   // Check if we can replace OrDst with the RHS of the G_OR
3200   if (canReplaceReg(OrDst, RHS, MRI) &&
3201       (LHSBits.Zero | RHSBits.One).isAllOnesValue()) {
3202     Replacement = RHS;
3203     return true;
3204   }
3205 
3206   return false;
3207 }
3208 
3209 bool CombinerHelper::matchRedundantSExtInReg(MachineInstr &MI) {
3210   // If the input is already sign extended, just drop the extension.
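  // As an illustrative sketch (s32): if %src is known to have at least 25
  // sign bits, then
  //   %r:_(s32) = G_SEXT_INREG %src, 8
  // is a no-op and %r can simply be replaced with %src.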
3211   Register Src = MI.getOperand(1).getReg();
3212   unsigned ExtBits = MI.getOperand(2).getImm();
3213   unsigned TypeSize = MRI.getType(Src).getScalarSizeInBits();
3214   return KB->computeNumSignBits(Src) >= (TypeSize - ExtBits + 1);
3215 }
3216 
3217 static bool isConstValidTrue(const TargetLowering &TLI, unsigned ScalarSizeBits,
3218                              int64_t Cst, bool IsVector, bool IsFP) {
3219   // For i1, Cst will always be -1 regardless of boolean contents.
3220   return (ScalarSizeBits == 1 && Cst == -1) ||
3221          isConstTrueVal(TLI, Cst, IsVector, IsFP);
3222 }
3223 
3224 bool CombinerHelper::matchNotCmp(MachineInstr &MI,
3225                                  SmallVectorImpl<Register> &RegsToNegate) {
3226   assert(MI.getOpcode() == TargetOpcode::G_XOR);
3227   LLT Ty = MRI.getType(MI.getOperand(0).getReg());
3228   const auto &TLI = *Builder.getMF().getSubtarget().getTargetLowering();
3229   Register XorSrc;
3230   Register CstReg;
3231   // We match xor(src, true) here.
3232   if (!mi_match(MI.getOperand(0).getReg(), MRI,
3233                 m_GXor(m_Reg(XorSrc), m_Reg(CstReg))))
3234     return false;
3235 
3236   if (!MRI.hasOneNonDBGUse(XorSrc))
3237     return false;
3238 
  // Check that XorSrc is the root of a tree of comparisons combined with ANDs
  // and ORs. The suffix of RegsToNegate starting from index I is used as a
  // worklist of tree nodes to visit.
3242   RegsToNegate.push_back(XorSrc);
3243   // Remember whether the comparisons are all integer or all floating point.
3244   bool IsInt = false;
3245   bool IsFP = false;
3246   for (unsigned I = 0; I < RegsToNegate.size(); ++I) {
3247     Register Reg = RegsToNegate[I];
3248     if (!MRI.hasOneNonDBGUse(Reg))
3249       return false;
3250     MachineInstr *Def = MRI.getVRegDef(Reg);
3251     switch (Def->getOpcode()) {
3252     default:
3253       // Don't match if the tree contains anything other than ANDs, ORs and
3254       // comparisons.
3255       return false;
3256     case TargetOpcode::G_ICMP:
3257       if (IsFP)
3258         return false;
3259       IsInt = true;
3260       // When we apply the combine we will invert the predicate.
3261       break;
3262     case TargetOpcode::G_FCMP:
3263       if (IsInt)
3264         return false;
3265       IsFP = true;
3266       // When we apply the combine we will invert the predicate.
3267       break;
3268     case TargetOpcode::G_AND:
3269     case TargetOpcode::G_OR:
3270       // Implement De Morgan's laws:
3271       // ~(x & y) -> ~x | ~y
3272       // ~(x | y) -> ~x & ~y
3273       // When we apply the combine we will change the opcode and recursively
3274       // negate the operands.
3275       RegsToNegate.push_back(Def->getOperand(1).getReg());
3276       RegsToNegate.push_back(Def->getOperand(2).getReg());
3277       break;
3278     }
3279   }
3280 
3281   // Now we know whether the comparisons are integer or floating point, check
3282   // the constant in the xor.
3283   int64_t Cst;
3284   if (Ty.isVector()) {
3285     MachineInstr *CstDef = MRI.getVRegDef(CstReg);
3286     auto MaybeCst = getBuildVectorConstantSplat(*CstDef, MRI);
3287     if (!MaybeCst)
3288       return false;
3289     if (!isConstValidTrue(TLI, Ty.getScalarSizeInBits(), *MaybeCst, true, IsFP))
3290       return false;
3291   } else {
3292     if (!mi_match(CstReg, MRI, m_ICst(Cst)))
3293       return false;
3294     if (!isConstValidTrue(TLI, Ty.getSizeInBits(), Cst, false, IsFP))
3295       return false;
3296   }
3297 
3298   return true;
3299 }
3300 
3301 void CombinerHelper::applyNotCmp(MachineInstr &MI,
3302                                  SmallVectorImpl<Register> &RegsToNegate) {
3303   for (Register Reg : RegsToNegate) {
3304     MachineInstr *Def = MRI.getVRegDef(Reg);
3305     Observer.changingInstr(*Def);
3306     // For each comparison, invert the opcode. For each AND and OR, change the
3307     // opcode.
3308     switch (Def->getOpcode()) {
3309     default:
3310       llvm_unreachable("Unexpected opcode");
3311     case TargetOpcode::G_ICMP:
3312     case TargetOpcode::G_FCMP: {
3313       MachineOperand &PredOp = Def->getOperand(1);
3314       CmpInst::Predicate NewP = CmpInst::getInversePredicate(
3315           (CmpInst::Predicate)PredOp.getPredicate());
3316       PredOp.setPredicate(NewP);
3317       break;
3318     }
3319     case TargetOpcode::G_AND:
3320       Def->setDesc(Builder.getTII().get(TargetOpcode::G_OR));
3321       break;
3322     case TargetOpcode::G_OR:
3323       Def->setDesc(Builder.getTII().get(TargetOpcode::G_AND));
3324       break;
3325     }
3326     Observer.changedInstr(*Def);
3327   }
3328 
3329   replaceRegWith(MRI, MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
3330   MI.eraseFromParent();
3331 }
3332 
3333 bool CombinerHelper::matchXorOfAndWithSameReg(
3334     MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
3335   // Match (xor (and x, y), y) (or any of its commuted cases)
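  // For each bit, (x & y) ^ y is ~x when y is 1 and 0 when y is 0, i.e. it is
  // (~x) & y, which is what applyXorOfAndWithSameReg builds.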
3336   assert(MI.getOpcode() == TargetOpcode::G_XOR);
3337   Register &X = MatchInfo.first;
3338   Register &Y = MatchInfo.second;
3339   Register AndReg = MI.getOperand(1).getReg();
3340   Register SharedReg = MI.getOperand(2).getReg();
3341 
3342   // Find a G_AND on either side of the G_XOR.
3343   // Look for one of
3344   //
3345   // (xor (and x, y), SharedReg)
3346   // (xor SharedReg, (and x, y))
3347   if (!mi_match(AndReg, MRI, m_GAnd(m_Reg(X), m_Reg(Y)))) {
3348     std::swap(AndReg, SharedReg);
3349     if (!mi_match(AndReg, MRI, m_GAnd(m_Reg(X), m_Reg(Y))))
3350       return false;
3351   }
3352 
3353   // Only do this if we'll eliminate the G_AND.
3354   if (!MRI.hasOneNonDBGUse(AndReg))
3355     return false;
3356 
3357   // We can combine if SharedReg is the same as either the LHS or RHS of the
3358   // G_AND.
3359   if (Y != SharedReg)
3360     std::swap(X, Y);
3361   return Y == SharedReg;
3362 }
3363 
3364 void CombinerHelper::applyXorOfAndWithSameReg(
3365     MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
3366   // Fold (xor (and x, y), y) -> (and (not x), y)
3367   Builder.setInstrAndDebugLoc(MI);
3368   Register X, Y;
3369   std::tie(X, Y) = MatchInfo;
3370   auto Not = Builder.buildNot(MRI.getType(X), X);
3371   Observer.changingInstr(MI);
3372   MI.setDesc(Builder.getTII().get(TargetOpcode::G_AND));
3373   MI.getOperand(1).setReg(Not->getOperand(0).getReg());
3374   MI.getOperand(2).setReg(Y);
3375   Observer.changedInstr(MI);
3376 }
3377 
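/// Fold a G_PTR_ADD whose base pointer is (a vector of) constant zero into a
/// G_INTTOPTR of the offset. As an illustrative MIR sketch (register names
/// are made up):
///   %null:_(p0) = G_CONSTANT i64 0
///   %ptr:_(p0) = G_PTR_ADD %null, %off(s64)
/// is rewritten by applyPtrAddZero as
///   %ptr:_(p0) = G_INTTOPTR %off(s64)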
3378 bool CombinerHelper::matchPtrAddZero(MachineInstr &MI) {
3379   auto &PtrAdd = cast<GPtrAdd>(MI);
3380   Register DstReg = PtrAdd.getReg(0);
3381   LLT Ty = MRI.getType(DstReg);
3382   const DataLayout &DL = Builder.getMF().getDataLayout();
3383 
3384   if (DL.isNonIntegralAddressSpace(Ty.getScalarType().getAddressSpace()))
3385     return false;
3386 
3387   if (Ty.isPointer()) {
3388     auto ConstVal = getConstantVRegVal(PtrAdd.getBaseReg(), MRI);
3389     return ConstVal && *ConstVal == 0;
3390   }
3391 
3392   assert(Ty.isVector() && "Expecting a vector type");
3393   const MachineInstr *VecMI = MRI.getVRegDef(PtrAdd.getBaseReg());
3394   return isBuildVectorAllZeros(*VecMI, MRI);
3395 }
3396 
3397 void CombinerHelper::applyPtrAddZero(MachineInstr &MI) {
3398   auto &PtrAdd = cast<GPtrAdd>(MI);
3399   Builder.setInstrAndDebugLoc(PtrAdd);
3400   Builder.buildIntToPtr(PtrAdd.getReg(0), PtrAdd.getOffsetReg());
3401   PtrAdd.eraseFromParent();
3402 }
3403 
3404 /// The second source operand is known to be a power of 2.
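/// E.g. (illustrative): (urem x, 8) is rewritten as (and x, 7).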
3405 void CombinerHelper::applySimplifyURemByPow2(MachineInstr &MI) {
3406   Register DstReg = MI.getOperand(0).getReg();
3407   Register Src0 = MI.getOperand(1).getReg();
3408   Register Pow2Src1 = MI.getOperand(2).getReg();
3409   LLT Ty = MRI.getType(DstReg);
3410   Builder.setInstrAndDebugLoc(MI);
3411 
3412   // Fold (urem x, pow2) -> (and x, pow2-1)
3413   auto NegOne = Builder.buildConstant(Ty, -1);
3414   auto Add = Builder.buildAdd(Ty, Pow2Src1, NegOne);
3415   Builder.buildAnd(DstReg, Src0, Add);
3416   MI.eraseFromParent();
3417 }
3418 
3419 Optional<SmallVector<Register, 8>>
3420 CombinerHelper::findCandidatesForLoadOrCombine(const MachineInstr *Root) const {
3421   assert(Root->getOpcode() == TargetOpcode::G_OR && "Expected G_OR only!");
3422   // We want to detect if Root is part of a tree which represents a bunch
3423   // of loads being merged into a larger load. We'll try to recognize patterns
3424   // like, for example:
3425   //
3426   //  Reg   Reg
3427   //   \    /
3428   //    OR_1   Reg
3429   //     \    /
3430   //      OR_2
3431   //        \     Reg
3432   //         .. /
3433   //        Root
3434   //
3435   //  Reg   Reg   Reg   Reg
3436   //     \ /       \   /
3437   //     OR_1      OR_2
3438   //       \       /
3439   //        \    /
3440   //         ...
3441   //         Root
3442   //
3443   // Each "Reg" may have been produced by a load + some arithmetic. This
3444   // function will save each of them.
3445   SmallVector<Register, 8> RegsToVisit;
3446   SmallVector<const MachineInstr *, 7> Ors = {Root};
3447 
3448   // In the "worst" case, we're dealing with a load for each byte. So, there
3449   // are at most #bytes - 1 ORs.
3450   const unsigned MaxIter =
3451       MRI.getType(Root->getOperand(0).getReg()).getSizeInBytes() - 1;
3452   for (unsigned Iter = 0; Iter < MaxIter; ++Iter) {
3453     if (Ors.empty())
3454       break;
3455     const MachineInstr *Curr = Ors.pop_back_val();
3456     Register OrLHS = Curr->getOperand(1).getReg();
3457     Register OrRHS = Curr->getOperand(2).getReg();
3458 
    // In the combine, we want to eliminate the entire tree.
3460     if (!MRI.hasOneNonDBGUse(OrLHS) || !MRI.hasOneNonDBGUse(OrRHS))
3461       return None;
3462 
3463     // If it's a G_OR, save it and continue to walk. If it's not, then it's
3464     // something that may be a load + arithmetic.
3465     if (const MachineInstr *Or = getOpcodeDef(TargetOpcode::G_OR, OrLHS, MRI))
3466       Ors.push_back(Or);
3467     else
3468       RegsToVisit.push_back(OrLHS);
3469     if (const MachineInstr *Or = getOpcodeDef(TargetOpcode::G_OR, OrRHS, MRI))
3470       Ors.push_back(Or);
3471     else
3472       RegsToVisit.push_back(OrRHS);
3473   }
3474 
3475   // We're going to try and merge each register into a wider power-of-2 type,
3476   // so we ought to have an even number of registers.
3477   if (RegsToVisit.empty() || RegsToVisit.size() % 2 != 0)
3478     return None;
3479   return RegsToVisit;
3480 }
3481 
3482 /// Helper function for findLoadOffsetsForLoadOrCombine.
3483 ///
3484 /// Check if \p Reg is the result of loading a \p MemSizeInBits wide value,
3485 /// and then moving that value into a specific byte offset.
3486 ///
3487 /// e.g. x[i] << 24
3488 ///
3489 /// \returns The load instruction and the byte offset it is moved into.
3490 static Optional<std::pair<GZExtLoad *, int64_t>>
3491 matchLoadAndBytePosition(Register Reg, unsigned MemSizeInBits,
3492                          const MachineRegisterInfo &MRI) {
3493   assert(MRI.hasOneNonDBGUse(Reg) &&
3494          "Expected Reg to only have one non-debug use?");
3495   Register MaybeLoad;
3496   int64_t Shift;
3497   if (!mi_match(Reg, MRI,
3498                 m_OneNonDBGUse(m_GShl(m_Reg(MaybeLoad), m_ICst(Shift))))) {
3499     Shift = 0;
3500     MaybeLoad = Reg;
3501   }
3502 
3503   if (Shift % MemSizeInBits != 0)
3504     return None;
3505 
3506   // TODO: Handle other types of loads.
3507   auto *Load = getOpcodeDef<GZExtLoad>(MaybeLoad, MRI);
3508   if (!Load)
3509     return None;
3510 
3511   if (!Load->isUnordered() || Load->getMemSizeInBits() != MemSizeInBits)
3512     return None;
3513 
3514   return std::make_pair(Load, Shift / MemSizeInBits);
3515 }
3516 
3517 Optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
3518 CombinerHelper::findLoadOffsetsForLoadOrCombine(
3519     SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
3520     const SmallVector<Register, 8> &RegsToVisit, const unsigned MemSizeInBits) {
3521 
3522   // Each load found for the pattern. There should be one for each RegsToVisit.
3523   SmallSetVector<const MachineInstr *, 8> Loads;
3524 
3525   // The lowest index used in any load. (The lowest "i" for each x[i].)
3526   int64_t LowestIdx = INT64_MAX;
3527 
3528   // The load which uses the lowest index.
3529   GZExtLoad *LowestIdxLoad = nullptr;
3530 
3531   // Keeps track of the load indices we see. We shouldn't see any indices twice.
3532   SmallSet<int64_t, 8> SeenIdx;
3533 
3534   // Ensure each load is in the same MBB.
3535   // TODO: Support multiple MachineBasicBlocks.
3536   MachineBasicBlock *MBB = nullptr;
3537   const MachineMemOperand *MMO = nullptr;
3538 
3539   // Earliest instruction-order load in the pattern.
3540   GZExtLoad *EarliestLoad = nullptr;
3541 
3542   // Latest instruction-order load in the pattern.
3543   GZExtLoad *LatestLoad = nullptr;
3544 
3545   // Base pointer which every load should share.
3546   Register BasePtr;
3547 
3548   // We want to find a load for each register. Each load should have some
3549   // appropriate bit twiddling arithmetic. During this loop, we will also keep
3550   // track of the load which uses the lowest index. Later, we will check if we
3551   // can use its pointer in the final, combined load.
3552   for (auto Reg : RegsToVisit) {
    // Find the load, and find the byte position that its value will end up at
    // in the final (possibly shifted) value.
3555     auto LoadAndPos = matchLoadAndBytePosition(Reg, MemSizeInBits, MRI);
3556     if (!LoadAndPos)
3557       return None;
3558     GZExtLoad *Load;
3559     int64_t DstPos;
3560     std::tie(Load, DstPos) = *LoadAndPos;
3561 
3562     // TODO: Handle multiple MachineBasicBlocks. Currently not handled because
3563     // it is difficult to check for stores/calls/etc between loads.
3564     MachineBasicBlock *LoadMBB = Load->getParent();
3565     if (!MBB)
3566       MBB = LoadMBB;
3567     if (LoadMBB != MBB)
3568       return None;
3569 
3570     // Make sure that the MachineMemOperands of every seen load are compatible.
3571     auto &LoadMMO = Load->getMMO();
3572     if (!MMO)
3573       MMO = &LoadMMO;
3574     if (MMO->getAddrSpace() != LoadMMO.getAddrSpace())
3575       return None;
3576 
3577     // Find out what the base pointer and index for the load is.
3578     Register LoadPtr;
3579     int64_t Idx;
3580     if (!mi_match(Load->getOperand(1).getReg(), MRI,
3581                   m_GPtrAdd(m_Reg(LoadPtr), m_ICst(Idx)))) {
3582       LoadPtr = Load->getOperand(1).getReg();
3583       Idx = 0;
3584     }
3585 
3586     // Don't combine things like a[i], a[i] -> a bigger load.
3587     if (!SeenIdx.insert(Idx).second)
3588       return None;
3589 
3590     // Every load must share the same base pointer; don't combine things like:
3591     //
3592     // a[i], b[i + 1] -> a bigger load.
3593     if (!BasePtr.isValid())
3594       BasePtr = LoadPtr;
3595     if (BasePtr != LoadPtr)
3596       return None;
3597 
3598     if (Idx < LowestIdx) {
3599       LowestIdx = Idx;
3600       LowestIdxLoad = Load;
3601     }
3602 
    // Keep track of the byte offset that this load ends up at. If we have
    // already seen this byte offset, then stop here. We do not want to
    // combine:
3605     //
3606     // a[i] << 16, a[i + k] << 16 -> a bigger load.
3607     if (!MemOffset2Idx.try_emplace(DstPos, Idx).second)
3608       return None;
3609     Loads.insert(Load);
3610 
3611     // Keep track of the position of the earliest/latest loads in the pattern.
3612     // We will check that there are no load fold barriers between them later
3613     // on.
3614     //
3615     // FIXME: Is there a better way to check for load fold barriers?
3616     if (!EarliestLoad || dominates(*Load, *EarliestLoad))
3617       EarliestLoad = Load;
3618     if (!LatestLoad || dominates(*LatestLoad, *Load))
3619       LatestLoad = Load;
3620   }
3621 
3622   // We found a load for each register. Let's check if each load satisfies the
3623   // pattern.
3624   assert(Loads.size() == RegsToVisit.size() &&
3625          "Expected to find a load for each register?");
  assert(EarliestLoad != LatestLoad && EarliestLoad && LatestLoad &&
         "Expected at least two loads?");
3628 
3629   // Check if there are any stores, calls, etc. between any of the loads. If
3630   // there are, then we can't safely perform the combine.
3631   //
  // MaxIter is chosen based on the (worst case) number of iterations it
  // typically takes to succeed in the LLVM test suite plus some padding.
3634   //
3635   // FIXME: Is there a better way to check for load fold barriers?
3636   const unsigned MaxIter = 20;
3637   unsigned Iter = 0;
3638   for (const auto &MI : instructionsWithoutDebug(EarliestLoad->getIterator(),
3639                                                  LatestLoad->getIterator())) {
3640     if (Loads.count(&MI))
3641       continue;
3642     if (MI.isLoadFoldBarrier())
3643       return None;
3644     if (Iter++ == MaxIter)
3645       return None;
3646   }
3647 
3648   return std::make_tuple(LowestIdxLoad, LowestIdx, LatestLoad);
3649 }
3650 
3651 bool CombinerHelper::matchLoadOrCombine(
3652     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
3653   assert(MI.getOpcode() == TargetOpcode::G_OR);
3654   MachineFunction &MF = *MI.getMF();
3655   // Assuming a little-endian target, transform:
3656   //  s8 *a = ...
3657   //  s32 val = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24)
3658   // =>
3659   //  s32 val = *((i32)a)
3660   //
3661   //  s8 *a = ...
3662   //  s32 val = (a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3]
3663   // =>
3664   //  s32 val = BSWAP(*((s32)a))
3665   Register Dst = MI.getOperand(0).getReg();
3666   LLT Ty = MRI.getType(Dst);
3667   if (Ty.isVector())
3668     return false;
3669 
3670   // We need to combine at least two loads into this type. Since the smallest
3671   // possible load is into a byte, we need at least a 16-bit wide type.
3672   const unsigned WideMemSizeInBits = Ty.getSizeInBits();
3673   if (WideMemSizeInBits < 16 || WideMemSizeInBits % 8 != 0)
3674     return false;
3675 
3676   // Match a collection of non-OR instructions in the pattern.
3677   auto RegsToVisit = findCandidatesForLoadOrCombine(&MI);
3678   if (!RegsToVisit)
3679     return false;
3680 
3681   // We have a collection of non-OR instructions. Figure out how wide each of
3682   // the small loads should be based off of the number of potential loads we
3683   // found.
3684   const unsigned NarrowMemSizeInBits = WideMemSizeInBits / RegsToVisit->size();
3685   if (NarrowMemSizeInBits % 8 != 0)
3686     return false;
3687 
3688   // Check if each register feeding into each OR is a load from the same
3689   // base pointer + some arithmetic.
3690   //
3691   // e.g. a[0], a[1] << 8, a[2] << 16, etc.
3692   //
3693   // Also verify that each of these ends up putting a[i] into the same memory
3694   // offset as a load into a wide type would.
3695   SmallDenseMap<int64_t, int64_t, 8> MemOffset2Idx;
3696   GZExtLoad *LowestIdxLoad, *LatestLoad;
3697   int64_t LowestIdx;
3698   auto MaybeLoadInfo = findLoadOffsetsForLoadOrCombine(
3699       MemOffset2Idx, *RegsToVisit, NarrowMemSizeInBits);
3700   if (!MaybeLoadInfo)
3701     return false;
3702   std::tie(LowestIdxLoad, LowestIdx, LatestLoad) = *MaybeLoadInfo;
3703 
3704   // We have a bunch of loads being OR'd together. Using the addresses + offsets
3705   // we found before, check if this corresponds to a big or little endian byte
3706   // pattern. If it does, then we can represent it using a load + possibly a
3707   // BSWAP.
3708   bool IsBigEndianTarget = MF.getDataLayout().isBigEndian();
3709   Optional<bool> IsBigEndian = isBigEndian(MemOffset2Idx, LowestIdx);
3710   if (!IsBigEndian.hasValue())
3711     return false;
3712   bool NeedsBSwap = IsBigEndianTarget != *IsBigEndian;
3713   if (NeedsBSwap && !isLegalOrBeforeLegalizer({TargetOpcode::G_BSWAP, {Ty}}))
3714     return false;
3715 
3716   // Make sure that the load from the lowest index produces offset 0 in the
3717   // final value.
3718   //
3719   // This ensures that we won't combine something like this:
3720   //
3721   // load x[i] -> byte 2
3722   // load x[i+1] -> byte 0 ---> wide_load x[i]
3723   // load x[i+2] -> byte 1
3724   const unsigned NumLoadsInTy = WideMemSizeInBits / NarrowMemSizeInBits;
3725   const unsigned ZeroByteOffset =
3726       *IsBigEndian
3727           ? bigEndianByteAt(NumLoadsInTy, 0)
3728           : littleEndianByteAt(NumLoadsInTy, 0);
3729   auto ZeroOffsetIdx = MemOffset2Idx.find(ZeroByteOffset);
3730   if (ZeroOffsetIdx == MemOffset2Idx.end() ||
3731       ZeroOffsetIdx->second != LowestIdx)
3732     return false;
3733 
  // We will reuse the pointer from the load which ends up at byte offset 0. It
  // may not use index 0.
3736   Register Ptr = LowestIdxLoad->getPointerReg();
3737   const MachineMemOperand &MMO = LowestIdxLoad->getMMO();
3738   LegalityQuery::MemDesc MMDesc;
3739   MMDesc.MemoryTy = Ty;
3740   MMDesc.AlignInBits = MMO.getAlign().value() * 8;
3741   MMDesc.Ordering = MMO.getSuccessOrdering();
3742   if (!isLegalOrBeforeLegalizer(
3743           {TargetOpcode::G_LOAD, {Ty, MRI.getType(Ptr)}, {MMDesc}}))
3744     return false;
3745   auto PtrInfo = MMO.getPointerInfo();
3746   auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, WideMemSizeInBits / 8);
3747 
3748   // Load must be allowed and fast on the target.
3749   LLVMContext &C = MF.getFunction().getContext();
3750   auto &DL = MF.getDataLayout();
3751   bool Fast = false;
3752   if (!getTargetLowering().allowsMemoryAccess(C, DL, Ty, *NewMMO, &Fast) ||
3753       !Fast)
3754     return false;
3755 
3756   MatchInfo = [=](MachineIRBuilder &MIB) {
3757     MIB.setInstrAndDebugLoc(*LatestLoad);
3758     Register LoadDst = NeedsBSwap ? MRI.cloneVirtualRegister(Dst) : Dst;
3759     MIB.buildLoad(LoadDst, Ptr, *NewMMO);
3760     if (NeedsBSwap)
3761       MIB.buildBSwap(Dst, LoadDst);
3762   };
3763   return true;
3764 }
3765 
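/// Push an extend through a G_PHI so that the extend is applied to each
/// incoming value instead of to the phi result. As an illustrative sketch
/// (register names are made up):
///   %phi:_(s16) = G_PHI %a(s16), %bb1, %b(s16), %bb2
///   %ext:_(s32) = G_SEXT %phi(s16)
/// becomes a G_SEXT of %a and of %b in their defining blocks, feeding
///   %ext:_(s32) = G_PHI %exta(s32), %bb1, %extb(s32), %bb2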
3766 bool CombinerHelper::matchExtendThroughPhis(MachineInstr &MI,
3767                                             MachineInstr *&ExtMI) {
3768   assert(MI.getOpcode() == TargetOpcode::G_PHI);
3769 
3770   Register DstReg = MI.getOperand(0).getReg();
3771 
3772   // TODO: Extending a vector may be expensive, don't do this until heuristics
3773   // are better.
3774   if (MRI.getType(DstReg).isVector())
3775     return false;
3776 
3777   // Try to match a phi, whose only use is an extend.
3778   if (!MRI.hasOneNonDBGUse(DstReg))
3779     return false;
3780   ExtMI = &*MRI.use_instr_nodbg_begin(DstReg);
3781   switch (ExtMI->getOpcode()) {
3782   case TargetOpcode::G_ANYEXT:
3783     return true; // G_ANYEXT is usually free.
3784   case TargetOpcode::G_ZEXT:
3785   case TargetOpcode::G_SEXT:
3786     break;
3787   default:
3788     return false;
3789   }
3790 
3791   // If the target is likely to fold this extend away, don't propagate.
3792   if (Builder.getTII().isExtendLikelyToBeFolded(*ExtMI, MRI))
3793     return false;
3794 
3795   // We don't want to propagate the extends unless there's a good chance that
3796   // they'll be optimized in some way.
3797   // Collect the unique incoming values.
3798   SmallPtrSet<MachineInstr *, 4> InSrcs;
3799   for (unsigned Idx = 1; Idx < MI.getNumOperands(); Idx += 2) {
3800     auto *DefMI = getDefIgnoringCopies(MI.getOperand(Idx).getReg(), MRI);
3801     switch (DefMI->getOpcode()) {
3802     case TargetOpcode::G_LOAD:
3803     case TargetOpcode::G_TRUNC:
3804     case TargetOpcode::G_SEXT:
3805     case TargetOpcode::G_ZEXT:
3806     case TargetOpcode::G_ANYEXT:
3807     case TargetOpcode::G_CONSTANT:
3808       InSrcs.insert(getDefIgnoringCopies(MI.getOperand(Idx).getReg(), MRI));
3809       // Don't try to propagate if there are too many places to create new
3810       // extends, chances are it'll increase code size.
3811       if (InSrcs.size() > 2)
3812         return false;
3813       break;
3814     default:
3815       return false;
3816     }
3817   }
3818   return true;
3819 }
3820 
3821 void CombinerHelper::applyExtendThroughPhis(MachineInstr &MI,
3822                                             MachineInstr *&ExtMI) {
3823   assert(MI.getOpcode() == TargetOpcode::G_PHI);
3824   Register DstReg = ExtMI->getOperand(0).getReg();
3825   LLT ExtTy = MRI.getType(DstReg);
3826 
  // Propagate the extension into each incoming register's block.
3828   // Use a SetVector here because PHIs can have duplicate edges, and we want
3829   // deterministic iteration order.
3830   SmallSetVector<MachineInstr *, 8> SrcMIs;
3831   SmallDenseMap<MachineInstr *, MachineInstr *, 8> OldToNewSrcMap;
3832   for (unsigned SrcIdx = 1; SrcIdx < MI.getNumOperands(); SrcIdx += 2) {
3833     auto *SrcMI = MRI.getVRegDef(MI.getOperand(SrcIdx).getReg());
3834     if (!SrcMIs.insert(SrcMI))
3835       continue;
3836 
3837     // Build an extend after each src inst.
3838     auto *MBB = SrcMI->getParent();
3839     MachineBasicBlock::iterator InsertPt = ++SrcMI->getIterator();
3840     if (InsertPt != MBB->end() && InsertPt->isPHI())
3841       InsertPt = MBB->getFirstNonPHI();
3842 
3843     Builder.setInsertPt(*SrcMI->getParent(), InsertPt);
3844     Builder.setDebugLoc(MI.getDebugLoc());
3845     auto NewExt = Builder.buildExtOrTrunc(ExtMI->getOpcode(), ExtTy,
3846                                           SrcMI->getOperand(0).getReg());
3847     OldToNewSrcMap[SrcMI] = NewExt;
3848   }
3849 
3850   // Create a new phi with the extended inputs.
3851   Builder.setInstrAndDebugLoc(MI);
3852   auto NewPhi = Builder.buildInstrNoInsert(TargetOpcode::G_PHI);
3853   NewPhi.addDef(DstReg);
3854   for (unsigned SrcIdx = 1; SrcIdx < MI.getNumOperands(); ++SrcIdx) {
3855     auto &MO = MI.getOperand(SrcIdx);
3856     if (!MO.isReg()) {
3857       NewPhi.addMBB(MO.getMBB());
3858       continue;
3859     }
3860     auto *NewSrc = OldToNewSrcMap[MRI.getVRegDef(MO.getReg())];
3861     NewPhi.addUse(NewSrc->getOperand(0).getReg());
3862   }
3863   Builder.insertInstr(NewPhi);
3864   ExtMI->eraseFromParent();
3865 }
3866 
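/// Fold a G_EXTRACT_VECTOR_ELT with a constant index into the corresponding
/// G_BUILD_VECTOR source. As an illustrative MIR sketch (register names are
/// made up):
///   %vec:_(<2 x s32>) = G_BUILD_VECTOR %a(s32), %b(s32)
///   %elt:_(s32) = G_EXTRACT_VECTOR_ELT %vec, %one(s64)
/// can simply use %b.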
3867 bool CombinerHelper::matchExtractVecEltBuildVec(MachineInstr &MI,
3868                                                 Register &Reg) {
3869   assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT);
3870   // If we have a constant index, look for a G_BUILD_VECTOR source
3871   // and find the source register that the index maps to.
3872   Register SrcVec = MI.getOperand(1).getReg();
3873   LLT SrcTy = MRI.getType(SrcVec);
3874   if (!isLegalOrBeforeLegalizer(
3875           {TargetOpcode::G_BUILD_VECTOR, {SrcTy, SrcTy.getElementType()}}))
3876     return false;
3877 
3878   auto Cst = getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
3879   if (!Cst || Cst->Value.getZExtValue() >= SrcTy.getNumElements())
3880     return false;
3881 
3882   unsigned VecIdx = Cst->Value.getZExtValue();
3883   MachineInstr *BuildVecMI =
3884       getOpcodeDef(TargetOpcode::G_BUILD_VECTOR, SrcVec, MRI);
3885   if (!BuildVecMI) {
3886     BuildVecMI = getOpcodeDef(TargetOpcode::G_BUILD_VECTOR_TRUNC, SrcVec, MRI);
3887     if (!BuildVecMI)
3888       return false;
3889     LLT ScalarTy = MRI.getType(BuildVecMI->getOperand(1).getReg());
3890     if (!isLegalOrBeforeLegalizer(
3891             {TargetOpcode::G_BUILD_VECTOR_TRUNC, {SrcTy, ScalarTy}}))
3892       return false;
3893   }
3894 
3895   EVT Ty(getMVTForLLT(SrcTy));
3896   if (!MRI.hasOneNonDBGUse(SrcVec) &&
3897       !getTargetLowering().aggressivelyPreferBuildVectorSources(Ty))
3898     return false;
3899 
3900   Reg = BuildVecMI->getOperand(VecIdx + 1).getReg();
3901   return true;
3902 }
3903 
3904 void CombinerHelper::applyExtractVecEltBuildVec(MachineInstr &MI,
3905                                                 Register &Reg) {
3906   // Check the type of the register, since it may have come from a
3907   // G_BUILD_VECTOR_TRUNC.
3908   LLT ScalarTy = MRI.getType(Reg);
3909   Register DstReg = MI.getOperand(0).getReg();
3910   LLT DstTy = MRI.getType(DstReg);
3911 
3912   Builder.setInstrAndDebugLoc(MI);
3913   if (ScalarTy != DstTy) {
3914     assert(ScalarTy.getSizeInBits() > DstTy.getSizeInBits());
3915     Builder.buildTrunc(DstReg, Reg);
3916     MI.eraseFromParent();
3917     return;
3918   }
3919   replaceSingleDefInstWithReg(MI, Reg);
3920 }
3921 
3922 bool CombinerHelper::matchExtractAllEltsFromBuildVector(
3923     MachineInstr &MI,
3924     SmallVectorImpl<std::pair<Register, MachineInstr *>> &SrcDstPairs) {
3925   assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
3926   // This combine tries to find build_vector's which have every source element
3927   // extracted using G_EXTRACT_VECTOR_ELT. This can happen when transforms like
3928   // the masked load scalarization is run late in the pipeline. There's already
3929   // a combine for a similar pattern starting from the extract, but that
3930   // doesn't attempt to do it if there are multiple uses of the build_vector,
3931   // which in this case is true. Starting the combine from the build_vector
3932   // feels more natural than trying to find sibling nodes of extracts.
3933   // E.g.
3934   //  %vec(<4 x s32>) = G_BUILD_VECTOR %s1(s32), %s2, %s3, %s4
3935   //  %ext1 = G_EXTRACT_VECTOR_ELT %vec, 0
3936   //  %ext2 = G_EXTRACT_VECTOR_ELT %vec, 1
3937   //  %ext3 = G_EXTRACT_VECTOR_ELT %vec, 2
3938   //  %ext4 = G_EXTRACT_VECTOR_ELT %vec, 3
3939   // ==>
3940   // replace ext{1,2,3,4} with %s{1,2,3,4}
3941 
3942   Register DstReg = MI.getOperand(0).getReg();
3943   LLT DstTy = MRI.getType(DstReg);
3944   unsigned NumElts = DstTy.getNumElements();
3945 
3946   SmallBitVector ExtractedElts(NumElts);
3947   for (auto &II : make_range(MRI.use_instr_nodbg_begin(DstReg),
3948                              MRI.use_instr_nodbg_end())) {
3949     if (II.getOpcode() != TargetOpcode::G_EXTRACT_VECTOR_ELT)
3950       return false;
3951     auto Cst = getConstantVRegVal(II.getOperand(2).getReg(), MRI);
3952     if (!Cst)
3953       return false;
3954     unsigned Idx = Cst.getValue().getZExtValue();
3955     if (Idx >= NumElts)
3956       return false; // Out of range.
3957     ExtractedElts.set(Idx);
3958     SrcDstPairs.emplace_back(
3959         std::make_pair(MI.getOperand(Idx + 1).getReg(), &II));
3960   }
3961   // Match if every element was extracted.
3962   return ExtractedElts.all();
3963 }
3964 
3965 void CombinerHelper::applyExtractAllEltsFromBuildVector(
3966     MachineInstr &MI,
3967     SmallVectorImpl<std::pair<Register, MachineInstr *>> &SrcDstPairs) {
3968   assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
3969   for (auto &Pair : SrcDstPairs) {
3970     auto *ExtMI = Pair.second;
3971     replaceRegWith(MRI, ExtMI->getOperand(0).getReg(), Pair.first);
3972     ExtMI->eraseFromParent();
3973   }
3974   MI.eraseFromParent();
3975 }
3976 
3977 void CombinerHelper::applyBuildFn(
3978     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
3979   Builder.setInstrAndDebugLoc(MI);
3980   MatchInfo(Builder);
3981   MI.eraseFromParent();
3982 }
3983 
3984 void CombinerHelper::applyBuildFnNoErase(
3985     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
3986   Builder.setInstrAndDebugLoc(MI);
3987   MatchInfo(Builder);
3988 }
3989 
3990 /// Match an FSHL or FSHR that can be combined to a ROTR or ROTL rotate.
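/// E.g. (illustrative): fshl %x, %x, %amt is the same as rotl %x, %amt, and
/// fshr %x, %x, %amt is the same as rotr %x, %amt.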
3991 bool CombinerHelper::matchFunnelShiftToRotate(MachineInstr &MI) {
3992   unsigned Opc = MI.getOpcode();
3993   assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);
3994   Register X = MI.getOperand(1).getReg();
3995   Register Y = MI.getOperand(2).getReg();
3996   if (X != Y)
3997     return false;
3998   unsigned RotateOpc =
3999       Opc == TargetOpcode::G_FSHL ? TargetOpcode::G_ROTL : TargetOpcode::G_ROTR;
  return isLegalOrBeforeLegalizer(
      {RotateOpc, {MRI.getType(X), MRI.getType(Y)}});
4001 }
4002 
4003 void CombinerHelper::applyFunnelShiftToRotate(MachineInstr &MI) {
4004   unsigned Opc = MI.getOpcode();
4005   assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);
4006   bool IsFSHL = Opc == TargetOpcode::G_FSHL;
4007   Observer.changingInstr(MI);
4008   MI.setDesc(Builder.getTII().get(IsFSHL ? TargetOpcode::G_ROTL
4009                                          : TargetOpcode::G_ROTR));
4010   MI.RemoveOperand(2);
4011   Observer.changedInstr(MI);
4012 }
4013 
4014 // Fold (rot x, c) -> (rot x, c % BitSize)
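// E.g. (illustrative, s32): (rotl %x, 40) is equivalent to (rotl %x, 8).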
4015 bool CombinerHelper::matchRotateOutOfRange(MachineInstr &MI) {
4016   assert(MI.getOpcode() == TargetOpcode::G_ROTL ||
4017          MI.getOpcode() == TargetOpcode::G_ROTR);
4018   unsigned Bitsize =
4019       MRI.getType(MI.getOperand(0).getReg()).getScalarSizeInBits();
4020   Register AmtReg = MI.getOperand(2).getReg();
4021   bool OutOfRange = false;
4022   auto MatchOutOfRange = [Bitsize, &OutOfRange](const Constant *C) {
4023     if (auto *CI = dyn_cast<ConstantInt>(C))
4024       OutOfRange |= CI->getValue().uge(Bitsize);
4025     return true;
4026   };
4027   return matchUnaryPredicate(MRI, AmtReg, MatchOutOfRange) && OutOfRange;
4028 }
4029 
4030 void CombinerHelper::applyRotateOutOfRange(MachineInstr &MI) {
4031   assert(MI.getOpcode() == TargetOpcode::G_ROTL ||
4032          MI.getOpcode() == TargetOpcode::G_ROTR);
4033   unsigned Bitsize =
4034       MRI.getType(MI.getOperand(0).getReg()).getScalarSizeInBits();
4035   Builder.setInstrAndDebugLoc(MI);
4036   Register Amt = MI.getOperand(2).getReg();
4037   LLT AmtTy = MRI.getType(Amt);
4038   auto Bits = Builder.buildConstant(AmtTy, Bitsize);
4039   Amt = Builder.buildURem(AmtTy, MI.getOperand(2).getReg(), Bits).getReg(0);
4040   Observer.changingInstr(MI);
4041   MI.getOperand(2).setReg(Amt);
4042   Observer.changedInstr(MI);
4043 }
4044 
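/// Fold a G_ICMP whose result is implied by known bits into a constant. As an
/// illustrative sketch: if %x is known to be zero, then
///   %c:_(s1) = G_ICMP intpred(eq), %x(s32), %zero
/// folds to the target's "true" value, while intpred(ne) folds to 0.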
4045 bool CombinerHelper::matchICmpToTrueFalseKnownBits(MachineInstr &MI,
4046                                                    int64_t &MatchInfo) {
4047   assert(MI.getOpcode() == TargetOpcode::G_ICMP);
4048   auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
4049   auto KnownLHS = KB->getKnownBits(MI.getOperand(2).getReg());
4050   auto KnownRHS = KB->getKnownBits(MI.getOperand(3).getReg());
4051   Optional<bool> KnownVal;
4052   switch (Pred) {
4053   default:
4054     llvm_unreachable("Unexpected G_ICMP predicate?");
4055   case CmpInst::ICMP_EQ:
4056     KnownVal = KnownBits::eq(KnownLHS, KnownRHS);
4057     break;
4058   case CmpInst::ICMP_NE:
4059     KnownVal = KnownBits::ne(KnownLHS, KnownRHS);
4060     break;
4061   case CmpInst::ICMP_SGE:
4062     KnownVal = KnownBits::sge(KnownLHS, KnownRHS);
4063     break;
4064   case CmpInst::ICMP_SGT:
4065     KnownVal = KnownBits::sgt(KnownLHS, KnownRHS);
4066     break;
4067   case CmpInst::ICMP_SLE:
4068     KnownVal = KnownBits::sle(KnownLHS, KnownRHS);
4069     break;
4070   case CmpInst::ICMP_SLT:
4071     KnownVal = KnownBits::slt(KnownLHS, KnownRHS);
4072     break;
4073   case CmpInst::ICMP_UGE:
4074     KnownVal = KnownBits::uge(KnownLHS, KnownRHS);
4075     break;
4076   case CmpInst::ICMP_UGT:
4077     KnownVal = KnownBits::ugt(KnownLHS, KnownRHS);
4078     break;
4079   case CmpInst::ICMP_ULE:
4080     KnownVal = KnownBits::ule(KnownLHS, KnownRHS);
4081     break;
4082   case CmpInst::ICMP_ULT:
4083     KnownVal = KnownBits::ult(KnownLHS, KnownRHS);
4084     break;
4085   }
4086   if (!KnownVal)
4087     return false;
4088   MatchInfo =
4089       *KnownVal
4090           ? getICmpTrueVal(getTargetLowering(),
4091                            /*IsVector = */
4092                            MRI.getType(MI.getOperand(0).getReg()).isVector(),
4093                            /* IsFP = */ false)
4094           : 0;
4095   return true;
4096 }
4097 
4098 /// Form a G_SBFX from a G_SEXT_INREG fed by a right shift.
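/// As an illustrative sketch (s32):
///   %shr:_(s32) = G_LSHR %x, 4
///   %sext:_(s32) = G_SEXT_INREG %shr, 8
/// becomes
///   %sbfx:_(s32) = G_SBFX %x, 4 (lsb), 8 (width)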
4099 bool CombinerHelper::matchBitfieldExtractFromSExtInReg(
4100     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4101   assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
4102   Register Dst = MI.getOperand(0).getReg();
4103   Register Src = MI.getOperand(1).getReg();
4104   LLT Ty = MRI.getType(Src);
4105   LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4106   if (!LI || !LI->isLegalOrCustom({TargetOpcode::G_SBFX, {Ty, ExtractTy}}))
4107     return false;
4108   int64_t Width = MI.getOperand(2).getImm();
4109   Register ShiftSrc;
4110   int64_t ShiftImm;
4111   if (!mi_match(
4112           Src, MRI,
4113           m_OneNonDBGUse(m_any_of(m_GAShr(m_Reg(ShiftSrc), m_ICst(ShiftImm)),
4114                                   m_GLShr(m_Reg(ShiftSrc), m_ICst(ShiftImm))))))
4115     return false;
4116   if (ShiftImm < 0 || ShiftImm + Width > Ty.getScalarSizeInBits())
4117     return false;
4118 
4119   MatchInfo = [=](MachineIRBuilder &B) {
4120     auto Cst1 = B.buildConstant(ExtractTy, ShiftImm);
4121     auto Cst2 = B.buildConstant(ExtractTy, Width);
4122     B.buildSbfx(Dst, ShiftSrc, Cst1, Cst2);
4123   };
4124   return true;
4125 }
4126 
4127 /// Form a G_UBFX from "(a srl b) & mask", where b and mask are constants.
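/// As an illustrative sketch (s32): ((%x lshr 8) & 0xff) becomes
/// G_UBFX %x, 8 (lsb), 8 (width).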
4128 bool CombinerHelper::matchBitfieldExtractFromAnd(
4129     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4130   assert(MI.getOpcode() == TargetOpcode::G_AND);
4131   Register Dst = MI.getOperand(0).getReg();
4132   LLT Ty = MRI.getType(Dst);
4133   if (!getTargetLowering().isConstantUnsignedBitfieldExtactLegal(
4134           TargetOpcode::G_UBFX, Ty, Ty))
4135     return false;
4136 
4137   int64_t AndImm, LSBImm;
4138   Register ShiftSrc;
4139   const unsigned Size = Ty.getScalarSizeInBits();
4140   if (!mi_match(MI.getOperand(0).getReg(), MRI,
4141                 m_GAnd(m_OneNonDBGUse(m_GLShr(m_Reg(ShiftSrc), m_ICst(LSBImm))),
4142                        m_ICst(AndImm))))
4143     return false;
4144 
4145   // The mask is a mask of the low bits iff imm & (imm+1) == 0.
4146   auto MaybeMask = static_cast<uint64_t>(AndImm);
4147   if (MaybeMask & (MaybeMask + 1))
4148     return false;
4149 
4150   // LSB must fit within the register.
4151   if (static_cast<uint64_t>(LSBImm) >= Size)
4152     return false;
4153 
4154   LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4155   uint64_t Width = APInt(Size, AndImm).countTrailingOnes();
4156   MatchInfo = [=](MachineIRBuilder &B) {
4157     auto WidthCst = B.buildConstant(ExtractTy, Width);
4158     auto LSBCst = B.buildConstant(ExtractTy, LSBImm);
4159     B.buildInstr(TargetOpcode::G_UBFX, {Dst}, {ShiftSrc, LSBCst, WidthCst});
4160   };
4161   return true;
4162 }
4163 
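/// Form a G_SBFX/G_UBFX from an arithmetic/logical right shift fed by a left
/// shift, i.e. "shr (shl x, c1), c2" with constant c1 and c2, extracting
/// (bitwidth - c2) bits starting at bit (c2 - c1).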
bool CombinerHelper::matchBitfieldExtractFromShr(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  const unsigned Opcode = MI.getOpcode();
  assert(Opcode == TargetOpcode::G_ASHR || Opcode == TargetOpcode::G_LSHR);

  const Register Dst = MI.getOperand(0).getReg();

  const unsigned ExtrOpcode = Opcode == TargetOpcode::G_ASHR
                                  ? TargetOpcode::G_SBFX
                                  : TargetOpcode::G_UBFX;

  // Check if the type we would use for the extract is legal
  LLT Ty = MRI.getType(Dst);
  LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
  if (!LI || !LI->isLegalOrCustom({ExtrOpcode, {Ty, ExtractTy}}))
    return false;

  Register ShlSrc;
  int64_t ShrAmt;
  int64_t ShlAmt;
  const unsigned Size = Ty.getScalarSizeInBits();

  // Try to match shr (shl x, c1), c2
  if (!mi_match(Dst, MRI,
                m_BinOp(Opcode,
                        m_OneNonDBGUse(m_GShl(m_Reg(ShlSrc), m_ICst(ShlAmt))),
                        m_ICst(ShrAmt))))
    return false;

  // Make sure that the shift sizes can fit a bitfield extract
  if (ShlAmt < 0 || ShlAmt > ShrAmt || ShrAmt >= Size)
    return false;

  // Skip this combine if the G_SEXT_INREG combine could handle it
  if (Opcode == TargetOpcode::G_ASHR && ShlAmt == ShrAmt)
    return false;

  // Calculate start position and width of the extract
  const int64_t Pos = ShrAmt - ShlAmt;
  const int64_t Width = Size - ShrAmt;

  MatchInfo = [=](MachineIRBuilder &B) {
    auto WidthCst = B.buildConstant(ExtractTy, Width);
    auto PosCst = B.buildConstant(ExtractTy, Pos);
    B.buildInstr(ExtrOpcode, {Dst}, {ShlSrc, PosCst, WidthCst});
  };
  return true;
}

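/// Check whether folding the constant offsets of \p PtrAdd and the G_PTR_ADD
/// feeding it would turn an addressing mode that is currently legal for one of
/// the inner G_PTR_ADD's load/store users into one the target rejects.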
bool CombinerHelper::reassociationCanBreakAddressingModePattern(
    MachineInstr &PtrAdd) {
  assert(PtrAdd.getOpcode() == TargetOpcode::G_PTR_ADD);

  Register Src1Reg = PtrAdd.getOperand(1).getReg();
  MachineInstr *Src1Def = getOpcodeDef(TargetOpcode::G_PTR_ADD, Src1Reg, MRI);
  if (!Src1Def)
    return false;

  Register Src2Reg = PtrAdd.getOperand(2).getReg();

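  // If the inner G_PTR_ADD has no users other than this one, no load/store
  // addressing mode can be affected by the reassociation.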
  if (MRI.hasOneNonDBGUse(Src1Reg))
    return false;

  auto C1 = getConstantVRegVal(Src1Def->getOperand(2).getReg(), MRI);
  if (!C1)
    return false;
  auto C2 = getConstantVRegVal(Src2Reg, MRI);
  if (!C2)
    return false;

  const APInt &C1APIntVal = *C1;
  const APInt &C2APIntVal = *C2;
  const int64_t CombinedValue = (C1APIntVal + C2APIntVal).getSExtValue();

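  // Walk the non-debug users of the inner G_PTR_ADD and check whether any
  // load/store among them would lose a legal addressing mode.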
  for (auto &UseMI : MRI.use_nodbg_instructions(Src1Reg)) {
    // This combine may end up running before ptrtoint/inttoptr combines
    // manage to eliminate redundant conversions, so try to look through them.
    MachineInstr *ConvUseMI = &UseMI;
    unsigned ConvUseOpc = ConvUseMI->getOpcode();
    while (ConvUseOpc == TargetOpcode::G_INTTOPTR ||
           ConvUseOpc == TargetOpcode::G_PTRTOINT) {
      Register DefReg = ConvUseMI->getOperand(0).getReg();
      if (!MRI.hasOneNonDBGUse(DefReg))
        break;
      ConvUseMI = &*MRI.use_instr_nodbg_begin(DefReg);
      ConvUseOpc = ConvUseMI->getOpcode();
    }
    auto LoadStore = ConvUseOpc == TargetOpcode::G_LOAD ||
                     ConvUseOpc == TargetOpcode::G_STORE;
    if (!LoadStore)
      continue;
    // Is x[offset2] already not a legal addressing mode? If so then
    // reassociating the constants breaks nothing (we test offset2 because
    // that's the one we hope to fold into the load or store).
    TargetLoweringBase::AddrMode AM;
    AM.HasBaseReg = true;
    AM.BaseOffs = C2APIntVal.getSExtValue();
    unsigned AS =
        MRI.getType(ConvUseMI->getOperand(1).getReg()).getAddressSpace();
    Type *AccessTy =
        getTypeForLLT(MRI.getType(ConvUseMI->getOperand(0).getReg()),
                      PtrAdd.getMF()->getFunction().getContext());
    const auto &TLI = *PtrAdd.getMF()->getSubtarget().getTargetLowering();
    if (!TLI.isLegalAddressingMode(PtrAdd.getMF()->getDataLayout(), AM,
                                   AccessTy, AS))
      continue;

    // Would x[offset1+offset2] still be a legal addressing mode?
    AM.BaseOffs = CombinedValue;
    if (!TLI.isLegalAddressingMode(PtrAdd.getMF()->getDataLayout(), AM,
                                   AccessTy, AS))
      return true;
  }

  return false;
}

bool CombinerHelper::matchReassocPtrAdd(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_PTR_ADD);
  // We're trying to match a few pointer computation patterns here for
  // re-association opportunities.
  // 1) Isolating a constant operand to be on the RHS, e.g.:
  // G_PTR_ADD(BASE, G_ADD(X, C)) -> G_PTR_ADD(G_PTR_ADD(BASE, X), C)
  //
  // 2) Folding two constants in each sub-tree as long as such folding
  // doesn't break a legal addressing mode.
  // G_PTR_ADD(G_PTR_ADD(BASE, C1), C2) -> G_PTR_ADD(BASE, C1+C2)
  Register Src1Reg = MI.getOperand(1).getReg();
  Register Src2Reg = MI.getOperand(2).getReg();
  MachineInstr *LHS = MRI.getVRegDef(Src1Reg);
  MachineInstr *RHS = MRI.getVRegDef(Src2Reg);

  if (LHS->getOpcode() != TargetOpcode::G_PTR_ADD) {
    // Try to match example 1).
    if (RHS->getOpcode() != TargetOpcode::G_ADD)
      return false;
    auto C2 = getConstantVRegVal(RHS->getOperand(2).getReg(), MRI);
    if (!C2)
      return false;

    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      LLT PtrTy = MRI.getType(MI.getOperand(0).getReg());

      auto NewBase =
          Builder.buildPtrAdd(PtrTy, Src1Reg, RHS->getOperand(1).getReg());
      Observer.changingInstr(MI);
      MI.getOperand(1).setReg(NewBase.getReg(0));
      MI.getOperand(2).setReg(RHS->getOperand(2).getReg());
      Observer.changedInstr(MI);
    };
  } else {
    // Try to match example 2).
    Register LHSSrc1 = LHS->getOperand(1).getReg();
    Register LHSSrc2 = LHS->getOperand(2).getReg();
    auto C1 = getConstantVRegVal(LHSSrc2, MRI);
    if (!C1)
      return false;
    auto C2 = getConstantVRegVal(Src2Reg, MRI);
    if (!C2)
      return false;

    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      auto NewCst = B.buildConstant(MRI.getType(Src2Reg), *C1 + *C2);
      Observer.changingInstr(MI);
      MI.getOperand(1).setReg(LHSSrc1);
      MI.getOperand(2).setReg(NewCst.getReg(0));
      Observer.changedInstr(MI);
    };
  }
  return !reassociationCanBreakAddressingModePattern(MI);
}

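/// Constant fold a binary operation whose two source operands are constants;
/// the folded value is returned in \p MatchInfo.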
bool CombinerHelper::matchConstantFold(MachineInstr &MI, APInt &MatchInfo) {
  Register Op1 = MI.getOperand(1).getReg();
  Register Op2 = MI.getOperand(2).getReg();
  auto MaybeCst = ConstantFoldBinOp(MI.getOpcode(), Op1, Op2, MRI);
  if (!MaybeCst)
    return false;
  MatchInfo = *MaybeCst;
  return true;
}

bool CombinerHelper::matchNarrowBinopFeedingAnd(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  // Look for a binop feeding into an AND with a mask:
  //
  // %add = G_ADD %lhs, %rhs
  // %and = G_AND %add, 000...11111111
  //
  // Check if it's possible to perform the binop at a narrower width and zext
  // back to the original width like so:
  //
  // %narrow_lhs = G_TRUNC %lhs
  // %narrow_rhs = G_TRUNC %rhs
  // %narrow_add = G_ADD %narrow_lhs, %narrow_rhs
  // %new_add = G_ZEXT %narrow_add
  // %and = G_AND %new_add, 000...11111111
  //
  // This can allow later combines to eliminate the G_AND if it turns out
  // that the mask is irrelevant.
  assert(MI.getOpcode() == TargetOpcode::G_AND);
  Register Dst = MI.getOperand(0).getReg();
  Register AndLHS = MI.getOperand(1).getReg();
  Register AndRHS = MI.getOperand(2).getReg();
  LLT WideTy = MRI.getType(Dst);

  // If the potential binop has more than one use, then it's possible that one
  // of those uses will need its full width.
  if (!WideTy.isScalar() || !MRI.hasOneNonDBGUse(AndLHS))
    return false;

  // Check if the LHS feeding the AND is impacted by the high bits that we're
  // masking out.
  //
  // e.g. for 64-bit x, y:
  //
  // add_64(x, y) & 65535 == zext(add_16(trunc(x), trunc(y))) & 65535
  MachineInstr *LHSInst = getDefIgnoringCopies(AndLHS, MRI);
  if (!LHSInst)
    return false;
  unsigned LHSOpc = LHSInst->getOpcode();
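  // The low bits of these operations depend only on the low bits of their
  // operands, so performing them at the narrow width produces the same masked
  // result.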
  switch (LHSOpc) {
  default:
    return false;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_XOR:
    break;
  }

  // Find the mask on the RHS.
  auto Cst = getConstantVRegValWithLookThrough(AndRHS, MRI);
  if (!Cst)
    return false;
  auto Mask = Cst->Value;
  if (!Mask.isMask())
    return false;

  // No point in combining if there's nothing to truncate.
  unsigned NarrowWidth = Mask.countTrailingOnes();
  if (NarrowWidth == WideTy.getSizeInBits())
    return false;
  LLT NarrowTy = LLT::scalar(NarrowWidth);

  // Check if adding the zext + truncates could be harmful.
  auto &MF = *MI.getMF();
  const auto &TLI = getTargetLowering();
  LLVMContext &Ctx = MF.getFunction().getContext();
  auto &DL = MF.getDataLayout();
  if (!TLI.isTruncateFree(WideTy, NarrowTy, DL, Ctx) ||
      !TLI.isZExtFree(NarrowTy, WideTy, DL, Ctx))
    return false;
  if (!isLegalOrBeforeLegalizer({TargetOpcode::G_TRUNC, {NarrowTy, WideTy}}) ||
      !isLegalOrBeforeLegalizer({TargetOpcode::G_ZEXT, {WideTy, NarrowTy}}))
    return false;
  Register BinOpLHS = LHSInst->getOperand(1).getReg();
  Register BinOpRHS = LHSInst->getOperand(2).getReg();
  MatchInfo = [=, &MI](MachineIRBuilder &B) {
    auto NarrowLHS = Builder.buildTrunc(NarrowTy, BinOpLHS);
    auto NarrowRHS = Builder.buildTrunc(NarrowTy, BinOpRHS);
    auto NarrowBinOp =
        Builder.buildInstr(LHSOpc, {NarrowTy}, {NarrowLHS, NarrowRHS});
    auto Ext = Builder.buildZExt(WideTy, NarrowBinOp);
    Observer.changingInstr(MI);
    MI.getOperand(1).setReg(Ext.getReg(0));
    Observer.changedInstr(MI);
  };
  return true;
}

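/// Generic entry point: try the always-available combines, i.e. copy
/// propagation, extending loads, and indexed load/store formation.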
bool CombinerHelper::tryCombine(MachineInstr &MI) {
  if (tryCombineCopy(MI))
    return true;
  if (tryCombineExtendingLoads(MI))
    return true;
  if (tryCombineIndexedLoadStore(MI))
    return true;
  return false;
}