//===-- lib/CodeGen/GlobalISel/CombinerHelper.cpp --------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 #include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
9 #include "llvm/CodeGen/GlobalISel/Combiner.h"
10 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
11 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
12 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
13 #include "llvm/CodeGen/GlobalISel/Utils.h"
14 #include "llvm/CodeGen/MachineDominators.h"
15 #include "llvm/CodeGen/MachineFrameInfo.h"
16 #include "llvm/CodeGen/MachineInstr.h"
17 #include "llvm/CodeGen/MachineRegisterInfo.h"
18 #include "llvm/CodeGen/TargetInstrInfo.h"
19 #include "llvm/CodeGen/TargetLowering.h"
20 #include "llvm/Target/TargetMachine.h"
21 
22 #define DEBUG_TYPE "gi-combiner"
23 
24 using namespace llvm;
25 
26 // Option to allow testing of the combiner while no targets know about indexed
27 // addressing.
28 static cl::opt<bool>
29     ForceLegalIndexing("force-legal-indexing", cl::Hidden, cl::init(false),
30                        cl::desc("Force all indexed operations to be "
31                                 "legal for the GlobalISel combiner"));
32 
33 
34 CombinerHelper::CombinerHelper(GISelChangeObserver &Observer,
35                                MachineIRBuilder &B, GISelKnownBits *KB,
36                                MachineDominatorTree *MDT)
37     : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer),
38       KB(KB), MDT(MDT) {
39   (void)this->KB;
40 }
41 
42 void CombinerHelper::replaceRegWith(MachineRegisterInfo &MRI, Register FromReg,
43                                     Register ToReg) const {
44   Observer.changingAllUsesOfReg(MRI, FromReg);
45 
46   if (MRI.constrainRegAttrs(ToReg, FromReg))
47     MRI.replaceRegWith(FromReg, ToReg);
48   else
49     Builder.buildCopy(ToReg, FromReg);
50 
51   Observer.finishedChangingAllUsesOfReg();
52 }
53 
54 void CombinerHelper::replaceRegOpWith(MachineRegisterInfo &MRI,
55                                       MachineOperand &FromRegOp,
56                                       Register ToReg) const {
57   assert(FromRegOp.getParent() && "Expected an operand in an MI");
58   Observer.changingInstr(*FromRegOp.getParent());
59 
60   FromRegOp.setReg(ToReg);
61 
62   Observer.changedInstr(*FromRegOp.getParent());
63 }
64 
65 bool CombinerHelper::tryCombineCopy(MachineInstr &MI) {
66   if (matchCombineCopy(MI)) {
67     applyCombineCopy(MI);
68     return true;
69   }
70   return false;
71 }
72 bool CombinerHelper::matchCombineCopy(MachineInstr &MI) {
73   if (MI.getOpcode() != TargetOpcode::COPY)
74     return false;
75   Register DstReg = MI.getOperand(0).getReg();
76   Register SrcReg = MI.getOperand(1).getReg();
77   LLT DstTy = MRI.getType(DstReg);
78   LLT SrcTy = MRI.getType(SrcReg);
79   // Simple Copy Propagation.
80   // a(sx) = COPY b(sx) -> Replace all uses of a with b.
81   if (DstTy.isValid() && SrcTy.isValid() && DstTy == SrcTy)
82     return true;
83   return false;
84 }
85 void CombinerHelper::applyCombineCopy(MachineInstr &MI) {
86   Register DstReg = MI.getOperand(0).getReg();
87   Register SrcReg = MI.getOperand(1).getReg();
88   MI.eraseFromParent();
89   replaceRegWith(MRI, DstReg, SrcReg);
90 }
91 
92 bool CombinerHelper::tryCombineConcatVectors(MachineInstr &MI) {
93   bool IsUndef = false;
94   SmallVector<Register, 4> Ops;
95   if (matchCombineConcatVectors(MI, IsUndef, Ops)) {
96     applyCombineConcatVectors(MI, IsUndef, Ops);
97     return true;
98   }
99   return false;
100 }
101 
102 bool CombinerHelper::matchCombineConcatVectors(MachineInstr &MI, bool &IsUndef,
103                                                SmallVectorImpl<Register> &Ops) {
104   assert(MI.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
105          "Invalid instruction");
106   IsUndef = true;
107   MachineInstr *Undef = nullptr;
108 
109   // Walk over all the operands of concat vectors and check if they are
110   // build_vector themselves or undef.
111   // Then collect their operands in Ops.
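  // For instance (an illustrative MIR sketch; register names are made up):
  //   %x:_(<2 x s32>) = G_BUILD_VECTOR %a(s32), %b(s32)
  //   %y:_(<2 x s32>) = G_BUILD_VECTOR %c(s32), %d(s32)
  //   %z:_(<4 x s32>) = G_CONCAT_VECTORS %x(<2 x s32>), %y(<2 x s32>)
  // collects Ops = {%a, %b, %c, %d}, so the apply step can emit a single
  // flattened G_BUILD_VECTOR.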
112   for (const MachineOperand &MO : MI.uses()) {
113     Register Reg = MO.getReg();
114     MachineInstr *Def = MRI.getVRegDef(Reg);
115     assert(Def && "Operand not defined");
116     switch (Def->getOpcode()) {
117     case TargetOpcode::G_BUILD_VECTOR:
118       IsUndef = false;
119       // Remember the operands of the build_vector to fold
120       // them into the yet-to-build flattened concat vectors.
121       for (const MachineOperand &BuildVecMO : Def->uses())
122         Ops.push_back(BuildVecMO.getReg());
123       break;
124     case TargetOpcode::G_IMPLICIT_DEF: {
125       LLT OpType = MRI.getType(Reg);
126       // Keep one undef value for all the undef operands.
127       if (!Undef) {
128         Builder.setInsertPt(*MI.getParent(), MI);
129         Undef = Builder.buildUndef(OpType.getScalarType());
130       }
131       assert(MRI.getType(Undef->getOperand(0).getReg()) ==
132                  OpType.getScalarType() &&
133              "All undefs should have the same type");
134       // Break the undef vector in as many scalar elements as needed
135       // for the flattening.
136       for (unsigned EltIdx = 0, EltEnd = OpType.getNumElements();
137            EltIdx != EltEnd; ++EltIdx)
138         Ops.push_back(Undef->getOperand(0).getReg());
139       break;
140     }
141     default:
142       return false;
143     }
144   }
145   return true;
146 }
147 void CombinerHelper::applyCombineConcatVectors(
148     MachineInstr &MI, bool IsUndef, const ArrayRef<Register> Ops) {
  // We determined that the concat_vectors can be flattened.
150   // Generate the flattened build_vector.
151   Register DstReg = MI.getOperand(0).getReg();
152   Builder.setInsertPt(*MI.getParent(), MI);
153   Register NewDstReg = MRI.cloneVirtualRegister(DstReg);
154 
  // Note: IsUndef is sort of redundant. We could have determined it by
  // checking that all Ops are undef.  Alternatively, we could have
  // generated a build_vector of undefs and relied on another combine to
  // clean that up.  For now, given we already gather this information
  // in matchCombineConcatVectors, just save compile time and issue the
  // right thing.
161   if (IsUndef)
162     Builder.buildUndef(NewDstReg);
163   else
164     Builder.buildBuildVector(NewDstReg, Ops);
165   MI.eraseFromParent();
166   replaceRegWith(MRI, DstReg, NewDstReg);
167 }
168 
169 bool CombinerHelper::tryCombineShuffleVector(MachineInstr &MI) {
170   SmallVector<Register, 4> Ops;
171   if (matchCombineShuffleVector(MI, Ops)) {
172     applyCombineShuffleVector(MI, Ops);
173     return true;
174   }
175   return false;
176 }
177 
178 bool CombinerHelper::matchCombineShuffleVector(MachineInstr &MI,
179                                                SmallVectorImpl<Register> &Ops) {
180   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
181          "Invalid instruction kind");
182   LLT DstType = MRI.getType(MI.getOperand(0).getReg());
183   Register Src1 = MI.getOperand(1).getReg();
184   LLT SrcType = MRI.getType(Src1);
  // As bizarre as it may look, a shuffle vector can actually produce a
  // scalar! This is because at the IR level a <1 x ty> shuffle
  // vector is perfectly valid.
188   unsigned DstNumElts = DstType.isVector() ? DstType.getNumElements() : 1;
189   unsigned SrcNumElts = SrcType.isVector() ? SrcType.getNumElements() : 1;
190 
  // If the resulting vector is smaller than the size of the source
  // vectors being concatenated, we won't be able to replace the
  // shuffle vector with a concat_vectors.
194   //
195   // Note: We may still be able to produce a concat_vectors fed by
196   //       extract_vector_elt and so on. It is less clear that would
197   //       be better though, so don't bother for now.
198   //
  // If the destination is a scalar, the size of the sources doesn't
  // matter: we will lower the shuffle to a plain copy. This will
  // work only if the source and destination have the same size. But
  // that's covered by the next condition.
  //
  // TODO: If the sizes of the source and destination don't match,
  //       we could still emit an extract vector element in that case.
206   if (DstNumElts < 2 * SrcNumElts && DstNumElts != 1)
207     return false;
208 
209   // Check that the shuffle mask can be broken evenly between the
210   // different sources.
211   if (DstNumElts % SrcNumElts != 0)
212     return false;
213 
214   // Mask length is a multiple of the source vector length.
215   // Check if the shuffle is some kind of concatenation of the input
216   // vectors.
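  // For example (illustrative values), with two <2 x s32> sources a mask of
  // <0, 1, 2, 3> concatenates the first source with the second, while
  // <2, 3, 2, 3> concatenates the second source with itself.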
217   unsigned NumConcat = DstNumElts / SrcNumElts;
218   SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
219   SmallVector<int, 8> Mask;
220   ShuffleVectorInst::getShuffleMask(MI.getOperand(3).getShuffleMask(), Mask);
221   for (unsigned i = 0; i != DstNumElts; ++i) {
222     int Idx = Mask[i];
223     // Undef value.
224     if (Idx < 0)
225       continue;
226     // Ensure the indices in each SrcType sized piece are sequential and that
227     // the same source is used for the whole piece.
228     if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
229         (ConcatSrcs[i / SrcNumElts] >= 0 &&
230          ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts)))
231       return false;
232     // Remember which source this index came from.
233     ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
234   }
235 
236   // The shuffle is concatenating multiple vectors together.
237   // Collect the different operands for that.
238   Register UndefReg;
239   Register Src2 = MI.getOperand(2).getReg();
240   for (auto Src : ConcatSrcs) {
241     if (Src < 0) {
242       if (!UndefReg) {
243         Builder.setInsertPt(*MI.getParent(), MI);
244         UndefReg = Builder.buildUndef(SrcType).getReg(0);
245       }
246       Ops.push_back(UndefReg);
247     } else if (Src == 0)
248       Ops.push_back(Src1);
249     else
250       Ops.push_back(Src2);
251   }
252   return true;
253 }
254 
255 void CombinerHelper::applyCombineShuffleVector(MachineInstr &MI,
256                                                const ArrayRef<Register> Ops) {
257   Register DstReg = MI.getOperand(0).getReg();
258   Builder.setInsertPt(*MI.getParent(), MI);
259   Register NewDstReg = MRI.cloneVirtualRegister(DstReg);
260 
261   if (Ops.size() == 1)
262     Builder.buildCopy(NewDstReg, Ops[0]);
263   else
264     Builder.buildMerge(NewDstReg, Ops);
265 
266   MI.eraseFromParent();
267   replaceRegWith(MRI, DstReg, NewDstReg);
268 }
269 
270 namespace {
271 
/// Select a preference between two uses. CurrentUse is the current preference
/// while the *ForCandidate arguments describe the candidate under
/// consideration.
274 PreferredTuple ChoosePreferredUse(PreferredTuple &CurrentUse,
275                                   const LLT &TyForCandidate,
276                                   unsigned OpcodeForCandidate,
277                                   MachineInstr *MIForCandidate) {
278   if (!CurrentUse.Ty.isValid()) {
279     if (CurrentUse.ExtendOpcode == OpcodeForCandidate ||
280         CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT)
281       return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
282     return CurrentUse;
283   }
284 
  // We permit the extend to be hoisted through basic blocks, but this is only
  // sensible if the target has extending loads. If the legalizer later lowers
  // the extending load back into a load and an extend, the net effect is
  // merely to have hoisted the extend up to the load.
289 
290   // Prefer defined extensions to undefined extensions as these are more
291   // likely to reduce the number of instructions.
292   if (OpcodeForCandidate == TargetOpcode::G_ANYEXT &&
293       CurrentUse.ExtendOpcode != TargetOpcode::G_ANYEXT)
294     return CurrentUse;
295   else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT &&
296            OpcodeForCandidate != TargetOpcode::G_ANYEXT)
297     return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
298 
  // Prefer sign extensions to zero extensions as sign-extensions tend to be
  // more expensive, so folding the sign extension into the load is the
  // bigger win.
301   if (CurrentUse.Ty == TyForCandidate) {
302     if (CurrentUse.ExtendOpcode == TargetOpcode::G_SEXT &&
303         OpcodeForCandidate == TargetOpcode::G_ZEXT)
304       return CurrentUse;
305     else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT &&
306              OpcodeForCandidate == TargetOpcode::G_SEXT)
307       return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
308   }
309 
310   // This is potentially target specific. We've chosen the largest type
311   // because G_TRUNC is usually free. One potential catch with this is that
312   // some targets have a reduced number of larger registers than smaller
313   // registers and this choice potentially increases the live-range for the
314   // larger value.
315   if (TyForCandidate.getSizeInBits() > CurrentUse.Ty.getSizeInBits()) {
316     return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
317   }
318   return CurrentUse;
319 }
320 
321 /// Find a suitable place to insert some instructions and insert them. This
322 /// function accounts for special cases like inserting before a PHI node.
/// The current strategy for inserting before PHIs is to duplicate the
/// instructions for each predecessor. However, while that's ok for G_TRUNC
325 /// on most targets since it generally requires no code, other targets/cases may
326 /// want to try harder to find a dominating block.
327 static void InsertInsnsWithoutSideEffectsBeforeUse(
328     MachineIRBuilder &Builder, MachineInstr &DefMI, MachineOperand &UseMO,
329     std::function<void(MachineBasicBlock *, MachineBasicBlock::iterator,
330                        MachineOperand &UseMO)>
331         Inserter) {
332   MachineInstr &UseMI = *UseMO.getParent();
333 
334   MachineBasicBlock *InsertBB = UseMI.getParent();
335 
336   // If the use is a PHI then we want the predecessor block instead.
337   if (UseMI.isPHI()) {
338     MachineOperand *PredBB = std::next(&UseMO);
339     InsertBB = PredBB->getMBB();
340   }
341 
342   // If the block is the same block as the def then we want to insert just after
343   // the def instead of at the start of the block.
344   if (InsertBB == DefMI.getParent()) {
345     MachineBasicBlock::iterator InsertPt = &DefMI;
346     Inserter(InsertBB, std::next(InsertPt), UseMO);
347     return;
348   }
349 
350   // Otherwise we want the start of the BB
351   Inserter(InsertBB, InsertBB->getFirstNonPHI(), UseMO);
352 }
353 } // end anonymous namespace
354 
355 bool CombinerHelper::tryCombineExtendingLoads(MachineInstr &MI) {
356   PreferredTuple Preferred;
357   if (matchCombineExtendingLoads(MI, Preferred)) {
358     applyCombineExtendingLoads(MI, Preferred);
359     return true;
360   }
361   return false;
362 }
363 
364 bool CombinerHelper::matchCombineExtendingLoads(MachineInstr &MI,
365                                                 PreferredTuple &Preferred) {
366   // We match the loads and follow the uses to the extend instead of matching
367   // the extends and following the def to the load. This is because the load
368   // must remain in the same position for correctness (unless we also add code
369   // to find a safe place to sink it) whereas the extend is freely movable.
  // It also prevents us from duplicating the load, which would be incorrect
  // for volatile accesses and wasteful for performance in general.
372 
373   if (MI.getOpcode() != TargetOpcode::G_LOAD &&
374       MI.getOpcode() != TargetOpcode::G_SEXTLOAD &&
375       MI.getOpcode() != TargetOpcode::G_ZEXTLOAD)
376     return false;
377 
378   auto &LoadValue = MI.getOperand(0);
379   assert(LoadValue.isReg() && "Result wasn't a register?");
380 
381   LLT LoadValueTy = MRI.getType(LoadValue.getReg());
382   if (!LoadValueTy.isScalar())
383     return false;
384 
  // Most architectures are going to legalize loads narrower than s8 into at
  // least a 1-byte load, and the MMOs can only describe memory accesses in
  // multiples of bytes. If we try to perform extload combining on those, we
  // can end up with
  //   %a(s8) = extload %ptr (load 1 byte from %ptr)
  // ... which is an illegal extload instruction.
390   if (LoadValueTy.getSizeInBits() < 8)
391     return false;
392 
393   // For non power-of-2 types, they will very likely be legalized into multiple
394   // loads. Don't bother trying to match them into extending loads.
395   if (!isPowerOf2_32(LoadValueTy.getSizeInBits()))
396     return false;
397 
398   // Find the preferred type aside from the any-extends (unless it's the only
399   // one) and non-extending ops. We'll emit an extending load to that type and
  // emit a variant of (extend (trunc X)) for the others according to the
401   // relative type sizes. At the same time, pick an extend to use based on the
402   // extend involved in the chosen type.
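  // For example (hypothetical), if an s8 G_LOAD feeds both a G_ZEXT to s32
  // and a G_SEXT to s32, the G_SEXT is preferred and the load becomes a
  // G_SEXTLOAD; if the extends target different sizes, the larger type wins
  // since truncating back is usually free.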
403   unsigned PreferredOpcode = MI.getOpcode() == TargetOpcode::G_LOAD
404                                  ? TargetOpcode::G_ANYEXT
405                                  : MI.getOpcode() == TargetOpcode::G_SEXTLOAD
406                                        ? TargetOpcode::G_SEXT
407                                        : TargetOpcode::G_ZEXT;
408   Preferred = {LLT(), PreferredOpcode, nullptr};
409   for (auto &UseMI : MRI.use_instructions(LoadValue.getReg())) {
410     if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||
411         UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
412         UseMI.getOpcode() == TargetOpcode::G_ANYEXT) {
413       Preferred = ChoosePreferredUse(Preferred,
414                                      MRI.getType(UseMI.getOperand(0).getReg()),
415                                      UseMI.getOpcode(), &UseMI);
416     }
417   }
418 
419   // There were no extends
420   if (!Preferred.MI)
421     return false;
  // It should be impossible to choose an extend without selecting a different
423   // type since by definition the result of an extend is larger.
424   assert(Preferred.Ty != LoadValueTy && "Extending to same type?");
425 
426   LLVM_DEBUG(dbgs() << "Preferred use is: " << *Preferred.MI);
427   return true;
428 }
429 
430 void CombinerHelper::applyCombineExtendingLoads(MachineInstr &MI,
431                                                 PreferredTuple &Preferred) {
432   // Rewrite the load to the chosen extending load.
433   Register ChosenDstReg = Preferred.MI->getOperand(0).getReg();
434 
435   // Inserter to insert a truncate back to the original type at a given point
436   // with some basic CSE to limit truncate duplication to one per BB.
437   DenseMap<MachineBasicBlock *, MachineInstr *> EmittedInsns;
438   auto InsertTruncAt = [&](MachineBasicBlock *InsertIntoBB,
439                            MachineBasicBlock::iterator InsertBefore,
440                            MachineOperand &UseMO) {
441     MachineInstr *PreviouslyEmitted = EmittedInsns.lookup(InsertIntoBB);
442     if (PreviouslyEmitted) {
443       Observer.changingInstr(*UseMO.getParent());
444       UseMO.setReg(PreviouslyEmitted->getOperand(0).getReg());
445       Observer.changedInstr(*UseMO.getParent());
446       return;
447     }
448 
449     Builder.setInsertPt(*InsertIntoBB, InsertBefore);
450     Register NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg());
451     MachineInstr *NewMI = Builder.buildTrunc(NewDstReg, ChosenDstReg);
452     EmittedInsns[InsertIntoBB] = NewMI;
453     replaceRegOpWith(MRI, UseMO, NewDstReg);
454   };
455 
456   Observer.changingInstr(MI);
457   MI.setDesc(
458       Builder.getTII().get(Preferred.ExtendOpcode == TargetOpcode::G_SEXT
459                                ? TargetOpcode::G_SEXTLOAD
460                                : Preferred.ExtendOpcode == TargetOpcode::G_ZEXT
461                                      ? TargetOpcode::G_ZEXTLOAD
462                                      : TargetOpcode::G_LOAD));
463 
464   // Rewrite all the uses to fix up the types.
465   auto &LoadValue = MI.getOperand(0);
466   SmallVector<MachineOperand *, 4> Uses;
467   for (auto &UseMO : MRI.use_operands(LoadValue.getReg()))
468     Uses.push_back(&UseMO);
469 
470   for (auto *UseMO : Uses) {
471     MachineInstr *UseMI = UseMO->getParent();
472 
473     // If the extend is compatible with the preferred extend then we should fix
474     // up the type and extend so that it uses the preferred use.
475     if (UseMI->getOpcode() == Preferred.ExtendOpcode ||
476         UseMI->getOpcode() == TargetOpcode::G_ANYEXT) {
477       Register UseDstReg = UseMI->getOperand(0).getReg();
478       MachineOperand &UseSrcMO = UseMI->getOperand(1);
479       const LLT &UseDstTy = MRI.getType(UseDstReg);
480       if (UseDstReg != ChosenDstReg) {
481         if (Preferred.Ty == UseDstTy) {
482           // If the use has the same type as the preferred use, then merge
483           // the vregs and erase the extend. For example:
484           //    %1:_(s8) = G_LOAD ...
485           //    %2:_(s32) = G_SEXT %1(s8)
486           //    %3:_(s32) = G_ANYEXT %1(s8)
487           //    ... = ... %3(s32)
488           // rewrites to:
489           //    %2:_(s32) = G_SEXTLOAD ...
490           //    ... = ... %2(s32)
491           replaceRegWith(MRI, UseDstReg, ChosenDstReg);
492           Observer.erasingInstr(*UseMO->getParent());
493           UseMO->getParent()->eraseFromParent();
494         } else if (Preferred.Ty.getSizeInBits() < UseDstTy.getSizeInBits()) {
495           // If the preferred size is smaller, then keep the extend but extend
496           // from the result of the extending load. For example:
497           //    %1:_(s8) = G_LOAD ...
498           //    %2:_(s32) = G_SEXT %1(s8)
499           //    %3:_(s64) = G_ANYEXT %1(s8)
500           //    ... = ... %3(s64)
          // rewrites to:
502           //    %2:_(s32) = G_SEXTLOAD ...
503           //    %3:_(s64) = G_ANYEXT %2:_(s32)
504           //    ... = ... %3(s64)
505           replaceRegOpWith(MRI, UseSrcMO, ChosenDstReg);
        } else {
          // If the preferred size is larger, then insert a truncate. For
          // example:
          //    %1:_(s8) = G_LOAD ...
          //    %2:_(s64) = G_SEXT %1(s8)
          //    %3:_(s32) = G_ZEXT %1(s8)
          //    ... = ... %3(s32)
          // rewrites to:
          //    %2:_(s64) = G_SEXTLOAD ...
          //    %4:_(s8) = G_TRUNC %2:_(s64)
          //    %3:_(s32) = G_ZEXT %4(s8)
          //    ... = ... %3(s32)
518           InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO,
519                                                  InsertTruncAt);
520         }
521         continue;
522       }
      // This is the preferred extend we chose earlier. We're going to update
      // the load to def this value directly, so just erase the old extend.
526       Observer.erasingInstr(*UseMO->getParent());
527       UseMO->getParent()->eraseFromParent();
528       continue;
529     }
530 
531     // The use isn't an extend. Truncate back to the type we originally loaded.
532     // This is free on many targets.
533     InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO, InsertTruncAt);
534   }
535 
536   MI.getOperand(0).setReg(ChosenDstReg);
537   Observer.changedInstr(MI);
538 }
539 
bool CombinerHelper::isPredecessor(MachineInstr &DefMI, MachineInstr &UseMI) {
  assert(DefMI.getParent() == UseMI.getParent());
  if (&DefMI == &UseMI)
    return false;

  // Walk the basic block from the top; whichever of the two instructions we
  // reach first comes before the other.
  for (const MachineInstr &CurMI : *DefMI.getParent()) {
    if (&CurMI == &DefMI)
      return true;
    if (&CurMI == &UseMI)
      return false;
  }

  llvm_unreachable("Block must contain both DefMI and UseMI");
}
552 
553 bool CombinerHelper::dominates(MachineInstr &DefMI, MachineInstr &UseMI) {
554   if (MDT)
555     return MDT->dominates(&DefMI, &UseMI);
556   else if (DefMI.getParent() != UseMI.getParent())
557     return false;
558 
559   return isPredecessor(DefMI, UseMI);
560 }
561 
562 bool CombinerHelper::findPostIndexCandidate(MachineInstr &MI, Register &Addr,
563                                             Register &Base, Register &Offset) {
564   auto &MF = *MI.getParent()->getParent();
565   const auto &TLI = *MF.getSubtarget().getTargetLowering();
566 
567 #ifndef NDEBUG
568   unsigned Opcode = MI.getOpcode();
569   assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
570          Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
571 #endif
572 
573   Base = MI.getOperand(1).getReg();
574   MachineInstr *BaseDef = MRI.getUniqueVRegDef(Base);
575   if (BaseDef && BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
576     return false;
577 
578   LLVM_DEBUG(dbgs() << "Searching for post-indexing opportunity for: " << MI);
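  // We are looking for a G_PTR_ADD off the same base, e.g. (illustrative
  // MIR; register names are made up):
  //   %val:_(s32) = G_LOAD %base(p0) ...
  //   %next:_(p0) = G_PTR_ADD %base, %offset
  // which a post-indexed access can fold into one instruction that accesses
  // %base and writes %base + %offset back to a register.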
579 
580   for (auto &Use : MRI.use_instructions(Base)) {
581     if (Use.getOpcode() != TargetOpcode::G_PTR_ADD)
582       continue;
583 
584     Offset = Use.getOperand(2).getReg();
585     if (!ForceLegalIndexing &&
586         !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ false, MRI)) {
587       LLVM_DEBUG(dbgs() << "    Ignoring candidate with illegal addrmode: "
588                         << Use);
589       continue;
590     }
591 
592     // Make sure the offset calculation is before the potentially indexed op.
593     // FIXME: we really care about dependency here. The offset calculation might
594     // be movable.
595     MachineInstr *OffsetDef = MRI.getUniqueVRegDef(Offset);
596     if (!OffsetDef || !dominates(*OffsetDef, MI)) {
597       LLVM_DEBUG(dbgs() << "    Ignoring candidate with offset after mem-op: "
598                         << Use);
599       continue;
600     }
601 
602     // FIXME: check whether all uses of Base are load/store with foldable
603     // addressing modes. If so, using the normal addr-modes is better than
604     // forming an indexed one.
605 
606     bool MemOpDominatesAddrUses = true;
607     for (auto &PtrAddUse : MRI.use_instructions(Use.getOperand(0).getReg())) {
608       if (!dominates(MI, PtrAddUse)) {
609         MemOpDominatesAddrUses = false;
610         break;
611       }
612     }
613 
614     if (!MemOpDominatesAddrUses) {
615       LLVM_DEBUG(
616           dbgs() << "    Ignoring candidate as memop does not dominate uses: "
617                  << Use);
618       continue;
619     }
620 
621     LLVM_DEBUG(dbgs() << "    Found match: " << Use);
622     Addr = Use.getOperand(0).getReg();
623     return true;
624   }
625 
626   return false;
627 }
628 
629 bool CombinerHelper::findPreIndexCandidate(MachineInstr &MI, Register &Addr,
630                                            Register &Base, Register &Offset) {
631   auto &MF = *MI.getParent()->getParent();
632   const auto &TLI = *MF.getSubtarget().getTargetLowering();
633 
634 #ifndef NDEBUG
635   unsigned Opcode = MI.getOpcode();
636   assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
637          Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
638 #endif
639 
640   Addr = MI.getOperand(1).getReg();
641   MachineInstr *AddrDef = getOpcodeDef(TargetOpcode::G_PTR_ADD, Addr, MRI);
642   if (!AddrDef || MRI.hasOneUse(Addr))
643     return false;
644 
645   Base = AddrDef->getOperand(1).getReg();
646   Offset = AddrDef->getOperand(2).getReg();
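  // At this point we have, e.g. (illustrative MIR; names are made up):
  //   %addr:_(p0) = G_PTR_ADD %base, %offset
  //   ... = G_LOAD %addr(p0) ...
  // plus at least one other use of %addr, which a pre-indexed access could
  // satisfy by writing the computed address back to a register.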
647 
648   LLVM_DEBUG(dbgs() << "Found potential pre-indexed load_store: " << MI);
649 
650   if (!ForceLegalIndexing &&
651       !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ true, MRI)) {
652     LLVM_DEBUG(dbgs() << "    Skipping, not legal for target");
653     return false;
654   }
655 
656   MachineInstr *BaseDef = getDefIgnoringCopies(Base, MRI);
657   if (BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
658     LLVM_DEBUG(dbgs() << "    Skipping, frame index would need copy anyway.");
659     return false;
660   }
661 
662   if (MI.getOpcode() == TargetOpcode::G_STORE) {
663     // Would require a copy.
664     if (Base == MI.getOperand(0).getReg()) {
665       LLVM_DEBUG(dbgs() << "    Skipping, storing base so need copy anyway.");
666       return false;
667     }
668 
669     // We're expecting one use of Addr in MI, but it could also be the
670     // value stored, which isn't actually dominated by the instruction.
671     if (MI.getOperand(0).getReg() == Addr) {
672       LLVM_DEBUG(dbgs() << "    Skipping, does not dominate all addr uses");
673       return false;
674     }
675   }
676 
677   // FIXME: check whether all uses of the base pointer are constant PtrAdds.
678   // That might allow us to end base's liveness here by adjusting the constant.
679 
680   for (auto &UseMI : MRI.use_instructions(Addr)) {
681     if (!dominates(MI, UseMI)) {
682       LLVM_DEBUG(dbgs() << "    Skipping, does not dominate all addr uses.");
683       return false;
684     }
685   }
686 
687   return true;
688 }
689 
690 bool CombinerHelper::tryCombineIndexedLoadStore(MachineInstr &MI) {
691   unsigned Opcode = MI.getOpcode();
692   if (Opcode != TargetOpcode::G_LOAD && Opcode != TargetOpcode::G_SEXTLOAD &&
693       Opcode != TargetOpcode::G_ZEXTLOAD && Opcode != TargetOpcode::G_STORE)
694     return false;
695 
696   bool IsStore = Opcode == TargetOpcode::G_STORE;
697   Register Addr, Base, Offset;
698   bool IsPre = findPreIndexCandidate(MI, Addr, Base, Offset);
699   if (!IsPre && !findPostIndexCandidate(MI, Addr, Base, Offset))
700     return false;
701 
702 
703   unsigned NewOpcode;
704   switch (Opcode) {
705   case TargetOpcode::G_LOAD:
706     NewOpcode = TargetOpcode::G_INDEXED_LOAD;
707     break;
708   case TargetOpcode::G_SEXTLOAD:
709     NewOpcode = TargetOpcode::G_INDEXED_SEXTLOAD;
710     break;
711   case TargetOpcode::G_ZEXTLOAD:
712     NewOpcode = TargetOpcode::G_INDEXED_ZEXTLOAD;
713     break;
714   case TargetOpcode::G_STORE:
715     NewOpcode = TargetOpcode::G_INDEXED_STORE;
716     break;
717   default:
718     llvm_unreachable("Unknown load/store opcode");
719   }
720 
721   MachineInstr &AddrDef = *MRI.getUniqueVRegDef(Addr);
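  // Build the indexed operation, e.g. for a pre-indexed load (illustrative
  // MIR; register names are hypothetical):
  //   %val:_(s32), %addr:_(p0) = G_INDEXED_LOAD %base, %offset, 1
  // where the trailing immediate distinguishes pre-indexing (1) from
  // post-indexing (0). Both the original access and the G_PTR_ADD defining
  // %addr are erased below.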
722   MachineIRBuilder MIRBuilder(MI);
723   auto MIB = MIRBuilder.buildInstr(NewOpcode);
724   if (IsStore) {
725     MIB.addDef(Addr);
726     MIB.addUse(MI.getOperand(0).getReg());
727   } else {
728     MIB.addDef(MI.getOperand(0).getReg());
729     MIB.addDef(Addr);
730   }
731 
732   MIB.addUse(Base);
733   MIB.addUse(Offset);
734   MIB.addImm(IsPre);
735   MI.eraseFromParent();
736   AddrDef.eraseFromParent();
737 
  LLVM_DEBUG(dbgs() << "    Combined to indexed operation");
739   return true;
740 }
741 
742 bool CombinerHelper::matchElideBrByInvertingCond(MachineInstr &MI) {
743   if (MI.getOpcode() != TargetOpcode::G_BR)
744     return false;
745 
746   // Try to match the following:
747   // bb1:
748   //   %c(s32) = G_ICMP pred, %a, %b
749   //   %c1(s1) = G_TRUNC %c(s32)
750   //   G_BRCOND %c1, %bb2
751   //   G_BR %bb3
752   // bb2:
753   // ...
754   // bb3:
755 
  // Even though bb2 is the layout successor, the above pattern never falls
  // through to it: a branch is taken no matter which path the condition
  // selects. Here we try to replace that pattern with a conditional branch to
  // bb3 that otherwise falls through to bb2.
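  //
  // After the combine the block ends with (illustrative):
  //   %c(s32) = G_ICMP <inverted pred>, %a, %b
  //   %c1(s1) = G_TRUNC %c(s32)
  //   G_BRCOND %c1, %bb3
  // and execution falls through to bb2.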
760 
761   MachineBasicBlock *MBB = MI.getParent();
762   MachineBasicBlock::iterator BrIt(MI);
763   if (BrIt == MBB->begin())
764     return false;
765   assert(std::next(BrIt) == MBB->end() && "expected G_BR to be a terminator");
766 
767   MachineInstr *BrCond = &*std::prev(BrIt);
768   if (BrCond->getOpcode() != TargetOpcode::G_BRCOND)
769     return false;
770 
771   // Check that the next block is the conditional branch target.
772   if (!MBB->isLayoutSuccessor(BrCond->getOperand(1).getMBB()))
773     return false;
774 
775   MachineInstr *CmpMI = MRI.getVRegDef(BrCond->getOperand(0).getReg());
776   if (!CmpMI || CmpMI->getOpcode() != TargetOpcode::G_ICMP ||
777       !MRI.hasOneUse(CmpMI->getOperand(0).getReg()))
778     return false;
779   return true;
780 }
781 
782 bool CombinerHelper::tryElideBrByInvertingCond(MachineInstr &MI) {
783   if (!matchElideBrByInvertingCond(MI))
784     return false;
785   applyElideBrByInvertingCond(MI);
786   return true;
787 }
788 
789 void CombinerHelper::applyElideBrByInvertingCond(MachineInstr &MI) {
790   MachineBasicBlock *BrTarget = MI.getOperand(0).getMBB();
791   MachineBasicBlock::iterator BrIt(MI);
792   MachineInstr *BrCond = &*std::prev(BrIt);
793   MachineInstr *CmpMI = MRI.getVRegDef(BrCond->getOperand(0).getReg());
794 
795   CmpInst::Predicate InversePred = CmpInst::getInversePredicate(
796       (CmpInst::Predicate)CmpMI->getOperand(1).getPredicate());
797 
798   // Invert the G_ICMP condition.
799   Observer.changingInstr(*CmpMI);
800   CmpMI->getOperand(1).setPredicate(InversePred);
801   Observer.changedInstr(*CmpMI);
802 
803   // Change the conditional branch target.
804   Observer.changingInstr(*BrCond);
805   BrCond->getOperand(1).setMBB(BrTarget);
806   Observer.changedInstr(*BrCond);
807   MI.eraseFromParent();
808 }
809 
810 static bool shouldLowerMemFuncForSize(const MachineFunction &MF) {
811   // On Darwin, -Os means optimize for size without hurting performance, so
812   // only really optimize for size when -Oz (MinSize) is used.
813   if (MF.getTarget().getTargetTriple().isOSDarwin())
814     return MF.getFunction().hasMinSize();
815   return MF.getFunction().hasOptSize();
816 }
817 
818 // Returns a list of types to use for memory op lowering in MemOps. A partial
819 // port of findOptimalMemOpLowering in TargetLowering.
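//
// For example (illustrative, assuming the target prefers s64, permits fast
// misaligned accesses, and the store limit allows it): a 15-byte operation
// may yield MemOps = {s64, s64} with the second access overlapping the first
// by one byte; with AllowOverlap=false it would instead yield
// {s64, s32, s16, s8}.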
820 static bool findGISelOptimalMemOpLowering(
821     std::vector<LLT> &MemOps, unsigned Limit, uint64_t Size, unsigned DstAlign,
822     unsigned SrcAlign, bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
823     bool AllowOverlap, unsigned DstAS, unsigned SrcAS,
824     const AttributeList &FuncAttributes, const TargetLowering &TLI) {
825   // If 'SrcAlign' is zero, that means the memory operation does not need to
826   // load the value, i.e. memset or memcpy from constant string. Otherwise,
827   // it's the inferred alignment of the source. 'DstAlign', on the other hand,
828   // is the specified alignment of the memory operation. If it is zero, that
829   // means it's possible to change the alignment of the destination.
830   // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
831   // not need to be loaded.
832   if (SrcAlign != 0 && SrcAlign < DstAlign)
833     return false;
834 
835   LLT Ty = TLI.getOptimalMemOpLLT(Size, DstAlign, SrcAlign, IsMemset,
836                                   ZeroMemset, MemcpyStrSrc, FuncAttributes);
837 
838   if (Ty == LLT()) {
839     // Use the largest scalar type whose alignment constraints are satisfied.
840     // We only need to check DstAlign here as SrcAlign is always greater or
841     // equal to DstAlign (or zero).
842     Ty = LLT::scalar(64);
843     while (DstAlign && DstAlign < Ty.getSizeInBytes() &&
844            !TLI.allowsMisalignedMemoryAccesses(Ty, DstAS, DstAlign))
845       Ty = LLT::scalar(Ty.getSizeInBytes());
846     assert(Ty.getSizeInBits() > 0 && "Could not find valid type");
847     // FIXME: check for the largest legal type we can load/store to.
848   }
849 
850   unsigned NumMemOps = 0;
851   while (Size != 0) {
852     unsigned TySize = Ty.getSizeInBytes();
853     while (TySize > Size) {
      // For now, only use non-vector load / stores for the left-over pieces.
855       LLT NewTy = Ty;
856       // FIXME: check for mem op safety and legality of the types. Not all of
857       // SDAGisms map cleanly to GISel concepts.
858       if (NewTy.isVector())
859         NewTy = NewTy.getSizeInBits() > 64 ? LLT::scalar(64) : LLT::scalar(32);
860       NewTy = LLT::scalar(PowerOf2Floor(NewTy.getSizeInBits() - 1));
861       unsigned NewTySize = NewTy.getSizeInBytes();
862       assert(NewTySize > 0 && "Could not find appropriate type");
863 
      // If the new LLT cannot cover all of the remaining bits, then consider
      // issuing an unaligned and overlapping load / store for the remainder.
866       bool Fast;
      // Need to get a VT equivalent for allowsMisalignedMemoryAccesses().
868       MVT VT = getMVTForLLT(Ty);
869       if (NumMemOps && AllowOverlap && NewTySize < Size &&
870           TLI.allowsMisalignedMemoryAccesses(
871               VT, DstAS, DstAlign, MachineMemOperand::MONone, &Fast) &&
872           Fast)
873         TySize = Size;
874       else {
875         Ty = NewTy;
876         TySize = NewTySize;
877       }
878     }
879 
880     if (++NumMemOps > Limit)
881       return false;
882 
883     MemOps.push_back(Ty);
884     Size -= TySize;
885   }
886 
887   return true;
888 }
889 
890 static Type *getTypeForLLT(LLT Ty, LLVMContext &C) {
891   if (Ty.isVector())
892     return VectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
893                            Ty.getNumElements());
894   return IntegerType::get(C, Ty.getSizeInBits());
895 }
896 
897 // Get a vectorized representation of the memset value operand, GISel edition.
898 static Register getMemsetValue(Register Val, LLT Ty, MachineIRBuilder &MIB) {
899   MachineRegisterInfo &MRI = *MIB.getMRI();
900   unsigned NumBits = Ty.getScalarSizeInBits();
901   auto ValVRegAndVal = getConstantVRegValWithLookThrough(Val, MRI);
902   if (!Ty.isVector() && ValVRegAndVal) {
903     unsigned KnownVal = ValVRegAndVal->Value;
904     APInt Scalar = APInt(8, KnownVal);
905     APInt SplatVal = APInt::getSplat(NumBits, Scalar);
906     return MIB.buildConstant(Ty, SplatVal).getReg(0);
907   }
908   // FIXME: for vector types create a G_BUILD_VECTOR.
909   if (Ty.isVector())
910     return Register();
911 
912   // Extend the byte value to the larger type, and then multiply by a magic
913   // value 0x010101... in order to replicate it across every byte.
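  // E.g. for an s32 destination and the byte value 0xAB (illustrative):
  //   0x000000AB * 0x01010101 = 0xABABABAB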
914   LLT ExtType = Ty.getScalarType();
915   auto ZExt = MIB.buildZExtOrTrunc(ExtType, Val);
916   if (NumBits > 8) {
917     APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
918     auto MagicMI = MIB.buildConstant(ExtType, Magic);
919     Val = MIB.buildMul(ExtType, ZExt, MagicMI).getReg(0);
920   }
921 
922   assert(ExtType == Ty && "Vector memset value type not supported yet");
923   return Val;
924 }
925 
926 bool CombinerHelper::optimizeMemset(MachineInstr &MI, Register Dst, Register Val,
927                                     unsigned KnownLen, unsigned Align,
928                                     bool IsVolatile) {
929   auto &MF = *MI.getParent()->getParent();
930   const auto &TLI = *MF.getSubtarget().getTargetLowering();
931   auto &DL = MF.getDataLayout();
932   LLVMContext &C = MF.getFunction().getContext();
933 
934   assert(KnownLen != 0 && "Have a zero length memset length!");
935 
936   bool DstAlignCanChange = false;
937   MachineFrameInfo &MFI = MF.getFrameInfo();
938   bool OptSize = shouldLowerMemFuncForSize(MF);
939 
940   MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
941   if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
942     DstAlignCanChange = true;
943 
944   unsigned Limit = TLI.getMaxStoresPerMemset(OptSize);
945   std::vector<LLT> MemOps;
946 
947   const auto &DstMMO = **MI.memoperands_begin();
948   MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();
949 
950   auto ValVRegAndVal = getConstantVRegValWithLookThrough(Val, MRI);
951   bool IsZeroVal = ValVRegAndVal && ValVRegAndVal->Value == 0;
952 
953   if (!findGISelOptimalMemOpLowering(
954           MemOps, Limit, KnownLen, (DstAlignCanChange ? 0 : Align), 0,
955           /*IsMemset=*/true,
956           /*ZeroMemset=*/IsZeroVal, /*MemcpyStrSrc=*/false,
957           /*AllowOverlap=*/!IsVolatile, DstPtrInfo.getAddrSpace(), ~0u,
958           MF.getFunction().getAttributes(), TLI))
959     return false;
960 
961   if (DstAlignCanChange) {
962     // Get an estimate of the type from the LLT.
963     Type *IRTy = getTypeForLLT(MemOps[0], C);
964     unsigned NewAlign = (unsigned)DL.getABITypeAlignment(IRTy);
965     if (NewAlign > Align) {
966       Align = NewAlign;
967       unsigned FI = FIDef->getOperand(1).getIndex();
968       // Give the stack frame object a larger alignment if needed.
969       if (MFI.getObjectAlignment(FI) < Align)
970         MFI.setObjectAlignment(FI, Align);
971     }
972   }
973 
974   MachineIRBuilder MIB(MI);
975   // Find the largest store and generate the bit pattern for it.
976   LLT LargestTy = MemOps[0];
977   for (unsigned i = 1; i < MemOps.size(); i++)
978     if (MemOps[i].getSizeInBits() > LargestTy.getSizeInBits())
979       LargestTy = MemOps[i];
980 
981   // The memset stored value is always defined as an s8, so in order to make it
982   // work with larger store types we need to repeat the bit pattern across the
983   // wider type.
984   Register MemSetValue = getMemsetValue(Val, LargestTy, MIB);
985 
986   if (!MemSetValue)
987     return false;
988 
989   // Generate the stores. For each store type in the list, we generate the
990   // matching store of that type to the destination address.
991   LLT PtrTy = MRI.getType(Dst);
992   unsigned DstOff = 0;
993   unsigned Size = KnownLen;
994   for (unsigned I = 0; I < MemOps.size(); I++) {
995     LLT Ty = MemOps[I];
996     unsigned TySize = Ty.getSizeInBytes();
997     if (TySize > Size) {
      // Issuing an unaligned store that overlaps with the previous store.
      // Adjust the offset accordingly.
1000       assert(I == MemOps.size() - 1 && I != 0);
1001       DstOff -= TySize - Size;
1002     }
1003 
1004     // If this store is smaller than the largest store see whether we can get
1005     // the smaller value for free with a truncate.
1006     Register Value = MemSetValue;
1007     if (Ty.getSizeInBits() < LargestTy.getSizeInBits()) {
1008       MVT VT = getMVTForLLT(Ty);
1009       MVT LargestVT = getMVTForLLT(LargestTy);
1010       if (!LargestTy.isVector() && !Ty.isVector() &&
1011           TLI.isTruncateFree(LargestVT, VT))
1012         Value = MIB.buildTrunc(Ty, MemSetValue).getReg(0);
1013       else
1014         Value = getMemsetValue(Val, Ty, MIB);
1015       if (!Value)
1016         return false;
1017     }
1018 
1019     auto *StoreMMO =
1020         MF.getMachineMemOperand(&DstMMO, DstOff, Ty.getSizeInBytes());
1021 
1022     Register Ptr = Dst;
1023     if (DstOff != 0) {
1024       auto Offset =
1025           MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), DstOff);
1026       Ptr = MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0);
1027     }
1028 
1029     MIB.buildStore(Value, Ptr, *StoreMMO);
1030     DstOff += Ty.getSizeInBytes();
1031     Size -= TySize;
1032   }
1033 
1034   MI.eraseFromParent();
1035   return true;
1036 }
1037 
1038 
1039 bool CombinerHelper::optimizeMemcpy(MachineInstr &MI, Register Dst,
1040                                     Register Src, unsigned KnownLen,
1041                                     unsigned DstAlign, unsigned SrcAlign,
1042                                     bool IsVolatile) {
1043   auto &MF = *MI.getParent()->getParent();
1044   const auto &TLI = *MF.getSubtarget().getTargetLowering();
1045   auto &DL = MF.getDataLayout();
1046   LLVMContext &C = MF.getFunction().getContext();
1047 
1048   assert(KnownLen != 0 && "Have a zero length memcpy length!");
1049 
1050   bool DstAlignCanChange = false;
1051   MachineFrameInfo &MFI = MF.getFrameInfo();
1052   bool OptSize = shouldLowerMemFuncForSize(MF);
1053   unsigned Alignment = MinAlign(DstAlign, SrcAlign);
1054 
1055   MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
1056   if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
1057     DstAlignCanChange = true;
1058 
1059   // FIXME: infer better src pointer alignment like SelectionDAG does here.
1060   // FIXME: also use the equivalent of isMemSrcFromConstant and alwaysinlining
1061   // if the memcpy is in a tail call position.
1062 
1063   unsigned Limit = TLI.getMaxStoresPerMemcpy(OptSize);
1064   std::vector<LLT> MemOps;
1065 
1066   const auto &DstMMO = **MI.memoperands_begin();
1067   const auto &SrcMMO = **std::next(MI.memoperands_begin());
1068   MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();
1069   MachinePointerInfo SrcPtrInfo = SrcMMO.getPointerInfo();
1070 
1071   if (!findGISelOptimalMemOpLowering(
1072           MemOps, Limit, KnownLen, (DstAlignCanChange ? 0 : Alignment),
1073           SrcAlign,
1074           /*IsMemset=*/false,
1075           /*ZeroMemset=*/false, /*MemcpyStrSrc=*/false,
1076           /*AllowOverlap=*/!IsVolatile, DstPtrInfo.getAddrSpace(),
1077           SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes(), TLI))
1078     return false;
1079 
1080   if (DstAlignCanChange) {
1081     // Get an estimate of the type from the LLT.
1082     Type *IRTy = getTypeForLLT(MemOps[0], C);
1083     unsigned NewAlign = (unsigned)DL.getABITypeAlignment(IRTy);
1084 
1085     // Don't promote to an alignment that would require dynamic stack
1086     // realignment.
1087     const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
1088     if (!TRI->needsStackRealignment(MF))
1089       while (NewAlign > Alignment &&
1090              DL.exceedsNaturalStackAlignment(Align(NewAlign)))
1091         NewAlign /= 2;
1092 
1093     if (NewAlign > Alignment) {
1094       Alignment = NewAlign;
1095       unsigned FI = FIDef->getOperand(1).getIndex();
1096       // Give the stack frame object a larger alignment if needed.
1097       if (MFI.getObjectAlignment(FI) < Alignment)
1098         MFI.setObjectAlignment(FI, Alignment);
1099     }
1100   }
1101 
1102   LLVM_DEBUG(dbgs() << "Inlining memcpy: " << MI << " into loads & stores\n");
1103 
1104   MachineIRBuilder MIB(MI);
  // Now we need to emit a load/store pair for each of the types we've
  // collected. I.e. for each type, generate a load from the source pointer of
  // that type width, and then generate a corresponding store of the loaded
  // value to the dest buffer. This can result in a sequence of loads and
  // stores of mixed types, depending on what the target specifies as good
  // types to use.
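  // For example (illustrative), MemOps = {s64, s32} for a 12-byte copy emits
  // an s64 load/store at offset 0 followed by an s32 load/store at offset 8.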
1110   unsigned CurrOffset = 0;
1111   LLT PtrTy = MRI.getType(Src);
1112   unsigned Size = KnownLen;
1113   for (auto CopyTy : MemOps) {
    // If we are issuing an unaligned load / store pair that overlaps with the
    // previous pair, adjust the offset accordingly.
1116     if (CopyTy.getSizeInBytes() > Size)
1117       CurrOffset -= CopyTy.getSizeInBytes() - Size;
1118 
1119     // Construct MMOs for the accesses.
1120     auto *LoadMMO =
1121         MF.getMachineMemOperand(&SrcMMO, CurrOffset, CopyTy.getSizeInBytes());
1122     auto *StoreMMO =
1123         MF.getMachineMemOperand(&DstMMO, CurrOffset, CopyTy.getSizeInBytes());
1124 
1125     // Create the load.
1126     Register LoadPtr = Src;
1127     Register Offset;
1128     if (CurrOffset != 0) {
1129       Offset = MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset)
1130                    .getReg(0);
1131       LoadPtr = MIB.buildPtrAdd(PtrTy, Src, Offset).getReg(0);
1132     }
1133     auto LdVal = MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO);
1134 
1135     // Create the store.
1136     Register StorePtr =
1137         CurrOffset == 0 ? Dst : MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0);
1138     MIB.buildStore(LdVal, StorePtr, *StoreMMO);
1139     CurrOffset += CopyTy.getSizeInBytes();
1140     Size -= CopyTy.getSizeInBytes();
1141   }
1142 
1143   MI.eraseFromParent();
1144   return true;
1145 }
1146 
1147 bool CombinerHelper::optimizeMemmove(MachineInstr &MI, Register Dst,
1148                                     Register Src, unsigned KnownLen,
1149                                     unsigned DstAlign, unsigned SrcAlign,
1150                                     bool IsVolatile) {
1151   auto &MF = *MI.getParent()->getParent();
1152   const auto &TLI = *MF.getSubtarget().getTargetLowering();
1153   auto &DL = MF.getDataLayout();
1154   LLVMContext &C = MF.getFunction().getContext();
1155 
1156   assert(KnownLen != 0 && "Have a zero length memmove length!");
1157 
1158   bool DstAlignCanChange = false;
1159   MachineFrameInfo &MFI = MF.getFrameInfo();
1160   bool OptSize = shouldLowerMemFuncForSize(MF);
1161   unsigned Alignment = MinAlign(DstAlign, SrcAlign);
1162 
1163   MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
1164   if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
1165     DstAlignCanChange = true;
1166 
1167   unsigned Limit = TLI.getMaxStoresPerMemmove(OptSize);
1168   std::vector<LLT> MemOps;
1169 
1170   const auto &DstMMO = **MI.memoperands_begin();
1171   const auto &SrcMMO = **std::next(MI.memoperands_begin());
1172   MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();
1173   MachinePointerInfo SrcPtrInfo = SrcMMO.getPointerInfo();
1174 
1175   // FIXME: SelectionDAG always passes false for 'AllowOverlap', apparently due
  // to a bug in its findOptimalMemOpLowering implementation. For now do the
1177   // same thing here.
1178   if (!findGISelOptimalMemOpLowering(
1179           MemOps, Limit, KnownLen, (DstAlignCanChange ? 0 : Alignment),
1180           SrcAlign,
1181           /*IsMemset=*/false,
1182           /*ZeroMemset=*/false, /*MemcpyStrSrc=*/false,
1183           /*AllowOverlap=*/false, DstPtrInfo.getAddrSpace(),
1184           SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes(), TLI))
1185     return false;
1186 
1187   if (DstAlignCanChange) {
1188     // Get an estimate of the type from the LLT.
1189     Type *IRTy = getTypeForLLT(MemOps[0], C);
1190     unsigned NewAlign = (unsigned)DL.getABITypeAlignment(IRTy);
1191 
1192     // Don't promote to an alignment that would require dynamic stack
1193     // realignment.
1194     const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
1195     if (!TRI->needsStackRealignment(MF))
1196       while (NewAlign > Alignment &&
1197              DL.exceedsNaturalStackAlignment(Align(NewAlign)))
1198         NewAlign /= 2;
1199 
1200     if (NewAlign > Alignment) {
1201       Alignment = NewAlign;
1202       unsigned FI = FIDef->getOperand(1).getIndex();
1203       // Give the stack frame object a larger alignment if needed.
1204       if (MFI.getObjectAlignment(FI) < Alignment)
1205         MFI.setObjectAlignment(FI, Alignment);
1206     }
1207   }
1208 
1209   LLVM_DEBUG(dbgs() << "Inlining memmove: " << MI << " into loads & stores\n");
1210 
1211   MachineIRBuilder MIB(MI);
  // Memmove requires that we perform all of the loads before issuing any of
  // the stores, since the source and destination ranges may overlap. Apart
  // from that, this loop is pretty much doing the same thing as the memcpy
  // codegen function.
1215   unsigned CurrOffset = 0;
1216   LLT PtrTy = MRI.getType(Src);
1217   SmallVector<Register, 16> LoadVals;
1218   for (auto CopyTy : MemOps) {
1219     // Construct MMO for the load.
1220     auto *LoadMMO =
1221         MF.getMachineMemOperand(&SrcMMO, CurrOffset, CopyTy.getSizeInBytes());
1222 
1223     // Create the load.
1224     Register LoadPtr = Src;
1225     if (CurrOffset != 0) {
1226       auto Offset =
1227           MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset);
1228       LoadPtr = MIB.buildPtrAdd(PtrTy, Src, Offset).getReg(0);
1229     }
1230     LoadVals.push_back(MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO).getReg(0));
1231     CurrOffset += CopyTy.getSizeInBytes();
1232   }
1233 
1234   CurrOffset = 0;
1235   for (unsigned I = 0; I < MemOps.size(); ++I) {
1236     LLT CopyTy = MemOps[I];
1237     // Now store the values loaded.
1238     auto *StoreMMO =
1239         MF.getMachineMemOperand(&DstMMO, CurrOffset, CopyTy.getSizeInBytes());
1240 
1241     Register StorePtr = Dst;
1242     if (CurrOffset != 0) {
1243       auto Offset =
1244           MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset);
1245       StorePtr = MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0);
1246     }
1247     MIB.buildStore(LoadVals[I], StorePtr, *StoreMMO);
1248     CurrOffset += CopyTy.getSizeInBytes();
1249   }
1250   MI.eraseFromParent();
1251   return true;
1252 }
1253 
1254 bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) {
1255   // This combine is fairly complex so it's not written with a separate
1256   // matcher function.
1257   assert(MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
1258   Intrinsic::ID ID = (Intrinsic::ID)MI.getIntrinsicID();
1259   assert((ID == Intrinsic::memcpy || ID == Intrinsic::memmove ||
1260           ID == Intrinsic::memset) &&
1261          "Expected a memcpy like intrinsic");
1262 
1263   auto MMOIt = MI.memoperands_begin();
1264   const MachineMemOperand *MemOp = *MMOIt;
1265   bool IsVolatile = MemOp->isVolatile();
1266   // Don't try to optimize volatile.
1267   if (IsVolatile)
1268     return false;
1269 
1270   unsigned DstAlign = MemOp->getBaseAlignment();
1271   unsigned SrcAlign = 0;
1272   Register Dst = MI.getOperand(1).getReg();
1273   Register Src = MI.getOperand(2).getReg();
1274   Register Len = MI.getOperand(3).getReg();
1275 
1276   if (ID != Intrinsic::memset) {
1277     assert(MMOIt != MI.memoperands_end() && "Expected a second MMO on MI");
1278     MemOp = *(++MMOIt);
1279     SrcAlign = MemOp->getBaseAlignment();
1280   }
1281 
1282   // See if this is a constant length copy
1283   auto LenVRegAndVal = getConstantVRegValWithLookThrough(Len, MRI);
1284   if (!LenVRegAndVal)
1285     return false; // Leave it to the legalizer to lower it to a libcall.
1286   unsigned KnownLen = LenVRegAndVal->Value;
1287 
1288   if (KnownLen == 0) {
1289     MI.eraseFromParent();
1290     return true;
1291   }
1292 
1293   if (MaxLen && KnownLen > MaxLen)
1294     return false;
1295 
1296   if (ID == Intrinsic::memcpy)
1297     return optimizeMemcpy(MI, Dst, Src, KnownLen, DstAlign, SrcAlign, IsVolatile);
1298   if (ID == Intrinsic::memmove)
1299     return optimizeMemmove(MI, Dst, Src, KnownLen, DstAlign, SrcAlign, IsVolatile);
1300   if (ID == Intrinsic::memset)
1301     return optimizeMemset(MI, Dst, Src, KnownLen, DstAlign, IsVolatile);
1302   return false;
1303 }
1304 
1305 bool CombinerHelper::tryCombine(MachineInstr &MI) {
1306   if (tryCombineCopy(MI))
1307     return true;
1308   if (tryCombineExtendingLoads(MI))
1309     return true;
1310   if (tryCombineIndexedLoadStore(MI))
1311     return true;
1312   return false;
1313 }
1314