//===-- lib/CodeGen/GlobalISel/CombinerHelper.cpp --------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 #include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
9 #include "llvm/CodeGen/GlobalISel/Combiner.h"
10 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
11 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
12 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
13 #include "llvm/CodeGen/GlobalISel/Utils.h"
14 #include "llvm/CodeGen/MachineDominators.h"
15 #include "llvm/CodeGen/MachineFrameInfo.h"
16 #include "llvm/CodeGen/MachineInstr.h"
17 #include "llvm/CodeGen/MachineRegisterInfo.h"
18 #include "llvm/CodeGen/TargetInstrInfo.h"
19 #include "llvm/CodeGen/TargetLowering.h"
20 #include "llvm/Target/TargetMachine.h"
21 
22 #define DEBUG_TYPE "gi-combiner"
23 
24 using namespace llvm;
25 
26 // Option to allow testing of the combiner while no targets know about indexed
27 // addressing.
28 static cl::opt<bool>
29     ForceLegalIndexing("force-legal-indexing", cl::Hidden, cl::init(false),
30                        cl::desc("Force all indexed operations to be "
31                                 "legal for the GlobalISel combiner"));
32 
33 
34 CombinerHelper::CombinerHelper(GISelChangeObserver &Observer,
35                                MachineIRBuilder &B, GISelKnownBits *KB,
36                                MachineDominatorTree *MDT)
37     : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer),
38       KB(KB), MDT(MDT) {
39   (void)this->KB;
40 }
41 
42 void CombinerHelper::replaceRegWith(MachineRegisterInfo &MRI, Register FromReg,
43                                     Register ToReg) const {
44   Observer.changingAllUsesOfReg(MRI, FromReg);
45 
46   if (MRI.constrainRegAttrs(ToReg, FromReg))
47     MRI.replaceRegWith(FromReg, ToReg);
48   else
49     Builder.buildCopy(ToReg, FromReg);
50 
51   Observer.finishedChangingAllUsesOfReg();
52 }
53 
54 void CombinerHelper::replaceRegOpWith(MachineRegisterInfo &MRI,
55                                       MachineOperand &FromRegOp,
56                                       Register ToReg) const {
57   assert(FromRegOp.getParent() && "Expected an operand in an MI");
58   Observer.changingInstr(*FromRegOp.getParent());
59 
60   FromRegOp.setReg(ToReg);
61 
62   Observer.changedInstr(*FromRegOp.getParent());
63 }
64 
65 bool CombinerHelper::tryCombineCopy(MachineInstr &MI) {
66   if (matchCombineCopy(MI)) {
67     applyCombineCopy(MI);
68     return true;
69   }
70   return false;
71 }
72 bool CombinerHelper::matchCombineCopy(MachineInstr &MI) {
73   if (MI.getOpcode() != TargetOpcode::COPY)
74     return false;
75   Register DstReg = MI.getOperand(0).getReg();
76   Register SrcReg = MI.getOperand(1).getReg();
77   LLT DstTy = MRI.getType(DstReg);
78   LLT SrcTy = MRI.getType(SrcReg);
79   // Simple Copy Propagation.
80   // a(sx) = COPY b(sx) -> Replace all uses of a with b.
81   if (DstTy.isValid() && SrcTy.isValid() && DstTy == SrcTy)
82     return true;
83   return false;
84 }
85 void CombinerHelper::applyCombineCopy(MachineInstr &MI) {
86   Register DstReg = MI.getOperand(0).getReg();
87   Register SrcReg = MI.getOperand(1).getReg();
88   MI.eraseFromParent();
89   replaceRegWith(MRI, DstReg, SrcReg);
90 }
91 
92 bool CombinerHelper::tryCombineConcatVectors(MachineInstr &MI) {
93   bool IsUndef = false;
94   SmallVector<Register, 4> Ops;
95   if (matchCombineConcatVectors(MI, IsUndef, Ops)) {
96     applyCombineConcatVectors(MI, IsUndef, Ops);
97     return true;
98   }
99   return false;
100 }
101 
102 bool CombinerHelper::matchCombineConcatVectors(MachineInstr &MI, bool &IsUndef,
103                                                SmallVectorImpl<Register> &Ops) {
104   assert(MI.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
105          "Invalid instruction");
106   IsUndef = true;
107   MachineInstr *Undef = nullptr;
108 
109   // Walk over all the operands of concat vectors and check if they are
110   // build_vector themselves or undef.
111   // Then collect their operands in Ops.
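  //
  // For example (a sketch of the flattening this enables):
  //   %x:_(<2 x s32>) = G_BUILD_VECTOR %a(s32), %b(s32)
  //   %y:_(<2 x s32>) = G_IMPLICIT_DEF
  //   %z:_(<4 x s32>) = G_CONCAT_VECTORS %x(<2 x s32>), %y(<2 x s32>)
  // collects Ops = {%a, %b, %undef, %undef}, where a single scalar undef is
  // created and reused for every undef lane.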
112   for (const MachineOperand &MO : MI.operands()) {
113     // Skip the instruction definition.
114     if (MO.isDef())
115       continue;
116     Register Reg = MO.getReg();
117     MachineInstr *Def = MRI.getVRegDef(Reg);
118     assert(Def && "Operand not defined");
119     switch (Def->getOpcode()) {
120     case TargetOpcode::G_BUILD_VECTOR:
121       IsUndef = false;
122       // Remember the operands of the build_vector to fold
123       // them into the yet-to-build flattened concat vectors.
124       for (const MachineOperand &BuildVecMO : Def->operands()) {
125         // Skip the definition.
126         if (BuildVecMO.isDef())
127           continue;
128         Ops.push_back(BuildVecMO.getReg());
129       }
130       break;
131     case TargetOpcode::G_IMPLICIT_DEF: {
132       LLT OpType = MRI.getType(Reg);
133       // Keep one undef value for all the undef operands.
134       if (!Undef) {
135         Builder.setInsertPt(*MI.getParent(), MI);
136         Undef = Builder.buildUndef(OpType.getScalarType());
137       }
138       assert(MRI.getType(Undef->getOperand(0).getReg()) ==
139                  OpType.getScalarType() &&
140              "All undefs should have the same type");
141       // Break the undef vector in as many scalar elements as needed
142       // for the flattening.
143       for (unsigned EltIdx = 0, EltEnd = OpType.getNumElements();
144            EltIdx != EltEnd; ++EltIdx)
145         Ops.push_back(Undef->getOperand(0).getReg());
146       break;
147     }
148     default:
149       return false;
150     }
151   }
152   return true;
153 }
154 void CombinerHelper::applyCombineConcatVectors(
155     MachineInstr &MI, bool IsUndef, const ArrayRef<Register> Ops) {
  // We determined that the concat_vectors can be flattened.
157   // Generate the flattened build_vector.
158   Register DstReg = MI.getOperand(0).getReg();
159   Builder.setInsertPt(*MI.getParent(), MI);
160   Register NewDstReg = MRI.cloneVirtualRegister(DstReg);
161 
  // Note: IsUndef is sort of redundant. We could have determined it by
  // checking that all Ops are undef. Alternatively, we could have
  // generated a build_vector of undefs and relied on another combine to
  // clean that up. For now, given that we already gather this information
  // in tryCombineConcatVectors, just save compile time and issue the
  // right thing.
168   if (IsUndef)
169     Builder.buildUndef(NewDstReg);
170   else
171     Builder.buildBuildVector(NewDstReg, Ops);
172   MI.eraseFromParent();
173   replaceRegWith(MRI, DstReg, NewDstReg);
174 }
175 
176 namespace {
177 
/// Select a preference between two uses. CurrentUse is the current preference
/// while the *ForCandidate arguments describe the candidate under
/// consideration.
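/// Roughly, the preference order is: any defined extend over G_ANYEXT, G_SEXT
/// over G_ZEXT at the same type, and a larger result type over a smaller one.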
180 PreferredTuple ChoosePreferredUse(PreferredTuple &CurrentUse,
181                                   const LLT &TyForCandidate,
182                                   unsigned OpcodeForCandidate,
183                                   MachineInstr *MIForCandidate) {
184   if (!CurrentUse.Ty.isValid()) {
185     if (CurrentUse.ExtendOpcode == OpcodeForCandidate ||
186         CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT)
187       return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
188     return CurrentUse;
189   }
190 
191   // We permit the extend to hoist through basic blocks but this is only
192   // sensible if the target has extending loads. If you end up lowering back
193   // into a load and extend during the legalizer then the end result is
194   // hoisting the extend up to the load.
195 
196   // Prefer defined extensions to undefined extensions as these are more
197   // likely to reduce the number of instructions.
198   if (OpcodeForCandidate == TargetOpcode::G_ANYEXT &&
199       CurrentUse.ExtendOpcode != TargetOpcode::G_ANYEXT)
200     return CurrentUse;
201   else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT &&
202            OpcodeForCandidate != TargetOpcode::G_ANYEXT)
203     return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
204 
  // Prefer sign extensions to zero extensions as sign-extensions tend to be
  // more expensive, so folding the sign extension into the load is the bigger
  // win.
207   if (CurrentUse.Ty == TyForCandidate) {
208     if (CurrentUse.ExtendOpcode == TargetOpcode::G_SEXT &&
209         OpcodeForCandidate == TargetOpcode::G_ZEXT)
210       return CurrentUse;
211     else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT &&
212              OpcodeForCandidate == TargetOpcode::G_SEXT)
213       return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
214   }
215 
216   // This is potentially target specific. We've chosen the largest type
217   // because G_TRUNC is usually free. One potential catch with this is that
218   // some targets have a reduced number of larger registers than smaller
219   // registers and this choice potentially increases the live-range for the
220   // larger value.
221   if (TyForCandidate.getSizeInBits() > CurrentUse.Ty.getSizeInBits()) {
222     return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
223   }
224   return CurrentUse;
225 }
226 
227 /// Find a suitable place to insert some instructions and insert them. This
228 /// function accounts for special cases like inserting before a PHI node.
/// The current strategy for inserting before PHIs is to duplicate the
/// instructions for each predecessor. That is usually fine for G_TRUNC on most
/// targets since it generally requires no code, but other targets/cases may
/// want to try harder to find a dominating block.
233 static void InsertInsnsWithoutSideEffectsBeforeUse(
234     MachineIRBuilder &Builder, MachineInstr &DefMI, MachineOperand &UseMO,
235     std::function<void(MachineBasicBlock *, MachineBasicBlock::iterator,
236                        MachineOperand &UseMO)>
237         Inserter) {
238   MachineInstr &UseMI = *UseMO.getParent();
239 
240   MachineBasicBlock *InsertBB = UseMI.getParent();
241 
242   // If the use is a PHI then we want the predecessor block instead.
243   if (UseMI.isPHI()) {
244     MachineOperand *PredBB = std::next(&UseMO);
245     InsertBB = PredBB->getMBB();
246   }
247 
248   // If the block is the same block as the def then we want to insert just after
249   // the def instead of at the start of the block.
250   if (InsertBB == DefMI.getParent()) {
251     MachineBasicBlock::iterator InsertPt = &DefMI;
252     Inserter(InsertBB, std::next(InsertPt), UseMO);
253     return;
254   }
255 
256   // Otherwise we want the start of the BB
257   Inserter(InsertBB, InsertBB->getFirstNonPHI(), UseMO);
258 }
259 } // end anonymous namespace
260 
261 bool CombinerHelper::tryCombineExtendingLoads(MachineInstr &MI) {
262   PreferredTuple Preferred;
263   if (matchCombineExtendingLoads(MI, Preferred)) {
264     applyCombineExtendingLoads(MI, Preferred);
265     return true;
266   }
267   return false;
268 }
269 
270 bool CombinerHelper::matchCombineExtendingLoads(MachineInstr &MI,
271                                                 PreferredTuple &Preferred) {
272   // We match the loads and follow the uses to the extend instead of matching
273   // the extends and following the def to the load. This is because the load
274   // must remain in the same position for correctness (unless we also add code
275   // to find a safe place to sink it) whereas the extend is freely movable.
276   // It also prevents us from duplicating the load for the volatile case or just
277   // for performance.
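  //
  // For example, an s8 G_LOAD whose result feeds a G_SEXT to s32 is rewritten
  // into an s32 G_SEXTLOAD, and the other extending/truncating uses are fixed
  // up in terms of the wider result (see applyCombineExtendingLoads for
  // detailed examples).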
278 
279   if (MI.getOpcode() != TargetOpcode::G_LOAD &&
280       MI.getOpcode() != TargetOpcode::G_SEXTLOAD &&
281       MI.getOpcode() != TargetOpcode::G_ZEXTLOAD)
282     return false;
283 
284   auto &LoadValue = MI.getOperand(0);
285   assert(LoadValue.isReg() && "Result wasn't a register?");
286 
287   LLT LoadValueTy = MRI.getType(LoadValue.getReg());
288   if (!LoadValueTy.isScalar())
289     return false;
290 
291   // Most architectures are going to legalize <s8 loads into at least a 1 byte
292   // load, and the MMOs can only describe memory accesses in multiples of bytes.
293   // If we try to perform extload combining on those, we can end up with
294   // %a(s8) = extload %ptr (load 1 byte from %ptr)
295   // ... which is an illegal extload instruction.
296   if (LoadValueTy.getSizeInBits() < 8)
297     return false;
298 
  // Non-power-of-2 types will very likely be legalized into multiple loads, so
  // don't bother trying to match them into extending loads.
301   if (!isPowerOf2_32(LoadValueTy.getSizeInBits()))
302     return false;
303 
304   // Find the preferred type aside from the any-extends (unless it's the only
  // one) and non-extending ops. We'll emit an extending load to that type and
  // emit a variant of (extend (trunc X)) for the others according to the
307   // relative type sizes. At the same time, pick an extend to use based on the
308   // extend involved in the chosen type.
309   unsigned PreferredOpcode = MI.getOpcode() == TargetOpcode::G_LOAD
310                                  ? TargetOpcode::G_ANYEXT
311                                  : MI.getOpcode() == TargetOpcode::G_SEXTLOAD
312                                        ? TargetOpcode::G_SEXT
313                                        : TargetOpcode::G_ZEXT;
314   Preferred = {LLT(), PreferredOpcode, nullptr};
315   for (auto &UseMI : MRI.use_instructions(LoadValue.getReg())) {
316     if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||
317         UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
318         UseMI.getOpcode() == TargetOpcode::G_ANYEXT) {
319       Preferred = ChoosePreferredUse(Preferred,
320                                      MRI.getType(UseMI.getOperand(0).getReg()),
321                                      UseMI.getOpcode(), &UseMI);
322     }
323   }
324 
325   // There were no extends
326   if (!Preferred.MI)
327     return false;
  // It should be impossible to choose an extend without selecting a different
  // type since by definition the result of an extend is larger.
330   assert(Preferred.Ty != LoadValueTy && "Extending to same type?");
331 
332   LLVM_DEBUG(dbgs() << "Preferred use is: " << *Preferred.MI);
333   return true;
334 }
335 
336 void CombinerHelper::applyCombineExtendingLoads(MachineInstr &MI,
337                                                 PreferredTuple &Preferred) {
338   // Rewrite the load to the chosen extending load.
339   Register ChosenDstReg = Preferred.MI->getOperand(0).getReg();
340 
341   // Inserter to insert a truncate back to the original type at a given point
342   // with some basic CSE to limit truncate duplication to one per BB.
343   DenseMap<MachineBasicBlock *, MachineInstr *> EmittedInsns;
344   auto InsertTruncAt = [&](MachineBasicBlock *InsertIntoBB,
345                            MachineBasicBlock::iterator InsertBefore,
346                            MachineOperand &UseMO) {
347     MachineInstr *PreviouslyEmitted = EmittedInsns.lookup(InsertIntoBB);
348     if (PreviouslyEmitted) {
349       Observer.changingInstr(*UseMO.getParent());
350       UseMO.setReg(PreviouslyEmitted->getOperand(0).getReg());
351       Observer.changedInstr(*UseMO.getParent());
352       return;
353     }
354 
355     Builder.setInsertPt(*InsertIntoBB, InsertBefore);
356     Register NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg());
357     MachineInstr *NewMI = Builder.buildTrunc(NewDstReg, ChosenDstReg);
358     EmittedInsns[InsertIntoBB] = NewMI;
359     replaceRegOpWith(MRI, UseMO, NewDstReg);
360   };
361 
362   Observer.changingInstr(MI);
363   MI.setDesc(
364       Builder.getTII().get(Preferred.ExtendOpcode == TargetOpcode::G_SEXT
365                                ? TargetOpcode::G_SEXTLOAD
366                                : Preferred.ExtendOpcode == TargetOpcode::G_ZEXT
367                                      ? TargetOpcode::G_ZEXTLOAD
368                                      : TargetOpcode::G_LOAD));
369 
370   // Rewrite all the uses to fix up the types.
371   auto &LoadValue = MI.getOperand(0);
372   SmallVector<MachineOperand *, 4> Uses;
373   for (auto &UseMO : MRI.use_operands(LoadValue.getReg()))
374     Uses.push_back(&UseMO);
375 
376   for (auto *UseMO : Uses) {
377     MachineInstr *UseMI = UseMO->getParent();
378 
379     // If the extend is compatible with the preferred extend then we should fix
380     // up the type and extend so that it uses the preferred use.
381     if (UseMI->getOpcode() == Preferred.ExtendOpcode ||
382         UseMI->getOpcode() == TargetOpcode::G_ANYEXT) {
383       Register UseDstReg = UseMI->getOperand(0).getReg();
384       MachineOperand &UseSrcMO = UseMI->getOperand(1);
385       const LLT &UseDstTy = MRI.getType(UseDstReg);
386       if (UseDstReg != ChosenDstReg) {
387         if (Preferred.Ty == UseDstTy) {
388           // If the use has the same type as the preferred use, then merge
389           // the vregs and erase the extend. For example:
390           //    %1:_(s8) = G_LOAD ...
391           //    %2:_(s32) = G_SEXT %1(s8)
392           //    %3:_(s32) = G_ANYEXT %1(s8)
393           //    ... = ... %3(s32)
394           // rewrites to:
395           //    %2:_(s32) = G_SEXTLOAD ...
396           //    ... = ... %2(s32)
397           replaceRegWith(MRI, UseDstReg, ChosenDstReg);
398           Observer.erasingInstr(*UseMO->getParent());
399           UseMO->getParent()->eraseFromParent();
400         } else if (Preferred.Ty.getSizeInBits() < UseDstTy.getSizeInBits()) {
401           // If the preferred size is smaller, then keep the extend but extend
402           // from the result of the extending load. For example:
403           //    %1:_(s8) = G_LOAD ...
404           //    %2:_(s32) = G_SEXT %1(s8)
405           //    %3:_(s64) = G_ANYEXT %1(s8)
406           //    ... = ... %3(s64)
          // rewrites to:
408           //    %2:_(s32) = G_SEXTLOAD ...
409           //    %3:_(s64) = G_ANYEXT %2:_(s32)
410           //    ... = ... %3(s64)
411           replaceRegOpWith(MRI, UseSrcMO, ChosenDstReg);
412         } else {
          // If the preferred size is larger, then insert a truncate. For
          // example:
415           //    %1:_(s8) = G_LOAD ...
416           //    %2:_(s64) = G_SEXT %1(s8)
417           //    %3:_(s32) = G_ZEXT %1(s8)
418           //    ... = ... %3(s32)
          // rewrites to:
          //    %2:_(s64) = G_SEXTLOAD ...
          //    %4:_(s8) = G_TRUNC %2:_(s64)
          //    %3:_(s32) = G_ZEXT %4:_(s8)
          //    ... = ... %3(s32)
424           InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO,
425                                                  InsertTruncAt);
426         }
427         continue;
428       }
429       // The use is (one of) the uses of the preferred use we chose earlier.
430       // We're going to update the load to def this value later so just erase
431       // the old extend.
432       Observer.erasingInstr(*UseMO->getParent());
433       UseMO->getParent()->eraseFromParent();
434       continue;
435     }
436 
437     // The use isn't an extend. Truncate back to the type we originally loaded.
438     // This is free on many targets.
439     InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO, InsertTruncAt);
440   }
441 
442   MI.getOperand(0).setReg(ChosenDstReg);
443   Observer.changedInstr(MI);
444 }
445 
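/// Returns true if \p DefMI precedes \p UseMI within their common basic block.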
446 bool CombinerHelper::isPredecessor(MachineInstr &DefMI, MachineInstr &UseMI) {
447   assert(DefMI.getParent() == UseMI.getParent());
448   if (&DefMI == &UseMI)
449     return false;
450 
  // Loop through the basic block until we find one of the instructions. Both
  // are in this block, so the scan is guaranteed to terminate.
  MachineBasicBlock::const_iterator I = DefMI.getParent()->begin();
  while (&*I != &DefMI && &*I != &UseMI)
    ++I;
  return &*I == &DefMI;
457 }
458 
459 bool CombinerHelper::dominates(MachineInstr &DefMI, MachineInstr &UseMI) {
460   if (MDT)
461     return MDT->dominates(&DefMI, &UseMI);
462   else if (DefMI.getParent() != UseMI.getParent())
463     return false;
464 
465   return isPredecessor(DefMI, UseMI);
466 }
467 
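/// Look for a post-indexing opportunity: a G_GEP user of the memop's base
/// address whose offset is available before the memop and whose result is only
/// used after it. On success, the GEP's result, base and offset registers are
/// returned via \p Addr, \p Base and \p Offset.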
468 bool CombinerHelper::findPostIndexCandidate(MachineInstr &MI, Register &Addr,
469                                             Register &Base, Register &Offset) {
470   auto &MF = *MI.getParent()->getParent();
471   const auto &TLI = *MF.getSubtarget().getTargetLowering();
472 
473 #ifndef NDEBUG
474   unsigned Opcode = MI.getOpcode();
475   assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
476          Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
477 #endif
478 
479   Base = MI.getOperand(1).getReg();
480   MachineInstr *BaseDef = MRI.getUniqueVRegDef(Base);
481   if (BaseDef && BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
482     return false;
483 
484   LLVM_DEBUG(dbgs() << "Searching for post-indexing opportunity for: " << MI);
485 
486   for (auto &Use : MRI.use_instructions(Base)) {
487     if (Use.getOpcode() != TargetOpcode::G_GEP)
488       continue;
489 
490     Offset = Use.getOperand(2).getReg();
491     if (!ForceLegalIndexing &&
492         !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ false, MRI)) {
493       LLVM_DEBUG(dbgs() << "    Ignoring candidate with illegal addrmode: "
494                         << Use);
495       continue;
496     }
497 
498     // Make sure the offset calculation is before the potentially indexed op.
499     // FIXME: we really care about dependency here. The offset calculation might
500     // be movable.
501     MachineInstr *OffsetDef = MRI.getUniqueVRegDef(Offset);
502     if (!OffsetDef || !dominates(*OffsetDef, MI)) {
503       LLVM_DEBUG(dbgs() << "    Ignoring candidate with offset after mem-op: "
504                         << Use);
505       continue;
506     }
507 
508     // FIXME: check whether all uses of Base are load/store with foldable
509     // addressing modes. If so, using the normal addr-modes is better than
510     // forming an indexed one.
511 
512     bool MemOpDominatesAddrUses = true;
513     for (auto &GEPUse : MRI.use_instructions(Use.getOperand(0).getReg())) {
514       if (!dominates(MI, GEPUse)) {
515         MemOpDominatesAddrUses = false;
516         break;
517       }
518     }
519 
520     if (!MemOpDominatesAddrUses) {
521       LLVM_DEBUG(
522           dbgs() << "    Ignoring candidate as memop does not dominate uses: "
523                  << Use);
524       continue;
525     }
526 
527     LLVM_DEBUG(dbgs() << "    Found match: " << Use);
528     Addr = Use.getOperand(0).getReg();
529     return true;
530   }
531 
532   return false;
533 }
534 
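/// Look for a pre-indexing opportunity: the memop's address is itself a G_GEP
/// whose base and offset can be folded into an indexed memory operation,
/// provided the memop dominates the other users of that address.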
535 bool CombinerHelper::findPreIndexCandidate(MachineInstr &MI, Register &Addr,
536                                            Register &Base, Register &Offset) {
537   auto &MF = *MI.getParent()->getParent();
538   const auto &TLI = *MF.getSubtarget().getTargetLowering();
539 
540 #ifndef NDEBUG
541   unsigned Opcode = MI.getOpcode();
542   assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
543          Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
544 #endif
545 
546   Addr = MI.getOperand(1).getReg();
547   MachineInstr *AddrDef = getOpcodeDef(TargetOpcode::G_GEP, Addr, MRI);
548   if (!AddrDef || MRI.hasOneUse(Addr))
549     return false;
550 
551   Base = AddrDef->getOperand(1).getReg();
552   Offset = AddrDef->getOperand(2).getReg();
553 
554   LLVM_DEBUG(dbgs() << "Found potential pre-indexed load_store: " << MI);
555 
556   if (!ForceLegalIndexing &&
557       !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ true, MRI)) {
558     LLVM_DEBUG(dbgs() << "    Skipping, not legal for target");
559     return false;
560   }
561 
562   MachineInstr *BaseDef = getDefIgnoringCopies(Base, MRI);
563   if (BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
564     LLVM_DEBUG(dbgs() << "    Skipping, frame index would need copy anyway.");
565     return false;
566   }
567 
568   if (MI.getOpcode() == TargetOpcode::G_STORE) {
569     // Would require a copy.
570     if (Base == MI.getOperand(0).getReg()) {
571       LLVM_DEBUG(dbgs() << "    Skipping, storing base so need copy anyway.");
572       return false;
573     }
574 
575     // We're expecting one use of Addr in MI, but it could also be the
576     // value stored, which isn't actually dominated by the instruction.
577     if (MI.getOperand(0).getReg() == Addr) {
578       LLVM_DEBUG(dbgs() << "    Skipping, does not dominate all addr uses");
579       return false;
580     }
581   }
582 
583   // FIXME: check whether all uses of the base pointer are constant GEPs. That
584   // might allow us to end base's liveness here by adjusting the constant.
585 
586   for (auto &UseMI : MRI.use_instructions(Addr)) {
587     if (!dominates(MI, UseMI)) {
588       LLVM_DEBUG(dbgs() << "    Skipping, does not dominate all addr uses.");
589       return false;
590     }
591   }
592 
593   return true;
594 }
595 
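// Fold a load/store together with a G_GEP address computation into a single
// indexed memory operation. A rough sketch of the post-indexed case (assuming
// the target reports this addressing mode as legal):
//   %val:_(s32) = G_LOAD %base(p0)
//   %addr:_(p0) = G_GEP %base, %offset
// becomes:
//   %val:_(s32), %addr:_(p0) = G_INDEXED_LOAD %base(p0), %offset, 0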
596 bool CombinerHelper::tryCombineIndexedLoadStore(MachineInstr &MI) {
597   unsigned Opcode = MI.getOpcode();
598   if (Opcode != TargetOpcode::G_LOAD && Opcode != TargetOpcode::G_SEXTLOAD &&
599       Opcode != TargetOpcode::G_ZEXTLOAD && Opcode != TargetOpcode::G_STORE)
600     return false;
601 
602   bool IsStore = Opcode == TargetOpcode::G_STORE;
603   Register Addr, Base, Offset;
604   bool IsPre = findPreIndexCandidate(MI, Addr, Base, Offset);
605   if (!IsPre && !findPostIndexCandidate(MI, Addr, Base, Offset))
606     return false;
607 
608 
609   unsigned NewOpcode;
610   switch (Opcode) {
611   case TargetOpcode::G_LOAD:
612     NewOpcode = TargetOpcode::G_INDEXED_LOAD;
613     break;
614   case TargetOpcode::G_SEXTLOAD:
615     NewOpcode = TargetOpcode::G_INDEXED_SEXTLOAD;
616     break;
617   case TargetOpcode::G_ZEXTLOAD:
618     NewOpcode = TargetOpcode::G_INDEXED_ZEXTLOAD;
619     break;
620   case TargetOpcode::G_STORE:
621     NewOpcode = TargetOpcode::G_INDEXED_STORE;
622     break;
623   default:
624     llvm_unreachable("Unknown load/store opcode");
625   }
626 
627   MachineInstr &AddrDef = *MRI.getUniqueVRegDef(Addr);
628   MachineIRBuilder MIRBuilder(MI);
629   auto MIB = MIRBuilder.buildInstr(NewOpcode);
630   if (IsStore) {
631     MIB.addDef(Addr);
632     MIB.addUse(MI.getOperand(0).getReg());
633   } else {
634     MIB.addDef(MI.getOperand(0).getReg());
635     MIB.addDef(Addr);
636   }
637 
638   MIB.addUse(Base);
639   MIB.addUse(Offset);
640   MIB.addImm(IsPre);
641   MI.eraseFromParent();
642   AddrDef.eraseFromParent();
643 
644   LLVM_DEBUG(dbgs() << "    Combinined to indexed operation");
645   return true;
646 }
647 
648 bool CombinerHelper::matchElideBrByInvertingCond(MachineInstr &MI) {
649   if (MI.getOpcode() != TargetOpcode::G_BR)
650     return false;
651 
652   // Try to match the following:
653   // bb1:
654   //   %c(s32) = G_ICMP pred, %a, %b
655   //   %c1(s1) = G_TRUNC %c(s32)
656   //   G_BRCOND %c1, %bb2
657   //   G_BR %bb3
658   // bb2:
659   // ...
660   // bb3:
661 
  // The above pattern does not have a fall through to the successor bb2, always
  // resulting in a branch no matter which path is taken. Here we try to find
  // and replace that pattern with a conditional branch to bb3 and a
  // fallthrough to bb2.
666 
667   MachineBasicBlock *MBB = MI.getParent();
668   MachineBasicBlock::iterator BrIt(MI);
669   if (BrIt == MBB->begin())
670     return false;
671   assert(std::next(BrIt) == MBB->end() && "expected G_BR to be a terminator");
672 
673   MachineInstr *BrCond = &*std::prev(BrIt);
674   if (BrCond->getOpcode() != TargetOpcode::G_BRCOND)
675     return false;
676 
677   // Check that the next block is the conditional branch target.
678   if (!MBB->isLayoutSuccessor(BrCond->getOperand(1).getMBB()))
679     return false;
680 
681   MachineInstr *CmpMI = MRI.getVRegDef(BrCond->getOperand(0).getReg());
682   if (!CmpMI || CmpMI->getOpcode() != TargetOpcode::G_ICMP ||
683       !MRI.hasOneUse(CmpMI->getOperand(0).getReg()))
684     return false;
685   return true;
686 }
687 
688 bool CombinerHelper::tryElideBrByInvertingCond(MachineInstr &MI) {
689   if (!matchElideBrByInvertingCond(MI))
690     return false;
691   applyElideBrByInvertingCond(MI);
692   return true;
693 }
694 
695 void CombinerHelper::applyElideBrByInvertingCond(MachineInstr &MI) {
696   MachineBasicBlock *BrTarget = MI.getOperand(0).getMBB();
697   MachineBasicBlock::iterator BrIt(MI);
698   MachineInstr *BrCond = &*std::prev(BrIt);
699   MachineInstr *CmpMI = MRI.getVRegDef(BrCond->getOperand(0).getReg());
700 
701   CmpInst::Predicate InversePred = CmpInst::getInversePredicate(
702       (CmpInst::Predicate)CmpMI->getOperand(1).getPredicate());
703 
704   // Invert the G_ICMP condition.
705   Observer.changingInstr(*CmpMI);
706   CmpMI->getOperand(1).setPredicate(InversePred);
707   Observer.changedInstr(*CmpMI);
708 
709   // Change the conditional branch target.
710   Observer.changingInstr(*BrCond);
711   BrCond->getOperand(1).setMBB(BrTarget);
712   Observer.changedInstr(*BrCond);
713   MI.eraseFromParent();
714 }
715 
716 static bool shouldLowerMemFuncForSize(const MachineFunction &MF) {
717   // On Darwin, -Os means optimize for size without hurting performance, so
718   // only really optimize for size when -Oz (MinSize) is used.
719   if (MF.getTarget().getTargetTriple().isOSDarwin())
720     return MF.getFunction().hasMinSize();
721   return MF.getFunction().hasOptSize();
722 }
723 
724 // Returns a list of types to use for memory op lowering in MemOps. A partial
725 // port of findOptimalMemOpLowering in TargetLowering.
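// For example, a 13-byte operation for which s64 is chosen as the widest type
// and no overlapping accesses are used would typically produce
// MemOps = {s64, s32, s8}.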
726 static bool findGISelOptimalMemOpLowering(
727     std::vector<LLT> &MemOps, unsigned Limit, uint64_t Size, unsigned DstAlign,
728     unsigned SrcAlign, bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
729     bool AllowOverlap, unsigned DstAS, unsigned SrcAS,
730     const AttributeList &FuncAttributes, const TargetLowering &TLI) {
731   // If 'SrcAlign' is zero, that means the memory operation does not need to
732   // load the value, i.e. memset or memcpy from constant string. Otherwise,
733   // it's the inferred alignment of the source. 'DstAlign', on the other hand,
734   // is the specified alignment of the memory operation. If it is zero, that
735   // means it's possible to change the alignment of the destination.
736   // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
737   // not need to be loaded.
738   if (SrcAlign != 0 && SrcAlign < DstAlign)
739     return false;
740 
741   LLT Ty = TLI.getOptimalMemOpLLT(Size, DstAlign, SrcAlign, IsMemset,
742                                   ZeroMemset, MemcpyStrSrc, FuncAttributes);
743 
744   if (Ty == LLT()) {
745     // Use the largest scalar type whose alignment constraints are satisfied.
746     // We only need to check DstAlign here as SrcAlign is always greater or
747     // equal to DstAlign (or zero).
748     Ty = LLT::scalar(64);
749     while (DstAlign && DstAlign < Ty.getSizeInBytes() &&
750            !TLI.allowsMisalignedMemoryAccesses(Ty, DstAS, DstAlign))
751       Ty = LLT::scalar(Ty.getSizeInBytes());
752     assert(Ty.getSizeInBits() > 0 && "Could not find valid type");
753     // FIXME: check for the largest legal type we can load/store to.
754   }
755 
756   unsigned NumMemOps = 0;
757   while (Size != 0) {
758     unsigned TySize = Ty.getSizeInBytes();
759     while (TySize > Size) {
760       // For now, only use non-vector load / store's for the left-over pieces.
761       LLT NewTy = Ty;
762       // FIXME: check for mem op safety and legality of the types. Not all of
763       // SDAGisms map cleanly to GISel concepts.
764       if (NewTy.isVector())
765         NewTy = NewTy.getSizeInBits() > 64 ? LLT::scalar(64) : LLT::scalar(32);
766       NewTy = LLT::scalar(PowerOf2Floor(NewTy.getSizeInBits() - 1));
767       unsigned NewTySize = NewTy.getSizeInBytes();
768       assert(NewTySize > 0 && "Could not find appropriate type");
769 
770       // If the new LLT cannot cover all of the remaining bits, then consider
771       // issuing a (or a pair of) unaligned and overlapping load / store.
772       bool Fast;
773       // Need to get a VT equivalent for allowMisalignedMemoryAccesses().
774       MVT VT = getMVTForLLT(Ty);
775       if (NumMemOps && AllowOverlap && NewTySize < Size &&
776           TLI.allowsMisalignedMemoryAccesses(
777               VT, DstAS, DstAlign, MachineMemOperand::MONone, &Fast) &&
778           Fast)
779         TySize = Size;
780       else {
781         Ty = NewTy;
782         TySize = NewTySize;
783       }
784     }
785 
786     if (++NumMemOps > Limit)
787       return false;
788 
789     MemOps.push_back(Ty);
790     Size -= TySize;
791   }
792 
793   return true;
794 }
795 
796 static Type *getTypeForLLT(LLT Ty, LLVMContext &C) {
797   if (Ty.isVector())
798     return VectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
799                            Ty.getNumElements());
800   return IntegerType::get(C, Ty.getSizeInBits());
801 }
802 
803 // Get a vectorized representation of the memset value operand, GISel edition.
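// For example, a constant byte value of 0xAB widened to an s32 store type
// becomes the constant 0xABABABAB; non-constant values are zero-extended and
// multiplied by the 0x0101... magic value instead.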
804 static Register getMemsetValue(Register Val, LLT Ty, MachineIRBuilder &MIB) {
805   MachineRegisterInfo &MRI = *MIB.getMRI();
806   unsigned NumBits = Ty.getScalarSizeInBits();
807   auto ValVRegAndVal = getConstantVRegValWithLookThrough(Val, MRI);
808   if (!Ty.isVector() && ValVRegAndVal) {
809     unsigned KnownVal = ValVRegAndVal->Value;
810     APInt Scalar = APInt(8, KnownVal);
811     APInt SplatVal = APInt::getSplat(NumBits, Scalar);
812     return MIB.buildConstant(Ty, SplatVal).getReg(0);
813   }
814   // FIXME: for vector types create a G_BUILD_VECTOR.
815   if (Ty.isVector())
816     return Register();
817 
818   // Extend the byte value to the larger type, and then multiply by a magic
819   // value 0x010101... in order to replicate it across every byte.
820   LLT ExtType = Ty.getScalarType();
821   auto ZExt = MIB.buildZExtOrTrunc(ExtType, Val);
822   if (NumBits > 8) {
823     APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
824     auto MagicMI = MIB.buildConstant(ExtType, Magic);
825     Val = MIB.buildMul(ExtType, ZExt, MagicMI).getReg(0);
826   }
827 
828   assert(ExtType == Ty && "Vector memset value type not supported yet");
829   return Val;
830 }
831 
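// Lower a memset with a known constant length into an inline sequence of
// stores of a splatted value, subject to the target's max-stores-per-memset
// limit.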
832 bool CombinerHelper::optimizeMemset(MachineInstr &MI, Register Dst, Register Val,
833                                     unsigned KnownLen, unsigned Align,
834                                     bool IsVolatile) {
835   auto &MF = *MI.getParent()->getParent();
836   const auto &TLI = *MF.getSubtarget().getTargetLowering();
837   auto &DL = MF.getDataLayout();
838   LLVMContext &C = MF.getFunction().getContext();
839 
840   assert(KnownLen != 0 && "Have a zero length memset length!");
841 
842   bool DstAlignCanChange = false;
843   MachineFrameInfo &MFI = MF.getFrameInfo();
844   bool OptSize = shouldLowerMemFuncForSize(MF);
845 
846   MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
847   if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
848     DstAlignCanChange = true;
849 
850   unsigned Limit = TLI.getMaxStoresPerMemset(OptSize);
851   std::vector<LLT> MemOps;
852 
853   const auto &DstMMO = **MI.memoperands_begin();
854   MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();
855 
856   auto ValVRegAndVal = getConstantVRegValWithLookThrough(Val, MRI);
857   bool IsZeroVal = ValVRegAndVal && ValVRegAndVal->Value == 0;
858 
859   if (!findGISelOptimalMemOpLowering(
860           MemOps, Limit, KnownLen, (DstAlignCanChange ? 0 : Align), 0,
861           /*IsMemset=*/true,
862           /*ZeroMemset=*/IsZeroVal, /*MemcpyStrSrc=*/false,
863           /*AllowOverlap=*/!IsVolatile, DstPtrInfo.getAddrSpace(), ~0u,
864           MF.getFunction().getAttributes(), TLI))
865     return false;
866 
867   if (DstAlignCanChange) {
868     // Get an estimate of the type from the LLT.
869     Type *IRTy = getTypeForLLT(MemOps[0], C);
870     unsigned NewAlign = (unsigned)DL.getABITypeAlignment(IRTy);
871     if (NewAlign > Align) {
872       Align = NewAlign;
873       unsigned FI = FIDef->getOperand(1).getIndex();
874       // Give the stack frame object a larger alignment if needed.
875       if (MFI.getObjectAlignment(FI) < Align)
876         MFI.setObjectAlignment(FI, Align);
877     }
878   }
879 
880   MachineIRBuilder MIB(MI);
881   // Find the largest store and generate the bit pattern for it.
882   LLT LargestTy = MemOps[0];
883   for (unsigned i = 1; i < MemOps.size(); i++)
884     if (MemOps[i].getSizeInBits() > LargestTy.getSizeInBits())
885       LargestTy = MemOps[i];
886 
887   // The memset stored value is always defined as an s8, so in order to make it
888   // work with larger store types we need to repeat the bit pattern across the
889   // wider type.
890   Register MemSetValue = getMemsetValue(Val, LargestTy, MIB);
891 
892   if (!MemSetValue)
893     return false;
894 
895   // Generate the stores. For each store type in the list, we generate the
896   // matching store of that type to the destination address.
897   LLT PtrTy = MRI.getType(Dst);
898   unsigned DstOff = 0;
899   unsigned Size = KnownLen;
900   for (unsigned I = 0; I < MemOps.size(); I++) {
901     LLT Ty = MemOps[I];
902     unsigned TySize = Ty.getSizeInBytes();
903     if (TySize > Size) {
904       // Issuing an unaligned load / store pair that overlaps with the previous
905       // pair. Adjust the offset accordingly.
906       assert(I == MemOps.size() - 1 && I != 0);
907       DstOff -= TySize - Size;
908     }
909 
910     // If this store is smaller than the largest store see whether we can get
911     // the smaller value for free with a truncate.
912     Register Value = MemSetValue;
913     if (Ty.getSizeInBits() < LargestTy.getSizeInBits()) {
914       MVT VT = getMVTForLLT(Ty);
915       MVT LargestVT = getMVTForLLT(LargestTy);
916       if (!LargestTy.isVector() && !Ty.isVector() &&
917           TLI.isTruncateFree(LargestVT, VT))
918         Value = MIB.buildTrunc(Ty, MemSetValue).getReg(0);
919       else
920         Value = getMemsetValue(Val, Ty, MIB);
921       if (!Value)
922         return false;
923     }
924 
925     auto *StoreMMO =
926         MF.getMachineMemOperand(&DstMMO, DstOff, Ty.getSizeInBytes());
927 
928     Register Ptr = Dst;
929     if (DstOff != 0) {
930       auto Offset =
931           MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), DstOff);
932       Ptr = MIB.buildGEP(PtrTy, Dst, Offset).getReg(0);
933     }
934 
935     MIB.buildStore(Value, Ptr, *StoreMMO);
936     DstOff += Ty.getSizeInBytes();
937     Size -= TySize;
938   }
939 
940   MI.eraseFromParent();
941   return true;
942 }
943 
944 
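// Lower a memcpy with a known constant length into an inline sequence of
// load/store pairs, one per type chosen by findGISelOptimalMemOpLowering.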
945 bool CombinerHelper::optimizeMemcpy(MachineInstr &MI, Register Dst,
946                                     Register Src, unsigned KnownLen,
947                                     unsigned DstAlign, unsigned SrcAlign,
948                                     bool IsVolatile) {
949   auto &MF = *MI.getParent()->getParent();
950   const auto &TLI = *MF.getSubtarget().getTargetLowering();
951   auto &DL = MF.getDataLayout();
952   LLVMContext &C = MF.getFunction().getContext();
953 
954   assert(KnownLen != 0 && "Have a zero length memcpy length!");
955 
956   bool DstAlignCanChange = false;
957   MachineFrameInfo &MFI = MF.getFrameInfo();
958   bool OptSize = shouldLowerMemFuncForSize(MF);
959   unsigned Alignment = MinAlign(DstAlign, SrcAlign);
960 
961   MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
962   if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
963     DstAlignCanChange = true;
964 
965   // FIXME: infer better src pointer alignment like SelectionDAG does here.
966   // FIXME: also use the equivalent of isMemSrcFromConstant and alwaysinlining
967   // if the memcpy is in a tail call position.
968 
969   unsigned Limit = TLI.getMaxStoresPerMemcpy(OptSize);
970   std::vector<LLT> MemOps;
971 
972   const auto &DstMMO = **MI.memoperands_begin();
973   const auto &SrcMMO = **std::next(MI.memoperands_begin());
974   MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();
975   MachinePointerInfo SrcPtrInfo = SrcMMO.getPointerInfo();
976 
977   if (!findGISelOptimalMemOpLowering(
978           MemOps, Limit, KnownLen, (DstAlignCanChange ? 0 : Alignment),
979           SrcAlign,
980           /*IsMemset=*/false,
981           /*ZeroMemset=*/false, /*MemcpyStrSrc=*/false,
982           /*AllowOverlap=*/!IsVolatile, DstPtrInfo.getAddrSpace(),
983           SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes(), TLI))
984     return false;
985 
986   if (DstAlignCanChange) {
987     // Get an estimate of the type from the LLT.
988     Type *IRTy = getTypeForLLT(MemOps[0], C);
989     unsigned NewAlign = (unsigned)DL.getABITypeAlignment(IRTy);
990 
991     // Don't promote to an alignment that would require dynamic stack
992     // realignment.
993     const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
994     if (!TRI->needsStackRealignment(MF))
995       while (NewAlign > Alignment &&
996              DL.exceedsNaturalStackAlignment(Align(NewAlign)))
997         NewAlign /= 2;
998 
999     if (NewAlign > Alignment) {
1000       Alignment = NewAlign;
1001       unsigned FI = FIDef->getOperand(1).getIndex();
1002       // Give the stack frame object a larger alignment if needed.
1003       if (MFI.getObjectAlignment(FI) < Alignment)
1004         MFI.setObjectAlignment(FI, Alignment);
1005     }
1006   }
1007 
1008   LLVM_DEBUG(dbgs() << "Inlining memcpy: " << MI << " into loads & stores\n");
1009 
1010   MachineIRBuilder MIB(MI);
  // Now we need to emit a load/store pair for each of the types we've
  // collected. I.e. for each type, generate a load from the source pointer of
  // that type width, and then generate a corresponding store to the dest buffer
  // of that value loaded. This can result in a sequence of loads and stores of
  // mixed types, depending on what the target specifies as good types to use.
1016   unsigned CurrOffset = 0;
1017   LLT PtrTy = MRI.getType(Src);
1018   unsigned Size = KnownLen;
1019   for (auto CopyTy : MemOps) {
    // Issuing an unaligned load / store pair that overlaps with the previous
    // pair. Adjust the offset accordingly.
1022     if (CopyTy.getSizeInBytes() > Size)
1023       CurrOffset -= CopyTy.getSizeInBytes() - Size;
1024 
1025     // Construct MMOs for the accesses.
1026     auto *LoadMMO =
1027         MF.getMachineMemOperand(&SrcMMO, CurrOffset, CopyTy.getSizeInBytes());
1028     auto *StoreMMO =
1029         MF.getMachineMemOperand(&DstMMO, CurrOffset, CopyTy.getSizeInBytes());
1030 
1031     // Create the load.
1032     Register LoadPtr = Src;
1033     Register Offset;
1034     if (CurrOffset != 0) {
1035       Offset = MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset)
1036                    .getReg(0);
1037       LoadPtr = MIB.buildGEP(PtrTy, Src, Offset).getReg(0);
1038     }
1039     auto LdVal = MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO);
1040 
1041     // Create the store.
1042     Register StorePtr =
1043         CurrOffset == 0 ? Dst : MIB.buildGEP(PtrTy, Dst, Offset).getReg(0);
1044     MIB.buildStore(LdVal, StorePtr, *StoreMMO);
1045     CurrOffset += CopyTy.getSizeInBytes();
1046     Size -= CopyTy.getSizeInBytes();
1047   }
1048 
1049   MI.eraseFromParent();
1050   return true;
1051 }
1052 
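// Lower a memmove with a known constant length by emitting all of the loads
// before any of the stores, so that overlapping source and destination ranges
// are handled correctly.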
1053 bool CombinerHelper::optimizeMemmove(MachineInstr &MI, Register Dst,
1054                                     Register Src, unsigned KnownLen,
1055                                     unsigned DstAlign, unsigned SrcAlign,
1056                                     bool IsVolatile) {
1057   auto &MF = *MI.getParent()->getParent();
1058   const auto &TLI = *MF.getSubtarget().getTargetLowering();
1059   auto &DL = MF.getDataLayout();
1060   LLVMContext &C = MF.getFunction().getContext();
1061 
1062   assert(KnownLen != 0 && "Have a zero length memmove length!");
1063 
1064   bool DstAlignCanChange = false;
1065   MachineFrameInfo &MFI = MF.getFrameInfo();
1066   bool OptSize = shouldLowerMemFuncForSize(MF);
1067   unsigned Alignment = MinAlign(DstAlign, SrcAlign);
1068 
1069   MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
1070   if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
1071     DstAlignCanChange = true;
1072 
1073   unsigned Limit = TLI.getMaxStoresPerMemmove(OptSize);
1074   std::vector<LLT> MemOps;
1075 
1076   const auto &DstMMO = **MI.memoperands_begin();
1077   const auto &SrcMMO = **std::next(MI.memoperands_begin());
1078   MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();
1079   MachinePointerInfo SrcPtrInfo = SrcMMO.getPointerInfo();
1080 
1081   // FIXME: SelectionDAG always passes false for 'AllowOverlap', apparently due
  // to a bug in its findOptimalMemOpLowering implementation. For now do the
1083   // same thing here.
1084   if (!findGISelOptimalMemOpLowering(
1085           MemOps, Limit, KnownLen, (DstAlignCanChange ? 0 : Alignment),
1086           SrcAlign,
1087           /*IsMemset=*/false,
1088           /*ZeroMemset=*/false, /*MemcpyStrSrc=*/false,
1089           /*AllowOverlap=*/false, DstPtrInfo.getAddrSpace(),
1090           SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes(), TLI))
1091     return false;
1092 
1093   if (DstAlignCanChange) {
1094     // Get an estimate of the type from the LLT.
1095     Type *IRTy = getTypeForLLT(MemOps[0], C);
1096     unsigned NewAlign = (unsigned)DL.getABITypeAlignment(IRTy);
1097 
1098     // Don't promote to an alignment that would require dynamic stack
1099     // realignment.
1100     const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
1101     if (!TRI->needsStackRealignment(MF))
1102       while (NewAlign > Alignment &&
1103              DL.exceedsNaturalStackAlignment(Align(NewAlign)))
1104         NewAlign /= 2;
1105 
1106     if (NewAlign > Alignment) {
1107       Alignment = NewAlign;
1108       unsigned FI = FIDef->getOperand(1).getIndex();
1109       // Give the stack frame object a larger alignment if needed.
1110       if (MFI.getObjectAlignment(FI) < Alignment)
1111         MFI.setObjectAlignment(FI, Alignment);
1112     }
1113   }
1114 
1115   LLVM_DEBUG(dbgs() << "Inlining memmove: " << MI << " into loads & stores\n");
1116 
1117   MachineIRBuilder MIB(MI);
1118   // Memmove requires that we perform the loads first before issuing the stores.
1119   // Apart from that, this loop is pretty much doing the same thing as the
1120   // memcpy codegen function.
1121   unsigned CurrOffset = 0;
1122   LLT PtrTy = MRI.getType(Src);
1123   SmallVector<Register, 16> LoadVals;
1124   for (auto CopyTy : MemOps) {
1125     // Construct MMO for the load.
1126     auto *LoadMMO =
1127         MF.getMachineMemOperand(&SrcMMO, CurrOffset, CopyTy.getSizeInBytes());
1128 
1129     // Create the load.
1130     Register LoadPtr = Src;
1131     if (CurrOffset != 0) {
1132       auto Offset =
1133           MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset);
1134       LoadPtr = MIB.buildGEP(PtrTy, Src, Offset).getReg(0);
1135     }
1136     LoadVals.push_back(MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO).getReg(0));
1137     CurrOffset += CopyTy.getSizeInBytes();
1138   }
1139 
1140   CurrOffset = 0;
1141   for (unsigned I = 0; I < MemOps.size(); ++I) {
1142     LLT CopyTy = MemOps[I];
1143     // Now store the values loaded.
1144     auto *StoreMMO =
1145         MF.getMachineMemOperand(&DstMMO, CurrOffset, CopyTy.getSizeInBytes());
1146 
1147     Register StorePtr = Dst;
1148     if (CurrOffset != 0) {
1149       auto Offset =
1150           MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset);
1151       StorePtr = MIB.buildGEP(PtrTy, Dst, Offset).getReg(0);
1152     }
1153     MIB.buildStore(LoadVals[I], StorePtr, *StoreMMO);
1154     CurrOffset += CopyTy.getSizeInBytes();
1155   }
1156   MI.eraseFromParent();
1157   return true;
1158 }
1159 
1160 bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) {
1161   // This combine is fairly complex so it's not written with a separate
1162   // matcher function.
1163   assert(MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
1164   Intrinsic::ID ID = (Intrinsic::ID)MI.getIntrinsicID();
1165   assert((ID == Intrinsic::memcpy || ID == Intrinsic::memmove ||
1166           ID == Intrinsic::memset) &&
1167          "Expected a memcpy like intrinsic");
1168 
1169   auto MMOIt = MI.memoperands_begin();
1170   const MachineMemOperand *MemOp = *MMOIt;
1171   bool IsVolatile = MemOp->isVolatile();
1172   // Don't try to optimize volatile.
1173   if (IsVolatile)
1174     return false;
1175 
1176   unsigned DstAlign = MemOp->getBaseAlignment();
1177   unsigned SrcAlign = 0;
1178   Register Dst = MI.getOperand(1).getReg();
1179   Register Src = MI.getOperand(2).getReg();
1180   Register Len = MI.getOperand(3).getReg();
1181 
1182   if (ID != Intrinsic::memset) {
1183     assert(MMOIt != MI.memoperands_end() && "Expected a second MMO on MI");
1184     MemOp = *(++MMOIt);
1185     SrcAlign = MemOp->getBaseAlignment();
1186   }
1187 
1188   // See if this is a constant length copy
1189   auto LenVRegAndVal = getConstantVRegValWithLookThrough(Len, MRI);
1190   if (!LenVRegAndVal)
1191     return false; // Leave it to the legalizer to lower it to a libcall.
1192   unsigned KnownLen = LenVRegAndVal->Value;
1193 
1194   if (KnownLen == 0) {
1195     MI.eraseFromParent();
1196     return true;
1197   }
1198 
1199   if (MaxLen && KnownLen > MaxLen)
1200     return false;
1201 
1202   if (ID == Intrinsic::memcpy)
1203     return optimizeMemcpy(MI, Dst, Src, KnownLen, DstAlign, SrcAlign, IsVolatile);
1204   if (ID == Intrinsic::memmove)
1205     return optimizeMemmove(MI, Dst, Src, KnownLen, DstAlign, SrcAlign, IsVolatile);
1206   if (ID == Intrinsic::memset)
1207     return optimizeMemset(MI, Dst, Src, KnownLen, DstAlign, IsVolatile);
1208   return false;
1209 }
1210 
1211 bool CombinerHelper::tryCombine(MachineInstr &MI) {
1212   if (tryCombineCopy(MI))
1213     return true;
1214   if (tryCombineExtendingLoads(MI))
1215     return true;
1216   if (tryCombineIndexedLoadStore(MI))
1217     return true;
1218   return false;
1219 }
1220