1 //===-- llvm/CodeGen/GlobalISel/LegalizerHelper.cpp -----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file This file implements the LegalizerHelper class to legalize
10 /// individual instructions and the LegalizeMachineIR wrapper pass for the
11 /// primary legalization.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
16 #include "llvm/CodeGen/GlobalISel/CallLowering.h"
17 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
18 #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
19 #include "llvm/CodeGen/MachineRegisterInfo.h"
20 #include "llvm/CodeGen/TargetFrameLowering.h"
21 #include "llvm/CodeGen/TargetInstrInfo.h"
22 #include "llvm/CodeGen/TargetLowering.h"
23 #include "llvm/CodeGen/TargetSubtargetInfo.h"
24 #include "llvm/Support/Debug.h"
25 #include "llvm/Support/MathExtras.h"
26 #include "llvm/Support/raw_ostream.h"
27 
28 #define DEBUG_TYPE "legalizer"
29 
30 using namespace llvm;
31 using namespace LegalizeActions;
32 
33 /// Try to break down \p OrigTy into \p NarrowTy sized pieces.
34 ///
/// Returns the number of \p NarrowTy pieces needed to reconstruct \p OrigTy,
/// with any leftover piece as type \p LeftoverTy.
37 ///
38 /// Returns -1 in the first element of the pair if the breakdown is not
39 /// satisfiable.
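///
/// For example, breaking an s59 scalar into s32 pieces gives one s32 part and
/// an s27 leftover, so the result would be {1, 1} with \p LeftoverTy == s27.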
40 static std::pair<int, int>
41 getNarrowTypeBreakDown(LLT OrigTy, LLT NarrowTy, LLT &LeftoverTy) {
42   assert(!LeftoverTy.isValid() && "this is an out argument");
43 
44   unsigned Size = OrigTy.getSizeInBits();
45   unsigned NarrowSize = NarrowTy.getSizeInBits();
46   unsigned NumParts = Size / NarrowSize;
47   unsigned LeftoverSize = Size - NumParts * NarrowSize;
48   assert(Size > NarrowSize);
49 
50   if (LeftoverSize == 0)
51     return {NumParts, 0};
52 
53   if (NarrowTy.isVector()) {
54     unsigned EltSize = OrigTy.getScalarSizeInBits();
55     if (LeftoverSize % EltSize != 0)
56       return {-1, -1};
57     LeftoverTy = LLT::scalarOrVector(LeftoverSize / EltSize, EltSize);
58   } else {
59     LeftoverTy = LLT::scalar(LeftoverSize);
60   }
61 
62   int NumLeftover = LeftoverSize / LeftoverTy.getSizeInBits();
63   return std::make_pair(NumParts, NumLeftover);
64 }
65 
66 LegalizerHelper::LegalizerHelper(MachineFunction &MF,
67                                  GISelChangeObserver &Observer,
68                                  MachineIRBuilder &Builder)
69     : MIRBuilder(Builder), MRI(MF.getRegInfo()),
70       LI(*MF.getSubtarget().getLegalizerInfo()), Observer(Observer) {
71   MIRBuilder.setMF(MF);
72   MIRBuilder.setChangeObserver(Observer);
73 }
74 
75 LegalizerHelper::LegalizerHelper(MachineFunction &MF, const LegalizerInfo &LI,
76                                  GISelChangeObserver &Observer,
77                                  MachineIRBuilder &B)
78     : MIRBuilder(B), MRI(MF.getRegInfo()), LI(LI), Observer(Observer) {
79   MIRBuilder.setMF(MF);
80   MIRBuilder.setChangeObserver(Observer);
81 }

LegalizerHelper::LegalizeResult
83 LegalizerHelper::legalizeInstrStep(MachineInstr &MI) {
84   LLVM_DEBUG(dbgs() << "Legalizing: "; MI.print(dbgs()));
85 
86   if (MI.getOpcode() == TargetOpcode::G_INTRINSIC ||
87       MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS)
88     return LI.legalizeIntrinsic(MI, MRI, MIRBuilder) ? Legalized
89                                                      : UnableToLegalize;
90   auto Step = LI.getAction(MI, MRI);
91   switch (Step.Action) {
92   case Legal:
93     LLVM_DEBUG(dbgs() << ".. Already legal\n");
94     return AlreadyLegal;
95   case Libcall:
96     LLVM_DEBUG(dbgs() << ".. Convert to libcall\n");
97     return libcall(MI);
98   case NarrowScalar:
99     LLVM_DEBUG(dbgs() << ".. Narrow scalar\n");
100     return narrowScalar(MI, Step.TypeIdx, Step.NewType);
101   case WidenScalar:
102     LLVM_DEBUG(dbgs() << ".. Widen scalar\n");
103     return widenScalar(MI, Step.TypeIdx, Step.NewType);
104   case Lower:
105     LLVM_DEBUG(dbgs() << ".. Lower\n");
106     return lower(MI, Step.TypeIdx, Step.NewType);
107   case FewerElements:
108     LLVM_DEBUG(dbgs() << ".. Reduce number of elements\n");
109     return fewerElementsVector(MI, Step.TypeIdx, Step.NewType);
110   case MoreElements:
111     LLVM_DEBUG(dbgs() << ".. Increase number of elements\n");
112     return moreElementsVector(MI, Step.TypeIdx, Step.NewType);
113   case Custom:
114     LLVM_DEBUG(dbgs() << ".. Custom legalization\n");
115     return LI.legalizeCustom(MI, MRI, MIRBuilder, Observer) ? Legalized
116                                                             : UnableToLegalize;
117   default:
118     LLVM_DEBUG(dbgs() << ".. Unable to legalize\n");
119     return UnableToLegalize;
120   }
121 }
122 
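// Split Reg into NumParts newly created registers of type Ty and append them
// to VRegs with a single G_UNMERGE_VALUES.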
123 void LegalizerHelper::extractParts(Register Reg, LLT Ty, int NumParts,
124                                    SmallVectorImpl<Register> &VRegs) {
125   for (int i = 0; i < NumParts; ++i)
126     VRegs.push_back(MRI.createGenericVirtualRegister(Ty));
127   MIRBuilder.buildUnmerge(VRegs, Reg);
128 }
129 
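// Variant of extractParts that also handles a leftover piece: split Reg into
// as many MainTy pieces as fit, returning any remainder via LeftoverTy and
// LeftoverRegs. Uses a plain G_UNMERGE_VALUES when MainTy evenly divides
// RegTy, individual G_EXTRACTs otherwise, and returns false if no suitable
// leftover type exists.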
130 bool LegalizerHelper::extractParts(Register Reg, LLT RegTy,
131                                    LLT MainTy, LLT &LeftoverTy,
132                                    SmallVectorImpl<Register> &VRegs,
133                                    SmallVectorImpl<Register> &LeftoverRegs) {
134   assert(!LeftoverTy.isValid() && "this is an out argument");
135 
136   unsigned RegSize = RegTy.getSizeInBits();
137   unsigned MainSize = MainTy.getSizeInBits();
138   unsigned NumParts = RegSize / MainSize;
139   unsigned LeftoverSize = RegSize - NumParts * MainSize;
140 
141   // Use an unmerge when possible.
142   if (LeftoverSize == 0) {
143     for (unsigned I = 0; I < NumParts; ++I)
144       VRegs.push_back(MRI.createGenericVirtualRegister(MainTy));
145     MIRBuilder.buildUnmerge(VRegs, Reg);
146     return true;
147   }
148 
149   if (MainTy.isVector()) {
150     unsigned EltSize = MainTy.getScalarSizeInBits();
151     if (LeftoverSize % EltSize != 0)
152       return false;
153     LeftoverTy = LLT::scalarOrVector(LeftoverSize / EltSize, EltSize);
154   } else {
155     LeftoverTy = LLT::scalar(LeftoverSize);
156   }
157 
158   // For irregular sizes, extract the individual parts.
159   for (unsigned I = 0; I != NumParts; ++I) {
160     Register NewReg = MRI.createGenericVirtualRegister(MainTy);
161     VRegs.push_back(NewReg);
162     MIRBuilder.buildExtract(NewReg, Reg, MainSize * I);
163   }
164 
165   for (unsigned Offset = MainSize * NumParts; Offset < RegSize;
166        Offset += LeftoverSize) {
167     Register NewReg = MRI.createGenericVirtualRegister(LeftoverTy);
168     LeftoverRegs.push_back(NewReg);
169     MIRBuilder.buildExtract(NewReg, Reg, Offset);
170   }
171 
172   return true;
173 }
174 
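/// Return a type that both \p OrigTy and \p TargetTy can be decomposed into,
/// based on the GCD of their element counts (for vectors with a common
/// element type) or of their sizes in bits (for scalars).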
175 static LLT getGCDType(LLT OrigTy, LLT TargetTy) {
176   if (OrigTy.isVector() && TargetTy.isVector()) {
177     assert(OrigTy.getElementType() == TargetTy.getElementType());
178     int GCD = greatestCommonDivisor(OrigTy.getNumElements(),
179                                     TargetTy.getNumElements());
180     return LLT::scalarOrVector(GCD, OrigTy.getElementType());
181   }
182 
183   if (OrigTy.isVector() && !TargetTy.isVector()) {
184     assert(OrigTy.getElementType() == TargetTy);
185     return TargetTy;
186   }
187 
188   assert(!OrigTy.isVector() && !TargetTy.isVector());
189 
190   int GCD = greatestCommonDivisor(OrigTy.getSizeInBits(),
191                                   TargetTy.getSizeInBits());
192   return LLT::scalar(GCD);
193 }
194 
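// Reassemble DstReg of type ResultTy from the PartTy-sized PartRegs plus any
// LeftoverTy-sized LeftoverRegs: a plain merge/concat/build_vector when there
// is no leftover piece, otherwise a chain of G_INSERTs into an undef value.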
195 void LegalizerHelper::insertParts(Register DstReg,
196                                   LLT ResultTy, LLT PartTy,
197                                   ArrayRef<Register> PartRegs,
198                                   LLT LeftoverTy,
199                                   ArrayRef<Register> LeftoverRegs) {
200   if (!LeftoverTy.isValid()) {
201     assert(LeftoverRegs.empty());
202 
203     if (!ResultTy.isVector()) {
204       MIRBuilder.buildMerge(DstReg, PartRegs);
205       return;
206     }
207 
208     if (PartTy.isVector())
209       MIRBuilder.buildConcatVectors(DstReg, PartRegs);
210     else
211       MIRBuilder.buildBuildVector(DstReg, PartRegs);
212     return;
213   }
214 
215   unsigned PartSize = PartTy.getSizeInBits();
216   unsigned LeftoverPartSize = LeftoverTy.getSizeInBits();
217 
218   Register CurResultReg = MRI.createGenericVirtualRegister(ResultTy);
219   MIRBuilder.buildUndef(CurResultReg);
220 
221   unsigned Offset = 0;
222   for (Register PartReg : PartRegs) {
223     Register NewResultReg = MRI.createGenericVirtualRegister(ResultTy);
224     MIRBuilder.buildInsert(NewResultReg, CurResultReg, PartReg, Offset);
225     CurResultReg = NewResultReg;
226     Offset += PartSize;
227   }
228 
229   for (unsigned I = 0, E = LeftoverRegs.size(); I != E; ++I) {
230     // Use the original output register for the final insert to avoid a copy.
231     Register NewResultReg = (I + 1 == E) ?
232       DstReg : MRI.createGenericVirtualRegister(ResultTy);
233 
234     MIRBuilder.buildInsert(NewResultReg, CurResultReg, LeftoverRegs[I], Offset);
235     CurResultReg = NewResultReg;
236     Offset += LeftoverPartSize;
237   }
238 }
239 
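/// Map a generic opcode and scalar size in bits to the corresponding runtime
/// library call, e.g. a 64-bit G_SDIV maps to RTLIB::SDIV_I64.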
240 static RTLIB::Libcall getRTLibDesc(unsigned Opcode, unsigned Size) {
241   switch (Opcode) {
242   case TargetOpcode::G_SDIV:
243     assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
244     switch (Size) {
245     case 32:
246       return RTLIB::SDIV_I32;
247     case 64:
248       return RTLIB::SDIV_I64;
249     case 128:
250       return RTLIB::SDIV_I128;
251     default:
252       llvm_unreachable("unexpected size");
253     }
254   case TargetOpcode::G_UDIV:
255     assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
256     switch (Size) {
257     case 32:
258       return RTLIB::UDIV_I32;
259     case 64:
260       return RTLIB::UDIV_I64;
261     case 128:
262       return RTLIB::UDIV_I128;
263     default:
264       llvm_unreachable("unexpected size");
265     }
266   case TargetOpcode::G_SREM:
267     assert((Size == 32 || Size == 64) && "Unsupported size");
268     return Size == 64 ? RTLIB::SREM_I64 : RTLIB::SREM_I32;
269   case TargetOpcode::G_UREM:
270     assert((Size == 32 || Size == 64) && "Unsupported size");
271     return Size == 64 ? RTLIB::UREM_I64 : RTLIB::UREM_I32;
272   case TargetOpcode::G_CTLZ_ZERO_UNDEF:
273     assert(Size == 32 && "Unsupported size");
274     return RTLIB::CTLZ_I32;
275   case TargetOpcode::G_FADD:
276     assert((Size == 32 || Size == 64) && "Unsupported size");
277     return Size == 64 ? RTLIB::ADD_F64 : RTLIB::ADD_F32;
278   case TargetOpcode::G_FSUB:
279     assert((Size == 32 || Size == 64) && "Unsupported size");
280     return Size == 64 ? RTLIB::SUB_F64 : RTLIB::SUB_F32;
281   case TargetOpcode::G_FMUL:
282     assert((Size == 32 || Size == 64) && "Unsupported size");
283     return Size == 64 ? RTLIB::MUL_F64 : RTLIB::MUL_F32;
284   case TargetOpcode::G_FDIV:
285     assert((Size == 32 || Size == 64) && "Unsupported size");
286     return Size == 64 ? RTLIB::DIV_F64 : RTLIB::DIV_F32;
287   case TargetOpcode::G_FEXP:
288     assert((Size == 32 || Size == 64) && "Unsupported size");
289     return Size == 64 ? RTLIB::EXP_F64 : RTLIB::EXP_F32;
290   case TargetOpcode::G_FEXP2:
291     assert((Size == 32 || Size == 64) && "Unsupported size");
292     return Size == 64 ? RTLIB::EXP2_F64 : RTLIB::EXP2_F32;
293   case TargetOpcode::G_FREM:
294     return Size == 64 ? RTLIB::REM_F64 : RTLIB::REM_F32;
295   case TargetOpcode::G_FPOW:
296     return Size == 64 ? RTLIB::POW_F64 : RTLIB::POW_F32;
297   case TargetOpcode::G_FMA:
298     assert((Size == 32 || Size == 64) && "Unsupported size");
299     return Size == 64 ? RTLIB::FMA_F64 : RTLIB::FMA_F32;
300   case TargetOpcode::G_FSIN:
301     assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
302     return Size == 128 ? RTLIB::SIN_F128
303                        : Size == 64 ? RTLIB::SIN_F64 : RTLIB::SIN_F32;
304   case TargetOpcode::G_FCOS:
305     assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
306     return Size == 128 ? RTLIB::COS_F128
307                        : Size == 64 ? RTLIB::COS_F64 : RTLIB::COS_F32;
308   case TargetOpcode::G_FLOG10:
309     assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
310     return Size == 128 ? RTLIB::LOG10_F128
311                        : Size == 64 ? RTLIB::LOG10_F64 : RTLIB::LOG10_F32;
312   case TargetOpcode::G_FLOG:
313     assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
314     return Size == 128 ? RTLIB::LOG_F128
315                        : Size == 64 ? RTLIB::LOG_F64 : RTLIB::LOG_F32;
316   case TargetOpcode::G_FLOG2:
317     assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
318     return Size == 128 ? RTLIB::LOG2_F128
319                        : Size == 64 ? RTLIB::LOG2_F64 : RTLIB::LOG2_F32;
320   case TargetOpcode::G_FCEIL:
321     assert((Size == 32 || Size == 64) && "Unsupported size");
322     return Size == 64 ? RTLIB::CEIL_F64 : RTLIB::CEIL_F32;
323   case TargetOpcode::G_FFLOOR:
324     assert((Size == 32 || Size == 64) && "Unsupported size");
325     return Size == 64 ? RTLIB::FLOOR_F64 : RTLIB::FLOOR_F32;
326   }
327   llvm_unreachable("Unknown libcall function");
328 }
329 
330 /// True if an instruction is in tail position in its caller. Intended for
331 /// legalizing libcalls as tail calls when possible.
332 static bool isLibCallInTailPosition(MachineInstr &MI) {
333   const Function &F = MI.getParent()->getParent()->getFunction();
334 
335   // Conservatively require the attributes of the call to match those of
336   // the return. Ignore NoAlias and NonNull because they don't affect the
337   // call sequence.
338   AttributeList CallerAttrs = F.getAttributes();
339   if (AttrBuilder(CallerAttrs, AttributeList::ReturnIndex)
340           .removeAttribute(Attribute::NoAlias)
341           .removeAttribute(Attribute::NonNull)
342           .hasAttributes())
343     return false;
344 
345   // It's not safe to eliminate the sign / zero extension of the return value.
346   if (CallerAttrs.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt) ||
347       CallerAttrs.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
348     return false;
349 
350   // Only tail call if the following instruction is a standard return.
351   auto &TII = *MI.getMF()->getSubtarget().getInstrInfo();
352   MachineInstr *Next = MI.getNextNode();
353   if (!Next || TII.isTailCall(*Next) || !Next->isReturn())
354     return false;
355 
356   return true;
357 }
358 
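// Emit a call to the library routine for Libcall with the given result and
// argument registers using the target's CallLowering, returning
// UnableToLegalize if call lowering fails.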
359 LegalizerHelper::LegalizeResult
360 llvm::createLibcall(MachineIRBuilder &MIRBuilder, RTLIB::Libcall Libcall,
361                     const CallLowering::ArgInfo &Result,
362                     ArrayRef<CallLowering::ArgInfo> Args) {
363   auto &CLI = *MIRBuilder.getMF().getSubtarget().getCallLowering();
364   auto &TLI = *MIRBuilder.getMF().getSubtarget().getTargetLowering();
365   const char *Name = TLI.getLibcallName(Libcall);
366 
367   MIRBuilder.getMF().getFrameInfo().setHasCalls(true);
368 
369   CallLowering::CallLoweringInfo Info;
370   Info.CallConv = TLI.getLibcallCallingConv(Libcall);
371   Info.Callee = MachineOperand::CreateES(Name);
372   Info.OrigRet = Result;
373   std::copy(Args.begin(), Args.end(), std::back_inserter(Info.OrigArgs));
374   if (!CLI.lowerCall(MIRBuilder, Info))
375     return LegalizerHelper::UnableToLegalize;
376 
377   return LegalizerHelper::Legalized;
378 }
379 
380 // Useful for libcalls where all operands have the same type.
381 static LegalizerHelper::LegalizeResult
382 simpleLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder, unsigned Size,
383               Type *OpType) {
384   auto Libcall = getRTLibDesc(MI.getOpcode(), Size);
385 
386   SmallVector<CallLowering::ArgInfo, 3> Args;
387   for (unsigned i = 1; i < MI.getNumOperands(); i++)
388     Args.push_back({MI.getOperand(i).getReg(), OpType});
389   return createLibcall(MIRBuilder, Libcall, {MI.getOperand(0).getReg(), OpType},
390                        Args);
391 }
392 
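// Lower a memcpy/memmove/memset G_INTRINSIC_W_SIDE_EFFECTS into a call to the
// corresponding library function, folding it into a tail call when possible.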
393 LegalizerHelper::LegalizeResult
394 llvm::createMemLibcall(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
395                        MachineInstr &MI) {
396   assert(MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
397   auto &Ctx = MIRBuilder.getMF().getFunction().getContext();
398 
399   SmallVector<CallLowering::ArgInfo, 3> Args;
400   for (unsigned i = 1; i < MI.getNumOperands(); i++) {
401     Register Reg = MI.getOperand(i).getReg();
402 
    // We need to derive an IR type for call lowering.
404     LLT OpLLT = MRI.getType(Reg);
405     Type *OpTy = nullptr;
406     if (OpLLT.isPointer())
407       OpTy = Type::getInt8PtrTy(Ctx, OpLLT.getAddressSpace());
408     else
409       OpTy = IntegerType::get(Ctx, OpLLT.getSizeInBits());
410     Args.push_back({Reg, OpTy});
411   }
412 
413   auto &CLI = *MIRBuilder.getMF().getSubtarget().getCallLowering();
414   auto &TLI = *MIRBuilder.getMF().getSubtarget().getTargetLowering();
415   Intrinsic::ID ID = MI.getOperand(0).getIntrinsicID();
416   RTLIB::Libcall RTLibcall;
417   switch (ID) {
418   case Intrinsic::memcpy:
419     RTLibcall = RTLIB::MEMCPY;
420     break;
421   case Intrinsic::memset:
422     RTLibcall = RTLIB::MEMSET;
423     break;
424   case Intrinsic::memmove:
425     RTLibcall = RTLIB::MEMMOVE;
426     break;
427   default:
428     return LegalizerHelper::UnableToLegalize;
429   }
430   const char *Name = TLI.getLibcallName(RTLibcall);
431 
432   MIRBuilder.setInstr(MI);
433   MIRBuilder.getMF().getFrameInfo().setHasCalls(true);
434 
435   CallLowering::CallLoweringInfo Info;
436   Info.CallConv = TLI.getLibcallCallingConv(RTLibcall);
437   Info.Callee = MachineOperand::CreateES(Name);
438   Info.OrigRet = CallLowering::ArgInfo({0}, Type::getVoidTy(Ctx));
439   Info.IsTailCall = isLibCallInTailPosition(MI);
440 
441   std::copy(Args.begin(), Args.end(), std::back_inserter(Info.OrigArgs));
442   if (!CLI.lowerCall(MIRBuilder, Info))
443     return LegalizerHelper::UnableToLegalize;
444 
445   if (Info.LoweredTailCall) {
446     assert(Info.IsTailCall && "Lowered tail call when it wasn't a tail call?");
447     // We must have a return following the call to get past
448     // isLibCallInTailPosition.
449     assert(MI.getNextNode() && MI.getNextNode()->isReturn() &&
450            "Expected instr following MI to be a return?");
451 
452     // We lowered a tail call, so the call is now the return from the block.
453     // Delete the old return.
454     MI.getNextNode()->eraseFromParent();
455   }
456 
457   return LegalizerHelper::Legalized;
458 }
459 
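/// Map a conversion opcode together with its source and destination IR types
/// to the matching RTLIB conversion call.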
460 static RTLIB::Libcall getConvRTLibDesc(unsigned Opcode, Type *ToType,
461                                        Type *FromType) {
462   auto ToMVT = MVT::getVT(ToType);
463   auto FromMVT = MVT::getVT(FromType);
464 
465   switch (Opcode) {
466   case TargetOpcode::G_FPEXT:
467     return RTLIB::getFPEXT(FromMVT, ToMVT);
468   case TargetOpcode::G_FPTRUNC:
469     return RTLIB::getFPROUND(FromMVT, ToMVT);
470   case TargetOpcode::G_FPTOSI:
471     return RTLIB::getFPTOSINT(FromMVT, ToMVT);
472   case TargetOpcode::G_FPTOUI:
473     return RTLIB::getFPTOUINT(FromMVT, ToMVT);
474   case TargetOpcode::G_SITOFP:
475     return RTLIB::getSINTTOFP(FromMVT, ToMVT);
476   case TargetOpcode::G_UITOFP:
477     return RTLIB::getUINTTOFP(FromMVT, ToMVT);
478   }
479   llvm_unreachable("Unsupported libcall function");
480 }
481 
482 static LegalizerHelper::LegalizeResult
483 conversionLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder, Type *ToType,
484                   Type *FromType) {
485   RTLIB::Libcall Libcall = getConvRTLibDesc(MI.getOpcode(), ToType, FromType);
486   return createLibcall(MIRBuilder, Libcall, {MI.getOperand(0).getReg(), ToType},
487                        {{MI.getOperand(1).getReg(), FromType}});
488 }
489 
490 LegalizerHelper::LegalizeResult
491 LegalizerHelper::libcall(MachineInstr &MI) {
492   LLT LLTy = MRI.getType(MI.getOperand(0).getReg());
493   unsigned Size = LLTy.getSizeInBits();
494   auto &Ctx = MIRBuilder.getMF().getFunction().getContext();
495 
496   MIRBuilder.setInstr(MI);
497 
498   switch (MI.getOpcode()) {
499   default:
500     return UnableToLegalize;
501   case TargetOpcode::G_SDIV:
502   case TargetOpcode::G_UDIV:
503   case TargetOpcode::G_SREM:
504   case TargetOpcode::G_UREM:
505   case TargetOpcode::G_CTLZ_ZERO_UNDEF: {
506     Type *HLTy = IntegerType::get(Ctx, Size);
507     auto Status = simpleLibcall(MI, MIRBuilder, Size, HLTy);
508     if (Status != Legalized)
509       return Status;
510     break;
511   }
512   case TargetOpcode::G_FADD:
513   case TargetOpcode::G_FSUB:
514   case TargetOpcode::G_FMUL:
515   case TargetOpcode::G_FDIV:
516   case TargetOpcode::G_FMA:
517   case TargetOpcode::G_FPOW:
518   case TargetOpcode::G_FREM:
519   case TargetOpcode::G_FCOS:
520   case TargetOpcode::G_FSIN:
521   case TargetOpcode::G_FLOG10:
522   case TargetOpcode::G_FLOG:
523   case TargetOpcode::G_FLOG2:
524   case TargetOpcode::G_FEXP:
525   case TargetOpcode::G_FEXP2:
526   case TargetOpcode::G_FCEIL:
527   case TargetOpcode::G_FFLOOR: {
528     if (Size > 64) {
529       LLVM_DEBUG(dbgs() << "Size " << Size << " too large to legalize.\n");
530       return UnableToLegalize;
531     }
532     Type *HLTy = Size == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx);
533     auto Status = simpleLibcall(MI, MIRBuilder, Size, HLTy);
534     if (Status != Legalized)
535       return Status;
536     break;
537   }
538   case TargetOpcode::G_FPEXT: {
539     // FIXME: Support other floating point types (half, fp128 etc)
540     unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
541     unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
542     if (ToSize != 64 || FromSize != 32)
543       return UnableToLegalize;
544     LegalizeResult Status = conversionLibcall(
545         MI, MIRBuilder, Type::getDoubleTy(Ctx), Type::getFloatTy(Ctx));
546     if (Status != Legalized)
547       return Status;
548     break;
549   }
550   case TargetOpcode::G_FPTRUNC: {
551     // FIXME: Support other floating point types (half, fp128 etc)
552     unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
553     unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
554     if (ToSize != 32 || FromSize != 64)
555       return UnableToLegalize;
556     LegalizeResult Status = conversionLibcall(
557         MI, MIRBuilder, Type::getFloatTy(Ctx), Type::getDoubleTy(Ctx));
558     if (Status != Legalized)
559       return Status;
560     break;
561   }
562   case TargetOpcode::G_FPTOSI:
563   case TargetOpcode::G_FPTOUI: {
564     // FIXME: Support other types
565     unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
566     unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
567     if ((ToSize != 32 && ToSize != 64) || (FromSize != 32 && FromSize != 64))
568       return UnableToLegalize;
569     LegalizeResult Status = conversionLibcall(
570         MI, MIRBuilder,
571         ToSize == 32 ? Type::getInt32Ty(Ctx) : Type::getInt64Ty(Ctx),
572         FromSize == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx));
573     if (Status != Legalized)
574       return Status;
575     break;
576   }
577   case TargetOpcode::G_SITOFP:
578   case TargetOpcode::G_UITOFP: {
579     // FIXME: Support other types
580     unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
581     unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
582     if ((FromSize != 32 && FromSize != 64) || (ToSize != 32 && ToSize != 64))
583       return UnableToLegalize;
584     LegalizeResult Status = conversionLibcall(
585         MI, MIRBuilder,
586         ToSize == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx),
587         FromSize == 32 ? Type::getInt32Ty(Ctx) : Type::getInt64Ty(Ctx));
588     if (Status != Legalized)
589       return Status;
590     break;
591   }
592   }
593 
594   MI.eraseFromParent();
595   return Legalized;
596 }
597 
598 LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
599                                                               unsigned TypeIdx,
600                                                               LLT NarrowTy) {
601   MIRBuilder.setInstr(MI);
602 
603   uint64_t SizeOp0 = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
604   uint64_t NarrowSize = NarrowTy.getSizeInBits();
605 
606   switch (MI.getOpcode()) {
607   default:
608     return UnableToLegalize;
609   case TargetOpcode::G_IMPLICIT_DEF: {
610     // FIXME: add support for when SizeOp0 isn't an exact multiple of
611     // NarrowSize.
612     if (SizeOp0 % NarrowSize != 0)
613       return UnableToLegalize;
614     int NumParts = SizeOp0 / NarrowSize;
615 
616     SmallVector<Register, 2> DstRegs;
617     for (int i = 0; i < NumParts; ++i)
618       DstRegs.push_back(
619           MIRBuilder.buildUndef(NarrowTy)->getOperand(0).getReg());
620 
621     Register DstReg = MI.getOperand(0).getReg();
    if (MRI.getType(DstReg).isVector())
623       MIRBuilder.buildBuildVector(DstReg, DstRegs);
624     else
625       MIRBuilder.buildMerge(DstReg, DstRegs);
626     MI.eraseFromParent();
627     return Legalized;
628   }
629   case TargetOpcode::G_CONSTANT: {
630     LLT Ty = MRI.getType(MI.getOperand(0).getReg());
631     const APInt &Val = MI.getOperand(1).getCImm()->getValue();
632     unsigned TotalSize = Ty.getSizeInBits();
633     unsigned NarrowSize = NarrowTy.getSizeInBits();
634     int NumParts = TotalSize / NarrowSize;
635 
636     SmallVector<Register, 4> PartRegs;
637     for (int I = 0; I != NumParts; ++I) {
638       unsigned Offset = I * NarrowSize;
639       auto K = MIRBuilder.buildConstant(NarrowTy,
640                                         Val.lshr(Offset).trunc(NarrowSize));
641       PartRegs.push_back(K.getReg(0));
642     }
643 
644     LLT LeftoverTy;
645     unsigned LeftoverBits = TotalSize - NumParts * NarrowSize;
646     SmallVector<Register, 1> LeftoverRegs;
647     if (LeftoverBits != 0) {
648       LeftoverTy = LLT::scalar(LeftoverBits);
649       auto K = MIRBuilder.buildConstant(
650         LeftoverTy,
651         Val.lshr(NumParts * NarrowSize).trunc(LeftoverBits));
652       LeftoverRegs.push_back(K.getReg(0));
653     }
654 
655     insertParts(MI.getOperand(0).getReg(),
656                 Ty, NarrowTy, PartRegs, LeftoverTy, LeftoverRegs);
657 
658     MI.eraseFromParent();
659     return Legalized;
660   }
661   case TargetOpcode::G_SEXT: {
662     if (TypeIdx != 0)
663       return UnableToLegalize;
664 
665     Register SrcReg = MI.getOperand(1).getReg();
666     LLT SrcTy = MRI.getType(SrcReg);
667 
668     // FIXME: support the general case where the requested NarrowTy may not be
669     // the same as the source type. E.g. s128 = sext(s32)
670     if ((SrcTy.getSizeInBits() != SizeOp0 / 2) ||
671         SrcTy.getSizeInBits() != NarrowTy.getSizeInBits()) {
672       LLVM_DEBUG(dbgs() << "Can't narrow sext to type " << NarrowTy << "\n");
673       return UnableToLegalize;
674     }
675 
676     // Shift the sign bit of the low register through the high register.
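    // For example, narrowing s64 = G_SEXT s32 produces a merge of the source
    // with an ashr of the source by 31.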
677     auto ShiftAmt =
678         MIRBuilder.buildConstant(LLT::scalar(64), NarrowTy.getSizeInBits() - 1);
679     auto Shift = MIRBuilder.buildAShr(NarrowTy, SrcReg, ShiftAmt);
680     MIRBuilder.buildMerge(MI.getOperand(0).getReg(), {SrcReg, Shift.getReg(0)});
681     MI.eraseFromParent();
682     return Legalized;
683   }
684   case TargetOpcode::G_ZEXT: {
685     if (TypeIdx != 0)
686       return UnableToLegalize;
687 
688     LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
689     uint64_t SizeOp1 = SrcTy.getSizeInBits();
690     if (SizeOp0 % SizeOp1 != 0)
691       return UnableToLegalize;
692 
693     // Generate a merge where the bottom bits are taken from the source, and
694     // zero everything else.
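    // For example, narrowing s128 = G_ZEXT s32 produces
    //   %res:_(s128) = G_MERGE_VALUES %src:_(s32), %zero, %zero, %zero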
695     Register ZeroReg = MIRBuilder.buildConstant(SrcTy, 0).getReg(0);
696     unsigned NumParts = SizeOp0 / SizeOp1;
697     SmallVector<Register, 4> Srcs = {MI.getOperand(1).getReg()};
698     for (unsigned Part = 1; Part < NumParts; ++Part)
699       Srcs.push_back(ZeroReg);
700     MIRBuilder.buildMerge(MI.getOperand(0).getReg(), Srcs);
701     MI.eraseFromParent();
702     return Legalized;
703   }
704   case TargetOpcode::G_TRUNC: {
705     if (TypeIdx != 1)
706       return UnableToLegalize;
707 
708     uint64_t SizeOp1 = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
709     if (NarrowTy.getSizeInBits() * 2 != SizeOp1) {
710       LLVM_DEBUG(dbgs() << "Can't narrow trunc to type " << NarrowTy << "\n");
711       return UnableToLegalize;
712     }
713 
714     auto Unmerge = MIRBuilder.buildUnmerge(NarrowTy, MI.getOperand(1).getReg());
715     MIRBuilder.buildCopy(MI.getOperand(0).getReg(), Unmerge.getReg(0));
716     MI.eraseFromParent();
717     return Legalized;
718   }
719 
720   case TargetOpcode::G_ADD: {
721     // FIXME: add support for when SizeOp0 isn't an exact multiple of
722     // NarrowSize.
723     if (SizeOp0 % NarrowSize != 0)
724       return UnableToLegalize;
    // Expand in terms of carry-setting/consuming G_UADDO and G_UADDE
    // instructions.
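    // For example, an s64 add with s32 parts becomes a G_UADDO on the low
    // halves whose carry-out feeds a G_UADDE on the high halves.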
726     int NumParts = SizeOp0 / NarrowTy.getSizeInBits();
727 
728     SmallVector<Register, 2> Src1Regs, Src2Regs, DstRegs;
729     extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs);
730     extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs);
731 
732     Register CarryIn;
733     for (int i = 0; i < NumParts; ++i) {
734       Register DstReg = MRI.createGenericVirtualRegister(NarrowTy);
735       Register CarryOut = MRI.createGenericVirtualRegister(LLT::scalar(1));
736 
737       if (i == 0)
738         MIRBuilder.buildUAddo(DstReg, CarryOut, Src1Regs[i], Src2Regs[i]);
739       else {
740         MIRBuilder.buildUAdde(DstReg, CarryOut, Src1Regs[i],
741                               Src2Regs[i], CarryIn);
742       }
743 
744       DstRegs.push_back(DstReg);
745       CarryIn = CarryOut;
746     }
747     Register DstReg = MI.getOperand(0).getReg();
    if (MRI.getType(DstReg).isVector())
749       MIRBuilder.buildBuildVector(DstReg, DstRegs);
750     else
751       MIRBuilder.buildMerge(DstReg, DstRegs);
752     MI.eraseFromParent();
753     return Legalized;
754   }
755   case TargetOpcode::G_SUB: {
756     // FIXME: add support for when SizeOp0 isn't an exact multiple of
757     // NarrowSize.
758     if (SizeOp0 % NarrowSize != 0)
759       return UnableToLegalize;
760 
761     int NumParts = SizeOp0 / NarrowTy.getSizeInBits();
762 
763     SmallVector<Register, 2> Src1Regs, Src2Regs, DstRegs;
764     extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs);
765     extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs);
766 
767     Register DstReg = MRI.createGenericVirtualRegister(NarrowTy);
768     Register BorrowOut = MRI.createGenericVirtualRegister(LLT::scalar(1));
769     MIRBuilder.buildInstr(TargetOpcode::G_USUBO, {DstReg, BorrowOut},
770                           {Src1Regs[0], Src2Regs[0]});
771     DstRegs.push_back(DstReg);
772     Register BorrowIn = BorrowOut;
773     for (int i = 1; i < NumParts; ++i) {
774       DstReg = MRI.createGenericVirtualRegister(NarrowTy);
775       BorrowOut = MRI.createGenericVirtualRegister(LLT::scalar(1));
776 
777       MIRBuilder.buildInstr(TargetOpcode::G_USUBE, {DstReg, BorrowOut},
778                             {Src1Regs[i], Src2Regs[i], BorrowIn});
779 
780       DstRegs.push_back(DstReg);
781       BorrowIn = BorrowOut;
782     }
783     MIRBuilder.buildMerge(MI.getOperand(0).getReg(), DstRegs);
784     MI.eraseFromParent();
785     return Legalized;
786   }
787   case TargetOpcode::G_MUL:
788   case TargetOpcode::G_UMULH:
789     return narrowScalarMul(MI, NarrowTy);
790   case TargetOpcode::G_EXTRACT:
791     return narrowScalarExtract(MI, TypeIdx, NarrowTy);
792   case TargetOpcode::G_INSERT:
793     return narrowScalarInsert(MI, TypeIdx, NarrowTy);
794   case TargetOpcode::G_LOAD: {
795     const auto &MMO = **MI.memoperands_begin();
796     Register DstReg = MI.getOperand(0).getReg();
797     LLT DstTy = MRI.getType(DstReg);
798     if (DstTy.isVector())
799       return UnableToLegalize;
800 
801     if (8 * MMO.getSize() != DstTy.getSizeInBits()) {
802       Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
803       auto &MMO = **MI.memoperands_begin();
804       MIRBuilder.buildLoad(TmpReg, MI.getOperand(1).getReg(), MMO);
805       MIRBuilder.buildAnyExt(DstReg, TmpReg);
806       MI.eraseFromParent();
807       return Legalized;
808     }
809 
810     return reduceLoadStoreWidth(MI, TypeIdx, NarrowTy);
811   }
812   case TargetOpcode::G_ZEXTLOAD:
813   case TargetOpcode::G_SEXTLOAD: {
814     bool ZExt = MI.getOpcode() == TargetOpcode::G_ZEXTLOAD;
815     Register DstReg = MI.getOperand(0).getReg();
816     Register PtrReg = MI.getOperand(1).getReg();
817 
818     Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
819     auto &MMO = **MI.memoperands_begin();
820     if (MMO.getSizeInBits() == NarrowSize) {
821       MIRBuilder.buildLoad(TmpReg, PtrReg, MMO);
822     } else {
823       unsigned ExtLoad = ZExt ? TargetOpcode::G_ZEXTLOAD
824         : TargetOpcode::G_SEXTLOAD;
825       MIRBuilder.buildInstr(ExtLoad)
826         .addDef(TmpReg)
827         .addUse(PtrReg)
828         .addMemOperand(&MMO);
829     }
830 
831     if (ZExt)
832       MIRBuilder.buildZExt(DstReg, TmpReg);
833     else
834       MIRBuilder.buildSExt(DstReg, TmpReg);
835 
836     MI.eraseFromParent();
837     return Legalized;
838   }
839   case TargetOpcode::G_STORE: {
840     const auto &MMO = **MI.memoperands_begin();
841 
842     Register SrcReg = MI.getOperand(0).getReg();
843     LLT SrcTy = MRI.getType(SrcReg);
844     if (SrcTy.isVector())
845       return UnableToLegalize;
846 
847     int NumParts = SizeOp0 / NarrowSize;
848     unsigned HandledSize = NumParts * NarrowTy.getSizeInBits();
849     unsigned LeftoverBits = SrcTy.getSizeInBits() - HandledSize;
850     if (SrcTy.isVector() && LeftoverBits != 0)
851       return UnableToLegalize;
852 
853     if (8 * MMO.getSize() != SrcTy.getSizeInBits()) {
854       Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
855       auto &MMO = **MI.memoperands_begin();
856       MIRBuilder.buildTrunc(TmpReg, SrcReg);
857       MIRBuilder.buildStore(TmpReg, MI.getOperand(1).getReg(), MMO);
858       MI.eraseFromParent();
859       return Legalized;
860     }
861 
862     return reduceLoadStoreWidth(MI, 0, NarrowTy);
863   }
864   case TargetOpcode::G_SELECT:
865     return narrowScalarSelect(MI, TypeIdx, NarrowTy);
866   case TargetOpcode::G_AND:
867   case TargetOpcode::G_OR:
868   case TargetOpcode::G_XOR: {
869     // Legalize bitwise operation:
870     // A = BinOp<Ty> B, C
871     // into:
872     // B1, ..., BN = G_UNMERGE_VALUES B
873     // C1, ..., CN = G_UNMERGE_VALUES C
    // A1 = BinOp<Ty/N> B1, C1
875     // ...
876     // AN = BinOp<Ty/N> BN, CN
877     // A = G_MERGE_VALUES A1, ..., AN
878     return narrowScalarBasic(MI, TypeIdx, NarrowTy);
879   }
880   case TargetOpcode::G_SHL:
881   case TargetOpcode::G_LSHR:
882   case TargetOpcode::G_ASHR:
883     return narrowScalarShift(MI, TypeIdx, NarrowTy);
884   case TargetOpcode::G_CTLZ:
885   case TargetOpcode::G_CTLZ_ZERO_UNDEF:
886   case TargetOpcode::G_CTTZ:
887   case TargetOpcode::G_CTTZ_ZERO_UNDEF:
888   case TargetOpcode::G_CTPOP:
889     if (TypeIdx != 0)
890       return UnableToLegalize; // TODO
891 
892     Observer.changingInstr(MI);
893     narrowScalarDst(MI, NarrowTy, 0, TargetOpcode::G_ZEXT);
894     Observer.changedInstr(MI);
895     return Legalized;
896   case TargetOpcode::G_INTTOPTR:
897     if (TypeIdx != 1)
898       return UnableToLegalize;
899 
900     Observer.changingInstr(MI);
901     narrowScalarSrc(MI, NarrowTy, 1);
902     Observer.changedInstr(MI);
903     return Legalized;
904   case TargetOpcode::G_PTRTOINT:
905     if (TypeIdx != 0)
906       return UnableToLegalize;
907 
908     Observer.changingInstr(MI);
909     narrowScalarDst(MI, NarrowTy, 0, TargetOpcode::G_ZEXT);
910     Observer.changedInstr(MI);
911     return Legalized;
912   case TargetOpcode::G_PHI: {
913     unsigned NumParts = SizeOp0 / NarrowSize;
914     SmallVector<Register, 2> DstRegs;
915     SmallVector<SmallVector<Register, 2>, 2> SrcRegs;
916     DstRegs.resize(NumParts);
917     SrcRegs.resize(MI.getNumOperands() / 2);
918     Observer.changingInstr(MI);
919     for (unsigned i = 1; i < MI.getNumOperands(); i += 2) {
920       MachineBasicBlock &OpMBB = *MI.getOperand(i + 1).getMBB();
921       MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminator());
922       extractParts(MI.getOperand(i).getReg(), NarrowTy, NumParts,
923                    SrcRegs[i / 2]);
924     }
925     MachineBasicBlock &MBB = *MI.getParent();
926     MIRBuilder.setInsertPt(MBB, MI);
927     for (unsigned i = 0; i < NumParts; ++i) {
928       DstRegs[i] = MRI.createGenericVirtualRegister(NarrowTy);
929       MachineInstrBuilder MIB =
930           MIRBuilder.buildInstr(TargetOpcode::G_PHI).addDef(DstRegs[i]);
931       for (unsigned j = 1; j < MI.getNumOperands(); j += 2)
932         MIB.addUse(SrcRegs[j / 2][i]).add(MI.getOperand(j + 1));
933     }
934     MIRBuilder.setInsertPt(MBB, MBB.getFirstNonPHI());
935     MIRBuilder.buildMerge(MI.getOperand(0).getReg(), DstRegs);
936     Observer.changedInstr(MI);
937     MI.eraseFromParent();
938     return Legalized;
939   }
940   case TargetOpcode::G_EXTRACT_VECTOR_ELT:
941   case TargetOpcode::G_INSERT_VECTOR_ELT: {
942     if (TypeIdx != 2)
943       return UnableToLegalize;
944 
945     int OpIdx = MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT ? 2 : 3;
946     Observer.changingInstr(MI);
947     narrowScalarSrc(MI, NarrowTy, OpIdx);
948     Observer.changedInstr(MI);
949     return Legalized;
950   }
951   case TargetOpcode::G_ICMP: {
952     uint64_t SrcSize = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits();
953     if (NarrowSize * 2 != SrcSize)
954       return UnableToLegalize;
955 
956     Observer.changingInstr(MI);
957     Register LHSL = MRI.createGenericVirtualRegister(NarrowTy);
958     Register LHSH = MRI.createGenericVirtualRegister(NarrowTy);
959     MIRBuilder.buildUnmerge({LHSL, LHSH}, MI.getOperand(2).getReg());
960 
961     Register RHSL = MRI.createGenericVirtualRegister(NarrowTy);
962     Register RHSH = MRI.createGenericVirtualRegister(NarrowTy);
963     MIRBuilder.buildUnmerge({RHSL, RHSH}, MI.getOperand(3).getReg());
964 
965     CmpInst::Predicate Pred =
966         static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
967     LLT ResTy = MRI.getType(MI.getOperand(0).getReg());
968 
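    // Equality compares can fold both halves with xor/or and compare the
    // result against zero. Other predicates compare the high halves, falling
    // back to an unsigned compare of the low halves when the high halves are
    // equal.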
969     if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) {
970       MachineInstrBuilder XorL = MIRBuilder.buildXor(NarrowTy, LHSL, RHSL);
971       MachineInstrBuilder XorH = MIRBuilder.buildXor(NarrowTy, LHSH, RHSH);
972       MachineInstrBuilder Or = MIRBuilder.buildOr(NarrowTy, XorL, XorH);
973       MachineInstrBuilder Zero = MIRBuilder.buildConstant(NarrowTy, 0);
974       MIRBuilder.buildICmp(Pred, MI.getOperand(0).getReg(), Or, Zero);
975     } else {
976       MachineInstrBuilder CmpH = MIRBuilder.buildICmp(Pred, ResTy, LHSH, RHSH);
977       MachineInstrBuilder CmpHEQ =
978           MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, ResTy, LHSH, RHSH);
979       MachineInstrBuilder CmpLU = MIRBuilder.buildICmp(
980           ICmpInst::getUnsignedPredicate(Pred), ResTy, LHSL, RHSL);
981       MIRBuilder.buildSelect(MI.getOperand(0).getReg(), CmpHEQ, CmpLU, CmpH);
982     }
983     Observer.changedInstr(MI);
984     MI.eraseFromParent();
985     return Legalized;
986   }
987   case TargetOpcode::G_SEXT_INREG: {
988     if (TypeIdx != 0)
989       return UnableToLegalize;
990 
991     if (!MI.getOperand(2).isImm())
992       return UnableToLegalize;
993     int64_t SizeInBits = MI.getOperand(2).getImm();
994 
    // So long as the new type has more bits than the bits we're extending,
    // we don't need to break it apart.
997     if (NarrowTy.getScalarSizeInBits() >= SizeInBits) {
998       Observer.changingInstr(MI);
999       // We don't lose any non-extension bits by truncating the src and
1000       // sign-extending the dst.
1001       MachineOperand &MO1 = MI.getOperand(1);
1002       auto TruncMIB = MIRBuilder.buildTrunc(NarrowTy, MO1.getReg());
1003       MO1.setReg(TruncMIB->getOperand(0).getReg());
1004 
1005       MachineOperand &MO2 = MI.getOperand(0);
1006       Register DstExt = MRI.createGenericVirtualRegister(NarrowTy);
1007       MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
1008       MIRBuilder.buildInstr(TargetOpcode::G_SEXT, {MO2.getReg()}, {DstExt});
1009       MO2.setReg(DstExt);
1010       Observer.changedInstr(MI);
1011       return Legalized;
1012     }
1013 
1014     // Break it apart. Components below the extension point are unmodified. The
1015     // component containing the extension point becomes a narrower SEXT_INREG.
1016     // Components above it are ashr'd from the component containing the
1017     // extension point.
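    // For example, narrowing G_SEXT_INREG of an s128 value to 48 bits with
    // s32 parts copies part 0, applies G_SEXT_INREG(.., 16) to part 1, and
    // fills parts 2 and 3 with an ashr of that extended part by 31.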
1018     if (SizeOp0 % NarrowSize != 0)
1019       return UnableToLegalize;
1020     int NumParts = SizeOp0 / NarrowSize;
1021 
1022     // List the registers where the destination will be scattered.
1023     SmallVector<Register, 2> DstRegs;
1024     // List the registers where the source will be split.
1025     SmallVector<Register, 2> SrcRegs;
1026 
1027     // Create all the temporary registers.
1028     for (int i = 0; i < NumParts; ++i) {
1029       Register SrcReg = MRI.createGenericVirtualRegister(NarrowTy);
1030 
1031       SrcRegs.push_back(SrcReg);
1032     }
1033 
1034     // Explode the big arguments into smaller chunks.
1035     MIRBuilder.buildUnmerge(SrcRegs, MI.getOperand(1).getReg());
1036 
1037     Register AshrCstReg =
1038         MIRBuilder.buildConstant(NarrowTy, NarrowTy.getScalarSizeInBits() - 1)
1039             ->getOperand(0)
1040             .getReg();
1041     Register FullExtensionReg = 0;
1042     Register PartialExtensionReg = 0;
1043 
1044     // Do the operation on each small part.
1045     for (int i = 0; i < NumParts; ++i) {
1046       if ((i + 1) * NarrowTy.getScalarSizeInBits() < SizeInBits)
1047         DstRegs.push_back(SrcRegs[i]);
1048       else if (i * NarrowTy.getScalarSizeInBits() > SizeInBits) {
1049         assert(PartialExtensionReg &&
1050                "Expected to visit partial extension before full");
1051         if (FullExtensionReg) {
1052           DstRegs.push_back(FullExtensionReg);
1053           continue;
1054         }
1055         DstRegs.push_back(MIRBuilder
1056                               .buildInstr(TargetOpcode::G_ASHR, {NarrowTy},
1057                                           {PartialExtensionReg, AshrCstReg})
1058                               ->getOperand(0)
1059                               .getReg());
1060         FullExtensionReg = DstRegs.back();
1061       } else {
1062         DstRegs.push_back(
1063             MIRBuilder
1064                 .buildInstr(
1065                     TargetOpcode::G_SEXT_INREG, {NarrowTy},
1066                     {SrcRegs[i], SizeInBits % NarrowTy.getScalarSizeInBits()})
1067                 ->getOperand(0)
1068                 .getReg());
1069         PartialExtensionReg = DstRegs.back();
1070       }
1071     }
1072 
1073     // Gather the destination registers into the final destination.
1074     Register DstReg = MI.getOperand(0).getReg();
1075     MIRBuilder.buildMerge(DstReg, DstRegs);
1076     MI.eraseFromParent();
1077     return Legalized;
1078   }
1079   }
1080 }
1081 
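// Replace source operand OpIdx of MI with a new WideTy register produced by
// extending the original value with ExtOpcode.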
1082 void LegalizerHelper::widenScalarSrc(MachineInstr &MI, LLT WideTy,
1083                                      unsigned OpIdx, unsigned ExtOpcode) {
1084   MachineOperand &MO = MI.getOperand(OpIdx);
1085   auto ExtB = MIRBuilder.buildInstr(ExtOpcode, {WideTy}, {MO.getReg()});
1086   MO.setReg(ExtB->getOperand(0).getReg());
1087 }
1088 
1089 void LegalizerHelper::narrowScalarSrc(MachineInstr &MI, LLT NarrowTy,
1090                                       unsigned OpIdx) {
1091   MachineOperand &MO = MI.getOperand(OpIdx);
1092   auto ExtB = MIRBuilder.buildInstr(TargetOpcode::G_TRUNC, {NarrowTy},
1093                                     {MO.getReg()});
1094   MO.setReg(ExtB->getOperand(0).getReg());
1095 }
1096 
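// Replace def operand OpIdx of MI with a new WideTy register, then emit
// TruncOpcode after MI to produce the original narrow value from it.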
1097 void LegalizerHelper::widenScalarDst(MachineInstr &MI, LLT WideTy,
1098                                      unsigned OpIdx, unsigned TruncOpcode) {
1099   MachineOperand &MO = MI.getOperand(OpIdx);
1100   Register DstExt = MRI.createGenericVirtualRegister(WideTy);
1101   MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
1102   MIRBuilder.buildInstr(TruncOpcode, {MO.getReg()}, {DstExt});
1103   MO.setReg(DstExt);
1104 }
1105 
1106 void LegalizerHelper::narrowScalarDst(MachineInstr &MI, LLT NarrowTy,
1107                                       unsigned OpIdx, unsigned ExtOpcode) {
1108   MachineOperand &MO = MI.getOperand(OpIdx);
1109   Register DstTrunc = MRI.createGenericVirtualRegister(NarrowTy);
1110   MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
1111   MIRBuilder.buildInstr(ExtOpcode, {MO.getReg()}, {DstTrunc});
1112   MO.setReg(DstTrunc);
1113 }
1114 
1115 void LegalizerHelper::moreElementsVectorDst(MachineInstr &MI, LLT WideTy,
1116                                             unsigned OpIdx) {
1117   MachineOperand &MO = MI.getOperand(OpIdx);
1118   Register DstExt = MRI.createGenericVirtualRegister(WideTy);
1119   MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
1120   MIRBuilder.buildExtract(MO.getReg(), DstExt, 0);
1121   MO.setReg(DstExt);
1122 }
1123 
1124 void LegalizerHelper::moreElementsVectorSrc(MachineInstr &MI, LLT MoreTy,
1125                                             unsigned OpIdx) {
1126   MachineOperand &MO = MI.getOperand(OpIdx);
1127 
1128   LLT OldTy = MRI.getType(MO.getReg());
1129   unsigned OldElts = OldTy.getNumElements();
1130   unsigned NewElts = MoreTy.getNumElements();
1131 
1132   unsigned NumParts = NewElts / OldElts;
1133 
  // Use concat_vectors when the new element count is an exact multiple of
  // the old element count.
1135   if (NumParts * OldElts == NewElts) {
1136     SmallVector<Register, 8> Parts;
1137     Parts.push_back(MO.getReg());
1138 
1139     Register ImpDef = MIRBuilder.buildUndef(OldTy).getReg(0);
1140     for (unsigned I = 1; I != NumParts; ++I)
1141       Parts.push_back(ImpDef);
1142 
1143     auto Concat = MIRBuilder.buildConcatVectors(MoreTy, Parts);
1144     MO.setReg(Concat.getReg(0));
1145     return;
1146   }
1147 
1148   Register MoreReg = MRI.createGenericVirtualRegister(MoreTy);
1149   Register ImpDef = MIRBuilder.buildUndef(MoreTy).getReg(0);
1150   MIRBuilder.buildInsert(MoreReg, ImpDef, MO.getReg(), 0);
1151   MO.setReg(MoreReg);
1152 }
1153 
1154 LegalizerHelper::LegalizeResult
1155 LegalizerHelper::widenScalarMergeValues(MachineInstr &MI, unsigned TypeIdx,
1156                                         LLT WideTy) {
1157   if (TypeIdx != 1)
1158     return UnableToLegalize;
1159 
1160   Register DstReg = MI.getOperand(0).getReg();
1161   LLT DstTy = MRI.getType(DstReg);
1162   if (DstTy.isVector())
1163     return UnableToLegalize;
1164 
1165   Register Src1 = MI.getOperand(1).getReg();
1166   LLT SrcTy = MRI.getType(Src1);
1167   const int DstSize = DstTy.getSizeInBits();
1168   const int SrcSize = SrcTy.getSizeInBits();
1169   const int WideSize = WideTy.getSizeInBits();
1170   const int NumMerge = (DstSize + WideSize - 1) / WideSize;
1171 
1172   unsigned NumOps = MI.getNumOperands();
1173   unsigned NumSrc = MI.getNumOperands() - 1;
1174   unsigned PartSize = DstTy.getSizeInBits() / NumSrc;
1175 
1176   if (WideSize >= DstSize) {
1177     // Directly pack the bits in the target type.
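    // For example, widening the s8 sources of s16 = G_MERGE_VALUES s8, s8 to
    // s32 zero-extends both to s32, shifts the second left by 8, ORs the two
    // together, and truncates the result back to s16.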
1178     Register ResultReg = MIRBuilder.buildZExt(WideTy, Src1).getReg(0);
1179 
1180     for (unsigned I = 2; I != NumOps; ++I) {
1181       const unsigned Offset = (I - 1) * PartSize;
1182 
1183       Register SrcReg = MI.getOperand(I).getReg();
1184       assert(MRI.getType(SrcReg) == LLT::scalar(PartSize));
1185 
1186       auto ZextInput = MIRBuilder.buildZExt(WideTy, SrcReg);
1187 
1188       Register NextResult = I + 1 == NumOps && WideTy == DstTy ? DstReg :
1189         MRI.createGenericVirtualRegister(WideTy);
1190 
1191       auto ShiftAmt = MIRBuilder.buildConstant(WideTy, Offset);
1192       auto Shl = MIRBuilder.buildShl(WideTy, ZextInput, ShiftAmt);
1193       MIRBuilder.buildOr(NextResult, ResultReg, Shl);
1194       ResultReg = NextResult;
1195     }
1196 
1197     if (WideSize > DstSize)
1198       MIRBuilder.buildTrunc(DstReg, ResultReg);
1199     else if (DstTy.isPointer())
1200       MIRBuilder.buildIntToPtr(DstReg, ResultReg);
1201 
1202     MI.eraseFromParent();
1203     return Legalized;
1204   }
1205 
1206   // Unmerge the original values to the GCD type, and recombine to the next
1207   // multiple greater than the original type.
1208   //
1209   // %3:_(s12) = G_MERGE_VALUES %0:_(s4), %1:_(s4), %2:_(s4) -> s6
1210   // %4:_(s2), %5:_(s2) = G_UNMERGE_VALUES %0
1211   // %6:_(s2), %7:_(s2) = G_UNMERGE_VALUES %1
1212   // %8:_(s2), %9:_(s2) = G_UNMERGE_VALUES %2
1213   // %10:_(s6) = G_MERGE_VALUES %4, %5, %6
1214   // %11:_(s6) = G_MERGE_VALUES %7, %8, %9
1215   // %12:_(s12) = G_MERGE_VALUES %10, %11
1216   //
1217   // Padding with undef if necessary:
1218   //
1219   // %2:_(s8) = G_MERGE_VALUES %0:_(s4), %1:_(s4) -> s6
1220   // %3:_(s2), %4:_(s2) = G_UNMERGE_VALUES %0
1221   // %5:_(s2), %6:_(s2) = G_UNMERGE_VALUES %1
1222   // %7:_(s2) = G_IMPLICIT_DEF
1223   // %8:_(s6) = G_MERGE_VALUES %3, %4, %5
1224   // %9:_(s6) = G_MERGE_VALUES %6, %7, %7
1225   // %10:_(s12) = G_MERGE_VALUES %8, %9
1226 
1227   const int GCD = greatestCommonDivisor(SrcSize, WideSize);
1228   LLT GCDTy = LLT::scalar(GCD);
1229 
1230   SmallVector<Register, 8> Parts;
1231   SmallVector<Register, 8> NewMergeRegs;
1232   SmallVector<Register, 8> Unmerges;
1233   LLT WideDstTy = LLT::scalar(NumMerge * WideSize);
1234 
1235   // Decompose the original operands if they don't evenly divide.
1236   for (int I = 1, E = MI.getNumOperands(); I != E; ++I) {
1237     Register SrcReg = MI.getOperand(I).getReg();
1238     if (GCD == SrcSize) {
1239       Unmerges.push_back(SrcReg);
1240     } else {
1241       auto Unmerge = MIRBuilder.buildUnmerge(GCDTy, SrcReg);
1242       for (int J = 0, JE = Unmerge->getNumOperands() - 1; J != JE; ++J)
1243         Unmerges.push_back(Unmerge.getReg(J));
1244     }
1245   }
1246 
1247   // Pad with undef to the next size that is a multiple of the requested size.
1248   if (static_cast<int>(Unmerges.size()) != NumMerge * WideSize) {
1249     Register UndefReg = MIRBuilder.buildUndef(GCDTy).getReg(0);
1250     for (int I = Unmerges.size(); I != NumMerge * WideSize; ++I)
1251       Unmerges.push_back(UndefReg);
1252   }
1253 
1254   const int PartsPerGCD = WideSize / GCD;
1255 
1256   // Build merges of each piece.
1257   ArrayRef<Register> Slicer(Unmerges);
1258   for (int I = 0; I != NumMerge; ++I, Slicer = Slicer.drop_front(PartsPerGCD)) {
1259     auto Merge = MIRBuilder.buildMerge(WideTy, Slicer.take_front(PartsPerGCD));
1260     NewMergeRegs.push_back(Merge.getReg(0));
1261   }
1262 
1263   // A truncate may be necessary if the requested type doesn't evenly divide the
1264   // original result type.
1265   if (DstTy.getSizeInBits() == WideDstTy.getSizeInBits()) {
1266     MIRBuilder.buildMerge(DstReg, NewMergeRegs);
1267   } else {
1268     auto FinalMerge = MIRBuilder.buildMerge(WideDstTy, NewMergeRegs);
1269     MIRBuilder.buildTrunc(DstReg, FinalMerge.getReg(0));
1270   }
1271 
1272   MI.eraseFromParent();
1273   return Legalized;
1274 }
1275 
1276 LegalizerHelper::LegalizeResult
1277 LegalizerHelper::widenScalarUnmergeValues(MachineInstr &MI, unsigned TypeIdx,
1278                                           LLT WideTy) {
1279   if (TypeIdx != 0)
1280     return UnableToLegalize;
1281 
1282   unsigned NumDst = MI.getNumOperands() - 1;
1283   Register SrcReg = MI.getOperand(NumDst).getReg();
1284   LLT SrcTy = MRI.getType(SrcReg);
1285   if (!SrcTy.isScalar())
1286     return UnableToLegalize;
1287 
1288   Register Dst0Reg = MI.getOperand(0).getReg();
1289   LLT DstTy = MRI.getType(Dst0Reg);
1290   if (!DstTy.isScalar())
1291     return UnableToLegalize;
1292 
1293   unsigned NewSrcSize = NumDst * WideTy.getSizeInBits();
1294   LLT NewSrcTy = LLT::scalar(NewSrcSize);
1295   unsigned SizeDiff = WideTy.getSizeInBits() - DstTy.getSizeInBits();
1296 
1297   auto WideSrc = MIRBuilder.buildZExt(NewSrcTy, SrcReg);
1298 
1299   for (unsigned I = 1; I != NumDst; ++I) {
1300     auto ShiftAmt = MIRBuilder.buildConstant(NewSrcTy, SizeDiff * I);
1301     auto Shl = MIRBuilder.buildShl(NewSrcTy, WideSrc, ShiftAmt);
1302     WideSrc = MIRBuilder.buildOr(NewSrcTy, WideSrc, Shl);
1303   }
1304 
1305   Observer.changingInstr(MI);
1306 
1307   MI.getOperand(NumDst).setReg(WideSrc->getOperand(0).getReg());
1308   for (unsigned I = 0; I != NumDst; ++I)
1309     widenScalarDst(MI, WideTy, I);
1310 
1311   Observer.changedInstr(MI);
1312 
1313   return Legalized;
1314 }
1315 
1316 LegalizerHelper::LegalizeResult
1317 LegalizerHelper::widenScalarExtract(MachineInstr &MI, unsigned TypeIdx,
1318                                     LLT WideTy) {
1319   Register DstReg = MI.getOperand(0).getReg();
1320   Register SrcReg = MI.getOperand(1).getReg();
1321   LLT SrcTy = MRI.getType(SrcReg);
1322 
1323   LLT DstTy = MRI.getType(DstReg);
1324   unsigned Offset = MI.getOperand(2).getImm();
1325 
1326   if (TypeIdx == 0) {
1327     if (SrcTy.isVector() || DstTy.isVector())
1328       return UnableToLegalize;
1329 
1330     SrcOp Src(SrcReg);
1331     if (SrcTy.isPointer()) {
1332       // Extracts from pointers can be handled only if they are really just
1333       // simple integers.
1334       const DataLayout &DL = MIRBuilder.getDataLayout();
1335       if (DL.isNonIntegralAddressSpace(SrcTy.getAddressSpace()))
1336         return UnableToLegalize;
1337 
1338       LLT SrcAsIntTy = LLT::scalar(SrcTy.getSizeInBits());
1339       Src = MIRBuilder.buildPtrToInt(SrcAsIntTy, Src);
1340       SrcTy = SrcAsIntTy;
1341     }
1342 
1343     if (DstTy.isPointer())
1344       return UnableToLegalize;
1345 
1346     if (Offset == 0) {
1347       // Avoid a shift in the degenerate case.
1348       MIRBuilder.buildTrunc(DstReg,
1349                             MIRBuilder.buildAnyExtOrTrunc(WideTy, Src));
1350       MI.eraseFromParent();
1351       return Legalized;
1352     }
1353 
    // Do a shift in the source type, widening the source first if the
    // requested type is wider than it.
    LLT ShiftTy = SrcTy;
    if (WideTy.getSizeInBits() > SrcTy.getSizeInBits()) {
      Src = MIRBuilder.buildAnyExt(WideTy, Src);
      ShiftTy = WideTy;
    }
1361 
1362     auto LShr = MIRBuilder.buildLShr(
1363       ShiftTy, Src, MIRBuilder.buildConstant(ShiftTy, Offset));
1364     MIRBuilder.buildTrunc(DstReg, LShr);
1365     MI.eraseFromParent();
1366     return Legalized;
1367   }
1368 
1369   if (SrcTy.isScalar()) {
1370     Observer.changingInstr(MI);
1371     widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
1372     Observer.changedInstr(MI);
1373     return Legalized;
1374   }
1375 
1376   if (!SrcTy.isVector())
1377     return UnableToLegalize;
1378 
1379   if (DstTy != SrcTy.getElementType())
1380     return UnableToLegalize;
1381 
1382   if (Offset % SrcTy.getScalarSizeInBits() != 0)
1383     return UnableToLegalize;
1384 
1385   Observer.changingInstr(MI);
1386   widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
1387 
1388   MI.getOperand(2).setImm((WideTy.getSizeInBits() / SrcTy.getSizeInBits()) *
1389                           Offset);
1390   widenScalarDst(MI, WideTy.getScalarType(), 0);
1391   Observer.changedInstr(MI);
1392   return Legalized;
1393 }
1394 
1395 LegalizerHelper::LegalizeResult
1396 LegalizerHelper::widenScalarInsert(MachineInstr &MI, unsigned TypeIdx,
1397                                    LLT WideTy) {
1398   if (TypeIdx != 0)
1399     return UnableToLegalize;
1400   Observer.changingInstr(MI);
1401   widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
1402   widenScalarDst(MI, WideTy);
1403   Observer.changedInstr(MI);
1404   return Legalized;
1405 }
1406 
1407 LegalizerHelper::LegalizeResult
1408 LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
1409   MIRBuilder.setInstr(MI);
1410 
1411   switch (MI.getOpcode()) {
1412   default:
1413     return UnableToLegalize;
1414   case TargetOpcode::G_EXTRACT:
1415     return widenScalarExtract(MI, TypeIdx, WideTy);
1416   case TargetOpcode::G_INSERT:
1417     return widenScalarInsert(MI, TypeIdx, WideTy);
1418   case TargetOpcode::G_MERGE_VALUES:
1419     return widenScalarMergeValues(MI, TypeIdx, WideTy);
1420   case TargetOpcode::G_UNMERGE_VALUES:
1421     return widenScalarUnmergeValues(MI, TypeIdx, WideTy);
1422   case TargetOpcode::G_UADDO:
1423   case TargetOpcode::G_USUBO: {
1424     if (TypeIdx == 1)
1425       return UnableToLegalize; // TODO
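    // Zero-extend both operands and do the operation at the wider width; an
    // overflow occurred iff any bits above the original width are set in the
    // wide result.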
1426     auto LHSZext = MIRBuilder.buildInstr(TargetOpcode::G_ZEXT, {WideTy},
1427                                          {MI.getOperand(2).getReg()});
1428     auto RHSZext = MIRBuilder.buildInstr(TargetOpcode::G_ZEXT, {WideTy},
1429                                          {MI.getOperand(3).getReg()});
1430     unsigned Opcode = MI.getOpcode() == TargetOpcode::G_UADDO
1431                           ? TargetOpcode::G_ADD
1432                           : TargetOpcode::G_SUB;
1433     // Do the arithmetic in the larger type.
1434     auto NewOp = MIRBuilder.buildInstr(Opcode, {WideTy}, {LHSZext, RHSZext});
1435     LLT OrigTy = MRI.getType(MI.getOperand(0).getReg());
1436     APInt Mask = APInt::getAllOnesValue(OrigTy.getSizeInBits());
1437     auto AndOp = MIRBuilder.buildInstr(
1438         TargetOpcode::G_AND, {WideTy},
1439         {NewOp, MIRBuilder.buildConstant(WideTy, Mask.getZExtValue())});
1440     // There is no overflow if the AndOp is the same as NewOp.
1441     MIRBuilder.buildICmp(CmpInst::ICMP_NE, MI.getOperand(1).getReg(), NewOp,
1442                          AndOp);
1443     // Now trunc the NewOp to the original result.
1444     MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), NewOp);
1445     MI.eraseFromParent();
1446     return Legalized;
1447   }
1448   case TargetOpcode::G_CTTZ:
1449   case TargetOpcode::G_CTTZ_ZERO_UNDEF:
1450   case TargetOpcode::G_CTLZ:
1451   case TargetOpcode::G_CTLZ_ZERO_UNDEF:
1452   case TargetOpcode::G_CTPOP: {
1453     if (TypeIdx == 0) {
1454       Observer.changingInstr(MI);
1455       widenScalarDst(MI, WideTy, 0);
1456       Observer.changedInstr(MI);
1457       return Legalized;
1458     }
1459 
1460     Register SrcReg = MI.getOperand(1).getReg();
1461 
1462     // First ZEXT the input.
1463     auto MIBSrc = MIRBuilder.buildZExt(WideTy, SrcReg);
1464     LLT CurTy = MRI.getType(SrcReg);
1465     if (MI.getOpcode() == TargetOpcode::G_CTTZ) {
1466       // The count is the same in the larger type except if the original
1467       // value was zero.  This can be handled by setting the bit just off
1468       // the top of the original type.
1469       auto TopBit =
1470           APInt::getOneBitSet(WideTy.getSizeInBits(), CurTy.getSizeInBits());
1471       MIBSrc = MIRBuilder.buildOr(
1472         WideTy, MIBSrc, MIRBuilder.buildConstant(WideTy, TopBit));
1473     }
1474 
1475     // Perform the operation at the larger size.
1476     auto MIBNewOp = MIRBuilder.buildInstr(MI.getOpcode(), {WideTy}, {MIBSrc});
1477     // This is already the correct result for CTPOP and the CTTZ variants.
1478     if (MI.getOpcode() == TargetOpcode::G_CTLZ ||
1479         MI.getOpcode() == TargetOpcode::G_CTLZ_ZERO_UNDEF) {
1480       // The correct result is NewOp - (WideTy size - CurTy size).
1481       unsigned SizeDiff = WideTy.getSizeInBits() - CurTy.getSizeInBits();
1482       MIBNewOp = MIRBuilder.buildInstr(
1483           TargetOpcode::G_SUB, {WideTy},
1484           {MIBNewOp, MIRBuilder.buildConstant(WideTy, SizeDiff)});
1485     }
1486 
1487     MIRBuilder.buildZExtOrTrunc(MI.getOperand(0), MIBNewOp);
1488     MI.eraseFromParent();
1489     return Legalized;
1490   }
1491   case TargetOpcode::G_BSWAP: {
1492     Observer.changingInstr(MI);
1493     Register DstReg = MI.getOperand(0).getReg();
1494 
1495     Register ShrReg = MRI.createGenericVirtualRegister(WideTy);
1496     Register DstExt = MRI.createGenericVirtualRegister(WideTy);
1497     Register ShiftAmtReg = MRI.createGenericVirtualRegister(WideTy);
1498     widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
1499 
1500     MI.getOperand(0).setReg(DstExt);
1501 
1502     MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
1503 
1504     LLT Ty = MRI.getType(DstReg);
1505     unsigned DiffBits = WideTy.getScalarSizeInBits() - Ty.getScalarSizeInBits();
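    // The byte-swapped narrow value ends up in the high bits of the wide
    // result, so shift it back down before truncating to the original type.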
1506     MIRBuilder.buildConstant(ShiftAmtReg, DiffBits);
1507     MIRBuilder.buildInstr(TargetOpcode::G_LSHR)
1508       .addDef(ShrReg)
1509       .addUse(DstExt)
1510       .addUse(ShiftAmtReg);
1511 
1512     MIRBuilder.buildTrunc(DstReg, ShrReg);
1513     Observer.changedInstr(MI);
1514     return Legalized;
1515   }
1516   case TargetOpcode::G_BITREVERSE: {
1517     Observer.changingInstr(MI);
1518 
1519     Register DstReg = MI.getOperand(0).getReg();
1520     LLT Ty = MRI.getType(DstReg);
1521     unsigned DiffBits = WideTy.getScalarSizeInBits() - Ty.getScalarSizeInBits();
1522 
1523     Register DstExt = MRI.createGenericVirtualRegister(WideTy);
1524     widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
1525     MI.getOperand(0).setReg(DstExt);
1526     MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
1527 
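    // Bit-reversing the any-extended value leaves the reversed original bits
    // in the high bits of the wide result; shift them back down before the
    // truncate.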
1528     auto ShiftAmt = MIRBuilder.buildConstant(WideTy, DiffBits);
1529     auto Shift = MIRBuilder.buildLShr(WideTy, DstExt, ShiftAmt);
1530     MIRBuilder.buildTrunc(DstReg, Shift);
1531     Observer.changedInstr(MI);
1532     return Legalized;
1533   }
1534   case TargetOpcode::G_ADD:
1535   case TargetOpcode::G_AND:
1536   case TargetOpcode::G_MUL:
1537   case TargetOpcode::G_OR:
1538   case TargetOpcode::G_XOR:
1539   case TargetOpcode::G_SUB:
1540     // Perform operation at larger width (any extension is fine here, high bits
1541     // don't affect the result) and then truncate the result back to the
1542     // original type.
1543     Observer.changingInstr(MI);
1544     widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
1545     widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ANYEXT);
1546     widenScalarDst(MI, WideTy);
1547     Observer.changedInstr(MI);
1548     return Legalized;
1549 
1550   case TargetOpcode::G_SHL:
1551     Observer.changingInstr(MI);
1552 
1553     if (TypeIdx == 0) {
1554       widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
1555       widenScalarDst(MI, WideTy);
1556     } else {
1557       assert(TypeIdx == 1);
1558       // The "number of bits to shift" operand must preserve its value as an
1559       // unsigned integer:
1560       widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
1561     }
1562 
1563     Observer.changedInstr(MI);
1564     return Legalized;
1565 
1566   case TargetOpcode::G_SDIV:
1567   case TargetOpcode::G_SREM:
1568   case TargetOpcode::G_SMIN:
1569   case TargetOpcode::G_SMAX:
1570     Observer.changingInstr(MI);
1571     widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_SEXT);
1572     widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT);
1573     widenScalarDst(MI, WideTy);
1574     Observer.changedInstr(MI);
1575     return Legalized;
1576 
1577   case TargetOpcode::G_ASHR:
1578   case TargetOpcode::G_LSHR:
1579     Observer.changingInstr(MI);
1580 
1581     if (TypeIdx == 0) {
1582       unsigned CvtOp = MI.getOpcode() == TargetOpcode::G_ASHR ?
1583         TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT;
1584 
1585       widenScalarSrc(MI, WideTy, 1, CvtOp);
1586       widenScalarDst(MI, WideTy);
1587     } else {
1588       assert(TypeIdx == 1);
1589       // The "number of bits to shift" operand must preserve its value as an
1590       // unsigned integer:
1591       widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
1592     }
1593 
1594     Observer.changedInstr(MI);
1595     return Legalized;
1596   case TargetOpcode::G_UDIV:
1597   case TargetOpcode::G_UREM:
1598   case TargetOpcode::G_UMIN:
1599   case TargetOpcode::G_UMAX:
1600     Observer.changingInstr(MI);
1601     widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ZEXT);
1602     widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
1603     widenScalarDst(MI, WideTy);
1604     Observer.changedInstr(MI);
1605     return Legalized;
1606 
1607   case TargetOpcode::G_SELECT:
1608     Observer.changingInstr(MI);
1609     if (TypeIdx == 0) {
1610       // Perform operation at larger width (any extension is fine here, high
1611       // bits don't affect the result) and then truncate the result back to the
1612       // original type.
1613       widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ANYEXT);
1614       widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_ANYEXT);
1615       widenScalarDst(MI, WideTy);
1616     } else {
1617       bool IsVec = MRI.getType(MI.getOperand(1).getReg()).isVector();
1618       // Explicit extension is required here since high bits affect the result.
1619       widenScalarSrc(MI, WideTy, 1, MIRBuilder.getBoolExtOp(IsVec, false));
1620     }
1621     Observer.changedInstr(MI);
1622     return Legalized;
1623 
1624   case TargetOpcode::G_FPTOSI:
1625   case TargetOpcode::G_FPTOUI:
1626     if (TypeIdx != 0)
1627       return UnableToLegalize;
1628     Observer.changingInstr(MI);
1629     widenScalarDst(MI, WideTy);
1630     Observer.changedInstr(MI);
1631     return Legalized;
1632 
1633   case TargetOpcode::G_SITOFP:
1634     if (TypeIdx != 1)
1635       return UnableToLegalize;
1636     Observer.changingInstr(MI);
1637     widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_SEXT);
1638     Observer.changedInstr(MI);
1639     return Legalized;
1640 
1641   case TargetOpcode::G_UITOFP:
1642     if (TypeIdx != 1)
1643       return UnableToLegalize;
1644     Observer.changingInstr(MI);
1645     widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ZEXT);
1646     Observer.changedInstr(MI);
1647     return Legalized;
1648 
1649   case TargetOpcode::G_LOAD:
1650   case TargetOpcode::G_SEXTLOAD:
1651   case TargetOpcode::G_ZEXTLOAD:
1652     Observer.changingInstr(MI);
1653     widenScalarDst(MI, WideTy);
1654     Observer.changedInstr(MI);
1655     return Legalized;
1656 
1657   case TargetOpcode::G_STORE: {
1658     if (TypeIdx != 0)
1659       return UnableToLegalize;
1660 
1661     LLT Ty = MRI.getType(MI.getOperand(0).getReg());
1662     if (!isPowerOf2_32(Ty.getSizeInBits()))
1663       return UnableToLegalize;
1664 
1665     Observer.changingInstr(MI);
1666 
1667     unsigned ExtType = Ty.getScalarSizeInBits() == 1 ?
1668       TargetOpcode::G_ZEXT : TargetOpcode::G_ANYEXT;
1669     widenScalarSrc(MI, WideTy, 0, ExtType);
1670 
1671     Observer.changedInstr(MI);
1672     return Legalized;
1673   }
1674   case TargetOpcode::G_CONSTANT: {
1675     MachineOperand &SrcMO = MI.getOperand(1);
1676     LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();
1677     const APInt &Val = SrcMO.getCImm()->getValue().sext(WideTy.getSizeInBits());
1678     Observer.changingInstr(MI);
1679     SrcMO.setCImm(ConstantInt::get(Ctx, Val));
1680 
1681     widenScalarDst(MI, WideTy);
1682     Observer.changedInstr(MI);
1683     return Legalized;
1684   }
1685   case TargetOpcode::G_FCONSTANT: {
1686     MachineOperand &SrcMO = MI.getOperand(1);
1687     LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();
1688     APFloat Val = SrcMO.getFPImm()->getValueAPF();
1689     bool LosesInfo;
1690     switch (WideTy.getSizeInBits()) {
1691     case 32:
1692       Val.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
1693                   &LosesInfo);
1694       break;
1695     case 64:
1696       Val.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven,
1697                   &LosesInfo);
1698       break;
1699     default:
1700       return UnableToLegalize;
1701     }
1702 
1703     assert(!LosesInfo && "extend should always be lossless");
1704 
1705     Observer.changingInstr(MI);
1706     SrcMO.setFPImm(ConstantFP::get(Ctx, Val));
1707 
1708     widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC);
1709     Observer.changedInstr(MI);
1710     return Legalized;
1711   }
1712   case TargetOpcode::G_IMPLICIT_DEF: {
1713     Observer.changingInstr(MI);
1714     widenScalarDst(MI, WideTy);
1715     Observer.changedInstr(MI);
1716     return Legalized;
1717   }
1718   case TargetOpcode::G_BRCOND:
1719     Observer.changingInstr(MI);
1720     widenScalarSrc(MI, WideTy, 0, MIRBuilder.getBoolExtOp(false, false));
1721     Observer.changedInstr(MI);
1722     return Legalized;
1723 
1724   case TargetOpcode::G_FCMP:
1725     Observer.changingInstr(MI);
1726     if (TypeIdx == 0)
1727       widenScalarDst(MI, WideTy);
1728     else {
1729       widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_FPEXT);
1730       widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_FPEXT);
1731     }
1732     Observer.changedInstr(MI);
1733     return Legalized;
1734 
1735   case TargetOpcode::G_ICMP:
1736     Observer.changingInstr(MI);
1737     if (TypeIdx == 0)
1738       widenScalarDst(MI, WideTy);
1739     else {
1740       unsigned ExtOpcode = CmpInst::isSigned(static_cast<CmpInst::Predicate>(
1741                                MI.getOperand(1).getPredicate()))
1742                                ? TargetOpcode::G_SEXT
1743                                : TargetOpcode::G_ZEXT;
1744       widenScalarSrc(MI, WideTy, 2, ExtOpcode);
1745       widenScalarSrc(MI, WideTy, 3, ExtOpcode);
1746     }
1747     Observer.changedInstr(MI);
1748     return Legalized;
1749 
1750   case TargetOpcode::G_GEP:
1751     assert(TypeIdx == 1 && "unable to legalize pointer of GEP");
1752     Observer.changingInstr(MI);
1753     widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT);
1754     Observer.changedInstr(MI);
1755     return Legalized;
1756 
1757   case TargetOpcode::G_PHI: {
1758     assert(TypeIdx == 0 && "Expecting only Idx 0");
1759 
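    // Widen each incoming value in its predecessor block, just before that
    // block's terminator, and truncate the widened result back down right
    // after the PHIs in this block.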
1760     Observer.changingInstr(MI);
1761     for (unsigned I = 1; I < MI.getNumOperands(); I += 2) {
1762       MachineBasicBlock &OpMBB = *MI.getOperand(I + 1).getMBB();
1763       MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminator());
1764       widenScalarSrc(MI, WideTy, I, TargetOpcode::G_ANYEXT);
1765     }
1766 
1767     MachineBasicBlock &MBB = *MI.getParent();
1768     MIRBuilder.setInsertPt(MBB, --MBB.getFirstNonPHI());
1769     widenScalarDst(MI, WideTy);
1770     Observer.changedInstr(MI);
1771     return Legalized;
1772   }
1773   case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1774     if (TypeIdx == 0) {
1775       Register VecReg = MI.getOperand(1).getReg();
1776       LLT VecTy = MRI.getType(VecReg);
1777       Observer.changingInstr(MI);
1778 
1779       widenScalarSrc(MI, LLT::vector(VecTy.getNumElements(),
1780                                      WideTy.getSizeInBits()),
1781                      1, TargetOpcode::G_SEXT);
1782 
1783       widenScalarDst(MI, WideTy, 0);
1784       Observer.changedInstr(MI);
1785       return Legalized;
1786     }
1787 
1788     if (TypeIdx != 2)
1789       return UnableToLegalize;
1790     Observer.changingInstr(MI);
1791     widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT);
1792     Observer.changedInstr(MI);
1793     return Legalized;
1794   }
1795   case TargetOpcode::G_FADD:
1796   case TargetOpcode::G_FMUL:
1797   case TargetOpcode::G_FSUB:
1798   case TargetOpcode::G_FMA:
1799   case TargetOpcode::G_FMAD:
1800   case TargetOpcode::G_FNEG:
1801   case TargetOpcode::G_FABS:
1802   case TargetOpcode::G_FCANONICALIZE:
1803   case TargetOpcode::G_FMINNUM:
1804   case TargetOpcode::G_FMAXNUM:
1805   case TargetOpcode::G_FMINNUM_IEEE:
1806   case TargetOpcode::G_FMAXNUM_IEEE:
1807   case TargetOpcode::G_FMINIMUM:
1808   case TargetOpcode::G_FMAXIMUM:
1809   case TargetOpcode::G_FDIV:
1810   case TargetOpcode::G_FREM:
1811   case TargetOpcode::G_FCEIL:
1812   case TargetOpcode::G_FFLOOR:
1813   case TargetOpcode::G_FCOS:
1814   case TargetOpcode::G_FSIN:
1815   case TargetOpcode::G_FLOG10:
1816   case TargetOpcode::G_FLOG:
1817   case TargetOpcode::G_FLOG2:
1818   case TargetOpcode::G_FRINT:
1819   case TargetOpcode::G_FNEARBYINT:
1820   case TargetOpcode::G_FSQRT:
1821   case TargetOpcode::G_FEXP:
1822   case TargetOpcode::G_FEXP2:
1823   case TargetOpcode::G_FPOW:
1824   case TargetOpcode::G_INTRINSIC_TRUNC:
1825   case TargetOpcode::G_INTRINSIC_ROUND:
1826     assert(TypeIdx == 0);
1827     Observer.changingInstr(MI);
1828 
1829     for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I)
1830       widenScalarSrc(MI, WideTy, I, TargetOpcode::G_FPEXT);
1831 
1832     widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC);
1833     Observer.changedInstr(MI);
1834     return Legalized;
1835   case TargetOpcode::G_INTTOPTR:
1836     if (TypeIdx != 1)
1837       return UnableToLegalize;
1838 
1839     Observer.changingInstr(MI);
1840     widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ZEXT);
1841     Observer.changedInstr(MI);
1842     return Legalized;
1843   case TargetOpcode::G_PTRTOINT:
1844     if (TypeIdx != 0)
1845       return UnableToLegalize;
1846 
1847     Observer.changingInstr(MI);
1848     widenScalarDst(MI, WideTy, 0);
1849     Observer.changedInstr(MI);
1850     return Legalized;
1851   case TargetOpcode::G_BUILD_VECTOR: {
1852     Observer.changingInstr(MI);
1853 
1854     const LLT WideEltTy = TypeIdx == 1 ? WideTy : WideTy.getElementType();
1855     for (int I = 1, E = MI.getNumOperands(); I != E; ++I)
1856       widenScalarSrc(MI, WideEltTy, I, TargetOpcode::G_ANYEXT);
1857 
1858     // Avoid changing the result vector type if the source element type was
1859     // requested.
1860     if (TypeIdx == 1) {
1861       auto &TII = *MI.getMF()->getSubtarget().getInstrInfo();
1862       MI.setDesc(TII.get(TargetOpcode::G_BUILD_VECTOR_TRUNC));
1863     } else {
1864       widenScalarDst(MI, WideTy, 0);
1865     }
1866 
1867     Observer.changedInstr(MI);
1868     return Legalized;
1869   }
1870   case TargetOpcode::G_SEXT_INREG:
1871     if (TypeIdx != 0)
1872       return UnableToLegalize;
1873 
1874     Observer.changingInstr(MI);
1875     widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
1876     widenScalarDst(MI, WideTy, 0, TargetOpcode::G_TRUNC);
1877     Observer.changedInstr(MI);
1878     return Legalized;
1879   }
1880 }
1881 
1882 LegalizerHelper::LegalizeResult
1883 LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
1884   using namespace TargetOpcode;
1885   MIRBuilder.setInstr(MI);
1886 
1887   switch(MI.getOpcode()) {
1888   default:
1889     return UnableToLegalize;
1890   case TargetOpcode::G_SREM:
1891   case TargetOpcode::G_UREM: {
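    // Lower the remainder in terms of division: rem = x - (x / y) * y.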
1892     Register QuotReg = MRI.createGenericVirtualRegister(Ty);
1893     MIRBuilder.buildInstr(MI.getOpcode() == G_SREM ? G_SDIV : G_UDIV)
1894         .addDef(QuotReg)
1895         .addUse(MI.getOperand(1).getReg())
1896         .addUse(MI.getOperand(2).getReg());
1897 
1898     Register ProdReg = MRI.createGenericVirtualRegister(Ty);
1899     MIRBuilder.buildMul(ProdReg, QuotReg, MI.getOperand(2).getReg());
1900     MIRBuilder.buildSub(MI.getOperand(0).getReg(), MI.getOperand(1).getReg(),
1901                         ProdReg);
1902     MI.eraseFromParent();
1903     return Legalized;
1904   }
1905   case TargetOpcode::G_SMULO:
1906   case TargetOpcode::G_UMULO: {
1907     // Generate G_UMULH/G_SMULH to check for overflow and a normal G_MUL for the
1908     // result.
1909     Register Res = MI.getOperand(0).getReg();
1910     Register Overflow = MI.getOperand(1).getReg();
1911     Register LHS = MI.getOperand(2).getReg();
1912     Register RHS = MI.getOperand(3).getReg();
1913 
1914     MIRBuilder.buildMul(Res, LHS, RHS);
1915 
1916     unsigned Opcode = MI.getOpcode() == TargetOpcode::G_SMULO
1917                           ? TargetOpcode::G_SMULH
1918                           : TargetOpcode::G_UMULH;
1919 
1920     Register HiPart = MRI.createGenericVirtualRegister(Ty);
1921     MIRBuilder.buildInstr(Opcode)
1922       .addDef(HiPart)
1923       .addUse(LHS)
1924       .addUse(RHS);
1925 
1926     Register Zero = MRI.createGenericVirtualRegister(Ty);
1927     MIRBuilder.buildConstant(Zero, 0);
1928 
1929     // For *signed* multiply, overflow is detected by checking:
1930     // (hi != (lo >> bitwidth-1))
1931     if (Opcode == TargetOpcode::G_SMULH) {
1932       Register Shifted = MRI.createGenericVirtualRegister(Ty);
1933       Register ShiftAmt = MRI.createGenericVirtualRegister(Ty);
1934       MIRBuilder.buildConstant(ShiftAmt, Ty.getSizeInBits() - 1);
1935       MIRBuilder.buildInstr(TargetOpcode::G_ASHR)
1936         .addDef(Shifted)
1937         .addUse(Res)
1938         .addUse(ShiftAmt);
1939       MIRBuilder.buildICmp(CmpInst::ICMP_NE, Overflow, HiPart, Shifted);
1940     } else {
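      // For unsigned multiply, overflow occurred iff the high half of the
      // full product is nonzero.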
1941       MIRBuilder.buildICmp(CmpInst::ICMP_NE, Overflow, HiPart, Zero);
1942     }
1943     MI.eraseFromParent();
1944     return Legalized;
1945   }
1946   case TargetOpcode::G_FNEG: {
1947     // TODO: Handle vector types once we are able to
1948     // represent them.
1949     if (Ty.isVector())
1950       return UnableToLegalize;
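    // Lower G_FNEG x to G_FSUB -0.0, x.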
1951     Register Res = MI.getOperand(0).getReg();
1952     Type *ZeroTy;
1953     LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();
1954     switch (Ty.getSizeInBits()) {
1955     case 16:
1956       ZeroTy = Type::getHalfTy(Ctx);
1957       break;
1958     case 32:
1959       ZeroTy = Type::getFloatTy(Ctx);
1960       break;
1961     case 64:
1962       ZeroTy = Type::getDoubleTy(Ctx);
1963       break;
1964     case 128:
1965       ZeroTy = Type::getFP128Ty(Ctx);
1966       break;
1967     default:
1968       llvm_unreachable("unexpected floating-point type");
1969     }
1970     ConstantFP &ZeroForNegation =
1971         *cast<ConstantFP>(ConstantFP::getZeroValueForNegation(ZeroTy));
1972     auto Zero = MIRBuilder.buildFConstant(Ty, ZeroForNegation);
1973     Register SubByReg = MI.getOperand(1).getReg();
1974     Register ZeroReg = Zero->getOperand(0).getReg();
1975     MIRBuilder.buildInstr(TargetOpcode::G_FSUB, {Res}, {ZeroReg, SubByReg},
1976                           MI.getFlags());
1977     MI.eraseFromParent();
1978     return Legalized;
1979   }
1980   case TargetOpcode::G_FSUB: {
1981     // Lower (G_FSUB LHS, RHS) to (G_FADD LHS, (G_FNEG RHS)).
1982     // First, check if G_FNEG is marked as Lower. If so, we may
1983     // end up with an infinite loop as G_FSUB is used to legalize G_FNEG.
1984     if (LI.getAction({G_FNEG, {Ty}}).Action == Lower)
1985       return UnableToLegalize;
1986     Register Res = MI.getOperand(0).getReg();
1987     Register LHS = MI.getOperand(1).getReg();
1988     Register RHS = MI.getOperand(2).getReg();
1989     Register Neg = MRI.createGenericVirtualRegister(Ty);
1990     MIRBuilder.buildInstr(TargetOpcode::G_FNEG).addDef(Neg).addUse(RHS);
1991     MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Res}, {LHS, Neg}, MI.getFlags());
1992     MI.eraseFromParent();
1993     return Legalized;
1994   }
1995   case TargetOpcode::G_FMAD:
1996     return lowerFMad(MI);
1997   case TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS: {
1998     Register OldValRes = MI.getOperand(0).getReg();
1999     Register SuccessRes = MI.getOperand(1).getReg();
2000     Register Addr = MI.getOperand(2).getReg();
2001     Register CmpVal = MI.getOperand(3).getReg();
2002     Register NewVal = MI.getOperand(4).getReg();
2003     MIRBuilder.buildAtomicCmpXchg(OldValRes, Addr, CmpVal, NewVal,
2004                                   **MI.memoperands_begin());
2005     MIRBuilder.buildICmp(CmpInst::ICMP_EQ, SuccessRes, OldValRes, CmpVal);
2006     MI.eraseFromParent();
2007     return Legalized;
2008   }
2009   case TargetOpcode::G_LOAD:
2010   case TargetOpcode::G_SEXTLOAD:
2011   case TargetOpcode::G_ZEXTLOAD: {
2012     // Lower to a memory-width G_LOAD and a G_SEXT/G_ZEXT/G_ANYEXT
2013     Register DstReg = MI.getOperand(0).getReg();
2014     Register PtrReg = MI.getOperand(1).getReg();
2015     LLT DstTy = MRI.getType(DstReg);
2016     auto &MMO = **MI.memoperands_begin();
2017 
2018     if (DstTy.getSizeInBits() == MMO.getSizeInBits()) {
2019       if (MI.getOpcode() == TargetOpcode::G_LOAD) {
2020         // This load needs splitting into power of 2 sized loads.
2021         if (DstTy.isVector())
2022           return UnableToLegalize;
2023         if (isPowerOf2_32(DstTy.getSizeInBits()))
2024           return UnableToLegalize; // Don't know what we're being asked to do.
2025 
2026         // Our strategy here is to generate anyextending loads for the smaller
2027         // types up to next power-2 result type, and then combine the two larger
2028         // result values together, before truncating back down to the non-pow-2
2029         // type.
2030         // E.g. v1 = i24 load =>
2031         // v2 = i32 load (2 byte)
2032         // v3 = i32 load (1 byte)
2033         // v4 = i32 shl v3, 16
2034         // v5 = i32 or v4, v2
2035         // v1 = i24 trunc v5
2036         // By doing this we generate the correct truncate which should get
2037         // combined away as an artifact with a matching extend.
2038         uint64_t LargeSplitSize = PowerOf2Floor(DstTy.getSizeInBits());
2039         uint64_t SmallSplitSize = DstTy.getSizeInBits() - LargeSplitSize;
2040 
2041         MachineFunction &MF = MIRBuilder.getMF();
2042         MachineMemOperand *LargeMMO =
2043             MF.getMachineMemOperand(&MMO, 0, LargeSplitSize / 8);
2044         MachineMemOperand *SmallMMO = MF.getMachineMemOperand(
2045             &MMO, LargeSplitSize / 8, SmallSplitSize / 8);
2046 
2047         LLT PtrTy = MRI.getType(PtrReg);
2048         unsigned AnyExtSize = NextPowerOf2(DstTy.getSizeInBits());
2049         LLT AnyExtTy = LLT::scalar(AnyExtSize);
2050         Register LargeLdReg = MRI.createGenericVirtualRegister(AnyExtTy);
2051         Register SmallLdReg = MRI.createGenericVirtualRegister(AnyExtTy);
2052         auto LargeLoad =
2053             MIRBuilder.buildLoad(LargeLdReg, PtrReg, *LargeMMO);
2054 
2055         auto OffsetCst =
2056             MIRBuilder.buildConstant(LLT::scalar(64), LargeSplitSize / 8);
2057         Register GEPReg = MRI.createGenericVirtualRegister(PtrTy);
2058         auto SmallPtr = MIRBuilder.buildGEP(GEPReg, PtrReg, OffsetCst.getReg(0));
2059         auto SmallLoad = MIRBuilder.buildLoad(SmallLdReg, SmallPtr.getReg(0),
2060                                               *SmallMMO);
2061 
2062         auto ShiftAmt = MIRBuilder.buildConstant(AnyExtTy, LargeSplitSize);
2063         auto Shift = MIRBuilder.buildShl(AnyExtTy, SmallLoad, ShiftAmt);
2064         auto Or = MIRBuilder.buildOr(AnyExtTy, Shift, LargeLoad);
2065         MIRBuilder.buildTrunc(DstReg, {Or.getReg(0)});
2066         MI.eraseFromParent();
2067         return Legalized;
2068       }
2069       MIRBuilder.buildLoad(DstReg, PtrReg, MMO);
2070       MI.eraseFromParent();
2071       return Legalized;
2072     }
2073 
2074     if (DstTy.isScalar()) {
2075       Register TmpReg =
2076           MRI.createGenericVirtualRegister(LLT::scalar(MMO.getSizeInBits()));
2077       MIRBuilder.buildLoad(TmpReg, PtrReg, MMO);
2078       switch (MI.getOpcode()) {
2079       default:
2080         llvm_unreachable("Unexpected opcode");
2081       case TargetOpcode::G_LOAD:
2082         MIRBuilder.buildAnyExt(DstReg, TmpReg);
2083         break;
2084       case TargetOpcode::G_SEXTLOAD:
2085         MIRBuilder.buildSExt(DstReg, TmpReg);
2086         break;
2087       case TargetOpcode::G_ZEXTLOAD:
2088         MIRBuilder.buildZExt(DstReg, TmpReg);
2089         break;
2090       }
2091       MI.eraseFromParent();
2092       return Legalized;
2093     }
2094 
2095     return UnableToLegalize;
2096   }
2097   case TargetOpcode::G_STORE: {
2098     // Lower a non-power of 2 store into multiple pow-2 stores.
2099     // E.g. split an i24 store into an i16 store + i8 store.
2100     // We do this by first extending the stored value to the next largest power
2101     // of 2 type, and then using truncating stores to store the components.
2102     // As with G_LOAD, this generates an extend that can be artifact-combined
2103     // away instead of leaving behind extracts.
2104     Register SrcReg = MI.getOperand(0).getReg();
2105     Register PtrReg = MI.getOperand(1).getReg();
2106     LLT SrcTy = MRI.getType(SrcReg);
2107     MachineMemOperand &MMO = **MI.memoperands_begin();
2108     if (SrcTy.getSizeInBits() != MMO.getSizeInBits())
2109       return UnableToLegalize;
2110     if (SrcTy.isVector())
2111       return UnableToLegalize;
2112     if (isPowerOf2_32(SrcTy.getSizeInBits()))
2113       return UnableToLegalize; // Don't know what we're being asked to do.
2114 
2115     // Extend to the next pow-2.
2116     const LLT ExtendTy = LLT::scalar(NextPowerOf2(SrcTy.getSizeInBits()));
2117     auto ExtVal = MIRBuilder.buildAnyExt(ExtendTy, SrcReg);
2118 
2119     // Obtain the smaller value by shifting away the larger value.
2120     uint64_t LargeSplitSize = PowerOf2Floor(SrcTy.getSizeInBits());
2121     uint64_t SmallSplitSize = SrcTy.getSizeInBits() - LargeSplitSize;
2122     auto ShiftAmt = MIRBuilder.buildConstant(ExtendTy, LargeSplitSize);
2123     auto SmallVal = MIRBuilder.buildLShr(ExtendTy, ExtVal, ShiftAmt);
2124 
2125     // Generate the GEP and truncating stores.
2126     LLT PtrTy = MRI.getType(PtrReg);
2127     auto OffsetCst =
2128         MIRBuilder.buildConstant(LLT::scalar(64), LargeSplitSize / 8);
2129     Register GEPReg = MRI.createGenericVirtualRegister(PtrTy);
2130     auto SmallPtr = MIRBuilder.buildGEP(GEPReg, PtrReg, OffsetCst.getReg(0));
2131 
2132     MachineFunction &MF = MIRBuilder.getMF();
2133     MachineMemOperand *LargeMMO =
2134         MF.getMachineMemOperand(&MMO, 0, LargeSplitSize / 8);
2135     MachineMemOperand *SmallMMO =
2136         MF.getMachineMemOperand(&MMO, LargeSplitSize / 8, SmallSplitSize / 8);
2137     MIRBuilder.buildStore(ExtVal.getReg(0), PtrReg, *LargeMMO);
2138     MIRBuilder.buildStore(SmallVal.getReg(0), SmallPtr.getReg(0), *SmallMMO);
2139     MI.eraseFromParent();
2140     return Legalized;
2141   }
2142   case TargetOpcode::G_CTLZ_ZERO_UNDEF:
2143   case TargetOpcode::G_CTTZ_ZERO_UNDEF:
2144   case TargetOpcode::G_CTLZ:
2145   case TargetOpcode::G_CTTZ:
2146   case TargetOpcode::G_CTPOP:
2147     return lowerBitCount(MI, TypeIdx, Ty);
2148   case G_UADDO: {
2149     Register Res = MI.getOperand(0).getReg();
2150     Register CarryOut = MI.getOperand(1).getReg();
2151     Register LHS = MI.getOperand(2).getReg();
2152     Register RHS = MI.getOperand(3).getReg();
2153 
2154     MIRBuilder.buildAdd(Res, LHS, RHS);
2155     MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CarryOut, Res, RHS);
2156 
2157     MI.eraseFromParent();
2158     return Legalized;
2159   }
2160   case G_UADDE: {
2161     Register Res = MI.getOperand(0).getReg();
2162     Register CarryOut = MI.getOperand(1).getReg();
2163     Register LHS = MI.getOperand(2).getReg();
2164     Register RHS = MI.getOperand(3).getReg();
2165     Register CarryIn = MI.getOperand(4).getReg();
2166 
2167     Register TmpRes = MRI.createGenericVirtualRegister(Ty);
2168     Register ZExtCarryIn = MRI.createGenericVirtualRegister(Ty);
2169 
2170     MIRBuilder.buildAdd(TmpRes, LHS, RHS);
2171     MIRBuilder.buildZExt(ZExtCarryIn, CarryIn);
2172     MIRBuilder.buildAdd(Res, TmpRes, ZExtCarryIn);
2173     MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CarryOut, Res, LHS);
2174 
2175     MI.eraseFromParent();
2176     return Legalized;
2177   }
2178   case G_USUBO: {
2179     Register Res = MI.getOperand(0).getReg();
2180     Register BorrowOut = MI.getOperand(1).getReg();
2181     Register LHS = MI.getOperand(2).getReg();
2182     Register RHS = MI.getOperand(3).getReg();
2183 
2184     MIRBuilder.buildSub(Res, LHS, RHS);
2185     MIRBuilder.buildICmp(CmpInst::ICMP_ULT, BorrowOut, LHS, RHS);
2186 
2187     MI.eraseFromParent();
2188     return Legalized;
2189   }
2190   case G_USUBE: {
2191     Register Res = MI.getOperand(0).getReg();
2192     Register BorrowOut = MI.getOperand(1).getReg();
2193     Register LHS = MI.getOperand(2).getReg();
2194     Register RHS = MI.getOperand(3).getReg();
2195     Register BorrowIn = MI.getOperand(4).getReg();
2196 
2197     Register TmpRes = MRI.createGenericVirtualRegister(Ty);
2198     Register ZExtBorrowIn = MRI.createGenericVirtualRegister(Ty);
2199     Register LHS_EQ_RHS = MRI.createGenericVirtualRegister(LLT::scalar(1));
2200     Register LHS_ULT_RHS = MRI.createGenericVirtualRegister(LLT::scalar(1));
2201 
2202     MIRBuilder.buildSub(TmpRes, LHS, RHS);
2203     MIRBuilder.buildZExt(ZExtBorrowIn, BorrowIn);
2204     MIRBuilder.buildSub(Res, TmpRes, ZExtBorrowIn);
2205     MIRBuilder.buildICmp(CmpInst::ICMP_EQ, LHS_EQ_RHS, LHS, RHS);
2206     MIRBuilder.buildICmp(CmpInst::ICMP_ULT, LHS_ULT_RHS, LHS, RHS);
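    // If LHS == RHS, the subtraction borrows exactly when there was an
    // incoming borrow; otherwise it borrows iff LHS < RHS.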
2207     MIRBuilder.buildSelect(BorrowOut, LHS_EQ_RHS, BorrowIn, LHS_ULT_RHS);
2208 
2209     MI.eraseFromParent();
2210     return Legalized;
2211   }
2212   case G_UITOFP:
2213     return lowerUITOFP(MI, TypeIdx, Ty);
2214   case G_SITOFP:
2215     return lowerSITOFP(MI, TypeIdx, Ty);
2216   case G_FPTOUI:
2217     return lowerFPTOUI(MI, TypeIdx, Ty);
2218   case G_SMIN:
2219   case G_SMAX:
2220   case G_UMIN:
2221   case G_UMAX:
2222     return lowerMinMax(MI, TypeIdx, Ty);
2223   case G_FCOPYSIGN:
2224     return lowerFCopySign(MI, TypeIdx, Ty);
2225   case G_FMINNUM:
2226   case G_FMAXNUM:
2227     return lowerFMinNumMaxNum(MI);
2228   case G_UNMERGE_VALUES:
2229     return lowerUnmergeValues(MI);
2230   case TargetOpcode::G_SEXT_INREG: {
2231     assert(MI.getOperand(2).isImm() && "Expected immediate");
2232     int64_t SizeInBits = MI.getOperand(2).getImm();
2233 
2234     Register DstReg = MI.getOperand(0).getReg();
2235     Register SrcReg = MI.getOperand(1).getReg();
2236     LLT DstTy = MRI.getType(DstReg);
2237     Register TmpRes = MRI.createGenericVirtualRegister(DstTy);
2238 
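    // Shift the value up so the sign bit of the SizeInBits-wide field becomes
    // the MSB, then arithmetic-shift it back down to sign extend.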
2239     auto MIBSz = MIRBuilder.buildConstant(DstTy, DstTy.getScalarSizeInBits() - SizeInBits);
2240     MIRBuilder.buildInstr(TargetOpcode::G_SHL, {TmpRes}, {SrcReg, MIBSz->getOperand(0).getReg()});
2241     MIRBuilder.buildInstr(TargetOpcode::G_ASHR, {DstReg}, {TmpRes, MIBSz->getOperand(0).getReg()});
2242     MI.eraseFromParent();
2243     return Legalized;
2244   }
2245   case G_SHUFFLE_VECTOR:
2246     return lowerShuffleVector(MI);
2247   case G_DYN_STACKALLOC:
2248     return lowerDynStackAlloc(MI);
2249   }
2250 }
2251 
2252 LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorImplicitDef(
2253     MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy) {
2254   SmallVector<Register, 2> DstRegs;
2255 
2256   unsigned NarrowSize = NarrowTy.getSizeInBits();
2257   Register DstReg = MI.getOperand(0).getReg();
2258   unsigned Size = MRI.getType(DstReg).getSizeInBits();
2259   int NumParts = Size / NarrowSize;
2260   // FIXME: Don't know how to handle the situation where the small vectors
2261   // aren't all the same size yet.
2262   if (Size % NarrowSize != 0)
2263     return UnableToLegalize;
2264 
2265   for (int i = 0; i < NumParts; ++i) {
2266     Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
2267     MIRBuilder.buildUndef(TmpReg);
2268     DstRegs.push_back(TmpReg);
2269   }
2270 
2271   if (NarrowTy.isVector())
2272     MIRBuilder.buildConcatVectors(DstReg, DstRegs);
2273   else
2274     MIRBuilder.buildBuildVector(DstReg, DstRegs);
2275 
2276   MI.eraseFromParent();
2277   return Legalized;
2278 }
2279 
2280 LegalizerHelper::LegalizeResult
2281 LegalizerHelper::fewerElementsVectorBasic(MachineInstr &MI, unsigned TypeIdx,
2282                                           LLT NarrowTy) {
2283   const unsigned Opc = MI.getOpcode();
2284   const unsigned NumOps = MI.getNumOperands() - 1;
2285   const unsigned NarrowSize = NarrowTy.getSizeInBits();
2286   const Register DstReg = MI.getOperand(0).getReg();
2287   const unsigned Flags = MI.getFlags();
2288   const LLT DstTy = MRI.getType(DstReg);
2289   const unsigned Size = DstTy.getSizeInBits();
2290   const int NumParts = Size / NarrowSize;
2291   const LLT EltTy = DstTy.getElementType();
2292   const unsigned EltSize = EltTy.getSizeInBits();
2293   const unsigned BitsForNumParts = NarrowSize * NumParts;
2294 
2295   // Check if we have any leftovers. If we do, then only handle the case where
2296   // the leftover is one element.
2297   if (BitsForNumParts != Size && BitsForNumParts + EltSize != Size)
2298     return UnableToLegalize;
2299 
2300   if (BitsForNumParts != Size) {
2301     Register AccumDstReg = MRI.createGenericVirtualRegister(DstTy);
2302     MIRBuilder.buildUndef(AccumDstReg);
2303 
2304     // Handle the pieces which evenly divide into the requested type with
2305     // extract/op/insert sequence.
2306     for (unsigned Offset = 0; Offset < BitsForNumParts; Offset += NarrowSize) {
2307       SmallVector<SrcOp, 4> SrcOps;
2308       for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
2309         Register PartOpReg = MRI.createGenericVirtualRegister(NarrowTy);
2310         MIRBuilder.buildExtract(PartOpReg, MI.getOperand(I).getReg(), Offset);
2311         SrcOps.push_back(PartOpReg);
2312       }
2313 
2314       Register PartDstReg = MRI.createGenericVirtualRegister(NarrowTy);
2315       MIRBuilder.buildInstr(Opc, {PartDstReg}, SrcOps, Flags);
2316 
2317       Register PartInsertReg = MRI.createGenericVirtualRegister(DstTy);
2318       MIRBuilder.buildInsert(PartInsertReg, AccumDstReg, PartDstReg, Offset);
2319       AccumDstReg = PartInsertReg;
2320     }
2321 
2322     // Handle the remaining element sized leftover piece.
2323     SmallVector<SrcOp, 4> SrcOps;
2324     for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
2325       Register PartOpReg = MRI.createGenericVirtualRegister(EltTy);
2326       MIRBuilder.buildExtract(PartOpReg, MI.getOperand(I).getReg(),
2327                               BitsForNumParts);
2328       SrcOps.push_back(PartOpReg);
2329     }
2330 
2331     Register PartDstReg = MRI.createGenericVirtualRegister(EltTy);
2332     MIRBuilder.buildInstr(Opc, {PartDstReg}, SrcOps, Flags);
2333     MIRBuilder.buildInsert(DstReg, AccumDstReg, PartDstReg, BitsForNumParts);
2334     MI.eraseFromParent();
2335 
2336     return Legalized;
2337   }
2338 
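  // Even breakdown: split every source operand into NumParts NarrowTy pieces,
  // apply the operation to each piece, and recombine the partial results.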
2339   SmallVector<Register, 2> DstRegs, Src0Regs, Src1Regs, Src2Regs;
2340 
2341   extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src0Regs);
2342 
2343   if (NumOps >= 2)
2344     extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src1Regs);
2345 
2346   if (NumOps >= 3)
2347     extractParts(MI.getOperand(3).getReg(), NarrowTy, NumParts, Src2Regs);
2348 
2349   for (int i = 0; i < NumParts; ++i) {
2350     Register DstReg = MRI.createGenericVirtualRegister(NarrowTy);
2351 
2352     if (NumOps == 1)
2353       MIRBuilder.buildInstr(Opc, {DstReg}, {Src0Regs[i]}, Flags);
2354     else if (NumOps == 2) {
2355       MIRBuilder.buildInstr(Opc, {DstReg}, {Src0Regs[i], Src1Regs[i]}, Flags);
2356     } else if (NumOps == 3) {
2357       MIRBuilder.buildInstr(Opc, {DstReg},
2358                             {Src0Regs[i], Src1Regs[i], Src2Regs[i]}, Flags);
2359     }
2360 
2361     DstRegs.push_back(DstReg);
2362   }
2363 
2364   if (NarrowTy.isVector())
2365     MIRBuilder.buildConcatVectors(DstReg, DstRegs);
2366   else
2367     MIRBuilder.buildBuildVector(DstReg, DstRegs);
2368 
2369   MI.eraseFromParent();
2370   return Legalized;
2371 }
2372 
2373 // Handle splitting vector operations which need to have the same number of
2374 // elements in each type index, but each type index may have a different element
2375 // type.
2376 //
2377 // e.g.  <4 x s64> = G_SHL <4 x s64>, <4 x s32> ->
2378 //       <2 x s64> = G_SHL <2 x s64>, <2 x s32>
2379 //       <2 x s64> = G_SHL <2 x s64>, <2 x s32>
2380 //
2381 // Also handles some irregular breakdown cases, e.g.
2382 // e.g.  <3 x s64> = G_SHL <3 x s64>, <3 x s32> ->
2383 //       <2 x s64> = G_SHL <2 x s64>, <2 x s32>
2384 //             s64 = G_SHL s64, s32
2385 LegalizerHelper::LegalizeResult
2386 LegalizerHelper::fewerElementsVectorMultiEltType(
2387   MachineInstr &MI, unsigned TypeIdx, LLT NarrowTyArg) {
2388   if (TypeIdx != 0)
2389     return UnableToLegalize;
2390 
2391   const LLT NarrowTy0 = NarrowTyArg;
2392   const unsigned NewNumElts =
2393       NarrowTy0.isVector() ? NarrowTy0.getNumElements() : 1;
2394 
2395   const Register DstReg = MI.getOperand(0).getReg();
2396   LLT DstTy = MRI.getType(DstReg);
2397   LLT LeftoverTy0;
2398 
2399   // All of the operands need to have the same number of elements, so if we can
2400   // determine a type breakdown for the result type, we can for all of the
2401   // source types.
2402   int NumParts = getNarrowTypeBreakDown(DstTy, NarrowTy0, LeftoverTy0).first;
2403   if (NumParts < 0)
2404     return UnableToLegalize;
2405 
2406   SmallVector<MachineInstrBuilder, 4> NewInsts;
2407 
2408   SmallVector<Register, 4> DstRegs, LeftoverDstRegs;
2409   SmallVector<Register, 4> PartRegs, LeftoverRegs;
2410 
2411   for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
2412     LLT LeftoverTy;
2413     Register SrcReg = MI.getOperand(I).getReg();
2414     LLT SrcTyI = MRI.getType(SrcReg);
2415     LLT NarrowTyI = LLT::scalarOrVector(NewNumElts, SrcTyI.getScalarType());
2416     LLT LeftoverTyI;
2417 
2418     // Split this operand into the requested typed registers, and any leftover
2419     // required to reproduce the original type.
2420     if (!extractParts(SrcReg, SrcTyI, NarrowTyI, LeftoverTyI, PartRegs,
2421                       LeftoverRegs))
2422       return UnableToLegalize;
2423 
2424     if (I == 1) {
2425       // For the first operand, create an instruction for each part and setup
2426       // the result.
2427       for (Register PartReg : PartRegs) {
2428         Register PartDstReg = MRI.createGenericVirtualRegister(NarrowTy0);
2429         NewInsts.push_back(MIRBuilder.buildInstrNoInsert(MI.getOpcode())
2430                                .addDef(PartDstReg)
2431                                .addUse(PartReg));
2432         DstRegs.push_back(PartDstReg);
2433       }
2434 
2435       for (Register LeftoverReg : LeftoverRegs) {
2436         Register PartDstReg = MRI.createGenericVirtualRegister(LeftoverTy0);
2437         NewInsts.push_back(MIRBuilder.buildInstrNoInsert(MI.getOpcode())
2438                                .addDef(PartDstReg)
2439                                .addUse(LeftoverReg));
2440         LeftoverDstRegs.push_back(PartDstReg);
2441       }
2442     } else {
2443       assert(NewInsts.size() == PartRegs.size() + LeftoverRegs.size());
2444 
2445       // Add the newly created operand splits to the existing instructions. The
2446       // odd-sized pieces are ordered after the requested NarrowTyArg sized
2447       // pieces.
2448       unsigned InstCount = 0;
2449       for (unsigned J = 0, JE = PartRegs.size(); J != JE; ++J)
2450         NewInsts[InstCount++].addUse(PartRegs[J]);
2451       for (unsigned J = 0, JE = LeftoverRegs.size(); J != JE; ++J)
2452         NewInsts[InstCount++].addUse(LeftoverRegs[J]);
2453     }
2454 
2455     PartRegs.clear();
2456     LeftoverRegs.clear();
2457   }
2458 
2459   // Insert the newly built operations and rebuild the result register.
2460   for (auto &MIB : NewInsts)
2461     MIRBuilder.insertInstr(MIB);
2462 
2463   insertParts(DstReg, DstTy, NarrowTy0, DstRegs, LeftoverTy0, LeftoverDstRegs);
2464 
2465   MI.eraseFromParent();
2466   return Legalized;
2467 }
2468 
2469 LegalizerHelper::LegalizeResult
2470 LegalizerHelper::fewerElementsVectorCasts(MachineInstr &MI, unsigned TypeIdx,
2471                                           LLT NarrowTy) {
2472   if (TypeIdx != 0)
2473     return UnableToLegalize;
2474 
2475   Register DstReg = MI.getOperand(0).getReg();
2476   Register SrcReg = MI.getOperand(1).getReg();
2477   LLT DstTy = MRI.getType(DstReg);
2478   LLT SrcTy = MRI.getType(SrcReg);
2479 
2480   LLT NarrowTy0 = NarrowTy;
2481   LLT NarrowTy1;
2482   unsigned NumParts;
2483 
2484   if (NarrowTy.isVector()) {
2485     // Uneven breakdown not handled.
2486     NumParts = DstTy.getNumElements() / NarrowTy.getNumElements();
2487     if (NumParts * NarrowTy.getNumElements() != DstTy.getNumElements())
2488       return UnableToLegalize;
2489 
2490     NarrowTy1 = LLT::vector(NumParts, SrcTy.getElementType().getSizeInBits());
2491   } else {
2492     NumParts = DstTy.getNumElements();
2493     NarrowTy1 = SrcTy.getElementType();
2494   }
2495 
2496   SmallVector<Register, 4> SrcRegs, DstRegs;
2497   extractParts(SrcReg, NarrowTy1, NumParts, SrcRegs);
2498 
2499   for (unsigned I = 0; I < NumParts; ++I) {
2500     Register DstReg = MRI.createGenericVirtualRegister(NarrowTy0);
2501     MachineInstr *NewInst = MIRBuilder.buildInstr(MI.getOpcode())
2502       .addDef(DstReg)
2503       .addUse(SrcRegs[I]);
2504 
2505     NewInst->setFlags(MI.getFlags());
2506     DstRegs.push_back(DstReg);
2507   }
2508 
2509   if (NarrowTy.isVector())
2510     MIRBuilder.buildConcatVectors(DstReg, DstRegs);
2511   else
2512     MIRBuilder.buildBuildVector(DstReg, DstRegs);
2513 
2514   MI.eraseFromParent();
2515   return Legalized;
2516 }
2517 
2518 LegalizerHelper::LegalizeResult
2519 LegalizerHelper::fewerElementsVectorCmp(MachineInstr &MI, unsigned TypeIdx,
2520                                         LLT NarrowTy) {
2521   Register DstReg = MI.getOperand(0).getReg();
2522   Register Src0Reg = MI.getOperand(2).getReg();
2523   LLT DstTy = MRI.getType(DstReg);
2524   LLT SrcTy = MRI.getType(Src0Reg);
2525 
2526   unsigned NumParts;
2527   LLT NarrowTy0, NarrowTy1;
2528 
2529   if (TypeIdx == 0) {
2530     unsigned NewElts = NarrowTy.isVector() ? NarrowTy.getNumElements() : 1;
2531     unsigned OldElts = DstTy.getNumElements();
2532 
2533     NarrowTy0 = NarrowTy;
2534     NumParts = NarrowTy.isVector() ? (OldElts / NewElts) : DstTy.getNumElements();
2535     NarrowTy1 = NarrowTy.isVector() ?
2536       LLT::vector(NarrowTy.getNumElements(), SrcTy.getScalarSizeInBits()) :
2537       SrcTy.getElementType();
2538 
2539   } else {
2540     unsigned NewElts = NarrowTy.isVector() ? NarrowTy.getNumElements() : 1;
2541     unsigned OldElts = SrcTy.getNumElements();
2542 
2543     NumParts = NarrowTy.isVector() ? (OldElts / NewElts) :
2544       NarrowTy.getNumElements();
2545     NarrowTy0 = LLT::vector(NarrowTy.getNumElements(),
2546                             DstTy.getScalarSizeInBits());
2547     NarrowTy1 = NarrowTy;
2548   }
2549 
2550   // FIXME: Don't know how to handle the situation where the small vectors
2551   // aren't all the same size yet.
2552   if (NarrowTy1.isVector() &&
2553       NarrowTy1.getNumElements() * NumParts != DstTy.getNumElements())
2554     return UnableToLegalize;
2555 
2556   CmpInst::Predicate Pred
2557     = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
2558 
2559   SmallVector<Register, 2> Src1Regs, Src2Regs, DstRegs;
2560   extractParts(MI.getOperand(2).getReg(), NarrowTy1, NumParts, Src1Regs);
2561   extractParts(MI.getOperand(3).getReg(), NarrowTy1, NumParts, Src2Regs);
2562 
2563   for (unsigned I = 0; I < NumParts; ++I) {
2564     Register DstReg = MRI.createGenericVirtualRegister(NarrowTy0);
2565     DstRegs.push_back(DstReg);
2566 
2567     if (MI.getOpcode() == TargetOpcode::G_ICMP)
2568       MIRBuilder.buildICmp(Pred, DstReg, Src1Regs[I], Src2Regs[I]);
2569     else {
2570       MachineInstr *NewCmp
2571         = MIRBuilder.buildFCmp(Pred, DstReg, Src1Regs[I], Src2Regs[I]);
2572       NewCmp->setFlags(MI.getFlags());
2573     }
2574   }
2575 
2576   if (NarrowTy1.isVector())
2577     MIRBuilder.buildConcatVectors(DstReg, DstRegs);
2578   else
2579     MIRBuilder.buildBuildVector(DstReg, DstRegs);
2580 
2581   MI.eraseFromParent();
2582   return Legalized;
2583 }
2584 
2585 LegalizerHelper::LegalizeResult
2586 LegalizerHelper::fewerElementsVectorSelect(MachineInstr &MI, unsigned TypeIdx,
2587                                            LLT NarrowTy) {
2588   Register DstReg = MI.getOperand(0).getReg();
2589   Register CondReg = MI.getOperand(1).getReg();
2590 
2591   unsigned NumParts = 0;
2592   LLT NarrowTy0, NarrowTy1;
2593 
2594   LLT DstTy = MRI.getType(DstReg);
2595   LLT CondTy = MRI.getType(CondReg);
2596   unsigned Size = DstTy.getSizeInBits();
2597 
2598   assert(TypeIdx == 0 || CondTy.isVector());
2599 
2600   if (TypeIdx == 0) {
2601     NarrowTy0 = NarrowTy;
2602     NarrowTy1 = CondTy;
2603 
2604     unsigned NarrowSize = NarrowTy0.getSizeInBits();
2605     // FIXME: Don't know how to handle the situation where the small vectors
2606     // aren't all the same size yet.
2607     if (Size % NarrowSize != 0)
2608       return UnableToLegalize;
2609 
2610     NumParts = Size / NarrowSize;
2611 
2612     // Need to break down the condition type
2613     if (CondTy.isVector()) {
2614       if (CondTy.getNumElements() == NumParts)
2615         NarrowTy1 = CondTy.getElementType();
2616       else
2617         NarrowTy1 = LLT::vector(CondTy.getNumElements() / NumParts,
2618                                 CondTy.getScalarSizeInBits());
2619     }
2620   } else {
2621     NumParts = CondTy.getNumElements();
2622     if (NarrowTy.isVector()) {
2623       // TODO: Handle uneven breakdown.
2624       if (NumParts * NarrowTy.getNumElements() != CondTy.getNumElements())
2625         return UnableToLegalize;
2626 
2627       return UnableToLegalize;
2628     } else {
2629       NarrowTy0 = DstTy.getElementType();
2630       NarrowTy1 = NarrowTy;
2631     }
2632   }
2633 
2634   SmallVector<Register, 2> DstRegs, Src0Regs, Src1Regs, Src2Regs;
2635   if (CondTy.isVector())
2636     extractParts(MI.getOperand(1).getReg(), NarrowTy1, NumParts, Src0Regs);
2637 
2638   extractParts(MI.getOperand(2).getReg(), NarrowTy0, NumParts, Src1Regs);
2639   extractParts(MI.getOperand(3).getReg(), NarrowTy0, NumParts, Src2Regs);
2640 
2641   for (unsigned i = 0; i < NumParts; ++i) {
2642     Register DstReg = MRI.createGenericVirtualRegister(NarrowTy0);
2643     MIRBuilder.buildSelect(DstReg, CondTy.isVector() ? Src0Regs[i] : CondReg,
2644                            Src1Regs[i], Src2Regs[i]);
2645     DstRegs.push_back(DstReg);
2646   }
2647 
2648   if (NarrowTy0.isVector())
2649     MIRBuilder.buildConcatVectors(DstReg, DstRegs);
2650   else
2651     MIRBuilder.buildBuildVector(DstReg, DstRegs);
2652 
2653   MI.eraseFromParent();
2654   return Legalized;
2655 }
2656 
2657 LegalizerHelper::LegalizeResult
2658 LegalizerHelper::fewerElementsVectorPhi(MachineInstr &MI, unsigned TypeIdx,
2659                                         LLT NarrowTy) {
2660   const Register DstReg = MI.getOperand(0).getReg();
2661   LLT PhiTy = MRI.getType(DstReg);
2662   LLT LeftoverTy;
2663 
2664   // All of the operands need to have the same number of elements, so if we can
2665   // determine a type breakdown for the result type, we can for all of the
2666   // source types.
2667   int NumParts, NumLeftover;
2668   std::tie(NumParts, NumLeftover)
2669     = getNarrowTypeBreakDown(PhiTy, NarrowTy, LeftoverTy);
2670   if (NumParts < 0)
2671     return UnableToLegalize;
2672 
2673   SmallVector<Register, 4> DstRegs, LeftoverDstRegs;
2674   SmallVector<MachineInstrBuilder, 4> NewInsts;
2675 
2676   const int TotalNumParts = NumParts + NumLeftover;
2677 
2678   // Insert the new phis in the result block first.
2679   for (int I = 0; I != TotalNumParts; ++I) {
2680     LLT Ty = I < NumParts ? NarrowTy : LeftoverTy;
2681     Register PartDstReg = MRI.createGenericVirtualRegister(Ty);
2682     NewInsts.push_back(MIRBuilder.buildInstr(TargetOpcode::G_PHI)
2683                        .addDef(PartDstReg));
2684     if (I < NumParts)
2685       DstRegs.push_back(PartDstReg);
2686     else
2687       LeftoverDstRegs.push_back(PartDstReg);
2688   }
2689 
2690   MachineBasicBlock *MBB = MI.getParent();
2691   MIRBuilder.setInsertPt(*MBB, MBB->getFirstNonPHI());
2692   insertParts(DstReg, PhiTy, NarrowTy, DstRegs, LeftoverTy, LeftoverDstRegs);
2693 
2694   SmallVector<Register, 4> PartRegs, LeftoverRegs;
2695 
2696   // Insert code to extract the incoming values in each predecessor block.
2697   for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
2698     PartRegs.clear();
2699     LeftoverRegs.clear();
2700 
2701     Register SrcReg = MI.getOperand(I).getReg();
2702     MachineBasicBlock &OpMBB = *MI.getOperand(I + 1).getMBB();
2703     MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminator());
2704 
2705     LLT Unused;
2706     if (!extractParts(SrcReg, PhiTy, NarrowTy, Unused, PartRegs,
2707                       LeftoverRegs))
2708       return UnableToLegalize;
2709 
2710     // Add the newly created operand splits to the existing instructions. The
2711     // odd-sized pieces are ordered after the requested NarrowTy sized
2712     // pieces.
2713     for (int J = 0; J != TotalNumParts; ++J) {
2714       MachineInstrBuilder MIB = NewInsts[J];
2715       MIB.addUse(J < NumParts ? PartRegs[J] : LeftoverRegs[J - NumParts]);
2716       MIB.addMBB(&OpMBB);
2717     }
2718   }
2719 
2720   MI.eraseFromParent();
2721   return Legalized;
2722 }
2723 
2724 LegalizerHelper::LegalizeResult
2725 LegalizerHelper::fewerElementsVectorUnmergeValues(MachineInstr &MI,
2726                                                   unsigned TypeIdx,
2727                                                   LLT NarrowTy) {
2728   if (TypeIdx != 1)
2729     return UnableToLegalize;
2730 
2731   const int NumDst = MI.getNumOperands() - 1;
2732   const Register SrcReg = MI.getOperand(NumDst).getReg();
2733   LLT SrcTy = MRI.getType(SrcReg);
2734 
2735   LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
2736 
2737   // TODO: Create sequence of extracts.
2738   if (DstTy == NarrowTy)
2739     return UnableToLegalize;
2740 
2741   LLT GCDTy = getGCDType(SrcTy, NarrowTy);
2742   if (DstTy == GCDTy) {
2743     // This would just be a copy of the same unmerge.
2744     // TODO: Create extracts, pad with undef and create intermediate merges.
2745     return UnableToLegalize;
2746   }
2747 
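  // Unmerge the source into GCDTy pieces first, then unmerge each of those
  // pieces into the requested destination registers.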
2748   auto Unmerge = MIRBuilder.buildUnmerge(GCDTy, SrcReg);
2749   const int NumUnmerge = Unmerge->getNumOperands() - 1;
2750   const int PartsPerUnmerge = NumDst / NumUnmerge;
2751 
2752   for (int I = 0; I != NumUnmerge; ++I) {
2753     auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_UNMERGE_VALUES);
2754 
2755     for (int J = 0; J != PartsPerUnmerge; ++J)
2756       MIB.addDef(MI.getOperand(I * PartsPerUnmerge + J).getReg());
2757     MIB.addUse(Unmerge.getReg(I));
2758   }
2759 
2760   MI.eraseFromParent();
2761   return Legalized;
2762 }
2763 
2764 LegalizerHelper::LegalizeResult
2765 LegalizerHelper::reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx,
2766                                       LLT NarrowTy) {
2767   // FIXME: Don't know how to handle secondary types yet.
2768   if (TypeIdx != 0)
2769     return UnableToLegalize;
2770 
2771   MachineMemOperand *MMO = *MI.memoperands_begin();
2772 
2773   // This implementation doesn't work for atomics. Give up instead of doing
2774   // something invalid.
2775   if (MMO->getOrdering() != AtomicOrdering::NotAtomic ||
2776       MMO->getFailureOrdering() != AtomicOrdering::NotAtomic)
2777     return UnableToLegalize;
2778 
2779   bool IsLoad = MI.getOpcode() == TargetOpcode::G_LOAD;
2780   Register ValReg = MI.getOperand(0).getReg();
2781   Register AddrReg = MI.getOperand(1).getReg();
2782   LLT ValTy = MRI.getType(ValReg);
2783 
2784   int NumParts = -1;
2785   int NumLeftover = -1;
2786   LLT LeftoverTy;
2787   SmallVector<Register, 8> NarrowRegs, NarrowLeftoverRegs;
2788   if (IsLoad) {
2789     std::tie(NumParts, NumLeftover) = getNarrowTypeBreakDown(ValTy, NarrowTy, LeftoverTy);
2790   } else {
2791     if (extractParts(ValReg, ValTy, NarrowTy, LeftoverTy, NarrowRegs,
2792                      NarrowLeftoverRegs)) {
2793       NumParts = NarrowRegs.size();
2794       NumLeftover = NarrowLeftoverRegs.size();
2795     }
2796   }
2797 
2798   if (NumParts == -1)
2799     return UnableToLegalize;
2800 
2801   const LLT OffsetTy = LLT::scalar(MRI.getType(AddrReg).getScalarSizeInBits());
2802 
2803   unsigned TotalSize = ValTy.getSizeInBits();
2804 
2805   // Split the load/store into PartTy sized pieces starting at Offset. If this
2806   // is a load, return the new registers in ValRegs. For a store, each element
2807   // of ValRegs should be PartTy. Returns the next offset that needs to be
2808   // handled.
2809   auto splitTypePieces = [=](LLT PartTy, SmallVectorImpl<Register> &ValRegs,
2810                              unsigned Offset) -> unsigned {
2811     MachineFunction &MF = MIRBuilder.getMF();
2812     unsigned PartSize = PartTy.getSizeInBits();
2813     for (unsigned Idx = 0, E = NumParts; Idx != E && Offset < TotalSize;
2814          Offset += PartSize, ++Idx) {
2815       unsigned ByteSize = PartSize / 8;
2816       unsigned ByteOffset = Offset / 8;
2817       Register NewAddrReg;
2818 
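      // Address this piece at a byte offset from the base pointer, with the
      // memory operand narrowed to the piece.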
2819       MIRBuilder.materializeGEP(NewAddrReg, AddrReg, OffsetTy, ByteOffset);
2820 
2821       MachineMemOperand *NewMMO =
2822         MF.getMachineMemOperand(MMO, ByteOffset, ByteSize);
2823 
2824       if (IsLoad) {
2825         Register Dst = MRI.createGenericVirtualRegister(PartTy);
2826         ValRegs.push_back(Dst);
2827         MIRBuilder.buildLoad(Dst, NewAddrReg, *NewMMO);
2828       } else {
2829         MIRBuilder.buildStore(ValRegs[Idx], NewAddrReg, *NewMMO);
2830       }
2831     }
2832 
2833     return Offset;
2834   };
2835 
2836   unsigned HandledOffset = splitTypePieces(NarrowTy, NarrowRegs, 0);
2837 
2838   // Handle the rest of the register if this isn't an even type breakdown.
2839   if (LeftoverTy.isValid())
2840     splitTypePieces(LeftoverTy, NarrowLeftoverRegs, HandledOffset);
2841 
2842   if (IsLoad) {
2843     insertParts(ValReg, ValTy, NarrowTy, NarrowRegs,
2844                 LeftoverTy, NarrowLeftoverRegs);
2845   }
2846 
2847   MI.eraseFromParent();
2848   return Legalized;
2849 }
2850 
2851 LegalizerHelper::LegalizeResult
2852 LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
2853                                      LLT NarrowTy) {
2854   using namespace TargetOpcode;
2855 
2856   MIRBuilder.setInstr(MI);
2857   switch (MI.getOpcode()) {
2858   case G_IMPLICIT_DEF:
2859     return fewerElementsVectorImplicitDef(MI, TypeIdx, NarrowTy);
2860   case G_AND:
2861   case G_OR:
2862   case G_XOR:
2863   case G_ADD:
2864   case G_SUB:
2865   case G_MUL:
2866   case G_SMULH:
2867   case G_UMULH:
2868   case G_FADD:
2869   case G_FMUL:
2870   case G_FSUB:
2871   case G_FNEG:
2872   case G_FABS:
2873   case G_FCANONICALIZE:
2874   case G_FDIV:
2875   case G_FREM:
2876   case G_FMA:
2877   case G_FMAD:
2878   case G_FPOW:
2879   case G_FEXP:
2880   case G_FEXP2:
2881   case G_FLOG:
2882   case G_FLOG2:
2883   case G_FLOG10:
2884   case G_FNEARBYINT:
2885   case G_FCEIL:
2886   case G_FFLOOR:
2887   case G_FRINT:
2888   case G_INTRINSIC_ROUND:
2889   case G_INTRINSIC_TRUNC:
2890   case G_FCOS:
2891   case G_FSIN:
2892   case G_FSQRT:
2893   case G_BSWAP:
2894   case G_BITREVERSE:
2895   case G_SDIV:
2896   case G_SMIN:
2897   case G_SMAX:
2898   case G_UMIN:
2899   case G_UMAX:
2900   case G_FMINNUM:
2901   case G_FMAXNUM:
2902   case G_FMINNUM_IEEE:
2903   case G_FMAXNUM_IEEE:
2904   case G_FMINIMUM:
2905   case G_FMAXIMUM:
2906     return fewerElementsVectorBasic(MI, TypeIdx, NarrowTy);
2907   case G_SHL:
2908   case G_LSHR:
2909   case G_ASHR:
2910   case G_CTLZ:
2911   case G_CTLZ_ZERO_UNDEF:
2912   case G_CTTZ:
2913   case G_CTTZ_ZERO_UNDEF:
2914   case G_CTPOP:
2915   case G_FCOPYSIGN:
2916     return fewerElementsVectorMultiEltType(MI, TypeIdx, NarrowTy);
2917   case G_ZEXT:
2918   case G_SEXT:
2919   case G_ANYEXT:
2920   case G_FPEXT:
2921   case G_FPTRUNC:
2922   case G_SITOFP:
2923   case G_UITOFP:
2924   case G_FPTOSI:
2925   case G_FPTOUI:
2926   case G_INTTOPTR:
2927   case G_PTRTOINT:
2928   case G_ADDRSPACE_CAST:
2929     return fewerElementsVectorCasts(MI, TypeIdx, NarrowTy);
2930   case G_ICMP:
2931   case G_FCMP:
2932     return fewerElementsVectorCmp(MI, TypeIdx, NarrowTy);
2933   case G_SELECT:
2934     return fewerElementsVectorSelect(MI, TypeIdx, NarrowTy);
2935   case G_PHI:
2936     return fewerElementsVectorPhi(MI, TypeIdx, NarrowTy);
2937   case G_UNMERGE_VALUES:
2938     return fewerElementsVectorUnmergeValues(MI, TypeIdx, NarrowTy);
2939   case G_LOAD:
2940   case G_STORE:
2941     return reduceLoadStoreWidth(MI, TypeIdx, NarrowTy);
2942   default:
2943     return UnableToLegalize;
2944   }
2945 }
2946 
2947 LegalizerHelper::LegalizeResult
2948 LegalizerHelper::narrowScalarShiftByConstant(MachineInstr &MI, const APInt &Amt,
2949                                              const LLT HalfTy, const LLT AmtTy) {
2950 
2951   Register InL = MRI.createGenericVirtualRegister(HalfTy);
2952   Register InH = MRI.createGenericVirtualRegister(HalfTy);
2953   MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1).getReg());
2954 
2955   if (Amt.isNullValue()) {
2956     MIRBuilder.buildMerge(MI.getOperand(0).getReg(), {InL, InH});
2957     MI.eraseFromParent();
2958     return Legalized;
2959   }
2960 
2961   LLT NVT = HalfTy;
2962   unsigned NVTBits = HalfTy.getSizeInBits();
2963   unsigned VTBits = 2 * NVTBits;
2964 
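  // For each opcode there are four cases, based on the constant amount:
  // greater than the full width, greater than the half width, exactly the
  // half width, and less than the half width. Only in the last case does a
  // result half combine bits from both input halves.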
2965   SrcOp Lo(Register(0)), Hi(Register(0));
2966   if (MI.getOpcode() == TargetOpcode::G_SHL) {
2967     if (Amt.ugt(VTBits)) {
2968       Lo = Hi = MIRBuilder.buildConstant(NVT, 0);
2969     } else if (Amt.ugt(NVTBits)) {
2970       Lo = MIRBuilder.buildConstant(NVT, 0);
2971       Hi = MIRBuilder.buildShl(NVT, InL,
2972                                MIRBuilder.buildConstant(AmtTy, Amt - NVTBits));
2973     } else if (Amt == NVTBits) {
2974       Lo = MIRBuilder.buildConstant(NVT, 0);
2975       Hi = InL;
2976     } else {
2977       Lo = MIRBuilder.buildShl(NVT, InL, MIRBuilder.buildConstant(AmtTy, Amt));
2978       auto OrLHS =
2979           MIRBuilder.buildShl(NVT, InH, MIRBuilder.buildConstant(AmtTy, Amt));
2980       auto OrRHS = MIRBuilder.buildLShr(
2981           NVT, InL, MIRBuilder.buildConstant(AmtTy, -Amt + NVTBits));
2982       Hi = MIRBuilder.buildOr(NVT, OrLHS, OrRHS);
2983     }
2984   } else if (MI.getOpcode() == TargetOpcode::G_LSHR) {
2985     if (Amt.ugt(VTBits)) {
2986       Lo = Hi = MIRBuilder.buildConstant(NVT, 0);
2987     } else if (Amt.ugt(NVTBits)) {
2988       Lo = MIRBuilder.buildLShr(NVT, InH,
2989                                 MIRBuilder.buildConstant(AmtTy, Amt - NVTBits));
2990       Hi = MIRBuilder.buildConstant(NVT, 0);
2991     } else if (Amt == NVTBits) {
2992       Lo = InH;
2993       Hi = MIRBuilder.buildConstant(NVT, 0);
2994     } else {
2995       auto ShiftAmtConst = MIRBuilder.buildConstant(AmtTy, Amt);
2996 
2997       auto OrLHS = MIRBuilder.buildLShr(NVT, InL, ShiftAmtConst);
2998       auto OrRHS = MIRBuilder.buildShl(
2999           NVT, InH, MIRBuilder.buildConstant(AmtTy, -Amt + NVTBits));
3000 
3001       Lo = MIRBuilder.buildOr(NVT, OrLHS, OrRHS);
3002       Hi = MIRBuilder.buildLShr(NVT, InH, ShiftAmtConst);
3003     }
3004   } else {
3005     if (Amt.ugt(VTBits)) {
3006       Hi = Lo = MIRBuilder.buildAShr(
3007           NVT, InH, MIRBuilder.buildConstant(AmtTy, NVTBits - 1));
3008     } else if (Amt.ugt(NVTBits)) {
3009       Lo = MIRBuilder.buildAShr(NVT, InH,
3010                                 MIRBuilder.buildConstant(AmtTy, Amt - NVTBits));
3011       Hi = MIRBuilder.buildAShr(NVT, InH,
3012                                 MIRBuilder.buildConstant(AmtTy, NVTBits - 1));
3013     } else if (Amt == NVTBits) {
3014       Lo = InH;
3015       Hi = MIRBuilder.buildAShr(NVT, InH,
3016                                 MIRBuilder.buildConstant(AmtTy, NVTBits - 1));
3017     } else {
3018       auto ShiftAmtConst = MIRBuilder.buildConstant(AmtTy, Amt);
3019 
3020       auto OrLHS = MIRBuilder.buildLShr(NVT, InL, ShiftAmtConst);
3021       auto OrRHS = MIRBuilder.buildShl(
3022           NVT, InH, MIRBuilder.buildConstant(AmtTy, -Amt + NVTBits));
3023 
3024       Lo = MIRBuilder.buildOr(NVT, OrLHS, OrRHS);
3025       Hi = MIRBuilder.buildAShr(NVT, InH, ShiftAmtConst);
3026     }
3027   }
3028 
3029   MIRBuilder.buildMerge(MI.getOperand(0).getReg(), {Lo.getReg(), Hi.getReg()});
3030   MI.eraseFromParent();
3031 
3032   return Legalized;
3033 }
3034 
3035 // TODO: Optimize if constant shift amount.
3036 LegalizerHelper::LegalizeResult
3037 LegalizerHelper::narrowScalarShift(MachineInstr &MI, unsigned TypeIdx,
3038                                    LLT RequestedTy) {
3039   if (TypeIdx == 1) {
3040     Observer.changingInstr(MI);
3041     narrowScalarSrc(MI, RequestedTy, 2);
3042     Observer.changedInstr(MI);
3043     return Legalized;
3044   }
3045 
3046   Register DstReg = MI.getOperand(0).getReg();
3047   LLT DstTy = MRI.getType(DstReg);
3048   if (DstTy.isVector())
3049     return UnableToLegalize;
3050 
3051   Register Amt = MI.getOperand(2).getReg();
3052   LLT ShiftAmtTy = MRI.getType(Amt);
3053   const unsigned DstEltSize = DstTy.getScalarSizeInBits();
3054   if (DstEltSize % 2 != 0)
3055     return UnableToLegalize;
3056 
3057   // Ignore the input type. We can only go to exactly half the size of the
3058   // input. If that isn't small enough, the resulting pieces will be further
3059   // legalized.
3060   const unsigned NewBitSize = DstEltSize / 2;
3061   const LLT HalfTy = LLT::scalar(NewBitSize);
3062   const LLT CondTy = LLT::scalar(1);
3063 
3064   if (const MachineInstr *KShiftAmt =
3065           getOpcodeDef(TargetOpcode::G_CONSTANT, Amt, MRI)) {
3066     return narrowScalarShiftByConstant(
3067         MI, KShiftAmt->getOperand(1).getCImm()->getValue(), HalfTy, ShiftAmtTy);
3068   }
3069 
3070   // TODO: Expand with known bits.
3071 
3072   // Handle the fully general expansion by an unknown amount.
3073   auto NewBits = MIRBuilder.buildConstant(ShiftAmtTy, NewBitSize);
3074 
3075   Register InL = MRI.createGenericVirtualRegister(HalfTy);
3076   Register InH = MRI.createGenericVirtualRegister(HalfTy);
3077   MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1).getReg());
3078 
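  // AmtExcess is how far the amount exceeds the half width (used when the
  // shift crosses halves) and AmtLack is how far it falls short of it (used
  // to capture the bits that move between halves). IsShort selects the
  // within-half expansion; IsZero handles a shift amount of zero, where the
  // short path would otherwise shift by a full half width (undefined), by
  // passing the affected input half through unchanged.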
3079   auto AmtExcess = MIRBuilder.buildSub(ShiftAmtTy, Amt, NewBits);
3080   auto AmtLack = MIRBuilder.buildSub(ShiftAmtTy, NewBits, Amt);
3081 
3082   auto Zero = MIRBuilder.buildConstant(ShiftAmtTy, 0);
3083   auto IsShort = MIRBuilder.buildICmp(ICmpInst::ICMP_ULT, CondTy, Amt, NewBits);
3084   auto IsZero = MIRBuilder.buildICmp(ICmpInst::ICMP_EQ, CondTy, Amt, Zero);
3085 
3086   Register ResultRegs[2];
3087   switch (MI.getOpcode()) {
3088   case TargetOpcode::G_SHL: {
3089     // Short: ShAmt < NewBitSize
3090     auto LoS = MIRBuilder.buildShl(HalfTy, InL, Amt);
3091 
3092     auto LoOr = MIRBuilder.buildLShr(HalfTy, InL, AmtLack);
3093     auto HiOr = MIRBuilder.buildShl(HalfTy, InH, Amt);
3094     auto HiS = MIRBuilder.buildOr(HalfTy, LoOr, HiOr);
3095 
3096     // Long: ShAmt >= NewBitSize
3097     auto LoL = MIRBuilder.buildConstant(HalfTy, 0);         // Lo part is zero.
3098     auto HiL = MIRBuilder.buildShl(HalfTy, InL, AmtExcess); // Hi from Lo part.
3099 
3100     auto Lo = MIRBuilder.buildSelect(HalfTy, IsShort, LoS, LoL);
3101     auto Hi = MIRBuilder.buildSelect(
3102         HalfTy, IsZero, InH, MIRBuilder.buildSelect(HalfTy, IsShort, HiS, HiL));
3103 
3104     ResultRegs[0] = Lo.getReg(0);
3105     ResultRegs[1] = Hi.getReg(0);
3106     break;
3107   }
3108   case TargetOpcode::G_LSHR:
3109   case TargetOpcode::G_ASHR: {
3110     // Short: ShAmt < NewBitSize
3111     auto HiS = MIRBuilder.buildInstr(MI.getOpcode(), {HalfTy}, {InH, Amt});
3112 
3113     auto LoOr = MIRBuilder.buildLShr(HalfTy, InL, Amt);
3114     auto HiOr = MIRBuilder.buildShl(HalfTy, InH, AmtLack);
3115     auto LoS = MIRBuilder.buildOr(HalfTy, LoOr, HiOr);
3116 
3117     // Long: ShAmt >= NewBitSize
3118     MachineInstrBuilder HiL;
3119     if (MI.getOpcode() == TargetOpcode::G_LSHR) {
3120       HiL = MIRBuilder.buildConstant(HalfTy, 0);            // Hi part is zero.
3121     } else {
3122       auto ShiftAmt = MIRBuilder.buildConstant(ShiftAmtTy, NewBitSize - 1);
3123       HiL = MIRBuilder.buildAShr(HalfTy, InH, ShiftAmt);    // Sign of Hi part.
3124     }
3125     auto LoL = MIRBuilder.buildInstr(MI.getOpcode(), {HalfTy},
3126                                      {InH, AmtExcess});     // Lo from Hi part.
3127 
3128     auto Lo = MIRBuilder.buildSelect(
3129         HalfTy, IsZero, InL, MIRBuilder.buildSelect(HalfTy, IsShort, LoS, LoL));
3130 
3131     auto Hi = MIRBuilder.buildSelect(HalfTy, IsShort, HiS, HiL);
3132 
3133     ResultRegs[0] = Lo.getReg(0);
3134     ResultRegs[1] = Hi.getReg(0);
3135     break;
3136   }
3137   default:
3138     llvm_unreachable("not a shift");
3139   }
3140 
3141   MIRBuilder.buildMerge(DstReg, ResultRegs);
3142   MI.eraseFromParent();
3143   return Legalized;
3144 }
3145 
3146 LegalizerHelper::LegalizeResult
3147 LegalizerHelper::moreElementsVectorPhi(MachineInstr &MI, unsigned TypeIdx,
3148                                        LLT MoreTy) {
3149   assert(TypeIdx == 0 && "Expecting only Idx 0");
3150 
3151   Observer.changingInstr(MI);
3152   for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
3153     MachineBasicBlock &OpMBB = *MI.getOperand(I + 1).getMBB();
3154     MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminator());
3155     moreElementsVectorSrc(MI, MoreTy, I);
3156   }
3157 
3158   MachineBasicBlock &MBB = *MI.getParent();
3159   MIRBuilder.setInsertPt(MBB, --MBB.getFirstNonPHI());
3160   moreElementsVectorDst(MI, MoreTy, 0);
3161   Observer.changedInstr(MI);
3162   return Legalized;
3163 }
3164 
3165 LegalizerHelper::LegalizeResult
3166 LegalizerHelper::moreElementsVector(MachineInstr &MI, unsigned TypeIdx,
3167                                     LLT MoreTy) {
3168   MIRBuilder.setInstr(MI);
3169   unsigned Opc = MI.getOpcode();
3170   switch (Opc) {
3171   case TargetOpcode::G_IMPLICIT_DEF:
3172   case TargetOpcode::G_LOAD: {
3173     if (TypeIdx != 0)
3174       return UnableToLegalize;
3175     Observer.changingInstr(MI);
3176     moreElementsVectorDst(MI, MoreTy, 0);
3177     Observer.changedInstr(MI);
3178     return Legalized;
3179   }
3180   case TargetOpcode::G_STORE:
3181     if (TypeIdx != 0)
3182       return UnableToLegalize;
3183     Observer.changingInstr(MI);
3184     moreElementsVectorSrc(MI, MoreTy, 0);
3185     Observer.changedInstr(MI);
3186     return Legalized;
3187   case TargetOpcode::G_AND:
3188   case TargetOpcode::G_OR:
3189   case TargetOpcode::G_XOR:
3190   case TargetOpcode::G_SMIN:
3191   case TargetOpcode::G_SMAX:
3192   case TargetOpcode::G_UMIN:
3193   case TargetOpcode::G_UMAX: {
3194     Observer.changingInstr(MI);
3195     moreElementsVectorSrc(MI, MoreTy, 1);
3196     moreElementsVectorSrc(MI, MoreTy, 2);
3197     moreElementsVectorDst(MI, MoreTy, 0);
3198     Observer.changedInstr(MI);
3199     return Legalized;
3200   }
3201   case TargetOpcode::G_EXTRACT:
3202     if (TypeIdx != 1)
3203       return UnableToLegalize;
3204     Observer.changingInstr(MI);
3205     moreElementsVectorSrc(MI, MoreTy, 1);
3206     Observer.changedInstr(MI);
3207     return Legalized;
3208   case TargetOpcode::G_INSERT:
3209     if (TypeIdx != 0)
3210       return UnableToLegalize;
3211     Observer.changingInstr(MI);
3212     moreElementsVectorSrc(MI, MoreTy, 1);
3213     moreElementsVectorDst(MI, MoreTy, 0);
3214     Observer.changedInstr(MI);
3215     return Legalized;
3216   case TargetOpcode::G_SELECT:
3217     if (TypeIdx != 0)
3218       return UnableToLegalize;
3219     if (MRI.getType(MI.getOperand(1).getReg()).isVector())
3220       return UnableToLegalize;
3221 
3222     Observer.changingInstr(MI);
3223     moreElementsVectorSrc(MI, MoreTy, 2);
3224     moreElementsVectorSrc(MI, MoreTy, 3);
3225     moreElementsVectorDst(MI, MoreTy, 0);
3226     Observer.changedInstr(MI);
3227     return Legalized;
3228   case TargetOpcode::G_UNMERGE_VALUES: {
3229     if (TypeIdx != 1)
3230       return UnableToLegalize;
3231 
3232     LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
3233     int NumDst = MI.getNumOperands() - 1;
3234     moreElementsVectorSrc(MI, MoreTy, NumDst);
3235 
3236     auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_UNMERGE_VALUES);
3237     for (int I = 0; I != NumDst; ++I)
3238       MIB.addDef(MI.getOperand(I).getReg());
3239 
3240     int NewNumDst = MoreTy.getSizeInBits() / DstTy.getSizeInBits();
3241     for (int I = NumDst; I != NewNumDst; ++I)
3242       MIB.addDef(MRI.createGenericVirtualRegister(DstTy));
3243 
3244     MIB.addUse(MI.getOperand(NumDst).getReg());
3245     MI.eraseFromParent();
3246     return Legalized;
3247   }
3248   case TargetOpcode::G_PHI:
3249     return moreElementsVectorPhi(MI, TypeIdx, MoreTy);
3250   default:
3251     return UnableToLegalize;
3252   }
3253 }
3254 
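// Long multiplication of Src1Regs by Src2Regs, one NarrowTy piece at a time.
// The result piece at index DstIdx accumulates the low halves of the
// Src1[DstIdx - i] * Src2[i] products, the high halves of the products that
// contributed to the previous index, and the carries from the previous
// index's additions.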
3255 void LegalizerHelper::multiplyRegisters(SmallVectorImpl<Register> &DstRegs,
3256                                         ArrayRef<Register> Src1Regs,
3257                                         ArrayRef<Register> Src2Regs,
3258                                         LLT NarrowTy) {
3259   MachineIRBuilder &B = MIRBuilder;
3260   unsigned SrcParts = Src1Regs.size();
3261   unsigned DstParts = DstRegs.size();
3262 
3263   unsigned DstIdx = 0; // Low bits of the result.
3264   Register FactorSum =
3265       B.buildMul(NarrowTy, Src1Regs[DstIdx], Src2Regs[DstIdx]).getReg(0);
3266   DstRegs[DstIdx] = FactorSum;
3267 
3268   unsigned CarrySumPrevDstIdx;
3269   SmallVector<Register, 4> Factors;
3270 
3271   for (DstIdx = 1; DstIdx < DstParts; DstIdx++) {
3272     // Collect low parts of muls for DstIdx.
3273     for (unsigned i = DstIdx + 1 < SrcParts ? 0 : DstIdx - SrcParts + 1;
3274          i <= std::min(DstIdx, SrcParts - 1); ++i) {
3275       MachineInstrBuilder Mul =
3276           B.buildMul(NarrowTy, Src1Regs[DstIdx - i], Src2Regs[i]);
3277       Factors.push_back(Mul.getReg(0));
3278     }
3279     // Collect high parts of muls from previous DstIdx.
3280     for (unsigned i = DstIdx < SrcParts ? 0 : DstIdx - SrcParts;
3281          i <= std::min(DstIdx - 1, SrcParts - 1); ++i) {
3282       MachineInstrBuilder Umulh =
3283           B.buildUMulH(NarrowTy, Src1Regs[DstIdx - 1 - i], Src2Regs[i]);
3284       Factors.push_back(Umulh.getReg(0));
3285     }
    // Add CarrySum from additions calculated for previous DstIdx.
3287     if (DstIdx != 1) {
3288       Factors.push_back(CarrySumPrevDstIdx);
3289     }
3290 
3291     Register CarrySum;
3292     // Add all factors and accumulate all carries into CarrySum.
3293     if (DstIdx != DstParts - 1) {
3294       MachineInstrBuilder Uaddo =
3295           B.buildUAddo(NarrowTy, LLT::scalar(1), Factors[0], Factors[1]);
3296       FactorSum = Uaddo.getReg(0);
3297       CarrySum = B.buildZExt(NarrowTy, Uaddo.getReg(1)).getReg(0);
3298       for (unsigned i = 2; i < Factors.size(); ++i) {
3299         MachineInstrBuilder Uaddo =
3300             B.buildUAddo(NarrowTy, LLT::scalar(1), FactorSum, Factors[i]);
3301         FactorSum = Uaddo.getReg(0);
3302         MachineInstrBuilder Carry = B.buildZExt(NarrowTy, Uaddo.getReg(1));
3303         CarrySum = B.buildAdd(NarrowTy, CarrySum, Carry).getReg(0);
3304       }
3305     } else {
      // Since the value for the next index is not calculated, neither is
      // CarrySum.
3307       FactorSum = B.buildAdd(NarrowTy, Factors[0], Factors[1]).getReg(0);
3308       for (unsigned i = 2; i < Factors.size(); ++i)
3309         FactorSum = B.buildAdd(NarrowTy, FactorSum, Factors[i]).getReg(0);
3310     }
3311 
3312     CarrySumPrevDstIdx = CarrySum;
3313     DstRegs[DstIdx] = FactorSum;
3314     Factors.clear();
3315   }
3316 }
3317 
3318 LegalizerHelper::LegalizeResult
3319 LegalizerHelper::narrowScalarMul(MachineInstr &MI, LLT NarrowTy) {
3320   Register DstReg = MI.getOperand(0).getReg();
3321   Register Src1 = MI.getOperand(1).getReg();
3322   Register Src2 = MI.getOperand(2).getReg();
3323 
3324   LLT Ty = MRI.getType(DstReg);
3325   if (Ty.isVector())
3326     return UnableToLegalize;
3327 
3328   unsigned SrcSize = MRI.getType(Src1).getSizeInBits();
3329   unsigned DstSize = Ty.getSizeInBits();
3330   unsigned NarrowSize = NarrowTy.getSizeInBits();
3331   if (DstSize % NarrowSize != 0 || SrcSize % NarrowSize != 0)
3332     return UnableToLegalize;
3333 
3334   unsigned NumDstParts = DstSize / NarrowSize;
3335   unsigned NumSrcParts = SrcSize / NarrowSize;
3336   bool IsMulHigh = MI.getOpcode() == TargetOpcode::G_UMULH;
3337   unsigned DstTmpParts = NumDstParts * (IsMulHigh ? 2 : 1);
3338 
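  // For G_UMULH, compute all the pieces of the double-width product; only the
  // high half of the pieces is kept below.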
3339   SmallVector<Register, 2> Src1Parts, Src2Parts, DstTmpRegs;
3340   extractParts(Src1, NarrowTy, NumSrcParts, Src1Parts);
3341   extractParts(Src2, NarrowTy, NumSrcParts, Src2Parts);
3342   DstTmpRegs.resize(DstTmpParts);
3343   multiplyRegisters(DstTmpRegs, Src1Parts, Src2Parts, NarrowTy);
3344 
3345   // Take only high half of registers if this is high mul.
3346   ArrayRef<Register> DstRegs(
3347       IsMulHigh ? &DstTmpRegs[DstTmpParts / 2] : &DstTmpRegs[0], NumDstParts);
3348   MIRBuilder.buildMerge(DstReg, DstRegs);
3349   MI.eraseFromParent();
3350   return Legalized;
3351 }
3352 
3353 LegalizerHelper::LegalizeResult
3354 LegalizerHelper::narrowScalarExtract(MachineInstr &MI, unsigned TypeIdx,
3355                                      LLT NarrowTy) {
3356   if (TypeIdx != 1)
3357     return UnableToLegalize;
3358 
3359   uint64_t NarrowSize = NarrowTy.getSizeInBits();
3360 
3361   int64_t SizeOp1 = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
3362   // FIXME: add support for when SizeOp1 isn't an exact multiple of
3363   // NarrowSize.
3364   if (SizeOp1 % NarrowSize != 0)
3365     return UnableToLegalize;
3366   int NumParts = SizeOp1 / NarrowSize;
3367 
3368   SmallVector<Register, 2> SrcRegs, DstRegs;
3369   SmallVector<uint64_t, 2> Indexes;
3370   extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs);
3371 
3372   Register OpReg = MI.getOperand(0).getReg();
3373   uint64_t OpStart = MI.getOperand(2).getImm();
3374   uint64_t OpSize = MRI.getType(OpReg).getSizeInBits();
3375   for (int i = 0; i < NumParts; ++i) {
3376     unsigned SrcStart = i * NarrowSize;
3377 
3378     if (SrcStart + NarrowSize <= OpStart || SrcStart >= OpStart + OpSize) {
3379       // No part of the extract uses this subregister, ignore it.
3380       continue;
3381     } else if (SrcStart == OpStart && NarrowTy == MRI.getType(OpReg)) {
3382       // The entire subregister is extracted, forward the value.
3383       DstRegs.push_back(SrcRegs[i]);
3384       continue;
3385     }
3386 
    // Determine how much of this narrow source piece overlaps the requested
    // extract, and at what offset within the piece the overlap begins.
3389     int64_t ExtractOffset;
3390     uint64_t SegSize;
3391     if (OpStart < SrcStart) {
3392       ExtractOffset = 0;
3393       SegSize = std::min(NarrowSize, OpStart + OpSize - SrcStart);
3394     } else {
3395       ExtractOffset = OpStart - SrcStart;
3396       SegSize = std::min(SrcStart + NarrowSize - OpStart, OpSize);
3397     }
3398 
3399     Register SegReg = SrcRegs[i];
3400     if (ExtractOffset != 0 || SegSize != NarrowSize) {
3401       // A genuine extract is needed.
3402       SegReg = MRI.createGenericVirtualRegister(LLT::scalar(SegSize));
3403       MIRBuilder.buildExtract(SegReg, SrcRegs[i], ExtractOffset);
3404     }
3405 
3406     DstRegs.push_back(SegReg);
3407   }
3408 
3409   Register DstReg = MI.getOperand(0).getReg();
  if (MRI.getType(DstReg).isVector())
3411     MIRBuilder.buildBuildVector(DstReg, DstRegs);
3412   else
3413     MIRBuilder.buildMerge(DstReg, DstRegs);
3414   MI.eraseFromParent();
3415   return Legalized;
3416 }
3417 
3418 LegalizerHelper::LegalizeResult
3419 LegalizerHelper::narrowScalarInsert(MachineInstr &MI, unsigned TypeIdx,
3420                                     LLT NarrowTy) {
3421   // FIXME: Don't know how to handle secondary types yet.
3422   if (TypeIdx != 0)
3423     return UnableToLegalize;
3424 
3425   uint64_t SizeOp0 = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
3426   uint64_t NarrowSize = NarrowTy.getSizeInBits();
3427 
3428   // FIXME: add support for when SizeOp0 isn't an exact multiple of
3429   // NarrowSize.
3430   if (SizeOp0 % NarrowSize != 0)
3431     return UnableToLegalize;
3432 
3433   int NumParts = SizeOp0 / NarrowSize;
3434 
3435   SmallVector<Register, 2> SrcRegs, DstRegs;
3436   SmallVector<uint64_t, 2> Indexes;
3437   extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs);
3438 
3439   Register OpReg = MI.getOperand(2).getReg();
3440   uint64_t OpStart = MI.getOperand(3).getImm();
3441   uint64_t OpSize = MRI.getType(OpReg).getSizeInBits();
3442   for (int i = 0; i < NumParts; ++i) {
3443     unsigned DstStart = i * NarrowSize;
3444 
3445     if (DstStart + NarrowSize <= OpStart || DstStart >= OpStart + OpSize) {
3446       // No part of the insert affects this subregister, forward the original.
3447       DstRegs.push_back(SrcRegs[i]);
3448       continue;
3449     } else if (DstStart == OpStart && NarrowTy == MRI.getType(OpReg)) {
3450       // The entire subregister is defined by this insert, forward the new
3451       // value.
3452       DstRegs.push_back(OpReg);
3453       continue;
3454     }
3455 
    // Determine how much of the inserted value lands in this narrow
    // destination piece, which part of the value that is, and where in the
    // piece it must be placed.
3458     int64_t ExtractOffset, InsertOffset;
3459     uint64_t SegSize;
3460     if (OpStart < DstStart) {
3461       InsertOffset = 0;
3462       ExtractOffset = DstStart - OpStart;
3463       SegSize = std::min(NarrowSize, OpStart + OpSize - DstStart);
3464     } else {
3465       InsertOffset = OpStart - DstStart;
3466       ExtractOffset = 0;
3467       SegSize =
3468         std::min(NarrowSize - InsertOffset, OpStart + OpSize - DstStart);
3469     }
3470 
3471     Register SegReg = OpReg;
3472     if (ExtractOffset != 0 || SegSize != OpSize) {
3473       // A genuine extract is needed.
3474       SegReg = MRI.createGenericVirtualRegister(LLT::scalar(SegSize));
3475       MIRBuilder.buildExtract(SegReg, OpReg, ExtractOffset);
3476     }
3477 
3478     Register DstReg = MRI.createGenericVirtualRegister(NarrowTy);
3479     MIRBuilder.buildInsert(DstReg, SrcRegs[i], SegReg, InsertOffset);
3480     DstRegs.push_back(DstReg);
3481   }
3482 
3483   assert(DstRegs.size() == (unsigned)NumParts && "not all parts covered");
3484   Register DstReg = MI.getOperand(0).getReg();
  if (MRI.getType(DstReg).isVector())
3486     MIRBuilder.buildBuildVector(DstReg, DstRegs);
3487   else
3488     MIRBuilder.buildMerge(DstReg, DstRegs);
3489   MI.eraseFromParent();
3490   return Legalized;
3491 }
3492 
3493 LegalizerHelper::LegalizeResult
3494 LegalizerHelper::narrowScalarBasic(MachineInstr &MI, unsigned TypeIdx,
3495                                    LLT NarrowTy) {
3496   Register DstReg = MI.getOperand(0).getReg();
3497   LLT DstTy = MRI.getType(DstReg);
3498 
3499   assert(MI.getNumOperands() == 3 && TypeIdx == 0);
3500 
3501   SmallVector<Register, 4> DstRegs, DstLeftoverRegs;
3502   SmallVector<Register, 4> Src0Regs, Src0LeftoverRegs;
3503   SmallVector<Register, 4> Src1Regs, Src1LeftoverRegs;
3504   LLT LeftoverTy;
3505   if (!extractParts(MI.getOperand(1).getReg(), DstTy, NarrowTy, LeftoverTy,
3506                     Src0Regs, Src0LeftoverRegs))
3507     return UnableToLegalize;
3508 
3509   LLT Unused;
3510   if (!extractParts(MI.getOperand(2).getReg(), DstTy, NarrowTy, Unused,
3511                     Src1Regs, Src1LeftoverRegs))
3512     llvm_unreachable("inconsistent extractParts result");
3513 
3514   for (unsigned I = 0, E = Src1Regs.size(); I != E; ++I) {
3515     auto Inst = MIRBuilder.buildInstr(MI.getOpcode(), {NarrowTy},
3516                                         {Src0Regs[I], Src1Regs[I]});
3517     DstRegs.push_back(Inst->getOperand(0).getReg());
3518   }
3519 
3520   for (unsigned I = 0, E = Src1LeftoverRegs.size(); I != E; ++I) {
3521     auto Inst = MIRBuilder.buildInstr(
3522       MI.getOpcode(),
3523       {LeftoverTy}, {Src0LeftoverRegs[I], Src1LeftoverRegs[I]});
3524     DstLeftoverRegs.push_back(Inst->getOperand(0).getReg());
3525   }
3526 
3527   insertParts(DstReg, DstTy, NarrowTy, DstRegs,
3528               LeftoverTy, DstLeftoverRegs);
3529 
3530   MI.eraseFromParent();
3531   return Legalized;
3532 }
3533 
3534 LegalizerHelper::LegalizeResult
3535 LegalizerHelper::narrowScalarSelect(MachineInstr &MI, unsigned TypeIdx,
3536                                     LLT NarrowTy) {
3537   if (TypeIdx != 0)
3538     return UnableToLegalize;
3539 
3540   Register CondReg = MI.getOperand(1).getReg();
3541   LLT CondTy = MRI.getType(CondReg);
3542   if (CondTy.isVector()) // TODO: Handle vselect
3543     return UnableToLegalize;
3544 
3545   Register DstReg = MI.getOperand(0).getReg();
3546   LLT DstTy = MRI.getType(DstReg);
3547 
3548   SmallVector<Register, 4> DstRegs, DstLeftoverRegs;
3549   SmallVector<Register, 4> Src1Regs, Src1LeftoverRegs;
3550   SmallVector<Register, 4> Src2Regs, Src2LeftoverRegs;
3551   LLT LeftoverTy;
3552   if (!extractParts(MI.getOperand(2).getReg(), DstTy, NarrowTy, LeftoverTy,
3553                     Src1Regs, Src1LeftoverRegs))
3554     return UnableToLegalize;
3555 
3556   LLT Unused;
3557   if (!extractParts(MI.getOperand(3).getReg(), DstTy, NarrowTy, Unused,
3558                     Src2Regs, Src2LeftoverRegs))
3559     llvm_unreachable("inconsistent extractParts result");
3560 
3561   for (unsigned I = 0, E = Src1Regs.size(); I != E; ++I) {
3562     auto Select = MIRBuilder.buildSelect(NarrowTy,
3563                                          CondReg, Src1Regs[I], Src2Regs[I]);
3564     DstRegs.push_back(Select->getOperand(0).getReg());
3565   }
3566 
3567   for (unsigned I = 0, E = Src1LeftoverRegs.size(); I != E; ++I) {
3568     auto Select = MIRBuilder.buildSelect(
3569       LeftoverTy, CondReg, Src1LeftoverRegs[I], Src2LeftoverRegs[I]);
3570     DstLeftoverRegs.push_back(Select->getOperand(0).getReg());
3571   }
3572 
3573   insertParts(DstReg, DstTy, NarrowTy, DstRegs,
3574               LeftoverTy, DstLeftoverRegs);
3575 
3576   MI.eraseFromParent();
3577   return Legalized;
3578 }
3579 
3580 LegalizerHelper::LegalizeResult
3581 LegalizerHelper::lowerBitCount(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
3582   unsigned Opc = MI.getOpcode();
3583   auto &TII = *MI.getMF()->getSubtarget().getInstrInfo();
3584   auto isSupported = [this](const LegalityQuery &Q) {
3585     auto QAction = LI.getAction(Q).Action;
3586     return QAction == Legal || QAction == Libcall || QAction == Custom;
3587   };
3588   switch (Opc) {
3589   default:
3590     return UnableToLegalize;
3591   case TargetOpcode::G_CTLZ_ZERO_UNDEF: {
3592     // This trivially expands to CTLZ.
3593     Observer.changingInstr(MI);
3594     MI.setDesc(TII.get(TargetOpcode::G_CTLZ));
3595     Observer.changedInstr(MI);
3596     return Legalized;
3597   }
3598   case TargetOpcode::G_CTLZ: {
3599     Register SrcReg = MI.getOperand(1).getReg();
3600     unsigned Len = Ty.getSizeInBits();
3601     if (isSupported({TargetOpcode::G_CTLZ_ZERO_UNDEF, {Ty, Ty}})) {
3602       // If CTLZ_ZERO_UNDEF is supported, emit that and a select for zero.
3603       auto MIBCtlzZU = MIRBuilder.buildInstr(TargetOpcode::G_CTLZ_ZERO_UNDEF,
3604                                              {Ty}, {SrcReg});
3605       auto MIBZero = MIRBuilder.buildConstant(Ty, 0);
3606       auto MIBLen = MIRBuilder.buildConstant(Ty, Len);
3607       auto MIBICmp = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, LLT::scalar(1),
3608                                           SrcReg, MIBZero);
3609       MIRBuilder.buildSelect(MI.getOperand(0).getReg(), MIBICmp, MIBLen,
3610                              MIBCtlzZU);
3611       MI.eraseFromParent();
3612       return Legalized;
3613     }
    // For now, we do this:
    // NewLen = NextPowerOf2(Len);
    // x = x | (x >> 1);
    // x = x | (x >> 2);
    // ...
    // x = x | (x >> 16);
    // x = x | (x >> 32); // for 64-bit input
    // ... continuing up to a shift of NewLen / 2
    // return Len - popcount(x);
3623     //
3624     // Ref: "Hacker's Delight" by Henry Warren
3625     Register Op = SrcReg;
3626     unsigned NewLen = PowerOf2Ceil(Len);
3627     for (unsigned i = 0; (1U << i) <= (NewLen / 2); ++i) {
3628       auto MIBShiftAmt = MIRBuilder.buildConstant(Ty, 1ULL << i);
3629       auto MIBOp = MIRBuilder.buildInstr(
3630           TargetOpcode::G_OR, {Ty},
3631           {Op, MIRBuilder.buildInstr(TargetOpcode::G_LSHR, {Ty},
3632                                      {Op, MIBShiftAmt})});
3633       Op = MIBOp->getOperand(0).getReg();
3634     }
3635     auto MIBPop = MIRBuilder.buildInstr(TargetOpcode::G_CTPOP, {Ty}, {Op});
3636     MIRBuilder.buildInstr(TargetOpcode::G_SUB, {MI.getOperand(0).getReg()},
3637                           {MIRBuilder.buildConstant(Ty, Len), MIBPop});
3638     MI.eraseFromParent();
3639     return Legalized;
3640   }
3641   case TargetOpcode::G_CTTZ_ZERO_UNDEF: {
3642     // This trivially expands to CTTZ.
3643     Observer.changingInstr(MI);
3644     MI.setDesc(TII.get(TargetOpcode::G_CTTZ));
3645     Observer.changedInstr(MI);
3646     return Legalized;
3647   }
3648   case TargetOpcode::G_CTTZ: {
3649     Register SrcReg = MI.getOperand(1).getReg();
3650     unsigned Len = Ty.getSizeInBits();
3651     if (isSupported({TargetOpcode::G_CTTZ_ZERO_UNDEF, {Ty, Ty}})) {
3652       // If CTTZ_ZERO_UNDEF is legal or custom, emit that and a select with
3653       // zero.
3654       auto MIBCttzZU = MIRBuilder.buildInstr(TargetOpcode::G_CTTZ_ZERO_UNDEF,
3655                                              {Ty}, {SrcReg});
3656       auto MIBZero = MIRBuilder.buildConstant(Ty, 0);
3657       auto MIBLen = MIRBuilder.buildConstant(Ty, Len);
3658       auto MIBICmp = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, LLT::scalar(1),
3659                                           SrcReg, MIBZero);
3660       MIRBuilder.buildSelect(MI.getOperand(0).getReg(), MIBICmp, MIBLen,
3661                              MIBCttzZU);
3662       MI.eraseFromParent();
3663       return Legalized;
3664     }
    // For now, we use: { return popcount(~x & (x - 1)); }
    // unless the target has ctlz but not ctpop, in which case we use:
    // { return Len - nlz(~x & (x - 1)); }
3668     // Ref: "Hacker's Delight" by Henry Warren
3669     auto MIBCstNeg1 = MIRBuilder.buildConstant(Ty, -1);
3670     auto MIBNot =
3671         MIRBuilder.buildInstr(TargetOpcode::G_XOR, {Ty}, {SrcReg, MIBCstNeg1});
3672     auto MIBTmp = MIRBuilder.buildInstr(
3673         TargetOpcode::G_AND, {Ty},
3674         {MIBNot, MIRBuilder.buildInstr(TargetOpcode::G_ADD, {Ty},
3675                                        {SrcReg, MIBCstNeg1})});
3676     if (!isSupported({TargetOpcode::G_CTPOP, {Ty, Ty}}) &&
3677         isSupported({TargetOpcode::G_CTLZ, {Ty, Ty}})) {
3678       auto MIBCstLen = MIRBuilder.buildConstant(Ty, Len);
3679       MIRBuilder.buildInstr(
3680           TargetOpcode::G_SUB, {MI.getOperand(0).getReg()},
3681           {MIBCstLen,
3682            MIRBuilder.buildInstr(TargetOpcode::G_CTLZ, {Ty}, {MIBTmp})});
3683       MI.eraseFromParent();
3684       return Legalized;
3685     }
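    // Otherwise reuse this instruction as a G_CTPOP of (~x & (x - 1)).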
3686     MI.setDesc(TII.get(TargetOpcode::G_CTPOP));
3687     MI.getOperand(1).setReg(MIBTmp->getOperand(0).getReg());
3688     return Legalized;
3689   }
3690   }
3691 }
3692 
3693 // Expand s32 = G_UITOFP s64 using bit operations to an IEEE float
3694 // representation.
3695 LegalizerHelper::LegalizeResult
3696 LegalizerHelper::lowerU64ToF32BitOps(MachineInstr &MI) {
3697   Register Dst = MI.getOperand(0).getReg();
3698   Register Src = MI.getOperand(1).getReg();
3699   const LLT S64 = LLT::scalar(64);
3700   const LLT S32 = LLT::scalar(32);
3701   const LLT S1 = LLT::scalar(1);
3702 
3703   assert(MRI.getType(Src) == S64 && MRI.getType(Dst) == S32);
3704 
3705   // unsigned cul2f(ulong u) {
3706   //   uint lz = clz(u);
3707   //   uint e = (u != 0) ? 127U + 63U - lz : 0;
3708   //   u = (u << lz) & 0x7fffffffffffffffUL;
3709   //   ulong t = u & 0xffffffffffUL;
3710   //   uint v = (e << 23) | (uint)(u >> 40);
3711   //   uint r = t > 0x8000000000UL ? 1U : (t == 0x8000000000UL ? v & 1U : 0U);
3712   //   return as_float(v + r);
3713   // }
3714 
3715   auto Zero32 = MIRBuilder.buildConstant(S32, 0);
3716   auto Zero64 = MIRBuilder.buildConstant(S64, 0);
3717 
3718   auto LZ = MIRBuilder.buildCTLZ_ZERO_UNDEF(S32, Src);
3719 
3720   auto K = MIRBuilder.buildConstant(S32, 127U + 63U);
3721   auto Sub = MIRBuilder.buildSub(S32, K, LZ);
3722 
3723   auto NotZero = MIRBuilder.buildICmp(CmpInst::ICMP_NE, S1, Src, Zero64);
3724   auto E = MIRBuilder.buildSelect(S32, NotZero, Sub, Zero32);
3725 
3726   auto Mask0 = MIRBuilder.buildConstant(S64, (-1ULL) >> 1);
3727   auto ShlLZ = MIRBuilder.buildShl(S64, Src, LZ);
3728 
3729   auto U = MIRBuilder.buildAnd(S64, ShlLZ, Mask0);
3730 
3731   auto Mask1 = MIRBuilder.buildConstant(S64, 0xffffffffffULL);
3732   auto T = MIRBuilder.buildAnd(S64, U, Mask1);
3733 
3734   auto UShl = MIRBuilder.buildLShr(S64, U, MIRBuilder.buildConstant(S64, 40));
3735   auto ShlE = MIRBuilder.buildShl(S32, E, MIRBuilder.buildConstant(S32, 23));
3736   auto V = MIRBuilder.buildOr(S32, ShlE, MIRBuilder.buildTrunc(S32, UShl));
3737 
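  // Compute the round-to-nearest-even increment R: 1 if the discarded low 40
  // bits exceed half a ULP, the parity bit of V if they are exactly half, and
  // 0 otherwise.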
3738   auto C = MIRBuilder.buildConstant(S64, 0x8000000000ULL);
3739   auto RCmp = MIRBuilder.buildICmp(CmpInst::ICMP_UGT, S1, T, C);
3740   auto TCmp = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, S1, T, C);
3741   auto One = MIRBuilder.buildConstant(S32, 1);
3742 
3743   auto VTrunc1 = MIRBuilder.buildAnd(S32, V, One);
3744   auto Select0 = MIRBuilder.buildSelect(S32, TCmp, VTrunc1, Zero32);
3745   auto R = MIRBuilder.buildSelect(S32, RCmp, One, Select0);
3746   MIRBuilder.buildAdd(Dst, V, R);
3747 
  MI.eraseFromParent();
  return Legalized;
3749 }
3750 
3751 LegalizerHelper::LegalizeResult
3752 LegalizerHelper::lowerUITOFP(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
3753   Register Dst = MI.getOperand(0).getReg();
3754   Register Src = MI.getOperand(1).getReg();
3755   LLT DstTy = MRI.getType(Dst);
3756   LLT SrcTy = MRI.getType(Src);
3757 
3758   if (SrcTy != LLT::scalar(64))
3759     return UnableToLegalize;
3760 
3761   if (DstTy == LLT::scalar(32)) {
3762     // TODO: SelectionDAG has several alternative expansions to port which may
    // be more reasonable depending on the available instructions. If a target
3764     // has sitofp, does not have CTLZ, or can efficiently use f64 as an
3765     // intermediate type, this is probably worse.
3766     return lowerU64ToF32BitOps(MI);
3767   }
3768 
3769   return UnableToLegalize;
3770 }
3771 
3772 LegalizerHelper::LegalizeResult
3773 LegalizerHelper::lowerSITOFP(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
3774   Register Dst = MI.getOperand(0).getReg();
3775   Register Src = MI.getOperand(1).getReg();
3776   LLT DstTy = MRI.getType(Dst);
3777   LLT SrcTy = MRI.getType(Src);
3778 
3779   const LLT S64 = LLT::scalar(64);
3780   const LLT S32 = LLT::scalar(32);
3781   const LLT S1 = LLT::scalar(1);
3782 
3783   if (SrcTy != S64)
3784     return UnableToLegalize;
3785 
3786   if (DstTy == S32) {
3787     // signed cl2f(long l) {
3788     //   long s = l >> 63;
3789     //   float r = cul2f((l + s) ^ s);
3790     //   return s ? -r : r;
3791     // }
3792     Register L = Src;
3793     auto SignBit = MIRBuilder.buildConstant(S64, 63);
3794     auto S = MIRBuilder.buildAShr(S64, L, SignBit);
3795 
3796     auto LPlusS = MIRBuilder.buildAdd(S64, L, S);
3797     auto Xor = MIRBuilder.buildXor(S64, LPlusS, S);
3798     auto R = MIRBuilder.buildUITOFP(S32, Xor);
3799 
3800     auto RNeg = MIRBuilder.buildFNeg(S32, R);
3801     auto SignNotZero = MIRBuilder.buildICmp(CmpInst::ICMP_NE, S1, S,
3802                                             MIRBuilder.buildConstant(S64, 0));
3803     MIRBuilder.buildSelect(Dst, SignNotZero, RNeg, R);
    MI.eraseFromParent();
    return Legalized;
3805   }
3806 
3807   return UnableToLegalize;
3808 }
3809 
3810 LegalizerHelper::LegalizeResult
3811 LegalizerHelper::lowerFPTOUI(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
3812   Register Dst = MI.getOperand(0).getReg();
3813   Register Src = MI.getOperand(1).getReg();
3814   LLT DstTy = MRI.getType(Dst);
3815   LLT SrcTy = MRI.getType(Src);
3816   const LLT S64 = LLT::scalar(64);
3817   const LLT S32 = LLT::scalar(32);
3818 
3819   if (SrcTy != S64 && SrcTy != S32)
3820     return UnableToLegalize;
3821   if (DstTy != S32 && DstTy != S64)
3822     return UnableToLegalize;
3823 
  // FPTOSI gives the same result as FPTOUI when the converted value fits in
  // the non-negative signed range. FPTOUI additionally needs to handle fp
  // values that convert to unsigned integers greater than or equal to 2^31
  // for float or 2^63 for double (2^Exp for brevity).
3827 
3828   APInt TwoPExpInt = APInt::getSignMask(DstTy.getSizeInBits());
3829   APFloat TwoPExpFP(SrcTy.getSizeInBits() == 32 ? APFloat::IEEEsingle()
3830                                                 : APFloat::IEEEdouble(),
3831                     APInt::getNullValue(SrcTy.getSizeInBits()));
3832   TwoPExpFP.convertFromAPInt(TwoPExpInt, false, APFloat::rmNearestTiesToEven);
3833 
3834   MachineInstrBuilder FPTOSI = MIRBuilder.buildFPTOSI(DstTy, Src);
3835 
3836   MachineInstrBuilder Threshold = MIRBuilder.buildFConstant(SrcTy, TwoPExpFP);
  // For fp values greater than or equal to Threshold (2^Exp), we use FPTOSI
  // on (Value - 2^Exp) and add 2^Exp back by setting the highest bit of the
  // result.
3839   MachineInstrBuilder FSub = MIRBuilder.buildFSub(SrcTy, Src, Threshold);
3840   MachineInstrBuilder ResLowBits = MIRBuilder.buildFPTOSI(DstTy, FSub);
3841   MachineInstrBuilder ResHighBit = MIRBuilder.buildConstant(DstTy, TwoPExpInt);
3842   MachineInstrBuilder Res = MIRBuilder.buildXor(DstTy, ResLowBits, ResHighBit);
3843 
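  // If the input is below the threshold (or is a NaN, hence the unordered
  // ULT), use the plain FPTOSI result directly; otherwise use the adjusted
  // result.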
3844   MachineInstrBuilder FCMP =
3845       MIRBuilder.buildFCmp(CmpInst::FCMP_ULT, DstTy, Src, Threshold);
3846   MIRBuilder.buildSelect(Dst, FCMP, FPTOSI, Res);
3847 
3848   MI.eraseFromParent();
3849   return Legalized;
3850 }
3851 
3852 static CmpInst::Predicate minMaxToCompare(unsigned Opc) {
3853   switch (Opc) {
3854   case TargetOpcode::G_SMIN:
3855     return CmpInst::ICMP_SLT;
3856   case TargetOpcode::G_SMAX:
3857     return CmpInst::ICMP_SGT;
3858   case TargetOpcode::G_UMIN:
3859     return CmpInst::ICMP_ULT;
3860   case TargetOpcode::G_UMAX:
3861     return CmpInst::ICMP_UGT;
3862   default:
3863     llvm_unreachable("not in integer min/max");
3864   }
3865 }
3866 
3867 LegalizerHelper::LegalizeResult
3868 LegalizerHelper::lowerMinMax(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
3869   Register Dst = MI.getOperand(0).getReg();
3870   Register Src0 = MI.getOperand(1).getReg();
3871   Register Src1 = MI.getOperand(2).getReg();
3872 
3873   const CmpInst::Predicate Pred = minMaxToCompare(MI.getOpcode());
3874   LLT CmpType = MRI.getType(Dst).changeElementSize(1);
3875 
3876   auto Cmp = MIRBuilder.buildICmp(Pred, CmpType, Src0, Src1);
3877   MIRBuilder.buildSelect(Dst, Cmp, Src0, Src1);
3878 
3879   MI.eraseFromParent();
3880   return Legalized;
3881 }
3882 
3883 LegalizerHelper::LegalizeResult
3884 LegalizerHelper::lowerFCopySign(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
3885   Register Dst = MI.getOperand(0).getReg();
3886   Register Src0 = MI.getOperand(1).getReg();
3887   Register Src1 = MI.getOperand(2).getReg();
3888 
3889   const LLT Src0Ty = MRI.getType(Src0);
3890   const LLT Src1Ty = MRI.getType(Src1);
3891 
3892   const int Src0Size = Src0Ty.getScalarSizeInBits();
3893   const int Src1Size = Src1Ty.getScalarSizeInBits();
3894 
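  // The result is Src0 with its sign bit cleared, OR'd with the sign bit of
  // Src1 shifted into Src0's sign position when the widths differ.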
3895   auto SignBitMask = MIRBuilder.buildConstant(
3896     Src0Ty, APInt::getSignMask(Src0Size));
3897 
3898   auto NotSignBitMask = MIRBuilder.buildConstant(
3899     Src0Ty, APInt::getLowBitsSet(Src0Size, Src0Size - 1));
3900 
3901   auto And0 = MIRBuilder.buildAnd(Src0Ty, Src0, NotSignBitMask);
3902   MachineInstr *Or;
3903 
3904   if (Src0Ty == Src1Ty) {
    auto And1 = MIRBuilder.buildAnd(Src1Ty, Src1, SignBitMask);
3906     Or = MIRBuilder.buildOr(Dst, And0, And1);
3907   } else if (Src0Size > Src1Size) {
3908     auto ShiftAmt = MIRBuilder.buildConstant(Src0Ty, Src0Size - Src1Size);
3909     auto Zext = MIRBuilder.buildZExt(Src0Ty, Src1);
3910     auto Shift = MIRBuilder.buildShl(Src0Ty, Zext, ShiftAmt);
3911     auto And1 = MIRBuilder.buildAnd(Src0Ty, Shift, SignBitMask);
3912     Or = MIRBuilder.buildOr(Dst, And0, And1);
3913   } else {
3914     auto ShiftAmt = MIRBuilder.buildConstant(Src1Ty, Src1Size - Src0Size);
3915     auto Shift = MIRBuilder.buildLShr(Src1Ty, Src1, ShiftAmt);
3916     auto Trunc = MIRBuilder.buildTrunc(Src0Ty, Shift);
3917     auto And1 = MIRBuilder.buildAnd(Src0Ty, Trunc, SignBitMask);
3918     Or = MIRBuilder.buildOr(Dst, And0, And1);
3919   }
3920 
3921   // Be careful about setting nsz/nnan/ninf on every instruction, since the
3922   // constants are a nan and -0.0, but the final result should preserve
3923   // everything.
3924   if (unsigned Flags = MI.getFlags())
3925     Or->setFlags(Flags);
3926 
3927   MI.eraseFromParent();
3928   return Legalized;
3929 }
3930 
3931 LegalizerHelper::LegalizeResult
3932 LegalizerHelper::lowerFMinNumMaxNum(MachineInstr &MI) {
3933   unsigned NewOp = MI.getOpcode() == TargetOpcode::G_FMINNUM ?
3934     TargetOpcode::G_FMINNUM_IEEE : TargetOpcode::G_FMAXNUM_IEEE;
3935 
3936   Register Dst = MI.getOperand(0).getReg();
3937   Register Src0 = MI.getOperand(1).getReg();
3938   Register Src1 = MI.getOperand(2).getReg();
3939   LLT Ty = MRI.getType(Dst);
3940 
3941   if (!MI.getFlag(MachineInstr::FmNoNans)) {
3942     // Insert canonicalizes if it's possible we need to quiet to get correct
3943     // sNaN behavior.
3944 
3945     // Note this must be done here, and not as an optimization combine in the
    // absence of a dedicated quiet-sNaN instruction, as we're using an
3947     // omni-purpose G_FCANONICALIZE.
3948     if (!isKnownNeverSNaN(Src0, MRI))
3949       Src0 = MIRBuilder.buildFCanonicalize(Ty, Src0, MI.getFlags()).getReg(0);
3950 
3951     if (!isKnownNeverSNaN(Src1, MRI))
3952       Src1 = MIRBuilder.buildFCanonicalize(Ty, Src1, MI.getFlags()).getReg(0);
3953   }
3954 
  // With no NaNs (or with any possible sNaNs quieted above), the IEEE and
  // non-IEEE variants behave identically, so it's safe to emit the IEEE
  // version.
3957   MIRBuilder.buildInstr(NewOp, {Dst}, {Src0, Src1}, MI.getFlags());
3958   MI.eraseFromParent();
3959   return Legalized;
3960 }
3961 
3962 LegalizerHelper::LegalizeResult LegalizerHelper::lowerFMad(MachineInstr &MI) {
3963   // Expand G_FMAD a, b, c -> G_FADD (G_FMUL a, b), c
3964   Register DstReg = MI.getOperand(0).getReg();
3965   LLT Ty = MRI.getType(DstReg);
3966   unsigned Flags = MI.getFlags();
3967 
3968   auto Mul = MIRBuilder.buildFMul(Ty, MI.getOperand(1), MI.getOperand(2),
3969                                   Flags);
3970   MIRBuilder.buildFAdd(DstReg, Mul, MI.getOperand(3), Flags);
3971   MI.eraseFromParent();
3972   return Legalized;
3973 }
3974 
3975 LegalizerHelper::LegalizeResult
3976 LegalizerHelper::lowerUnmergeValues(MachineInstr &MI) {
3977   const unsigned NumDst = MI.getNumOperands() - 1;
3978   const Register SrcReg = MI.getOperand(NumDst).getReg();
3979   LLT SrcTy = MRI.getType(SrcReg);
3980 
3981   Register Dst0Reg = MI.getOperand(0).getReg();
3982   LLT DstTy = MRI.getType(Dst0Reg);
3983 
3985   // Expand scalarizing unmerge as bitcast to integer and shift.
3986   if (!DstTy.isVector() && SrcTy.isVector() &&
3987       SrcTy.getElementType() == DstTy) {
3988     LLT IntTy = LLT::scalar(SrcTy.getSizeInBits());
3989     Register Cast = MIRBuilder.buildBitcast(IntTy, SrcReg).getReg(0);
3990 
3991     MIRBuilder.buildTrunc(Dst0Reg, Cast);
3992 
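    // Each subsequent element is recovered by shifting it down to bit 0 of
    // the integer and truncating.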
3993     const unsigned DstSize = DstTy.getSizeInBits();
3994     unsigned Offset = DstSize;
3995     for (unsigned I = 1; I != NumDst; ++I, Offset += DstSize) {
3996       auto ShiftAmt = MIRBuilder.buildConstant(IntTy, Offset);
3997       auto Shift = MIRBuilder.buildLShr(IntTy, Cast, ShiftAmt);
3998       MIRBuilder.buildTrunc(MI.getOperand(I), Shift);
3999     }
4000 
4001     MI.eraseFromParent();
4002     return Legalized;
4003   }
4004 
4005   return UnableToLegalize;
4006 }
4007 
4008 LegalizerHelper::LegalizeResult
4009 LegalizerHelper::lowerShuffleVector(MachineInstr &MI) {
4010   Register DstReg = MI.getOperand(0).getReg();
4011   Register Src0Reg = MI.getOperand(1).getReg();
4012   Register Src1Reg = MI.getOperand(2).getReg();
4013   LLT Src0Ty = MRI.getType(Src0Reg);
4014   LLT DstTy = MRI.getType(DstReg);
4015   LLT IdxTy = LLT::scalar(32);
4016 
4017   const Constant *ShufMask = MI.getOperand(3).getShuffleMask();
4018 
4019   SmallVector<int, 32> Mask;
4020   ShuffleVectorInst::getShuffleMask(ShufMask, Mask);
4021 
4022   if (DstTy.isScalar()) {
4023     if (Src0Ty.isVector())
4024       return UnableToLegalize;
4025 
4026     // This is just a SELECT.
4027     assert(Mask.size() == 1 && "Expected a single mask element");
4028     Register Val;
4029     if (Mask[0] < 0 || Mask[0] > 1)
4030       Val = MIRBuilder.buildUndef(DstTy).getReg(0);
4031     else
4032       Val = Mask[0] == 0 ? Src0Reg : Src1Reg;
4033     MIRBuilder.buildCopy(DstReg, Val);
4034     MI.eraseFromParent();
4035     return Legalized;
4036   }
4037 
4038   Register Undef;
4039   SmallVector<Register, 32> BuildVec;
4040   LLT EltTy = DstTy.getElementType();
4041 
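  // Build the result one element at a time: undef for a negative mask index,
  // otherwise the element is taken from whichever source the index selects
  // (or is the selected scalar source itself if the sources are scalars).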
4042   for (int Idx : Mask) {
4043     if (Idx < 0) {
4044       if (!Undef.isValid())
4045         Undef = MIRBuilder.buildUndef(EltTy).getReg(0);
4046       BuildVec.push_back(Undef);
4047       continue;
4048     }
4049 
4050     if (Src0Ty.isScalar()) {
4051       BuildVec.push_back(Idx == 0 ? Src0Reg : Src1Reg);
4052     } else {
4053       int NumElts = Src0Ty.getNumElements();
4054       Register SrcVec = Idx < NumElts ? Src0Reg : Src1Reg;
4055       int ExtractIdx = Idx < NumElts ? Idx : Idx - NumElts;
4056       auto IdxK = MIRBuilder.buildConstant(IdxTy, ExtractIdx);
4057       auto Extract = MIRBuilder.buildExtractVectorElement(EltTy, SrcVec, IdxK);
4058       BuildVec.push_back(Extract.getReg(0));
4059     }
4060   }
4061 
4062   MIRBuilder.buildBuildVector(DstReg, BuildVec);
4063   MI.eraseFromParent();
4064   return Legalized;
4065 }
4066 
4067 LegalizerHelper::LegalizeResult
4068 LegalizerHelper::lowerDynStackAlloc(MachineInstr &MI) {
4069   Register Dst = MI.getOperand(0).getReg();
4070   Register AllocSize = MI.getOperand(1).getReg();
4071   unsigned Align = MI.getOperand(2).getImm();
4072 
4073   const auto &MF = *MI.getMF();
4074   const auto &TLI = *MF.getSubtarget().getTargetLowering();
4075 
4076   LLT PtrTy = MRI.getType(Dst);
4077   LLT IntPtrTy = LLT::scalar(PtrTy.getSizeInBits());
4078 
4079   Register SPReg = TLI.getStackPointerRegisterToSaveRestore();
4080   auto SPTmp = MIRBuilder.buildCopy(PtrTy, SPReg);
4081   SPTmp = MIRBuilder.buildCast(IntPtrTy, SPTmp);
4082 
4083   // Subtract the final alloc from the SP. We use G_PTRTOINT here so we don't
4084   // have to generate an extra instruction to negate the alloc and then use
4085   // G_GEP to add the negative offset.
4086   auto Alloc = MIRBuilder.buildSub(IntPtrTy, SPTmp, AllocSize);
4087   if (Align) {
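    // Round the decremented SP down to the requested alignment by masking off
    // the low bits.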
4088     APInt AlignMask(IntPtrTy.getSizeInBits(), Align, true);
4089     AlignMask.negate();
4090     auto AlignCst = MIRBuilder.buildConstant(IntPtrTy, AlignMask);
4091     Alloc = MIRBuilder.buildAnd(IntPtrTy, Alloc, AlignCst);
4092   }
4093 
4094   SPTmp = MIRBuilder.buildCast(PtrTy, Alloc);
4095   MIRBuilder.buildCopy(SPReg, SPTmp);
4096   MIRBuilder.buildCopy(Dst, SPTmp);
4097 
4098   MI.eraseFromParent();
4099   return Legalized;
4100 }
4101