1 //===-- llvm/CodeGen/GlobalISel/LegalizerHelper.cpp -----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file This file implements the LegalizerHelper class to legalize
10 /// individual instructions and the LegalizeMachineIR wrapper pass for the
11 /// primary legalization.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
16 #include "llvm/CodeGen/GlobalISel/CallLowering.h"
17 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
18 #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
19 #include "llvm/CodeGen/MachineRegisterInfo.h"
20 #include "llvm/CodeGen/TargetInstrInfo.h"
21 #include "llvm/CodeGen/TargetLowering.h"
22 #include "llvm/CodeGen/TargetSubtargetInfo.h"
23 #include "llvm/Support/Debug.h"
24 #include "llvm/Support/MathExtras.h"
25 #include "llvm/Support/raw_ostream.h"
26 
27 #define DEBUG_TYPE "legalizer"
28 
29 using namespace llvm;
30 using namespace LegalizeActions;
31 
32 /// Try to break down \p OrigTy into \p NarrowTy sized pieces.
33 ///
34 /// Returns the number of \p NarrowTy elements needed to reconstruct \p OrigTy,
35 /// with any leftover piece as type \p LeftoverTy.
36 ///
37 /// Returns -1 in the first element of the pair if the breakdown is not
38 /// satisfiable.
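///
/// For example, breaking an s70 \p OrigTy into s16 pieces yields {4, 1} with
/// \p LeftoverTy set to s6, while breaking <4 x s32> into <2 x s32> pieces
/// yields {2, 0} and no leftover.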
39 static std::pair<int, int>
40 getNarrowTypeBreakDown(LLT OrigTy, LLT NarrowTy, LLT &LeftoverTy) {
41   assert(!LeftoverTy.isValid() && "this is an out argument");
42 
43   unsigned Size = OrigTy.getSizeInBits();
44   unsigned NarrowSize = NarrowTy.getSizeInBits();
45   unsigned NumParts = Size / NarrowSize;
46   unsigned LeftoverSize = Size - NumParts * NarrowSize;
47   assert(Size > NarrowSize);
48 
49   if (LeftoverSize == 0)
50     return {NumParts, 0};
51 
52   if (NarrowTy.isVector()) {
53     unsigned EltSize = OrigTy.getScalarSizeInBits();
54     if (LeftoverSize % EltSize != 0)
55       return {-1, -1};
56     LeftoverTy = LLT::scalarOrVector(LeftoverSize / EltSize, EltSize);
57   } else {
58     LeftoverTy = LLT::scalar(LeftoverSize);
59   }
60 
61   int NumLeftover = LeftoverSize / LeftoverTy.getSizeInBits();
62   return std::make_pair(NumParts, NumLeftover);
63 }
64 
65 LegalizerHelper::LegalizerHelper(MachineFunction &MF,
66                                  GISelChangeObserver &Observer,
67                                  MachineIRBuilder &Builder)
68     : MIRBuilder(Builder), MRI(MF.getRegInfo()),
69       LI(*MF.getSubtarget().getLegalizerInfo()), Observer(Observer) {
70   MIRBuilder.setMF(MF);
71   MIRBuilder.setChangeObserver(Observer);
72 }
73 
74 LegalizerHelper::LegalizerHelper(MachineFunction &MF, const LegalizerInfo &LI,
75                                  GISelChangeObserver &Observer,
76                                  MachineIRBuilder &B)
77     : MIRBuilder(B), MRI(MF.getRegInfo()), LI(LI), Observer(Observer) {
78   MIRBuilder.setMF(MF);
79   MIRBuilder.setChangeObserver(Observer);
80 }
81 LegalizerHelper::LegalizeResult
82 LegalizerHelper::legalizeInstrStep(MachineInstr &MI) {
83   LLVM_DEBUG(dbgs() << "Legalizing: "; MI.print(dbgs()));
84 
85   if (MI.getOpcode() == TargetOpcode::G_INTRINSIC ||
86       MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS)
87     return LI.legalizeIntrinsic(MI, MRI, MIRBuilder) ? Legalized
88                                                      : UnableToLegalize;
89   auto Step = LI.getAction(MI, MRI);
90   switch (Step.Action) {
91   case Legal:
92     LLVM_DEBUG(dbgs() << ".. Already legal\n");
93     return AlreadyLegal;
94   case Libcall:
95     LLVM_DEBUG(dbgs() << ".. Convert to libcall\n");
96     return libcall(MI);
97   case NarrowScalar:
98     LLVM_DEBUG(dbgs() << ".. Narrow scalar\n");
99     return narrowScalar(MI, Step.TypeIdx, Step.NewType);
100   case WidenScalar:
101     LLVM_DEBUG(dbgs() << ".. Widen scalar\n");
102     return widenScalar(MI, Step.TypeIdx, Step.NewType);
103   case Lower:
104     LLVM_DEBUG(dbgs() << ".. Lower\n");
105     return lower(MI, Step.TypeIdx, Step.NewType);
106   case FewerElements:
107     LLVM_DEBUG(dbgs() << ".. Reduce number of elements\n");
108     return fewerElementsVector(MI, Step.TypeIdx, Step.NewType);
109   case MoreElements:
110     LLVM_DEBUG(dbgs() << ".. Increase number of elements\n");
111     return moreElementsVector(MI, Step.TypeIdx, Step.NewType);
112   case Custom:
113     LLVM_DEBUG(dbgs() << ".. Custom legalization\n");
114     return LI.legalizeCustom(MI, MRI, MIRBuilder, Observer) ? Legalized
115                                                             : UnableToLegalize;
116   default:
117     LLVM_DEBUG(dbgs() << ".. Unable to legalize\n");
118     return UnableToLegalize;
119   }
120 }
121 
122 void LegalizerHelper::extractParts(Register Reg, LLT Ty, int NumParts,
123                                    SmallVectorImpl<Register> &VRegs) {
124   for (int i = 0; i < NumParts; ++i)
125     VRegs.push_back(MRI.createGenericVirtualRegister(Ty));
126   MIRBuilder.buildUnmerge(VRegs, Reg);
127 }
128 
129 bool LegalizerHelper::extractParts(Register Reg, LLT RegTy,
130                                    LLT MainTy, LLT &LeftoverTy,
131                                    SmallVectorImpl<Register> &VRegs,
132                                    SmallVectorImpl<Register> &LeftoverRegs) {
133   assert(!LeftoverTy.isValid() && "this is an out argument");
134 
135   unsigned RegSize = RegTy.getSizeInBits();
136   unsigned MainSize = MainTy.getSizeInBits();
137   unsigned NumParts = RegSize / MainSize;
138   unsigned LeftoverSize = RegSize - NumParts * MainSize;
139 
140   // Use an unmerge when possible.
141   if (LeftoverSize == 0) {
142     for (unsigned I = 0; I < NumParts; ++I)
143       VRegs.push_back(MRI.createGenericVirtualRegister(MainTy));
144     MIRBuilder.buildUnmerge(VRegs, Reg);
145     return true;
146   }
147 
148   if (MainTy.isVector()) {
149     unsigned EltSize = MainTy.getScalarSizeInBits();
150     if (LeftoverSize % EltSize != 0)
151       return false;
152     LeftoverTy = LLT::scalarOrVector(LeftoverSize / EltSize, EltSize);
153   } else {
154     LeftoverTy = LLT::scalar(LeftoverSize);
155   }
156 
157   // For irregular sizes, extract the individual parts.
158   for (unsigned I = 0; I != NumParts; ++I) {
159     Register NewReg = MRI.createGenericVirtualRegister(MainTy);
160     VRegs.push_back(NewReg);
161     MIRBuilder.buildExtract(NewReg, Reg, MainSize * I);
162   }
163 
164   for (unsigned Offset = MainSize * NumParts; Offset < RegSize;
165        Offset += LeftoverSize) {
166     Register NewReg = MRI.createGenericVirtualRegister(LeftoverTy);
167     LeftoverRegs.push_back(NewReg);
168     MIRBuilder.buildExtract(NewReg, Reg, Offset);
169   }
170 
171   return true;
172 }
173 
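/// Merge \p PartRegs (of type \p PartTy), followed by \p LeftoverRegs (of type
/// \p LeftoverTy), back into \p DstReg. With no leftover this is a plain
/// merge/concat; otherwise each piece is inserted into an undef value at its
/// offset.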
174 void LegalizerHelper::insertParts(Register DstReg,
175                                   LLT ResultTy, LLT PartTy,
176                                   ArrayRef<Register> PartRegs,
177                                   LLT LeftoverTy,
178                                   ArrayRef<Register> LeftoverRegs) {
179   if (!LeftoverTy.isValid()) {
180     assert(LeftoverRegs.empty());
181 
182     if (!ResultTy.isVector()) {
183       MIRBuilder.buildMerge(DstReg, PartRegs);
184       return;
185     }
186 
187     if (PartTy.isVector())
188       MIRBuilder.buildConcatVectors(DstReg, PartRegs);
189     else
190       MIRBuilder.buildBuildVector(DstReg, PartRegs);
191     return;
192   }
193 
194   unsigned PartSize = PartTy.getSizeInBits();
195   unsigned LeftoverPartSize = LeftoverTy.getSizeInBits();
196 
197   Register CurResultReg = MRI.createGenericVirtualRegister(ResultTy);
198   MIRBuilder.buildUndef(CurResultReg);
199 
200   unsigned Offset = 0;
201   for (Register PartReg : PartRegs) {
202     Register NewResultReg = MRI.createGenericVirtualRegister(ResultTy);
203     MIRBuilder.buildInsert(NewResultReg, CurResultReg, PartReg, Offset);
204     CurResultReg = NewResultReg;
205     Offset += PartSize;
206   }
207 
208   for (unsigned I = 0, E = LeftoverRegs.size(); I != E; ++I) {
209     // Use the original output register for the final insert to avoid a copy.
210     Register NewResultReg = (I + 1 == E) ?
211       DstReg : MRI.createGenericVirtualRegister(ResultTy);
212 
213     MIRBuilder.buildInsert(NewResultReg, CurResultReg, LeftoverRegs[I], Offset);
214     CurResultReg = NewResultReg;
215     Offset += LeftoverPartSize;
216   }
217 }
218 
219 static RTLIB::Libcall getRTLibDesc(unsigned Opcode, unsigned Size) {
220   switch (Opcode) {
221   case TargetOpcode::G_SDIV:
222     assert((Size == 32 || Size == 64) && "Unsupported size");
223     return Size == 64 ? RTLIB::SDIV_I64 : RTLIB::SDIV_I32;
224   case TargetOpcode::G_UDIV:
225     assert((Size == 32 || Size == 64) && "Unsupported size");
226     return Size == 64 ? RTLIB::UDIV_I64 : RTLIB::UDIV_I32;
227   case TargetOpcode::G_SREM:
228     assert((Size == 32 || Size == 64) && "Unsupported size");
229     return Size == 64 ? RTLIB::SREM_I64 : RTLIB::SREM_I32;
230   case TargetOpcode::G_UREM:
231     assert((Size == 32 || Size == 64) && "Unsupported size");
232     return Size == 64 ? RTLIB::UREM_I64 : RTLIB::UREM_I32;
233   case TargetOpcode::G_CTLZ_ZERO_UNDEF:
234     assert(Size == 32 && "Unsupported size");
235     return RTLIB::CTLZ_I32;
236   case TargetOpcode::G_FADD:
237     assert((Size == 32 || Size == 64) && "Unsupported size");
238     return Size == 64 ? RTLIB::ADD_F64 : RTLIB::ADD_F32;
239   case TargetOpcode::G_FSUB:
240     assert((Size == 32 || Size == 64) && "Unsupported size");
241     return Size == 64 ? RTLIB::SUB_F64 : RTLIB::SUB_F32;
242   case TargetOpcode::G_FMUL:
243     assert((Size == 32 || Size == 64) && "Unsupported size");
244     return Size == 64 ? RTLIB::MUL_F64 : RTLIB::MUL_F32;
245   case TargetOpcode::G_FDIV:
246     assert((Size == 32 || Size == 64) && "Unsupported size");
247     return Size == 64 ? RTLIB::DIV_F64 : RTLIB::DIV_F32;
248   case TargetOpcode::G_FEXP:
249     assert((Size == 32 || Size == 64) && "Unsupported size");
250     return Size == 64 ? RTLIB::EXP_F64 : RTLIB::EXP_F32;
251   case TargetOpcode::G_FEXP2:
252     assert((Size == 32 || Size == 64) && "Unsupported size");
253     return Size == 64 ? RTLIB::EXP2_F64 : RTLIB::EXP2_F32;
254   case TargetOpcode::G_FREM:
255     return Size == 64 ? RTLIB::REM_F64 : RTLIB::REM_F32;
256   case TargetOpcode::G_FPOW:
257     return Size == 64 ? RTLIB::POW_F64 : RTLIB::POW_F32;
258   case TargetOpcode::G_FMA:
259     assert((Size == 32 || Size == 64) && "Unsupported size");
260     return Size == 64 ? RTLIB::FMA_F64 : RTLIB::FMA_F32;
261   case TargetOpcode::G_FSIN:
262     assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
263     return Size == 128 ? RTLIB::SIN_F128
264                        : Size == 64 ? RTLIB::SIN_F64 : RTLIB::SIN_F32;
265   case TargetOpcode::G_FCOS:
266     assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
267     return Size == 128 ? RTLIB::COS_F128
268                        : Size == 64 ? RTLIB::COS_F64 : RTLIB::COS_F32;
269   case TargetOpcode::G_FLOG10:
270     assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
271     return Size == 128 ? RTLIB::LOG10_F128
272                        : Size == 64 ? RTLIB::LOG10_F64 : RTLIB::LOG10_F32;
273   case TargetOpcode::G_FLOG:
274     assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
275     return Size == 128 ? RTLIB::LOG_F128
276                        : Size == 64 ? RTLIB::LOG_F64 : RTLIB::LOG_F32;
277   case TargetOpcode::G_FLOG2:
278     assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
279     return Size == 128 ? RTLIB::LOG2_F128
280                        : Size == 64 ? RTLIB::LOG2_F64 : RTLIB::LOG2_F32;
281   case TargetOpcode::G_FCEIL:
282     assert((Size == 32 || Size == 64) && "Unsupported size");
283     return Size == 64 ? RTLIB::CEIL_F64 : RTLIB::CEIL_F32;
284   case TargetOpcode::G_FFLOOR:
285     assert((Size == 32 || Size == 64) && "Unsupported size");
286     return Size == 64 ? RTLIB::FLOOR_F64 : RTLIB::FLOOR_F32;
287   }
288   llvm_unreachable("Unknown libcall function");
289 }
290 
291 LegalizerHelper::LegalizeResult
292 llvm::createLibcall(MachineIRBuilder &MIRBuilder, RTLIB::Libcall Libcall,
293                     const CallLowering::ArgInfo &Result,
294                     ArrayRef<CallLowering::ArgInfo> Args) {
295   auto &CLI = *MIRBuilder.getMF().getSubtarget().getCallLowering();
296   auto &TLI = *MIRBuilder.getMF().getSubtarget().getTargetLowering();
297   const char *Name = TLI.getLibcallName(Libcall);
298 
299   MIRBuilder.getMF().getFrameInfo().setHasCalls(true);
300   if (!CLI.lowerCall(MIRBuilder, TLI.getLibcallCallingConv(Libcall),
301                      MachineOperand::CreateES(Name), Result, Args))
302     return LegalizerHelper::UnableToLegalize;
303 
304   return LegalizerHelper::Legalized;
305 }
306 
307 // Useful for libcalls where all operands have the same type.
308 static LegalizerHelper::LegalizeResult
309 simpleLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder, unsigned Size,
310               Type *OpType) {
311   auto Libcall = getRTLibDesc(MI.getOpcode(), Size);
312 
313   SmallVector<CallLowering::ArgInfo, 3> Args;
314   for (unsigned i = 1; i < MI.getNumOperands(); i++)
315     Args.push_back({MI.getOperand(i).getReg(), OpType});
316   return createLibcall(MIRBuilder, Libcall, {MI.getOperand(0).getReg(), OpType},
317                        Args);
318 }
319 
320 LegalizerHelper::LegalizeResult
321 llvm::createMemLibcall(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
322                        MachineInstr &MI) {
323   assert(MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
324   auto &Ctx = MIRBuilder.getMF().getFunction().getContext();
325 
326   SmallVector<CallLowering::ArgInfo, 3> Args;
327   for (unsigned i = 1; i < MI.getNumOperands(); i++) {
328     Register Reg = MI.getOperand(i).getReg();
329 
330     // Need to derive an IR type for call lowering.
331     LLT OpLLT = MRI.getType(Reg);
332     Type *OpTy = nullptr;
333     if (OpLLT.isPointer())
334       OpTy = Type::getInt8PtrTy(Ctx, OpLLT.getAddressSpace());
335     else
336       OpTy = IntegerType::get(Ctx, OpLLT.getSizeInBits());
337     Args.push_back({Reg, OpTy});
338   }
339 
340   auto &CLI = *MIRBuilder.getMF().getSubtarget().getCallLowering();
341   auto &TLI = *MIRBuilder.getMF().getSubtarget().getTargetLowering();
342   Intrinsic::ID ID = MI.getOperand(0).getIntrinsicID();
343   RTLIB::Libcall RTLibcall;
344   switch (ID) {
345   case Intrinsic::memcpy:
346     RTLibcall = RTLIB::MEMCPY;
347     break;
348   case Intrinsic::memset:
349     RTLibcall = RTLIB::MEMSET;
350     break;
351   case Intrinsic::memmove:
352     RTLibcall = RTLIB::MEMMOVE;
353     break;
354   default:
355     return LegalizerHelper::UnableToLegalize;
356   }
357   const char *Name = TLI.getLibcallName(RTLibcall);
358 
359   MIRBuilder.setInstr(MI);
360   MIRBuilder.getMF().getFrameInfo().setHasCalls(true);
361   if (!CLI.lowerCall(MIRBuilder, TLI.getLibcallCallingConv(RTLibcall),
362                      MachineOperand::CreateES(Name),
363                      CallLowering::ArgInfo({0}, Type::getVoidTy(Ctx)), Args))
364     return LegalizerHelper::UnableToLegalize;
365 
366   return LegalizerHelper::Legalized;
367 }
368 
369 static RTLIB::Libcall getConvRTLibDesc(unsigned Opcode, Type *ToType,
370                                        Type *FromType) {
371   auto ToMVT = MVT::getVT(ToType);
372   auto FromMVT = MVT::getVT(FromType);
373 
374   switch (Opcode) {
375   case TargetOpcode::G_FPEXT:
376     return RTLIB::getFPEXT(FromMVT, ToMVT);
377   case TargetOpcode::G_FPTRUNC:
378     return RTLIB::getFPROUND(FromMVT, ToMVT);
379   case TargetOpcode::G_FPTOSI:
380     return RTLIB::getFPTOSINT(FromMVT, ToMVT);
381   case TargetOpcode::G_FPTOUI:
382     return RTLIB::getFPTOUINT(FromMVT, ToMVT);
383   case TargetOpcode::G_SITOFP:
384     return RTLIB::getSINTTOFP(FromMVT, ToMVT);
385   case TargetOpcode::G_UITOFP:
386     return RTLIB::getUINTTOFP(FromMVT, ToMVT);
387   }
388   llvm_unreachable("Unsupported libcall function");
389 }
390 
391 static LegalizerHelper::LegalizeResult
392 conversionLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder, Type *ToType,
393                   Type *FromType) {
394   RTLIB::Libcall Libcall = getConvRTLibDesc(MI.getOpcode(), ToType, FromType);
395   return createLibcall(MIRBuilder, Libcall, {MI.getOperand(0).getReg(), ToType},
396                        {{MI.getOperand(1).getReg(), FromType}});
397 }
398 
399 LegalizerHelper::LegalizeResult
400 LegalizerHelper::libcall(MachineInstr &MI) {
401   LLT LLTy = MRI.getType(MI.getOperand(0).getReg());
402   unsigned Size = LLTy.getSizeInBits();
403   auto &Ctx = MIRBuilder.getMF().getFunction().getContext();
404 
405   MIRBuilder.setInstr(MI);
406 
407   switch (MI.getOpcode()) {
408   default:
409     return UnableToLegalize;
410   case TargetOpcode::G_SDIV:
411   case TargetOpcode::G_UDIV:
412   case TargetOpcode::G_SREM:
413   case TargetOpcode::G_UREM:
414   case TargetOpcode::G_CTLZ_ZERO_UNDEF: {
415     Type *HLTy = IntegerType::get(Ctx, Size);
416     auto Status = simpleLibcall(MI, MIRBuilder, Size, HLTy);
417     if (Status != Legalized)
418       return Status;
419     break;
420   }
421   case TargetOpcode::G_FADD:
422   case TargetOpcode::G_FSUB:
423   case TargetOpcode::G_FMUL:
424   case TargetOpcode::G_FDIV:
425   case TargetOpcode::G_FMA:
426   case TargetOpcode::G_FPOW:
427   case TargetOpcode::G_FREM:
428   case TargetOpcode::G_FCOS:
429   case TargetOpcode::G_FSIN:
430   case TargetOpcode::G_FLOG10:
431   case TargetOpcode::G_FLOG:
432   case TargetOpcode::G_FLOG2:
433   case TargetOpcode::G_FEXP:
434   case TargetOpcode::G_FEXP2:
435   case TargetOpcode::G_FCEIL:
436   case TargetOpcode::G_FFLOOR: {
437     if (Size > 64) {
438       LLVM_DEBUG(dbgs() << "Size " << Size << " too large to legalize.\n");
439       return UnableToLegalize;
440     }
441     Type *HLTy = Size == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx);
442     auto Status = simpleLibcall(MI, MIRBuilder, Size, HLTy);
443     if (Status != Legalized)
444       return Status;
445     break;
446   }
447   case TargetOpcode::G_FPEXT: {
448     // FIXME: Support other floating point types (half, fp128 etc)
449     unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
450     unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
451     if (ToSize != 64 || FromSize != 32)
452       return UnableToLegalize;
453     LegalizeResult Status = conversionLibcall(
454         MI, MIRBuilder, Type::getDoubleTy(Ctx), Type::getFloatTy(Ctx));
455     if (Status != Legalized)
456       return Status;
457     break;
458   }
459   case TargetOpcode::G_FPTRUNC: {
460     // FIXME: Support other floating point types (half, fp128 etc)
461     unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
462     unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
463     if (ToSize != 32 || FromSize != 64)
464       return UnableToLegalize;
465     LegalizeResult Status = conversionLibcall(
466         MI, MIRBuilder, Type::getFloatTy(Ctx), Type::getDoubleTy(Ctx));
467     if (Status != Legalized)
468       return Status;
469     break;
470   }
471   case TargetOpcode::G_FPTOSI:
472   case TargetOpcode::G_FPTOUI: {
473     // FIXME: Support other types
474     unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
475     unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
476     if ((ToSize != 32 && ToSize != 64) || (FromSize != 32 && FromSize != 64))
477       return UnableToLegalize;
478     LegalizeResult Status = conversionLibcall(
479         MI, MIRBuilder,
480         ToSize == 32 ? Type::getInt32Ty(Ctx) : Type::getInt64Ty(Ctx),
481         FromSize == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx));
482     if (Status != Legalized)
483       return Status;
484     break;
485   }
486   case TargetOpcode::G_SITOFP:
487   case TargetOpcode::G_UITOFP: {
488     // FIXME: Support other types
489     unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
490     unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
491     if ((FromSize != 32 && FromSize != 64) || (ToSize != 32 && ToSize != 64))
492       return UnableToLegalize;
493     LegalizeResult Status = conversionLibcall(
494         MI, MIRBuilder,
495         ToSize == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx),
496         FromSize == 32 ? Type::getInt32Ty(Ctx) : Type::getInt64Ty(Ctx));
497     if (Status != Legalized)
498       return Status;
499     break;
500   }
501   }
502 
503   MI.eraseFromParent();
504   return Legalized;
505 }
506 
507 LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
508                                                               unsigned TypeIdx,
509                                                               LLT NarrowTy) {
510   MIRBuilder.setInstr(MI);
511 
512   uint64_t SizeOp0 = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
513   uint64_t NarrowSize = NarrowTy.getSizeInBits();
514 
515   switch (MI.getOpcode()) {
516   default:
517     return UnableToLegalize;
518   case TargetOpcode::G_IMPLICIT_DEF: {
519     // FIXME: add support for when SizeOp0 isn't an exact multiple of
520     // NarrowSize.
521     if (SizeOp0 % NarrowSize != 0)
522       return UnableToLegalize;
523     int NumParts = SizeOp0 / NarrowSize;
524 
525     SmallVector<Register, 2> DstRegs;
526     for (int i = 0; i < NumParts; ++i)
527       DstRegs.push_back(
528           MIRBuilder.buildUndef(NarrowTy)->getOperand(0).getReg());
529 
530     Register DstReg = MI.getOperand(0).getReg();
531     if (MRI.getType(DstReg).isVector())
532       MIRBuilder.buildBuildVector(DstReg, DstRegs);
533     else
534       MIRBuilder.buildMerge(DstReg, DstRegs);
535     MI.eraseFromParent();
536     return Legalized;
537   }
538   case TargetOpcode::G_CONSTANT: {
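    // Split the immediate into NarrowTy-sized constants (plus one leftover
    // constant if the width doesn't divide evenly) and reassemble them into
    // the original destination register.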
539     LLT Ty = MRI.getType(MI.getOperand(0).getReg());
540     const APInt &Val = MI.getOperand(1).getCImm()->getValue();
541     unsigned TotalSize = Ty.getSizeInBits();
542     unsigned NarrowSize = NarrowTy.getSizeInBits();
543     int NumParts = TotalSize / NarrowSize;
544 
545     SmallVector<Register, 4> PartRegs;
546     for (int I = 0; I != NumParts; ++I) {
547       unsigned Offset = I * NarrowSize;
548       auto K = MIRBuilder.buildConstant(NarrowTy,
549                                         Val.lshr(Offset).trunc(NarrowSize));
550       PartRegs.push_back(K.getReg(0));
551     }
552 
553     LLT LeftoverTy;
554     unsigned LeftoverBits = TotalSize - NumParts * NarrowSize;
555     SmallVector<Register, 1> LeftoverRegs;
556     if (LeftoverBits != 0) {
557       LeftoverTy = LLT::scalar(LeftoverBits);
558       auto K = MIRBuilder.buildConstant(
559         LeftoverTy,
560         Val.lshr(NumParts * NarrowSize).trunc(LeftoverBits));
561       LeftoverRegs.push_back(K.getReg(0));
562     }
563 
564     insertParts(MI.getOperand(0).getReg(),
565                 Ty, NarrowTy, PartRegs, LeftoverTy, LeftoverRegs);
566 
567     MI.eraseFromParent();
568     return Legalized;
569   }
570   case TargetOpcode::G_SEXT: {
571     if (TypeIdx != 0)
572       return UnableToLegalize;
573 
574     if (NarrowTy.getSizeInBits() != SizeOp0 / 2) {
575       LLVM_DEBUG(dbgs() << "Can't narrow sext to type " << NarrowTy << "\n");
576       return UnableToLegalize;
577     }
578 
579     Register SrcReg = MI.getOperand(1).getReg();
580 
581     // Shift the sign bit of the low register through the high register.
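    // e.g. narrowing %dst:_(s64) = G_SEXT %src:_(s32) with NarrowTy s32:
    //   %hi:_(s32) = G_ASHR %src, 31
    //   %dst:_(s64) = G_MERGE_VALUES %src, %hi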
582     auto ShiftAmt =
583         MIRBuilder.buildConstant(LLT::scalar(64), NarrowTy.getSizeInBits() - 1);
584     auto Shift = MIRBuilder.buildAShr(NarrowTy, SrcReg, ShiftAmt);
585     MIRBuilder.buildMerge(MI.getOperand(0).getReg(), {SrcReg, Shift.getReg(0)});
586     MI.eraseFromParent();
587     return Legalized;
588   }
589 
590   case TargetOpcode::G_ADD: {
591     // FIXME: add support for when SizeOp0 isn't an exact multiple of
592     // NarrowSize.
593     if (SizeOp0 % NarrowSize != 0)
594       return UnableToLegalize;
595     // Expand in terms of carry-setting/consuming G_UADDE instructions.
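    // e.g. narrowing a 64-bit G_ADD with NarrowTy s32:
    //   %zero:_(s1) = G_CONSTANT i1 0
    //   %lo:_(s32), %c1:_(s1) = G_UADDE %lhs_lo, %rhs_lo, %zero
    //   %hi:_(s32), %c2:_(s1) = G_UADDE %lhs_hi, %rhs_hi, %c1
    //   %sum:_(s64) = G_MERGE_VALUES %lo, %hi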
596     int NumParts = SizeOp0 / NarrowTy.getSizeInBits();
597 
598     SmallVector<Register, 2> Src1Regs, Src2Regs, DstRegs;
599     extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs);
600     extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs);
601 
602     Register CarryIn = MRI.createGenericVirtualRegister(LLT::scalar(1));
603     MIRBuilder.buildConstant(CarryIn, 0);
604 
605     for (int i = 0; i < NumParts; ++i) {
606       Register DstReg = MRI.createGenericVirtualRegister(NarrowTy);
607       Register CarryOut = MRI.createGenericVirtualRegister(LLT::scalar(1));
608 
609       MIRBuilder.buildUAdde(DstReg, CarryOut, Src1Regs[i],
610                             Src2Regs[i], CarryIn);
611 
612       DstRegs.push_back(DstReg);
613       CarryIn = CarryOut;
614     }
615     Register DstReg = MI.getOperand(0).getReg();
616     if (MRI.getType(DstReg).isVector())
617       MIRBuilder.buildBuildVector(DstReg, DstRegs);
618     else
619       MIRBuilder.buildMerge(DstReg, DstRegs);
620     MI.eraseFromParent();
621     return Legalized;
622   }
623   case TargetOpcode::G_SUB: {
624     // FIXME: add support for when SizeOp0 isn't an exact multiple of
625     // NarrowSize.
626     if (SizeOp0 % NarrowSize != 0)
627       return UnableToLegalize;
628 
629     int NumParts = SizeOp0 / NarrowTy.getSizeInBits();
630 
631     SmallVector<Register, 2> Src1Regs, Src2Regs, DstRegs;
632     extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs);
633     extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs);
634 
635     Register DstReg = MRI.createGenericVirtualRegister(NarrowTy);
636     Register BorrowOut = MRI.createGenericVirtualRegister(LLT::scalar(1));
637     MIRBuilder.buildInstr(TargetOpcode::G_USUBO, {DstReg, BorrowOut},
638                           {Src1Regs[0], Src2Regs[0]});
639     DstRegs.push_back(DstReg);
640     Register BorrowIn = BorrowOut;
641     for (int i = 1; i < NumParts; ++i) {
642       DstReg = MRI.createGenericVirtualRegister(NarrowTy);
643       BorrowOut = MRI.createGenericVirtualRegister(LLT::scalar(1));
644 
645       MIRBuilder.buildInstr(TargetOpcode::G_USUBE, {DstReg, BorrowOut},
646                             {Src1Regs[i], Src2Regs[i], BorrowIn});
647 
648       DstRegs.push_back(DstReg);
649       BorrowIn = BorrowOut;
650     }
651     MIRBuilder.buildMerge(MI.getOperand(0).getReg(), DstRegs);
652     MI.eraseFromParent();
653     return Legalized;
654   }
655   case TargetOpcode::G_MUL:
656   case TargetOpcode::G_UMULH:
657     return narrowScalarMul(MI, NarrowTy);
658   case TargetOpcode::G_EXTRACT:
659     return narrowScalarExtract(MI, TypeIdx, NarrowTy);
660   case TargetOpcode::G_INSERT:
661     return narrowScalarInsert(MI, TypeIdx, NarrowTy);
662   case TargetOpcode::G_LOAD: {
663     const auto &MMO = **MI.memoperands_begin();
664     Register DstReg = MI.getOperand(0).getReg();
665     LLT DstTy = MRI.getType(DstReg);
666     if (DstTy.isVector())
667       return UnableToLegalize;
668 
669     if (8 * MMO.getSize() != DstTy.getSizeInBits()) {
670       Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
671       auto &MMO = **MI.memoperands_begin();
672       MIRBuilder.buildLoad(TmpReg, MI.getOperand(1).getReg(), MMO);
673       MIRBuilder.buildAnyExt(DstReg, TmpReg);
674       MI.eraseFromParent();
675       return Legalized;
676     }
677 
678     return reduceLoadStoreWidth(MI, TypeIdx, NarrowTy);
679   }
680   case TargetOpcode::G_ZEXTLOAD:
681   case TargetOpcode::G_SEXTLOAD: {
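    // Narrow the load to NarrowTy, keeping it an extending load if the memory
    // size differs from NarrowTy, then re-extend to the original destination
    // register.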
682     bool ZExt = MI.getOpcode() == TargetOpcode::G_ZEXTLOAD;
683     Register DstReg = MI.getOperand(0).getReg();
684     Register PtrReg = MI.getOperand(1).getReg();
685 
686     Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
687     auto &MMO = **MI.memoperands_begin();
688     if (MMO.getSizeInBits() == NarrowSize) {
689       MIRBuilder.buildLoad(TmpReg, PtrReg, MMO);
690     } else {
691       unsigned ExtLoad = ZExt ? TargetOpcode::G_ZEXTLOAD
692         : TargetOpcode::G_SEXTLOAD;
693       MIRBuilder.buildInstr(ExtLoad)
694         .addDef(TmpReg)
695         .addUse(PtrReg)
696         .addMemOperand(&MMO);
697     }
698 
699     if (ZExt)
700       MIRBuilder.buildZExt(DstReg, TmpReg);
701     else
702       MIRBuilder.buildSExt(DstReg, TmpReg);
703 
704     MI.eraseFromParent();
705     return Legalized;
706   }
707   case TargetOpcode::G_STORE: {
708     const auto &MMO = **MI.memoperands_begin();
709 
710     Register SrcReg = MI.getOperand(0).getReg();
711     LLT SrcTy = MRI.getType(SrcReg);
712     if (SrcTy.isVector())
713       return UnableToLegalize;
714 
715     int NumParts = SizeOp0 / NarrowSize;
716     unsigned HandledSize = NumParts * NarrowTy.getSizeInBits();
717     unsigned LeftoverBits = SrcTy.getSizeInBits() - HandledSize;
718     if (SrcTy.isVector() && LeftoverBits != 0)
719       return UnableToLegalize;
720 
721     if (8 * MMO.getSize() != SrcTy.getSizeInBits()) {
722       Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
723       auto &MMO = **MI.memoperands_begin();
724       MIRBuilder.buildTrunc(TmpReg, SrcReg);
725       MIRBuilder.buildStore(TmpReg, MI.getOperand(1).getReg(), MMO);
726       MI.eraseFromParent();
727       return Legalized;
728     }
729 
730     return reduceLoadStoreWidth(MI, 0, NarrowTy);
731   }
732   case TargetOpcode::G_SELECT:
733     return narrowScalarSelect(MI, TypeIdx, NarrowTy);
734   case TargetOpcode::G_AND:
735   case TargetOpcode::G_OR:
736   case TargetOpcode::G_XOR: {
737     // Legalize bitwise operation:
738     // A = BinOp<Ty> B, C
739     // into:
740     // B1, ..., BN = G_UNMERGE_VALUES B
741     // C1, ..., CN = G_UNMERGE_VALUES C
742     // A1 = BinOp<Ty/N> B1, C1
743     // ...
744     // AN = BinOp<Ty/N> BN, CN
745     // A = G_MERGE_VALUES A1, ..., AN
746     return narrowScalarBasic(MI, TypeIdx, NarrowTy);
747   }
748   case TargetOpcode::G_SHL:
749   case TargetOpcode::G_LSHR:
750   case TargetOpcode::G_ASHR:
751     return narrowScalarShift(MI, TypeIdx, NarrowTy);
752   case TargetOpcode::G_CTLZ:
753   case TargetOpcode::G_CTLZ_ZERO_UNDEF:
754   case TargetOpcode::G_CTTZ:
755   case TargetOpcode::G_CTTZ_ZERO_UNDEF:
756   case TargetOpcode::G_CTPOP:
757     if (TypeIdx != 0)
758       return UnableToLegalize; // TODO
759 
760     Observer.changingInstr(MI);
761     narrowScalarDst(MI, NarrowTy, 0, TargetOpcode::G_ZEXT);
762     Observer.changedInstr(MI);
763     return Legalized;
764   case TargetOpcode::G_INTTOPTR:
765     if (TypeIdx != 1)
766       return UnableToLegalize;
767 
768     Observer.changingInstr(MI);
769     narrowScalarSrc(MI, NarrowTy, 1);
770     Observer.changedInstr(MI);
771     return Legalized;
772   case TargetOpcode::G_PTRTOINT:
773     if (TypeIdx != 0)
774       return UnableToLegalize;
775 
776     Observer.changingInstr(MI);
777     narrowScalarDst(MI, NarrowTy, 0, TargetOpcode::G_ZEXT);
778     Observer.changedInstr(MI);
779     return Legalized;
780   case TargetOpcode::G_PHI: {
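    // Split each incoming value into NarrowTy pieces in its predecessor block,
    // build one narrow G_PHI per piece, then merge the pieces back into the
    // original destination register at the top of this block.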
781     unsigned NumParts = SizeOp0 / NarrowSize;
782     SmallVector<Register, 2> DstRegs;
783     SmallVector<SmallVector<Register, 2>, 2> SrcRegs;
784     DstRegs.resize(NumParts);
785     SrcRegs.resize(MI.getNumOperands() / 2);
786     Observer.changingInstr(MI);
787     for (unsigned i = 1; i < MI.getNumOperands(); i += 2) {
788       MachineBasicBlock &OpMBB = *MI.getOperand(i + 1).getMBB();
789       MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminator());
790       extractParts(MI.getOperand(i).getReg(), NarrowTy, NumParts,
791                    SrcRegs[i / 2]);
792     }
793     MachineBasicBlock &MBB = *MI.getParent();
794     MIRBuilder.setInsertPt(MBB, MI);
795     for (unsigned i = 0; i < NumParts; ++i) {
796       DstRegs[i] = MRI.createGenericVirtualRegister(NarrowTy);
797       MachineInstrBuilder MIB =
798           MIRBuilder.buildInstr(TargetOpcode::G_PHI).addDef(DstRegs[i]);
799       for (unsigned j = 1; j < MI.getNumOperands(); j += 2)
800         MIB.addUse(SrcRegs[j / 2][i]).add(MI.getOperand(j + 1));
801     }
802     MIRBuilder.setInsertPt(MBB, --MBB.getFirstNonPHI());
803     MIRBuilder.buildMerge(MI.getOperand(0).getReg(), DstRegs);
804     Observer.changedInstr(MI);
805     MI.eraseFromParent();
806     return Legalized;
807   }
808   case TargetOpcode::G_EXTRACT_VECTOR_ELT:
809   case TargetOpcode::G_INSERT_VECTOR_ELT: {
810     if (TypeIdx != 2)
811       return UnableToLegalize;
812 
813     int OpIdx = MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT ? 2 : 3;
814     Observer.changingInstr(MI);
815     narrowScalarSrc(MI, NarrowTy, OpIdx);
816     Observer.changedInstr(MI);
817     return Legalized;
818   }
819   case TargetOpcode::G_ICMP: {
820     uint64_t SrcSize = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits();
821     if (NarrowSize * 2 != SrcSize)
822       return UnableToLegalize;
823 
824     Observer.changingInstr(MI);
825     Register LHSL = MRI.createGenericVirtualRegister(NarrowTy);
826     Register LHSH = MRI.createGenericVirtualRegister(NarrowTy);
827     MIRBuilder.buildUnmerge({LHSL, LHSH}, MI.getOperand(2).getReg());
828 
829     Register RHSL = MRI.createGenericVirtualRegister(NarrowTy);
830     Register RHSH = MRI.createGenericVirtualRegister(NarrowTy);
831     MIRBuilder.buildUnmerge({RHSL, RHSH}, MI.getOperand(3).getReg());
832 
833     CmpInst::Predicate Pred =
834         static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
835     LLT ResTy = MRI.getType(MI.getOperand(0).getReg());
836 
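    // Compare the two halves separately. EQ/NE reduce to an OR of the XORed
    // halves compared against zero; for other predicates the high-half
    // comparison decides the result unless the high halves are equal, in which
    // case an unsigned comparison of the low halves does.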
837     if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) {
838       MachineInstrBuilder XorL = MIRBuilder.buildXor(NarrowTy, LHSL, RHSL);
839       MachineInstrBuilder XorH = MIRBuilder.buildXor(NarrowTy, LHSH, RHSH);
840       MachineInstrBuilder Or = MIRBuilder.buildOr(NarrowTy, XorL, XorH);
841       MachineInstrBuilder Zero = MIRBuilder.buildConstant(NarrowTy, 0);
842       MIRBuilder.buildICmp(Pred, MI.getOperand(0).getReg(), Or, Zero);
843     } else {
844       MachineInstrBuilder CmpH = MIRBuilder.buildICmp(Pred, ResTy, LHSH, RHSH);
845       MachineInstrBuilder CmpHEQ =
846           MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, ResTy, LHSH, RHSH);
847       MachineInstrBuilder CmpLU = MIRBuilder.buildICmp(
848           ICmpInst::getUnsignedPredicate(Pred), ResTy, LHSL, RHSL);
849       MIRBuilder.buildSelect(MI.getOperand(0).getReg(), CmpHEQ, CmpLU, CmpH);
850     }
851     Observer.changedInstr(MI);
852     MI.eraseFromParent();
853     return Legalized;
854   }
855   }
856 }
857 
858 void LegalizerHelper::widenScalarSrc(MachineInstr &MI, LLT WideTy,
859                                      unsigned OpIdx, unsigned ExtOpcode) {
860   MachineOperand &MO = MI.getOperand(OpIdx);
861   auto ExtB = MIRBuilder.buildInstr(ExtOpcode, {WideTy}, {MO.getReg()});
862   MO.setReg(ExtB->getOperand(0).getReg());
863 }
864 
865 void LegalizerHelper::narrowScalarSrc(MachineInstr &MI, LLT NarrowTy,
866                                       unsigned OpIdx) {
867   MachineOperand &MO = MI.getOperand(OpIdx);
868   auto ExtB = MIRBuilder.buildInstr(TargetOpcode::G_TRUNC, {NarrowTy},
869                                     {MO.getReg()});
870   MO.setReg(ExtB->getOperand(0).getReg());
871 }
872 
873 void LegalizerHelper::widenScalarDst(MachineInstr &MI, LLT WideTy,
874                                      unsigned OpIdx, unsigned TruncOpcode) {
875   MachineOperand &MO = MI.getOperand(OpIdx);
876   Register DstExt = MRI.createGenericVirtualRegister(WideTy);
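  // The trunc back to the original type must come after the instruction being
  // widened, so move the insertion point past it before building the trunc.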
877   MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
878   MIRBuilder.buildInstr(TruncOpcode, {MO.getReg()}, {DstExt});
879   MO.setReg(DstExt);
880 }
881 
882 void LegalizerHelper::narrowScalarDst(MachineInstr &MI, LLT NarrowTy,
883                                       unsigned OpIdx, unsigned ExtOpcode) {
884   MachineOperand &MO = MI.getOperand(OpIdx);
885   Register DstTrunc = MRI.createGenericVirtualRegister(NarrowTy);
886   MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
887   MIRBuilder.buildInstr(ExtOpcode, {MO.getReg()}, {DstTrunc});
888   MO.setReg(DstTrunc);
889 }
890 
891 void LegalizerHelper::moreElementsVectorDst(MachineInstr &MI, LLT WideTy,
892                                             unsigned OpIdx) {
893   MachineOperand &MO = MI.getOperand(OpIdx);
894   Register DstExt = MRI.createGenericVirtualRegister(WideTy);
895   MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
896   MIRBuilder.buildExtract(MO.getReg(), DstExt, 0);
897   MO.setReg(DstExt);
898 }
899 
900 void LegalizerHelper::moreElementsVectorSrc(MachineInstr &MI, LLT MoreTy,
901                                             unsigned OpIdx) {
902   MachineOperand &MO = MI.getOperand(OpIdx);
903 
904   LLT OldTy = MRI.getType(MO.getReg());
905   unsigned OldElts = OldTy.getNumElements();
906   unsigned NewElts = MoreTy.getNumElements();
907 
908   unsigned NumParts = NewElts / OldElts;
909 
910   // Use concat_vectors if the result is a multiple of the number of elements.
911   if (NumParts * OldElts == NewElts) {
912     SmallVector<Register, 8> Parts;
913     Parts.push_back(MO.getReg());
914 
915     Register ImpDef = MIRBuilder.buildUndef(OldTy).getReg(0);
916     for (unsigned I = 1; I != NumParts; ++I)
917       Parts.push_back(ImpDef);
918 
919     auto Concat = MIRBuilder.buildConcatVectors(MoreTy, Parts);
920     MO.setReg(Concat.getReg(0));
921     return;
922   }
923 
924   Register MoreReg = MRI.createGenericVirtualRegister(MoreTy);
925   Register ImpDef = MIRBuilder.buildUndef(MoreTy).getReg(0);
926   MIRBuilder.buildInsert(MoreReg, ImpDef, MO.getReg(), 0);
927   MO.setReg(MoreReg);
928 }
929 
930 LegalizerHelper::LegalizeResult
931 LegalizerHelper::widenScalarMergeValues(MachineInstr &MI, unsigned TypeIdx,
932                                         LLT WideTy) {
933   if (TypeIdx != 1)
934     return UnableToLegalize;
935 
936   Register DstReg = MI.getOperand(0).getReg();
937   LLT DstTy = MRI.getType(DstReg);
938   if (DstTy.isVector())
939     return UnableToLegalize;
940 
941   Register Src1 = MI.getOperand(1).getReg();
942   LLT SrcTy = MRI.getType(Src1);
943   const int DstSize = DstTy.getSizeInBits();
944   const int SrcSize = SrcTy.getSizeInBits();
945   const int WideSize = WideTy.getSizeInBits();
946   const int NumMerge = (DstSize + WideSize - 1) / WideSize;
947 
948   unsigned NumOps = MI.getNumOperands();
949   unsigned NumSrc = MI.getNumOperands() - 1;
950   unsigned PartSize = DstTy.getSizeInBits() / NumSrc;
951 
952   if (WideSize >= DstSize) {
953     // Directly pack the bits in the target type.
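    // e.g. %d:_(s32) = G_MERGE_VALUES %a:_(s8), %b:_(s8), %c:_(s8), %e:_(s8)
    // widened to s64: zero-extend each piece, shift it into position, OR the
    // pieces together, and truncate the result back to s32.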
954     Register ResultReg = MIRBuilder.buildZExt(WideTy, Src1).getReg(0);
955 
956     for (unsigned I = 2; I != NumOps; ++I) {
957       const unsigned Offset = (I - 1) * PartSize;
958 
959       Register SrcReg = MI.getOperand(I).getReg();
960       assert(MRI.getType(SrcReg) == LLT::scalar(PartSize));
961 
962       auto ZextInput = MIRBuilder.buildZExt(WideTy, SrcReg);
963 
964       Register NextResult = I + 1 == NumOps && WideTy == DstTy ? DstReg :
965         MRI.createGenericVirtualRegister(WideTy);
966 
967       auto ShiftAmt = MIRBuilder.buildConstant(WideTy, Offset);
968       auto Shl = MIRBuilder.buildShl(WideTy, ZextInput, ShiftAmt);
969       MIRBuilder.buildOr(NextResult, ResultReg, Shl);
970       ResultReg = NextResult;
971     }
972 
973     if (WideSize > DstSize)
974       MIRBuilder.buildTrunc(DstReg, ResultReg);
975     else if (DstTy.isPointer())
976       MIRBuilder.buildIntToPtr(DstReg, ResultReg);
977 
978     MI.eraseFromParent();
979     return Legalized;
980   }
981 
982   // Unmerge the original values to the GCD type, and recombine to the next
983   // multiple greater than the original type.
984   //
985   // %3:_(s12) = G_MERGE_VALUES %0:_(s4), %1:_(s4), %2:_(s4) -> s6
986   // %4:_(s2), %5:_(s2) = G_UNMERGE_VALUES %0
987   // %6:_(s2), %7:_(s2) = G_UNMERGE_VALUES %1
988   // %8:_(s2), %9:_(s2) = G_UNMERGE_VALUES %2
989   // %10:_(s6) = G_MERGE_VALUES %4, %5, %6
990   // %11:_(s6) = G_MERGE_VALUES %7, %8, %9
991   // %12:_(s12) = G_MERGE_VALUES %10, %11
992   //
993   // Padding with undef if necessary:
994   //
995   // %2:_(s8) = G_MERGE_VALUES %0:_(s4), %1:_(s4) -> s6
996   // %3:_(s2), %4:_(s2) = G_UNMERGE_VALUES %0
997   // %5:_(s2), %6:_(s2) = G_UNMERGE_VALUES %1
998   // %7:_(s2) = G_IMPLICIT_DEF
999   // %8:_(s6) = G_MERGE_VALUES %3, %4, %5
1000   // %9:_(s6) = G_MERGE_VALUES %6, %7, %7
1001   // %10:_(s12) = G_MERGE_VALUES %8, %9
1002 
1003   const int GCD = greatestCommonDivisor(SrcSize, WideSize);
1004   LLT GCDTy = LLT::scalar(GCD);
1005 
1006   SmallVector<Register, 8> Parts;
1007   SmallVector<Register, 8> NewMergeRegs;
1008   SmallVector<Register, 8> Unmerges;
1009   LLT WideDstTy = LLT::scalar(NumMerge * WideSize);
1010 
1011   // Decompose the original operands if they don't evenly divide.
1012   for (int I = 1, E = MI.getNumOperands(); I != E; ++I) {
1013     Register SrcReg = MI.getOperand(I).getReg();
1014     if (GCD == SrcSize) {
1015       Unmerges.push_back(SrcReg);
1016     } else {
1017       auto Unmerge = MIRBuilder.buildUnmerge(GCDTy, SrcReg);
1018       for (int J = 0, JE = Unmerge->getNumOperands() - 1; J != JE; ++J)
1019         Unmerges.push_back(Unmerge.getReg(J));
1020     }
1021   }
1022 
1023   // Pad with undef to the next size that is a multiple of the requested size.
1024   if (static_cast<int>(Unmerges.size()) != NumMerge * WideSize) {
1025     Register UndefReg = MIRBuilder.buildUndef(GCDTy).getReg(0);
1026     for (int I = Unmerges.size(); I != NumMerge * WideSize; ++I)
1027       Unmerges.push_back(UndefReg);
1028   }
1029 
1030   const int PartsPerGCD = WideSize / GCD;
1031 
1032   // Build merges of each piece.
1033   ArrayRef<Register> Slicer(Unmerges);
1034   for (int I = 0; I != NumMerge; ++I, Slicer = Slicer.drop_front(PartsPerGCD)) {
1035     auto Merge = MIRBuilder.buildMerge(WideTy, Slicer.take_front(PartsPerGCD));
1036     NewMergeRegs.push_back(Merge.getReg(0));
1037   }
1038 
1039   // A truncate may be necessary if the requested type doesn't evenly divide the
1040   // original result type.
1041   if (DstTy.getSizeInBits() == WideDstTy.getSizeInBits()) {
1042     MIRBuilder.buildMerge(DstReg, NewMergeRegs);
1043   } else {
1044     auto FinalMerge = MIRBuilder.buildMerge(WideDstTy, NewMergeRegs);
1045     MIRBuilder.buildTrunc(DstReg, FinalMerge.getReg(0));
1046   }
1047 
1048   MI.eraseFromParent();
1049   return Legalized;
1050 }
1051 
1052 LegalizerHelper::LegalizeResult
1053 LegalizerHelper::widenScalarUnmergeValues(MachineInstr &MI, unsigned TypeIdx,
1054                                           LLT WideTy) {
1055   if (TypeIdx != 0)
1056     return UnableToLegalize;
1057 
1058   unsigned NumDst = MI.getNumOperands() - 1;
1059   Register SrcReg = MI.getOperand(NumDst).getReg();
1060   LLT SrcTy = MRI.getType(SrcReg);
1061   if (!SrcTy.isScalar())
1062     return UnableToLegalize;
1063 
1064   Register Dst0Reg = MI.getOperand(0).getReg();
1065   LLT DstTy = MRI.getType(Dst0Reg);
1066   if (!DstTy.isScalar())
1067     return UnableToLegalize;
1068 
1069   unsigned NewSrcSize = NumDst * WideTy.getSizeInBits();
1070   LLT NewSrcTy = LLT::scalar(NewSrcSize);
1071   unsigned SizeDiff = WideTy.getSizeInBits() - DstTy.getSizeInBits();
1072 
1073   auto WideSrc = MIRBuilder.buildZExt(NewSrcTy, SrcReg);
1074 
1075   for (unsigned I = 1; I != NumDst; ++I) {
1076     auto ShiftAmt = MIRBuilder.buildConstant(NewSrcTy, SizeDiff * I);
1077     auto Shl = MIRBuilder.buildShl(NewSrcTy, WideSrc, ShiftAmt);
1078     WideSrc = MIRBuilder.buildOr(NewSrcTy, WideSrc, Shl);
1079   }
1080 
1081   Observer.changingInstr(MI);
1082 
1083   MI.getOperand(NumDst).setReg(WideSrc->getOperand(0).getReg());
1084   for (unsigned I = 0; I != NumDst; ++I)
1085     widenScalarDst(MI, WideTy, I);
1086 
1087   Observer.changedInstr(MI);
1088 
1089   return Legalized;
1090 }
1091 
1092 LegalizerHelper::LegalizeResult
1093 LegalizerHelper::widenScalarExtract(MachineInstr &MI, unsigned TypeIdx,
1094                                     LLT WideTy) {
1095   Register DstReg = MI.getOperand(0).getReg();
1096   Register SrcReg = MI.getOperand(1).getReg();
1097   LLT SrcTy = MRI.getType(SrcReg);
1098 
1099   LLT DstTy = MRI.getType(DstReg);
1100   unsigned Offset = MI.getOperand(2).getImm();
1101 
1102   if (TypeIdx == 0) {
1103     if (SrcTy.isVector() || DstTy.isVector())
1104       return UnableToLegalize;
1105 
1106     SrcOp Src(SrcReg);
1107     if (SrcTy.isPointer()) {
1108       // Extracts from pointers can be handled only if they are really just
1109       // simple integers.
1110       const DataLayout &DL = MIRBuilder.getDataLayout();
1111       if (DL.isNonIntegralAddressSpace(SrcTy.getAddressSpace()))
1112         return UnableToLegalize;
1113 
1114       LLT SrcAsIntTy = LLT::scalar(SrcTy.getSizeInBits());
1115       Src = MIRBuilder.buildPtrToInt(SrcAsIntTy, Src);
1116       SrcTy = SrcAsIntTy;
1117     }
1118 
1119     if (DstTy.isPointer())
1120       return UnableToLegalize;
1121 
1122     if (Offset == 0) {
1123       // Avoid a shift in the degenerate case.
1124       MIRBuilder.buildTrunc(DstReg,
1125                             MIRBuilder.buildAnyExtOrTrunc(WideTy, Src));
1126       MI.eraseFromParent();
1127       return Legalized;
1128     }
1129 
1130     // Do a shift in the source type.
1131     LLT ShiftTy = SrcTy;
1132     if (WideTy.getSizeInBits() > SrcTy.getSizeInBits()) {
1133       Src = MIRBuilder.buildAnyExt(WideTy, Src);
1134       ShiftTy = WideTy;
1135     }
1137 
1138     auto LShr = MIRBuilder.buildLShr(
1139       ShiftTy, Src, MIRBuilder.buildConstant(ShiftTy, Offset));
1140     MIRBuilder.buildTrunc(DstReg, LShr);
1141     MI.eraseFromParent();
1142     return Legalized;
1143   }
1144 
1145   if (SrcTy.isScalar()) {
1146     Observer.changingInstr(MI);
1147     widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
1148     Observer.changedInstr(MI);
1149     return Legalized;
1150   }
1151 
1152   if (!SrcTy.isVector())
1153     return UnableToLegalize;
1154 
1155   if (DstTy != SrcTy.getElementType())
1156     return UnableToLegalize;
1157 
1158   if (Offset % SrcTy.getScalarSizeInBits() != 0)
1159     return UnableToLegalize;
1160 
1161   Observer.changingInstr(MI);
1162   widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
1163 
1164   MI.getOperand(2).setImm((WideTy.getSizeInBits() / SrcTy.getSizeInBits()) *
1165                           Offset);
1166   widenScalarDst(MI, WideTy.getScalarType(), 0);
1167   Observer.changedInstr(MI);
1168   return Legalized;
1169 }
1170 
1171 LegalizerHelper::LegalizeResult
1172 LegalizerHelper::widenScalarInsert(MachineInstr &MI, unsigned TypeIdx,
1173                                    LLT WideTy) {
1174   if (TypeIdx != 0)
1175     return UnableToLegalize;
1176   Observer.changingInstr(MI);
1177   widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
1178   widenScalarDst(MI, WideTy);
1179   Observer.changedInstr(MI);
1180   return Legalized;
1181 }
1182 
1183 LegalizerHelper::LegalizeResult
1184 LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
1185   MIRBuilder.setInstr(MI);
1186 
1187   switch (MI.getOpcode()) {
1188   default:
1189     return UnableToLegalize;
1190   case TargetOpcode::G_EXTRACT:
1191     return widenScalarExtract(MI, TypeIdx, WideTy);
1192   case TargetOpcode::G_INSERT:
1193     return widenScalarInsert(MI, TypeIdx, WideTy);
1194   case TargetOpcode::G_MERGE_VALUES:
1195     return widenScalarMergeValues(MI, TypeIdx, WideTy);
1196   case TargetOpcode::G_UNMERGE_VALUES:
1197     return widenScalarUnmergeValues(MI, TypeIdx, WideTy);
1198   case TargetOpcode::G_UADDO:
1199   case TargetOpcode::G_USUBO: {
1200     if (TypeIdx == 1)
1201       return UnableToLegalize; // TODO
1202     auto LHSZext = MIRBuilder.buildInstr(TargetOpcode::G_ZEXT, {WideTy},
1203                                          {MI.getOperand(2).getReg()});
1204     auto RHSZext = MIRBuilder.buildInstr(TargetOpcode::G_ZEXT, {WideTy},
1205                                          {MI.getOperand(3).getReg()});
1206     unsigned Opcode = MI.getOpcode() == TargetOpcode::G_UADDO
1207                           ? TargetOpcode::G_ADD
1208                           : TargetOpcode::G_SUB;
1209     // Do the arithmetic in the larger type.
1210     auto NewOp = MIRBuilder.buildInstr(Opcode, {WideTy}, {LHSZext, RHSZext});
1211     LLT OrigTy = MRI.getType(MI.getOperand(0).getReg());
1212     APInt Mask = APInt::getAllOnesValue(OrigTy.getSizeInBits());
1213     auto AndOp = MIRBuilder.buildInstr(
1214         TargetOpcode::G_AND, {WideTy},
1215         {NewOp, MIRBuilder.buildConstant(WideTy, Mask.getZExtValue())});
1216     // There is no overflow if the AndOp is the same as NewOp.
1217     MIRBuilder.buildICmp(CmpInst::ICMP_NE, MI.getOperand(1).getReg(), NewOp,
1218                          AndOp);
1219     // Now trunc the NewOp to the original result.
1220     MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), NewOp);
1221     MI.eraseFromParent();
1222     return Legalized;
1223   }
1224   case TargetOpcode::G_CTTZ:
1225   case TargetOpcode::G_CTTZ_ZERO_UNDEF:
1226   case TargetOpcode::G_CTLZ:
1227   case TargetOpcode::G_CTLZ_ZERO_UNDEF:
1228   case TargetOpcode::G_CTPOP: {
1229     if (TypeIdx == 0) {
1230       Observer.changingInstr(MI);
1231       widenScalarDst(MI, WideTy, 0);
1232       Observer.changedInstr(MI);
1233       return Legalized;
1234     }
1235 
1236     Register SrcReg = MI.getOperand(1).getReg();
1237 
1238     // First ZEXT the input.
1239     auto MIBSrc = MIRBuilder.buildZExt(WideTy, SrcReg);
1240     LLT CurTy = MRI.getType(SrcReg);
1241     if (MI.getOpcode() == TargetOpcode::G_CTTZ) {
1242       // The count is the same in the larger type except if the original
1243       // value was zero.  This can be handled by setting the bit just off
1244       // the top of the original type.
1245       auto TopBit =
1246           APInt::getOneBitSet(WideTy.getSizeInBits(), CurTy.getSizeInBits());
1247       MIBSrc = MIRBuilder.buildOr(
1248         WideTy, MIBSrc, MIRBuilder.buildConstant(WideTy, TopBit));
1249     }
1250 
1251     // Perform the operation at the larger size.
1252     auto MIBNewOp = MIRBuilder.buildInstr(MI.getOpcode(), {WideTy}, {MIBSrc});
1253     // This is already the correct result for CTPOP and CTTZs
1254     if (MI.getOpcode() == TargetOpcode::G_CTLZ ||
1255         MI.getOpcode() == TargetOpcode::G_CTLZ_ZERO_UNDEF) {
1256       // The correct result is NewOp - (Difference in widety and current ty).
1257       unsigned SizeDiff = WideTy.getSizeInBits() - CurTy.getSizeInBits();
1258       MIBNewOp = MIRBuilder.buildInstr(
1259           TargetOpcode::G_SUB, {WideTy},
1260           {MIBNewOp, MIRBuilder.buildConstant(WideTy, SizeDiff)});
1261     }
1262 
1263     MIRBuilder.buildZExtOrTrunc(MI.getOperand(0), MIBNewOp);
1264     MI.eraseFromParent();
1265     return Legalized;
1266   }
1267   case TargetOpcode::G_BSWAP: {
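    // Byte-swap the any-extended value at the wider width, then shift the
    // result right by the number of extra bits and truncate back down; e.g. an
    // s16 G_BSWAP widened to s32 becomes an s32 G_BSWAP followed by a logical
    // shift right of 16.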
1268     Observer.changingInstr(MI);
1269     Register DstReg = MI.getOperand(0).getReg();
1270 
1271     Register ShrReg = MRI.createGenericVirtualRegister(WideTy);
1272     Register DstExt = MRI.createGenericVirtualRegister(WideTy);
1273     Register ShiftAmtReg = MRI.createGenericVirtualRegister(WideTy);
1274     widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
1275 
1276     MI.getOperand(0).setReg(DstExt);
1277 
1278     MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
1279 
1280     LLT Ty = MRI.getType(DstReg);
1281     unsigned DiffBits = WideTy.getScalarSizeInBits() - Ty.getScalarSizeInBits();
1282     MIRBuilder.buildConstant(ShiftAmtReg, DiffBits);
1283     MIRBuilder.buildInstr(TargetOpcode::G_LSHR)
1284       .addDef(ShrReg)
1285       .addUse(DstExt)
1286       .addUse(ShiftAmtReg);
1287 
1288     MIRBuilder.buildTrunc(DstReg, ShrReg);
1289     Observer.changedInstr(MI);
1290     return Legalized;
1291   }
1292   case TargetOpcode::G_ADD:
1293   case TargetOpcode::G_AND:
1294   case TargetOpcode::G_MUL:
1295   case TargetOpcode::G_OR:
1296   case TargetOpcode::G_XOR:
1297   case TargetOpcode::G_SUB:
1298     // Perform operation at larger width (any extension is fine here, high bits
1299     // don't affect the result) and then truncate the result back to the
1300     // original type.
1301     Observer.changingInstr(MI);
1302     widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
1303     widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ANYEXT);
1304     widenScalarDst(MI, WideTy);
1305     Observer.changedInstr(MI);
1306     return Legalized;
1307 
1308   case TargetOpcode::G_SHL:
1309     Observer.changingInstr(MI);
1310 
1311     if (TypeIdx == 0) {
1312       widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
1313       widenScalarDst(MI, WideTy);
1314     } else {
1315       assert(TypeIdx == 1);
1316       // The "number of bits to shift" operand must preserve its value as an
1317       // unsigned integer:
1318       widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
1319     }
1320 
1321     Observer.changedInstr(MI);
1322     return Legalized;
1323 
1324   case TargetOpcode::G_SDIV:
1325   case TargetOpcode::G_SREM:
1326   case TargetOpcode::G_SMIN:
1327   case TargetOpcode::G_SMAX:
1328     Observer.changingInstr(MI);
1329     widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_SEXT);
1330     widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT);
1331     widenScalarDst(MI, WideTy);
1332     Observer.changedInstr(MI);
1333     return Legalized;
1334 
1335   case TargetOpcode::G_ASHR:
1336   case TargetOpcode::G_LSHR:
1337     Observer.changingInstr(MI);
1338 
1339     if (TypeIdx == 0) {
1340       unsigned CvtOp = MI.getOpcode() == TargetOpcode::G_ASHR ?
1341         TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT;
1342 
1343       widenScalarSrc(MI, WideTy, 1, CvtOp);
1344       widenScalarDst(MI, WideTy);
1345     } else {
1346       assert(TypeIdx == 1);
1347       // The "number of bits to shift" operand must preserve its value as an
1348       // unsigned integer:
1349       widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
1350     }
1351 
1352     Observer.changedInstr(MI);
1353     return Legalized;
1354   case TargetOpcode::G_UDIV:
1355   case TargetOpcode::G_UREM:
1356   case TargetOpcode::G_UMIN:
1357   case TargetOpcode::G_UMAX:
1358     Observer.changingInstr(MI);
1359     widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ZEXT);
1360     widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
1361     widenScalarDst(MI, WideTy);
1362     Observer.changedInstr(MI);
1363     return Legalized;
1364 
1365   case TargetOpcode::G_SELECT:
1366     Observer.changingInstr(MI);
1367     if (TypeIdx == 0) {
1368       // Perform operation at larger width (any extension is fine here, high
1369       // bits don't affect the result) and then truncate the result back to the
1370       // original type.
1371       widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ANYEXT);
1372       widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_ANYEXT);
1373       widenScalarDst(MI, WideTy);
1374     } else {
1375       bool IsVec = MRI.getType(MI.getOperand(1).getReg()).isVector();
1376       // Explicit extension is required here since high bits affect the result.
1377       widenScalarSrc(MI, WideTy, 1, MIRBuilder.getBoolExtOp(IsVec, false));
1378     }
1379     Observer.changedInstr(MI);
1380     return Legalized;
1381 
1382   case TargetOpcode::G_FPTOSI:
1383   case TargetOpcode::G_FPTOUI:
1384     if (TypeIdx != 0)
1385       return UnableToLegalize;
1386     Observer.changingInstr(MI);
1387     widenScalarDst(MI, WideTy);
1388     Observer.changedInstr(MI);
1389     return Legalized;
1390 
1391   case TargetOpcode::G_SITOFP:
1392     if (TypeIdx != 1)
1393       return UnableToLegalize;
1394     Observer.changingInstr(MI);
1395     widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_SEXT);
1396     Observer.changedInstr(MI);
1397     return Legalized;
1398 
1399   case TargetOpcode::G_UITOFP:
1400     if (TypeIdx != 1)
1401       return UnableToLegalize;
1402     Observer.changingInstr(MI);
1403     widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ZEXT);
1404     Observer.changedInstr(MI);
1405     return Legalized;
1406 
1407   case TargetOpcode::G_LOAD:
1408   case TargetOpcode::G_SEXTLOAD:
1409   case TargetOpcode::G_ZEXTLOAD:
1410     Observer.changingInstr(MI);
1411     widenScalarDst(MI, WideTy);
1412     Observer.changedInstr(MI);
1413     return Legalized;
1414 
1415   case TargetOpcode::G_STORE: {
1416     if (TypeIdx != 0)
1417       return UnableToLegalize;
1418 
1419     LLT Ty = MRI.getType(MI.getOperand(0).getReg());
1420     if (!isPowerOf2_32(Ty.getSizeInBits()))
1421       return UnableToLegalize;
1422 
1423     Observer.changingInstr(MI);
1424 
1425     unsigned ExtType = Ty.getScalarSizeInBits() == 1 ?
1426       TargetOpcode::G_ZEXT : TargetOpcode::G_ANYEXT;
1427     widenScalarSrc(MI, WideTy, 0, ExtType);
1428 
1429     Observer.changedInstr(MI);
1430     return Legalized;
1431   }
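  // The immediate is sign-extended to the wide type and widenScalarDst then
  // re-truncates the result, so e.g. an s16 -1 widened to s32 is still -1.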
1432   case TargetOpcode::G_CONSTANT: {
1433     MachineOperand &SrcMO = MI.getOperand(1);
1434     LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();
1435     const APInt &Val = SrcMO.getCImm()->getValue().sext(WideTy.getSizeInBits());
1436     Observer.changingInstr(MI);
1437     SrcMO.setCImm(ConstantInt::get(Ctx, Val));
1438 
1439     widenScalarDst(MI, WideTy);
1440     Observer.changedInstr(MI);
1441     return Legalized;
1442   }
1443   case TargetOpcode::G_FCONSTANT: {
1444     MachineOperand &SrcMO = MI.getOperand(1);
1445     LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();
1446     APFloat Val = SrcMO.getFPImm()->getValueAPF();
1447     bool LosesInfo;
1448     switch (WideTy.getSizeInBits()) {
1449     case 32:
1450       Val.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
1451                   &LosesInfo);
1452       break;
1453     case 64:
1454       Val.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven,
1455                   &LosesInfo);
1456       break;
1457     default:
1458       return UnableToLegalize;
1459     }
1460 
1461     assert(!LosesInfo && "extend should always be lossless");
1462 
1463     Observer.changingInstr(MI);
1464     SrcMO.setFPImm(ConstantFP::get(Ctx, Val));
1465 
1466     widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC);
1467     Observer.changedInstr(MI);
1468     return Legalized;
1469   }
1470   case TargetOpcode::G_IMPLICIT_DEF: {
1471     Observer.changingInstr(MI);
1472     widenScalarDst(MI, WideTy);
1473     Observer.changedInstr(MI);
1474     return Legalized;
1475   }
1476   case TargetOpcode::G_BRCOND:
1477     Observer.changingInstr(MI);
1478     widenScalarSrc(MI, WideTy, 0, MIRBuilder.getBoolExtOp(false, false));
1479     Observer.changedInstr(MI);
1480     return Legalized;
1481 
1482   case TargetOpcode::G_FCMP:
1483     Observer.changingInstr(MI);
1484     if (TypeIdx == 0)
1485       widenScalarDst(MI, WideTy);
1486     else {
1487       widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_FPEXT);
1488       widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_FPEXT);
1489     }
1490     Observer.changedInstr(MI);
1491     return Legalized;
1492 
1493   case TargetOpcode::G_ICMP:
1494     Observer.changingInstr(MI);
1495     if (TypeIdx == 0)
1496       widenScalarDst(MI, WideTy);
1497     else {
1498       unsigned ExtOpcode = CmpInst::isSigned(static_cast<CmpInst::Predicate>(
1499                                MI.getOperand(1).getPredicate()))
1500                                ? TargetOpcode::G_SEXT
1501                                : TargetOpcode::G_ZEXT;
1502       widenScalarSrc(MI, WideTy, 2, ExtOpcode);
1503       widenScalarSrc(MI, WideTy, 3, ExtOpcode);
1504     }
1505     Observer.changedInstr(MI);
1506     return Legalized;
1507 
1508   case TargetOpcode::G_GEP:
1509     assert(TypeIdx == 1 && "unable to legalize pointer of GEP");
1510     Observer.changingInstr(MI);
1511     widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT);
1512     Observer.changedInstr(MI);
1513     return Legalized;
1514 
1515   case TargetOpcode::G_PHI: {
1516     assert(TypeIdx == 0 && "Expecting only Idx 0");
1517 
1518     Observer.changingInstr(MI);
1519     for (unsigned I = 1; I < MI.getNumOperands(); I += 2) {
1520       MachineBasicBlock &OpMBB = *MI.getOperand(I + 1).getMBB();
1521       MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminator());
1522       widenScalarSrc(MI, WideTy, I, TargetOpcode::G_ANYEXT);
1523     }
1524 
1525     MachineBasicBlock &MBB = *MI.getParent();
1526     MIRBuilder.setInsertPt(MBB, --MBB.getFirstNonPHI());
1527     widenScalarDst(MI, WideTy);
1528     Observer.changedInstr(MI);
1529     return Legalized;
1530   }
1531   case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1532     if (TypeIdx == 0) {
1533       Register VecReg = MI.getOperand(1).getReg();
1534       LLT VecTy = MRI.getType(VecReg);
1535       Observer.changingInstr(MI);
1536 
1537       widenScalarSrc(MI, LLT::vector(VecTy.getNumElements(),
1538                                      WideTy.getSizeInBits()),
1539                      1, TargetOpcode::G_SEXT);
1540 
1541       widenScalarDst(MI, WideTy, 0);
1542       Observer.changedInstr(MI);
1543       return Legalized;
1544     }
1545 
1546     if (TypeIdx != 2)
1547       return UnableToLegalize;
1548     Observer.changingInstr(MI);
1549     widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT);
1550     Observer.changedInstr(MI);
1551     return Legalized;
1552   }
1553   case TargetOpcode::G_FADD:
1554   case TargetOpcode::G_FMUL:
1555   case TargetOpcode::G_FSUB:
1556   case TargetOpcode::G_FMA:
1557   case TargetOpcode::G_FNEG:
1558   case TargetOpcode::G_FABS:
1559   case TargetOpcode::G_FCANONICALIZE:
1560   case TargetOpcode::G_FMINNUM:
1561   case TargetOpcode::G_FMAXNUM:
1562   case TargetOpcode::G_FMINNUM_IEEE:
1563   case TargetOpcode::G_FMAXNUM_IEEE:
1564   case TargetOpcode::G_FMINIMUM:
1565   case TargetOpcode::G_FMAXIMUM:
1566   case TargetOpcode::G_FDIV:
1567   case TargetOpcode::G_FREM:
1568   case TargetOpcode::G_FCEIL:
1569   case TargetOpcode::G_FFLOOR:
1570   case TargetOpcode::G_FCOS:
1571   case TargetOpcode::G_FSIN:
1572   case TargetOpcode::G_FLOG10:
1573   case TargetOpcode::G_FLOG:
1574   case TargetOpcode::G_FLOG2:
1575   case TargetOpcode::G_FRINT:
1576   case TargetOpcode::G_FNEARBYINT:
1577   case TargetOpcode::G_FSQRT:
1578   case TargetOpcode::G_FEXP:
1579   case TargetOpcode::G_FEXP2:
1580   case TargetOpcode::G_FPOW:
1581   case TargetOpcode::G_INTRINSIC_TRUNC:
1582   case TargetOpcode::G_INTRINSIC_ROUND:
1583     assert(TypeIdx == 0);
1584     Observer.changingInstr(MI);
1585 
1586     for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I)
1587       widenScalarSrc(MI, WideTy, I, TargetOpcode::G_FPEXT);
1588 
1589     widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC);
1590     Observer.changedInstr(MI);
1591     return Legalized;
1592   case TargetOpcode::G_INTTOPTR:
1593     if (TypeIdx != 1)
1594       return UnableToLegalize;
1595 
1596     Observer.changingInstr(MI);
1597     widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ZEXT);
1598     Observer.changedInstr(MI);
1599     return Legalized;
1600   case TargetOpcode::G_PTRTOINT:
1601     if (TypeIdx != 0)
1602       return UnableToLegalize;
1603 
1604     Observer.changingInstr(MI);
1605     widenScalarDst(MI, WideTy, 0);
1606     Observer.changedInstr(MI);
1607     return Legalized;
1608   case TargetOpcode::G_BUILD_VECTOR: {
1609     Observer.changingInstr(MI);
1610 
1611     const LLT WideEltTy = TypeIdx == 1 ? WideTy : WideTy.getElementType();
1612     for (int I = 1, E = MI.getNumOperands(); I != E; ++I)
1613       widenScalarSrc(MI, WideEltTy, I, TargetOpcode::G_ANYEXT);
1614 
1615     // Avoid changing the result vector type if the source element type was
1616     // requested.
1617     if (TypeIdx == 1) {
1618       auto &TII = *MI.getMF()->getSubtarget().getInstrInfo();
1619       MI.setDesc(TII.get(TargetOpcode::G_BUILD_VECTOR_TRUNC));
1620     } else {
1621       widenScalarDst(MI, WideTy, 0);
1622     }
1623 
1624     Observer.changedInstr(MI);
1625     return Legalized;
1626   }
1627   }
1628 }
1629 
1630 LegalizerHelper::LegalizeResult
1631 LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
1632   using namespace TargetOpcode;
1633   MIRBuilder.setInstr(MI);
1634 
1635   switch(MI.getOpcode()) {
1636   default:
1637     return UnableToLegalize;
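  // Lower integer remainder in terms of the corresponding division:
  //   x % y == x - (x / y) * y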
1638   case TargetOpcode::G_SREM:
1639   case TargetOpcode::G_UREM: {
1640     Register QuotReg = MRI.createGenericVirtualRegister(Ty);
1641     MIRBuilder.buildInstr(MI.getOpcode() == G_SREM ? G_SDIV : G_UDIV)
1642         .addDef(QuotReg)
1643         .addUse(MI.getOperand(1).getReg())
1644         .addUse(MI.getOperand(2).getReg());
1645 
1646     Register ProdReg = MRI.createGenericVirtualRegister(Ty);
1647     MIRBuilder.buildMul(ProdReg, QuotReg, MI.getOperand(2).getReg());
1648     MIRBuilder.buildSub(MI.getOperand(0).getReg(), MI.getOperand(1).getReg(),
1649                         ProdReg);
1650     MI.eraseFromParent();
1651     return Legalized;
1652   }
1653   case TargetOpcode::G_SMULO:
1654   case TargetOpcode::G_UMULO: {
1655     // Generate G_UMULH/G_SMULH to check for overflow and a normal G_MUL for the
1656     // result.
1657     Register Res = MI.getOperand(0).getReg();
1658     Register Overflow = MI.getOperand(1).getReg();
1659     Register LHS = MI.getOperand(2).getReg();
1660     Register RHS = MI.getOperand(3).getReg();
1661 
1662     MIRBuilder.buildMul(Res, LHS, RHS);
1663 
1664     unsigned Opcode = MI.getOpcode() == TargetOpcode::G_SMULO
1665                           ? TargetOpcode::G_SMULH
1666                           : TargetOpcode::G_UMULH;
1667 
1668     Register HiPart = MRI.createGenericVirtualRegister(Ty);
1669     MIRBuilder.buildInstr(Opcode)
1670       .addDef(HiPart)
1671       .addUse(LHS)
1672       .addUse(RHS);
1673 
1674     Register Zero = MRI.createGenericVirtualRegister(Ty);
1675     MIRBuilder.buildConstant(Zero, 0);
1676 
1677     // For *signed* multiply, overflow is detected by checking:
1678     // (hi != (lo >> (bitwidth - 1)))
1679     if (Opcode == TargetOpcode::G_SMULH) {
1680       Register Shifted = MRI.createGenericVirtualRegister(Ty);
1681       Register ShiftAmt = MRI.createGenericVirtualRegister(Ty);
1682       MIRBuilder.buildConstant(ShiftAmt, Ty.getSizeInBits() - 1);
1683       MIRBuilder.buildInstr(TargetOpcode::G_ASHR)
1684         .addDef(Shifted)
1685         .addUse(Res)
1686         .addUse(ShiftAmt);
1687       MIRBuilder.buildICmp(CmpInst::ICMP_NE, Overflow, HiPart, Shifted);
1688     } else {
1689       MIRBuilder.buildICmp(CmpInst::ICMP_NE, Overflow, HiPart, Zero);
1690     }
1691     MI.eraseFromParent();
1692     return Legalized;
1693   }
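  // Lower G_FNEG as a subtraction from the zero value used for FP negation,
  // i.e. roughly fneg x -> fsub -0.0, x.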
1694   case TargetOpcode::G_FNEG: {
1695     // TODO: Handle vector types once we are able to represent them.
1697     if (Ty.isVector())
1698       return UnableToLegalize;
1699     Register Res = MI.getOperand(0).getReg();
1700     Type *ZeroTy;
1701     LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();
1702     switch (Ty.getSizeInBits()) {
1703     case 16:
1704       ZeroTy = Type::getHalfTy(Ctx);
1705       break;
1706     case 32:
1707       ZeroTy = Type::getFloatTy(Ctx);
1708       break;
1709     case 64:
1710       ZeroTy = Type::getDoubleTy(Ctx);
1711       break;
1712     case 128:
1713       ZeroTy = Type::getFP128Ty(Ctx);
1714       break;
1715     default:
1716       llvm_unreachable("unexpected floating-point type");
1717     }
1718     ConstantFP &ZeroForNegation =
1719         *cast<ConstantFP>(ConstantFP::getZeroValueForNegation(ZeroTy));
1720     auto Zero = MIRBuilder.buildFConstant(Ty, ZeroForNegation);
1721     Register SubByReg = MI.getOperand(1).getReg();
1722     Register ZeroReg = Zero->getOperand(0).getReg();
1723     MIRBuilder.buildInstr(TargetOpcode::G_FSUB, {Res}, {ZeroReg, SubByReg},
1724                           MI.getFlags());
1725     MI.eraseFromParent();
1726     return Legalized;
1727   }
1728   case TargetOpcode::G_FSUB: {
1729     // Lower (G_FSUB LHS, RHS) to (G_FADD LHS, (G_FNEG RHS)).
1730     // First, check if G_FNEG is marked as Lower. If so, we may
1731     // end up with an infinite loop as G_FSUB is used to legalize G_FNEG.
1732     if (LI.getAction({G_FNEG, {Ty}}).Action == Lower)
1733       return UnableToLegalize;
1734     Register Res = MI.getOperand(0).getReg();
1735     Register LHS = MI.getOperand(1).getReg();
1736     Register RHS = MI.getOperand(2).getReg();
1737     Register Neg = MRI.createGenericVirtualRegister(Ty);
1738     MIRBuilder.buildInstr(TargetOpcode::G_FNEG).addDef(Neg).addUse(RHS);
1739     MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Res}, {LHS, Neg}, MI.getFlags());
1740     MI.eraseFromParent();
1741     return Legalized;
1742   }
1743   case TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS: {
1744     Register OldValRes = MI.getOperand(0).getReg();
1745     Register SuccessRes = MI.getOperand(1).getReg();
1746     Register Addr = MI.getOperand(2).getReg();
1747     Register CmpVal = MI.getOperand(3).getReg();
1748     Register NewVal = MI.getOperand(4).getReg();
1749     MIRBuilder.buildAtomicCmpXchg(OldValRes, Addr, CmpVal, NewVal,
1750                                   **MI.memoperands_begin());
1751     MIRBuilder.buildICmp(CmpInst::ICMP_EQ, SuccessRes, OldValRes, CmpVal);
1752     MI.eraseFromParent();
1753     return Legalized;
1754   }
1755   case TargetOpcode::G_LOAD:
1756   case TargetOpcode::G_SEXTLOAD:
1757   case TargetOpcode::G_ZEXTLOAD: {
1758     // Lower to a memory-width G_LOAD and a G_SEXT/G_ZEXT/G_ANYEXT
1759     Register DstReg = MI.getOperand(0).getReg();
1760     Register PtrReg = MI.getOperand(1).getReg();
1761     LLT DstTy = MRI.getType(DstReg);
1762     auto &MMO = **MI.memoperands_begin();
1763 
1764     if (DstTy.getSizeInBits() == MMO.getSize() /* in bytes */ * 8) {
1765       // In the case of G_LOAD, this was a non-extending load already and we're
1766       // about to lower to the same instruction.
1767       if (MI.getOpcode() == TargetOpcode::G_LOAD)
1768         return UnableToLegalize;
1769       MIRBuilder.buildLoad(DstReg, PtrReg, MMO);
1770       MI.eraseFromParent();
1771       return Legalized;
1772     }
1773 
1774     if (DstTy.isScalar()) {
1775       Register TmpReg =
1776           MRI.createGenericVirtualRegister(LLT::scalar(MMO.getSizeInBits()));
1777       MIRBuilder.buildLoad(TmpReg, PtrReg, MMO);
1778       switch (MI.getOpcode()) {
1779       default:
1780         llvm_unreachable("Unexpected opcode");
1781       case TargetOpcode::G_LOAD:
1782         MIRBuilder.buildAnyExt(DstReg, TmpReg);
1783         break;
1784       case TargetOpcode::G_SEXTLOAD:
1785         MIRBuilder.buildSExt(DstReg, TmpReg);
1786         break;
1787       case TargetOpcode::G_ZEXTLOAD:
1788         MIRBuilder.buildZExt(DstReg, TmpReg);
1789         break;
1790       }
1791       MI.eraseFromParent();
1792       return Legalized;
1793     }
1794 
1795     return UnableToLegalize;
1796   }
1797   case TargetOpcode::G_CTLZ_ZERO_UNDEF:
1798   case TargetOpcode::G_CTTZ_ZERO_UNDEF:
1799   case TargetOpcode::G_CTLZ:
1800   case TargetOpcode::G_CTTZ:
1801   case TargetOpcode::G_CTPOP:
1802     return lowerBitCount(MI, TypeIdx, Ty);
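  // Lower G_UADDO to a wrapping G_ADD; the carry-out is set when the result
  // compares unsigned-less-than one of the addends.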
1803   case G_UADDO: {
1804     Register Res = MI.getOperand(0).getReg();
1805     Register CarryOut = MI.getOperand(1).getReg();
1806     Register LHS = MI.getOperand(2).getReg();
1807     Register RHS = MI.getOperand(3).getReg();
1808 
1809     MIRBuilder.buildAdd(Res, LHS, RHS);
1810     MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CarryOut, Res, RHS);
1811 
1812     MI.eraseFromParent();
1813     return Legalized;
1814   }
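  // Lower G_UADDE by adding the operands and the zero-extended carry-in; the
  // carry-out is set when the final sum compares unsigned-less-than LHS.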
1815   case G_UADDE: {
1816     Register Res = MI.getOperand(0).getReg();
1817     Register CarryOut = MI.getOperand(1).getReg();
1818     Register LHS = MI.getOperand(2).getReg();
1819     Register RHS = MI.getOperand(3).getReg();
1820     Register CarryIn = MI.getOperand(4).getReg();
1821 
1822     Register TmpRes = MRI.createGenericVirtualRegister(Ty);
1823     Register ZExtCarryIn = MRI.createGenericVirtualRegister(Ty);
1824 
1825     MIRBuilder.buildAdd(TmpRes, LHS, RHS);
1826     MIRBuilder.buildZExt(ZExtCarryIn, CarryIn);
1827     MIRBuilder.buildAdd(Res, TmpRes, ZExtCarryIn);
1828     MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CarryOut, Res, LHS);
1829 
1830     MI.eraseFromParent();
1831     return Legalized;
1832   }
1833   case G_USUBO: {
1834     Register Res = MI.getOperand(0).getReg();
1835     Register BorrowOut = MI.getOperand(1).getReg();
1836     Register LHS = MI.getOperand(2).getReg();
1837     Register RHS = MI.getOperand(3).getReg();
1838 
1839     MIRBuilder.buildSub(Res, LHS, RHS);
1840     MIRBuilder.buildICmp(CmpInst::ICMP_ULT, BorrowOut, LHS, RHS);
1841 
1842     MI.eraseFromParent();
1843     return Legalized;
1844   }
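  // Lower G_USUBE by subtracting RHS and the zero-extended borrow-in; the
  // borrow-out is the incoming borrow when LHS == RHS, and LHS <u RHS
  // otherwise.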
1845   case G_USUBE: {
1846     Register Res = MI.getOperand(0).getReg();
1847     Register BorrowOut = MI.getOperand(1).getReg();
1848     Register LHS = MI.getOperand(2).getReg();
1849     Register RHS = MI.getOperand(3).getReg();
1850     Register BorrowIn = MI.getOperand(4).getReg();
1851 
1852     Register TmpRes = MRI.createGenericVirtualRegister(Ty);
1853     Register ZExtBorrowIn = MRI.createGenericVirtualRegister(Ty);
1854     Register LHS_EQ_RHS = MRI.createGenericVirtualRegister(LLT::scalar(1));
1855     Register LHS_ULT_RHS = MRI.createGenericVirtualRegister(LLT::scalar(1));
1856 
1857     MIRBuilder.buildSub(TmpRes, LHS, RHS);
1858     MIRBuilder.buildZExt(ZExtBorrowIn, BorrowIn);
1859     MIRBuilder.buildSub(Res, TmpRes, ZExtBorrowIn);
1860     MIRBuilder.buildICmp(CmpInst::ICMP_EQ, LHS_EQ_RHS, LHS, RHS);
1861     MIRBuilder.buildICmp(CmpInst::ICMP_ULT, LHS_ULT_RHS, LHS, RHS);
1862     MIRBuilder.buildSelect(BorrowOut, LHS_EQ_RHS, BorrowIn, LHS_ULT_RHS);
1863 
1864     MI.eraseFromParent();
1865     return Legalized;
1866   }
1867   case G_UITOFP:
1868     return lowerUITOFP(MI, TypeIdx, Ty);
1869   case G_SITOFP:
1870     return lowerSITOFP(MI, TypeIdx, Ty);
1871   case G_SMIN:
1872   case G_SMAX:
1873   case G_UMIN:
1874   case G_UMAX:
1875     return lowerMinMax(MI, TypeIdx, Ty);
1876   case G_FCOPYSIGN:
1877     return lowerFCopySign(MI, TypeIdx, Ty);
1878   case G_FMINNUM:
1879   case G_FMAXNUM:
1880     return lowerFMinNumMaxNum(MI);
1881   case G_UNMERGE_VALUES:
1882     return lowerUnmergeValues(MI);
1883   }
1884 }
1885 
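// Break a vector G_IMPLICIT_DEF into NarrowTy sized implicit defs and
// reassemble the result, e.g. with NarrowTy = <2 x s32>:
//   <4 x s32> = G_IMPLICIT_DEF ->
//       <2 x s32> = G_IMPLICIT_DEF
//       <2 x s32> = G_IMPLICIT_DEF
//       <4 x s32> = G_CONCAT_VECTORS <2 x s32>, <2 x s32>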
1886 LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorImplicitDef(
1887     MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy) {
1888   SmallVector<Register, 2> DstRegs;
1889 
1890   unsigned NarrowSize = NarrowTy.getSizeInBits();
1891   Register DstReg = MI.getOperand(0).getReg();
1892   unsigned Size = MRI.getType(DstReg).getSizeInBits();
1893   int NumParts = Size / NarrowSize;
1894   // FIXME: Don't know how to handle the situation where the small vectors
1895   // aren't all the same size yet.
1896   if (Size % NarrowSize != 0)
1897     return UnableToLegalize;
1898 
1899   for (int i = 0; i < NumParts; ++i) {
1900     Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
1901     MIRBuilder.buildUndef(TmpReg);
1902     DstRegs.push_back(TmpReg);
1903   }
1904 
1905   if (NarrowTy.isVector())
1906     MIRBuilder.buildConcatVectors(DstReg, DstRegs);
1907   else
1908     MIRBuilder.buildBuildVector(DstReg, DstRegs);
1909 
1910   MI.eraseFromParent();
1911   return Legalized;
1912 }
1913 
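// Split an operation that acts elementwise into NarrowTy sized pieces, e.g.
// with NarrowTy = <2 x s32>:
//   <4 x s32> = G_ADD <4 x s32>, <4 x s32> ->
//       <2 x s32> = G_ADD <2 x s32>, <2 x s32>
//       <2 x s32> = G_ADD <2 x s32>, <2 x s32>
// If exactly one scalar element is left over it is handled with an
// extract/op/insert sequence instead.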
1914 LegalizerHelper::LegalizeResult
1915 LegalizerHelper::fewerElementsVectorBasic(MachineInstr &MI, unsigned TypeIdx,
1916                                           LLT NarrowTy) {
1917   const unsigned Opc = MI.getOpcode();
1918   const unsigned NumOps = MI.getNumOperands() - 1;
1919   const unsigned NarrowSize = NarrowTy.getSizeInBits();
1920   const Register DstReg = MI.getOperand(0).getReg();
1921   const unsigned Flags = MI.getFlags();
1922   const LLT DstTy = MRI.getType(DstReg);
1923   const unsigned Size = DstTy.getSizeInBits();
1924   const int NumParts = Size / NarrowSize;
1925   const LLT EltTy = DstTy.getElementType();
1926   const unsigned EltSize = EltTy.getSizeInBits();
1927   const unsigned BitsForNumParts = NarrowSize * NumParts;
1928 
1929   // Check if we have any leftovers. If we do, then only handle the case where
1930   // the leftover is one element.
1931   if (BitsForNumParts != Size && BitsForNumParts + EltSize != Size)
1932     return UnableToLegalize;
1933 
1934   if (BitsForNumParts != Size) {
1935     Register AccumDstReg = MRI.createGenericVirtualRegister(DstTy);
1936     MIRBuilder.buildUndef(AccumDstReg);
1937 
1938     // Handle the pieces which evenly divide into the requested type with
1939     // extract/op/insert sequence.
1940     for (unsigned Offset = 0; Offset < BitsForNumParts; Offset += NarrowSize) {
1941       SmallVector<SrcOp, 4> SrcOps;
1942       for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
1943         Register PartOpReg = MRI.createGenericVirtualRegister(NarrowTy);
1944         MIRBuilder.buildExtract(PartOpReg, MI.getOperand(I).getReg(), Offset);
1945         SrcOps.push_back(PartOpReg);
1946       }
1947 
1948       Register PartDstReg = MRI.createGenericVirtualRegister(NarrowTy);
1949       MIRBuilder.buildInstr(Opc, {PartDstReg}, SrcOps, Flags);
1950 
1951       Register PartInsertReg = MRI.createGenericVirtualRegister(DstTy);
1952       MIRBuilder.buildInsert(PartInsertReg, AccumDstReg, PartDstReg, Offset);
1953       AccumDstReg = PartInsertReg;
1954     }
1955 
1956     // Handle the remaining element sized leftover piece.
1957     SmallVector<SrcOp, 4> SrcOps;
1958     for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
1959       Register PartOpReg = MRI.createGenericVirtualRegister(EltTy);
1960       MIRBuilder.buildExtract(PartOpReg, MI.getOperand(I).getReg(),
1961                               BitsForNumParts);
1962       SrcOps.push_back(PartOpReg);
1963     }
1964 
1965     Register PartDstReg = MRI.createGenericVirtualRegister(EltTy);
1966     MIRBuilder.buildInstr(Opc, {PartDstReg}, SrcOps, Flags);
1967     MIRBuilder.buildInsert(DstReg, AccumDstReg, PartDstReg, BitsForNumParts);
1968     MI.eraseFromParent();
1969 
1970     return Legalized;
1971   }
1972 
1973   SmallVector<Register, 2> DstRegs, Src0Regs, Src1Regs, Src2Regs;
1974 
1975   extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src0Regs);
1976 
1977   if (NumOps >= 2)
1978     extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src1Regs);
1979 
1980   if (NumOps >= 3)
1981     extractParts(MI.getOperand(3).getReg(), NarrowTy, NumParts, Src2Regs);
1982 
1983   for (int i = 0; i < NumParts; ++i) {
1984     Register DstReg = MRI.createGenericVirtualRegister(NarrowTy);
1985 
1986     if (NumOps == 1)
1987       MIRBuilder.buildInstr(Opc, {DstReg}, {Src0Regs[i]}, Flags);
1988     else if (NumOps == 2) {
1989       MIRBuilder.buildInstr(Opc, {DstReg}, {Src0Regs[i], Src1Regs[i]}, Flags);
1990     } else if (NumOps == 3) {
1991       MIRBuilder.buildInstr(Opc, {DstReg},
1992                             {Src0Regs[i], Src1Regs[i], Src2Regs[i]}, Flags);
1993     }
1994 
1995     DstRegs.push_back(DstReg);
1996   }
1997 
1998   if (NarrowTy.isVector())
1999     MIRBuilder.buildConcatVectors(DstReg, DstRegs);
2000   else
2001     MIRBuilder.buildBuildVector(DstReg, DstRegs);
2002 
2003   MI.eraseFromParent();
2004   return Legalized;
2005 }
2006 
2007 // Handle splitting vector operations which need to have the same number of
2008 // elements in each type index, but each type index may have a different element
2009 // type.
2010 //
2011 // e.g.  <4 x s64> = G_SHL <4 x s64>, <4 x s32> ->
2012 //       <2 x s64> = G_SHL <2 x s64>, <2 x s32>
2013 //       <2 x s64> = G_SHL <2 x s64>, <2 x s32>
2014 //
2015 // Also handles some irregular breakdown cases, e.g.
2016 //       <3 x s64> = G_SHL <3 x s64>, <3 x s32> ->
2017 //       <2 x s64> = G_SHL <2 x s64>, <2 x s32>
2018 //             s64 = G_SHL s64, s32
2019 LegalizerHelper::LegalizeResult
2020 LegalizerHelper::fewerElementsVectorMultiEltType(
2021   MachineInstr &MI, unsigned TypeIdx, LLT NarrowTyArg) {
2022   if (TypeIdx != 0)
2023     return UnableToLegalize;
2024 
2025   const LLT NarrowTy0 = NarrowTyArg;
2026   const unsigned NewNumElts =
2027       NarrowTy0.isVector() ? NarrowTy0.getNumElements() : 1;
2028 
2029   const Register DstReg = MI.getOperand(0).getReg();
2030   LLT DstTy = MRI.getType(DstReg);
2031   LLT LeftoverTy0;
2032 
2033   // All of the operands need to have the same number of elements, so if we can
2034   // determine a type breakdown for the result type, we can for all of the
2035   // source types.
2036   int NumParts = getNarrowTypeBreakDown(DstTy, NarrowTy0, LeftoverTy0).first;
2037   if (NumParts < 0)
2038     return UnableToLegalize;
2039 
2040   SmallVector<MachineInstrBuilder, 4> NewInsts;
2041 
2042   SmallVector<Register, 4> DstRegs, LeftoverDstRegs;
2043   SmallVector<Register, 4> PartRegs, LeftoverRegs;
2044 
2045   for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
2047     Register SrcReg = MI.getOperand(I).getReg();
2048     LLT SrcTyI = MRI.getType(SrcReg);
2049     LLT NarrowTyI = LLT::scalarOrVector(NewNumElts, SrcTyI.getScalarType());
2050     LLT LeftoverTyI;
2051 
2052     // Split this operand into the requested typed registers, and any leftover
2053     // required to reproduce the original type.
2054     if (!extractParts(SrcReg, SrcTyI, NarrowTyI, LeftoverTyI, PartRegs,
2055                       LeftoverRegs))
2056       return UnableToLegalize;
2057 
2058     if (I == 1) {
2059       // For the first operand, create an instruction for each part and setup
2060       // the result.
2061       for (Register PartReg : PartRegs) {
2062         Register PartDstReg = MRI.createGenericVirtualRegister(NarrowTy0);
2063         NewInsts.push_back(MIRBuilder.buildInstrNoInsert(MI.getOpcode())
2064                                .addDef(PartDstReg)
2065                                .addUse(PartReg));
2066         DstRegs.push_back(PartDstReg);
2067       }
2068 
2069       for (Register LeftoverReg : LeftoverRegs) {
2070         Register PartDstReg = MRI.createGenericVirtualRegister(LeftoverTy0);
2071         NewInsts.push_back(MIRBuilder.buildInstrNoInsert(MI.getOpcode())
2072                                .addDef(PartDstReg)
2073                                .addUse(LeftoverReg));
2074         LeftoverDstRegs.push_back(PartDstReg);
2075       }
2076     } else {
2077       assert(NewInsts.size() == PartRegs.size() + LeftoverRegs.size());
2078 
2079       // Add the newly created operand splits to the existing instructions. The
2080       // odd-sized pieces are ordered after the requested NarrowTyArg sized
2081       // pieces.
2082       unsigned InstCount = 0;
2083       for (unsigned J = 0, JE = PartRegs.size(); J != JE; ++J)
2084         NewInsts[InstCount++].addUse(PartRegs[J]);
2085       for (unsigned J = 0, JE = LeftoverRegs.size(); J != JE; ++J)
2086         NewInsts[InstCount++].addUse(LeftoverRegs[J]);
2087     }
2088 
2089     PartRegs.clear();
2090     LeftoverRegs.clear();
2091   }
2092 
2093   // Insert the newly built operations and rebuild the result register.
2094   for (auto &MIB : NewInsts)
2095     MIRBuilder.insertInstr(MIB);
2096 
2097   insertParts(DstReg, DstTy, NarrowTy0, DstRegs, LeftoverTy0, LeftoverDstRegs);
2098 
2099   MI.eraseFromParent();
2100   return Legalized;
2101 }
2102 
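// Split a conversion whose source and result element types differ, e.g. with
// NarrowTy = <2 x s16>:
//   <4 x s16> = G_FPTRUNC <4 x s32> ->
//       <2 x s16> = G_FPTRUNC <2 x s32>
//       <2 x s16> = G_FPTRUNC <2 x s32>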
2103 LegalizerHelper::LegalizeResult
2104 LegalizerHelper::fewerElementsVectorCasts(MachineInstr &MI, unsigned TypeIdx,
2105                                           LLT NarrowTy) {
2106   if (TypeIdx != 0)
2107     return UnableToLegalize;
2108 
2109   Register DstReg = MI.getOperand(0).getReg();
2110   Register SrcReg = MI.getOperand(1).getReg();
2111   LLT DstTy = MRI.getType(DstReg);
2112   LLT SrcTy = MRI.getType(SrcReg);
2113 
2114   LLT NarrowTy0 = NarrowTy;
2115   LLT NarrowTy1;
2116   unsigned NumParts;
2117 
2118   if (NarrowTy.isVector()) {
2119     // Uneven breakdown not handled.
2120     NumParts = DstTy.getNumElements() / NarrowTy.getNumElements();
2121     if (NumParts * NarrowTy.getNumElements() != DstTy.getNumElements())
2122       return UnableToLegalize;
2123 
2124     NarrowTy1 = LLT::vector(NumParts, SrcTy.getElementType().getSizeInBits());
2125   } else {
2126     NumParts = DstTy.getNumElements();
2127     NarrowTy1 = SrcTy.getElementType();
2128   }
2129 
2130   SmallVector<Register, 4> SrcRegs, DstRegs;
2131   extractParts(SrcReg, NarrowTy1, NumParts, SrcRegs);
2132 
2133   for (unsigned I = 0; I < NumParts; ++I) {
2134     Register DstReg = MRI.createGenericVirtualRegister(NarrowTy0);
2135     MachineInstr *NewInst = MIRBuilder.buildInstr(MI.getOpcode())
2136       .addDef(DstReg)
2137       .addUse(SrcRegs[I]);
2138 
2139     NewInst->setFlags(MI.getFlags());
2140     DstRegs.push_back(DstReg);
2141   }
2142 
2143   if (NarrowTy.isVector())
2144     MIRBuilder.buildConcatVectors(DstReg, DstRegs);
2145   else
2146     MIRBuilder.buildBuildVector(DstReg, DstRegs);
2147 
2148   MI.eraseFromParent();
2149   return Legalized;
2150 }
2151 
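// Split a vector compare into NarrowTy sized compares, e.g. with
// NarrowTy = <2 x s1> and TypeIdx 0:
//   <4 x s1> = G_ICMP intpred(eq), <4 x s32>, <4 x s32> ->
//       <2 x s1> = G_ICMP intpred(eq), <2 x s32>, <2 x s32>
//       <2 x s1> = G_ICMP intpred(eq), <2 x s32>, <2 x s32>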
2152 LegalizerHelper::LegalizeResult
2153 LegalizerHelper::fewerElementsVectorCmp(MachineInstr &MI, unsigned TypeIdx,
2154                                         LLT NarrowTy) {
2155   Register DstReg = MI.getOperand(0).getReg();
2156   Register Src0Reg = MI.getOperand(2).getReg();
2157   LLT DstTy = MRI.getType(DstReg);
2158   LLT SrcTy = MRI.getType(Src0Reg);
2159 
2160   unsigned NumParts;
2161   LLT NarrowTy0, NarrowTy1;
2162 
2163   if (TypeIdx == 0) {
2164     unsigned NewElts = NarrowTy.isVector() ? NarrowTy.getNumElements() : 1;
2165     unsigned OldElts = DstTy.getNumElements();
2166 
2167     NarrowTy0 = NarrowTy;
2168     NumParts = NarrowTy.isVector() ? (OldElts / NewElts) : DstTy.getNumElements();
2169     NarrowTy1 = NarrowTy.isVector() ?
2170       LLT::vector(NarrowTy.getNumElements(), SrcTy.getScalarSizeInBits()) :
2171       SrcTy.getElementType();
2172 
2173   } else {
2174     unsigned NewElts = NarrowTy.isVector() ? NarrowTy.getNumElements() : 1;
2175     unsigned OldElts = SrcTy.getNumElements();
2176 
2177     NumParts = NarrowTy.isVector() ? (OldElts / NewElts) :
2178       NarrowTy.getNumElements();
2179     NarrowTy0 = LLT::vector(NarrowTy.getNumElements(),
2180                             DstTy.getScalarSizeInBits());
2181     NarrowTy1 = NarrowTy;
2182   }
2183 
2184   // FIXME: Don't know how to handle the situation where the small vectors
2185   // aren't all the same size yet.
2186   if (NarrowTy1.isVector() &&
2187       NarrowTy1.getNumElements() * NumParts != DstTy.getNumElements())
2188     return UnableToLegalize;
2189 
2190   CmpInst::Predicate Pred
2191     = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
2192 
2193   SmallVector<Register, 2> Src1Regs, Src2Regs, DstRegs;
2194   extractParts(MI.getOperand(2).getReg(), NarrowTy1, NumParts, Src1Regs);
2195   extractParts(MI.getOperand(3).getReg(), NarrowTy1, NumParts, Src2Regs);
2196 
2197   for (unsigned I = 0; I < NumParts; ++I) {
2198     Register DstReg = MRI.createGenericVirtualRegister(NarrowTy0);
2199     DstRegs.push_back(DstReg);
2200 
2201     if (MI.getOpcode() == TargetOpcode::G_ICMP)
2202       MIRBuilder.buildICmp(Pred, DstReg, Src1Regs[I], Src2Regs[I]);
2203     else {
2204       MachineInstr *NewCmp
2205         = MIRBuilder.buildFCmp(Pred, DstReg, Src1Regs[I], Src2Regs[I]);
2206       NewCmp->setFlags(MI.getFlags());
2207     }
2208   }
2209 
2210   if (NarrowTy1.isVector())
2211     MIRBuilder.buildConcatVectors(DstReg, DstRegs);
2212   else
2213     MIRBuilder.buildBuildVector(DstReg, DstRegs);
2214 
2215   MI.eraseFromParent();
2216   return Legalized;
2217 }
2218 
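// Split a G_SELECT into NarrowTy sized pieces. A scalar condition is reused
// for every piece; a vector condition is split along with the two value
// operands.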
2219 LegalizerHelper::LegalizeResult
2220 LegalizerHelper::fewerElementsVectorSelect(MachineInstr &MI, unsigned TypeIdx,
2221                                            LLT NarrowTy) {
2222   Register DstReg = MI.getOperand(0).getReg();
2223   Register CondReg = MI.getOperand(1).getReg();
2224 
2225   unsigned NumParts = 0;
2226   LLT NarrowTy0, NarrowTy1;
2227 
2228   LLT DstTy = MRI.getType(DstReg);
2229   LLT CondTy = MRI.getType(CondReg);
2230   unsigned Size = DstTy.getSizeInBits();
2231 
2232   assert(TypeIdx == 0 || CondTy.isVector());
2233 
2234   if (TypeIdx == 0) {
2235     NarrowTy0 = NarrowTy;
2236     NarrowTy1 = CondTy;
2237 
2238     unsigned NarrowSize = NarrowTy0.getSizeInBits();
2239     // FIXME: Don't know how to handle the situation where the small vectors
2240     // aren't all the same size yet.
2241     if (Size % NarrowSize != 0)
2242       return UnableToLegalize;
2243 
2244     NumParts = Size / NarrowSize;
2245 
2246     // Need to break down the condition type
2247     if (CondTy.isVector()) {
2248       if (CondTy.getNumElements() == NumParts)
2249         NarrowTy1 = CondTy.getElementType();
2250       else
2251         NarrowTy1 = LLT::vector(CondTy.getNumElements() / NumParts,
2252                                 CondTy.getScalarSizeInBits());
2253     }
2254   } else {
2255     NumParts = CondTy.getNumElements();
2256     if (NarrowTy.isVector()) {
2257       // TODO: Handle uneven breakdown.
2258       if (NumParts * NarrowTy.getNumElements() != CondTy.getNumElements())
2259         return UnableToLegalize;
2260 
2261       return UnableToLegalize;
2262     } else {
2263       NarrowTy0 = DstTy.getElementType();
2264       NarrowTy1 = NarrowTy;
2265     }
2266   }
2267 
2268   SmallVector<Register, 2> DstRegs, Src0Regs, Src1Regs, Src2Regs;
2269   if (CondTy.isVector())
2270     extractParts(MI.getOperand(1).getReg(), NarrowTy1, NumParts, Src0Regs);
2271 
2272   extractParts(MI.getOperand(2).getReg(), NarrowTy0, NumParts, Src1Regs);
2273   extractParts(MI.getOperand(3).getReg(), NarrowTy0, NumParts, Src2Regs);
2274 
2275   for (unsigned i = 0; i < NumParts; ++i) {
2276     Register DstReg = MRI.createGenericVirtualRegister(NarrowTy0);
2277     MIRBuilder.buildSelect(DstReg, CondTy.isVector() ? Src0Regs[i] : CondReg,
2278                            Src1Regs[i], Src2Regs[i]);
2279     DstRegs.push_back(DstReg);
2280   }
2281 
2282   if (NarrowTy0.isVector())
2283     MIRBuilder.buildConcatVectors(DstReg, DstRegs);
2284   else
2285     MIRBuilder.buildBuildVector(DstReg, DstRegs);
2286 
2287   MI.eraseFromParent();
2288   return Legalized;
2289 }
2290 
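// Split a vector G_PHI: narrow phis are built at the head of the original
// result block, and each incoming value is split in its predecessor block
// just before the terminator.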
2291 LegalizerHelper::LegalizeResult
2292 LegalizerHelper::fewerElementsVectorPhi(MachineInstr &MI, unsigned TypeIdx,
2293                                         LLT NarrowTy) {
2294   const Register DstReg = MI.getOperand(0).getReg();
2295   LLT PhiTy = MRI.getType(DstReg);
2296   LLT LeftoverTy;
2297 
2298   // All of the operands need to have the same number of elements, so if we can
2299   // determine a type breakdown for the result type, we can for all of the
2300   // source types.
2301   int NumParts, NumLeftover;
2302   std::tie(NumParts, NumLeftover)
2303     = getNarrowTypeBreakDown(PhiTy, NarrowTy, LeftoverTy);
2304   if (NumParts < 0)
2305     return UnableToLegalize;
2306 
2307   SmallVector<Register, 4> DstRegs, LeftoverDstRegs;
2308   SmallVector<MachineInstrBuilder, 4> NewInsts;
2309 
2310   const int TotalNumParts = NumParts + NumLeftover;
2311 
2312   // Insert the new phis in the result block first.
2313   for (int I = 0; I != TotalNumParts; ++I) {
2314     LLT Ty = I < NumParts ? NarrowTy : LeftoverTy;
2315     Register PartDstReg = MRI.createGenericVirtualRegister(Ty);
2316     NewInsts.push_back(MIRBuilder.buildInstr(TargetOpcode::G_PHI)
2317                        .addDef(PartDstReg));
2318     if (I < NumParts)
2319       DstRegs.push_back(PartDstReg);
2320     else
2321       LeftoverDstRegs.push_back(PartDstReg);
2322   }
2323 
2324   MachineBasicBlock *MBB = MI.getParent();
2325   MIRBuilder.setInsertPt(*MBB, MBB->getFirstNonPHI());
2326   insertParts(DstReg, PhiTy, NarrowTy, DstRegs, LeftoverTy, LeftoverDstRegs);
2327 
2328   SmallVector<Register, 4> PartRegs, LeftoverRegs;
2329 
2330   // Insert code to extract the incoming values in each predecessor block.
2331   for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
2332     PartRegs.clear();
2333     LeftoverRegs.clear();
2334 
2335     Register SrcReg = MI.getOperand(I).getReg();
2336     MachineBasicBlock &OpMBB = *MI.getOperand(I + 1).getMBB();
2337     MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminator());
2338 
2339     LLT Unused;
2340     if (!extractParts(SrcReg, PhiTy, NarrowTy, Unused, PartRegs,
2341                       LeftoverRegs))
2342       return UnableToLegalize;
2343 
2344     // Add the newly created operand splits to the existing instructions. The
2345     // odd-sized pieces are ordered after the requested NarrowTy sized
2346     // pieces.
2347     for (int J = 0; J != TotalNumParts; ++J) {
2348       MachineInstrBuilder MIB = NewInsts[J];
2349       MIB.addUse(J < NumParts ? PartRegs[J] : LeftoverRegs[J - NumParts]);
2350       MIB.addMBB(&OpMBB);
2351     }
2352   }
2353 
2354   MI.eraseFromParent();
2355   return Legalized;
2356 }
2357 
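// Break a wide non-atomic G_LOAD/G_STORE into NarrowTy sized memory accesses
// at increasing byte offsets, plus one odd-sized leftover piece if the type
// does not divide evenly. For loads the pieces are recombined with
// insertParts.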
2358 LegalizerHelper::LegalizeResult
2359 LegalizerHelper::reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx,
2360                                       LLT NarrowTy) {
2361   // FIXME: Don't know how to handle secondary types yet.
2362   if (TypeIdx != 0)
2363     return UnableToLegalize;
2364 
2365   MachineMemOperand *MMO = *MI.memoperands_begin();
2366 
2367   // This implementation doesn't work for atomics. Give up instead of doing
2368   // something invalid.
2369   if (MMO->getOrdering() != AtomicOrdering::NotAtomic ||
2370       MMO->getFailureOrdering() != AtomicOrdering::NotAtomic)
2371     return UnableToLegalize;
2372 
2373   bool IsLoad = MI.getOpcode() == TargetOpcode::G_LOAD;
2374   Register ValReg = MI.getOperand(0).getReg();
2375   Register AddrReg = MI.getOperand(1).getReg();
2376   LLT ValTy = MRI.getType(ValReg);
2377 
2378   int NumParts = -1;
2379   int NumLeftover = -1;
2380   LLT LeftoverTy;
2381   SmallVector<Register, 8> NarrowRegs, NarrowLeftoverRegs;
2382   if (IsLoad) {
2383     std::tie(NumParts, NumLeftover) = getNarrowTypeBreakDown(ValTy, NarrowTy, LeftoverTy);
2384   } else {
2385     if (extractParts(ValReg, ValTy, NarrowTy, LeftoverTy, NarrowRegs,
2386                      NarrowLeftoverRegs)) {
2387       NumParts = NarrowRegs.size();
2388       NumLeftover = NarrowLeftoverRegs.size();
2389     }
2390   }
2391 
2392   if (NumParts == -1)
2393     return UnableToLegalize;
2394 
2395   const LLT OffsetTy = LLT::scalar(MRI.getType(AddrReg).getScalarSizeInBits());
2396 
2397   unsigned TotalSize = ValTy.getSizeInBits();
2398 
2399   // Split the load/store into PartTy sized pieces starting at Offset. If this
2400   // is a load, return the new registers in ValRegs. For a store, each element
2401   // of ValRegs should be PartTy. Returns the next offset that needs to be
2402   // handled.
2403   auto splitTypePieces = [=](LLT PartTy, SmallVectorImpl<Register> &ValRegs,
2404                              unsigned Offset) -> unsigned {
2405     MachineFunction &MF = MIRBuilder.getMF();
2406     unsigned PartSize = PartTy.getSizeInBits();
2407     for (unsigned Idx = 0, E = NumParts; Idx != E && Offset < TotalSize;
2408          Offset += PartSize, ++Idx) {
2409       unsigned ByteSize = PartSize / 8;
2410       unsigned ByteOffset = Offset / 8;
2411       Register NewAddrReg;
2412 
2413       MIRBuilder.materializeGEP(NewAddrReg, AddrReg, OffsetTy, ByteOffset);
2414 
2415       MachineMemOperand *NewMMO =
2416         MF.getMachineMemOperand(MMO, ByteOffset, ByteSize);
2417 
2418       if (IsLoad) {
2419         Register Dst = MRI.createGenericVirtualRegister(PartTy);
2420         ValRegs.push_back(Dst);
2421         MIRBuilder.buildLoad(Dst, NewAddrReg, *NewMMO);
2422       } else {
2423         MIRBuilder.buildStore(ValRegs[Idx], NewAddrReg, *NewMMO);
2424       }
2425     }
2426 
2427     return Offset;
2428   };
2429 
2430   unsigned HandledOffset = splitTypePieces(NarrowTy, NarrowRegs, 0);
2431 
2432   // Handle the rest of the register if this isn't an even type breakdown.
2433   if (LeftoverTy.isValid())
2434     splitTypePieces(LeftoverTy, NarrowLeftoverRegs, HandledOffset);
2435 
2436   if (IsLoad) {
2437     insertParts(ValReg, ValTy, NarrowTy, NarrowRegs,
2438                 LeftoverTy, NarrowLeftoverRegs);
2439   }
2440 
2441   MI.eraseFromParent();
2442   return Legalized;
2443 }
2444 
2445 LegalizerHelper::LegalizeResult
2446 LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
2447                                      LLT NarrowTy) {
2448   using namespace TargetOpcode;
2449 
2450   MIRBuilder.setInstr(MI);
2451   switch (MI.getOpcode()) {
2452   case G_IMPLICIT_DEF:
2453     return fewerElementsVectorImplicitDef(MI, TypeIdx, NarrowTy);
2454   case G_AND:
2455   case G_OR:
2456   case G_XOR:
2457   case G_ADD:
2458   case G_SUB:
2459   case G_MUL:
2460   case G_SMULH:
2461   case G_UMULH:
2462   case G_FADD:
2463   case G_FMUL:
2464   case G_FSUB:
2465   case G_FNEG:
2466   case G_FABS:
2467   case G_FCANONICALIZE:
2468   case G_FDIV:
2469   case G_FREM:
2470   case G_FMA:
2471   case G_FPOW:
2472   case G_FEXP:
2473   case G_FEXP2:
2474   case G_FLOG:
2475   case G_FLOG2:
2476   case G_FLOG10:
2477   case G_FNEARBYINT:
2478   case G_FCEIL:
2479   case G_FFLOOR:
2480   case G_FRINT:
2481   case G_INTRINSIC_ROUND:
2482   case G_INTRINSIC_TRUNC:
2483   case G_FCOS:
2484   case G_FSIN:
2485   case G_FSQRT:
2486   case G_BSWAP:
2487   case G_SDIV:
2488   case G_SMIN:
2489   case G_SMAX:
2490   case G_UMIN:
2491   case G_UMAX:
2492   case G_FMINNUM:
2493   case G_FMAXNUM:
2494   case G_FMINNUM_IEEE:
2495   case G_FMAXNUM_IEEE:
2496   case G_FMINIMUM:
2497   case G_FMAXIMUM:
2498     return fewerElementsVectorBasic(MI, TypeIdx, NarrowTy);
2499   case G_SHL:
2500   case G_LSHR:
2501   case G_ASHR:
2502   case G_CTLZ:
2503   case G_CTLZ_ZERO_UNDEF:
2504   case G_CTTZ:
2505   case G_CTTZ_ZERO_UNDEF:
2506   case G_CTPOP:
2507   case G_FCOPYSIGN:
2508     return fewerElementsVectorMultiEltType(MI, TypeIdx, NarrowTy);
2509   case G_ZEXT:
2510   case G_SEXT:
2511   case G_ANYEXT:
2512   case G_FPEXT:
2513   case G_FPTRUNC:
2514   case G_SITOFP:
2515   case G_UITOFP:
2516   case G_FPTOSI:
2517   case G_FPTOUI:
2518   case G_INTTOPTR:
2519   case G_PTRTOINT:
2520   case G_ADDRSPACE_CAST:
2521     return fewerElementsVectorCasts(MI, TypeIdx, NarrowTy);
2522   case G_ICMP:
2523   case G_FCMP:
2524     return fewerElementsVectorCmp(MI, TypeIdx, NarrowTy);
2525   case G_SELECT:
2526     return fewerElementsVectorSelect(MI, TypeIdx, NarrowTy);
2527   case G_PHI:
2528     return fewerElementsVectorPhi(MI, TypeIdx, NarrowTy);
2529   case G_LOAD:
2530   case G_STORE:
2531     return reduceLoadStoreWidth(MI, TypeIdx, NarrowTy);
2532   default:
2533     return UnableToLegalize;
2534   }
2535 }
2536 
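// Expand a shift of a double-width value by a known constant amount into
// operations on its half-width pieces, e.g. an s64 G_SHL by 40 split into s32
// halves becomes Lo = 0 and Hi = InL << 8.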
2537 LegalizerHelper::LegalizeResult
2538 LegalizerHelper::narrowScalarShiftByConstant(MachineInstr &MI, const APInt &Amt,
2539                                              const LLT HalfTy, const LLT AmtTy) {
2540 
2541   Register InL = MRI.createGenericVirtualRegister(HalfTy);
2542   Register InH = MRI.createGenericVirtualRegister(HalfTy);
2543   MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1).getReg());
2544 
2545   if (Amt.isNullValue()) {
2546     MIRBuilder.buildMerge(MI.getOperand(0).getReg(), {InL, InH});
2547     MI.eraseFromParent();
2548     return Legalized;
2549   }
2550 
2551   LLT NVT = HalfTy;
2552   unsigned NVTBits = HalfTy.getSizeInBits();
2553   unsigned VTBits = 2 * NVTBits;
2554 
2555   SrcOp Lo(Register(0)), Hi(Register(0));
2556   if (MI.getOpcode() == TargetOpcode::G_SHL) {
2557     if (Amt.ugt(VTBits)) {
2558       Lo = Hi = MIRBuilder.buildConstant(NVT, 0);
2559     } else if (Amt.ugt(NVTBits)) {
2560       Lo = MIRBuilder.buildConstant(NVT, 0);
2561       Hi = MIRBuilder.buildShl(NVT, InL,
2562                                MIRBuilder.buildConstant(AmtTy, Amt - NVTBits));
2563     } else if (Amt == NVTBits) {
2564       Lo = MIRBuilder.buildConstant(NVT, 0);
2565       Hi = InL;
2566     } else {
2567       Lo = MIRBuilder.buildShl(NVT, InL, MIRBuilder.buildConstant(AmtTy, Amt));
2568       auto OrLHS =
2569           MIRBuilder.buildShl(NVT, InH, MIRBuilder.buildConstant(AmtTy, Amt));
2570       auto OrRHS = MIRBuilder.buildLShr(
2571           NVT, InL, MIRBuilder.buildConstant(AmtTy, -Amt + NVTBits));
2572       Hi = MIRBuilder.buildOr(NVT, OrLHS, OrRHS);
2573     }
2574   } else if (MI.getOpcode() == TargetOpcode::G_LSHR) {
2575     if (Amt.ugt(VTBits)) {
2576       Lo = Hi = MIRBuilder.buildConstant(NVT, 0);
2577     } else if (Amt.ugt(NVTBits)) {
2578       Lo = MIRBuilder.buildLShr(NVT, InH,
2579                                 MIRBuilder.buildConstant(AmtTy, Amt - NVTBits));
2580       Hi = MIRBuilder.buildConstant(NVT, 0);
2581     } else if (Amt == NVTBits) {
2582       Lo = InH;
2583       Hi = MIRBuilder.buildConstant(NVT, 0);
2584     } else {
2585       auto ShiftAmtConst = MIRBuilder.buildConstant(AmtTy, Amt);
2586 
2587       auto OrLHS = MIRBuilder.buildLShr(NVT, InL, ShiftAmtConst);
2588       auto OrRHS = MIRBuilder.buildShl(
2589           NVT, InH, MIRBuilder.buildConstant(AmtTy, -Amt + NVTBits));
2590 
2591       Lo = MIRBuilder.buildOr(NVT, OrLHS, OrRHS);
2592       Hi = MIRBuilder.buildLShr(NVT, InH, ShiftAmtConst);
2593     }
2594   } else {
2595     if (Amt.ugt(VTBits)) {
2596       Hi = Lo = MIRBuilder.buildAShr(
2597           NVT, InH, MIRBuilder.buildConstant(AmtTy, NVTBits - 1));
2598     } else if (Amt.ugt(NVTBits)) {
2599       Lo = MIRBuilder.buildAShr(NVT, InH,
2600                                 MIRBuilder.buildConstant(AmtTy, Amt - NVTBits));
2601       Hi = MIRBuilder.buildAShr(NVT, InH,
2602                                 MIRBuilder.buildConstant(AmtTy, NVTBits - 1));
2603     } else if (Amt == NVTBits) {
2604       Lo = InH;
2605       Hi = MIRBuilder.buildAShr(NVT, InH,
2606                                 MIRBuilder.buildConstant(AmtTy, NVTBits - 1));
2607     } else {
2608       auto ShiftAmtConst = MIRBuilder.buildConstant(AmtTy, Amt);
2609 
2610       auto OrLHS = MIRBuilder.buildLShr(NVT, InL, ShiftAmtConst);
2611       auto OrRHS = MIRBuilder.buildShl(
2612           NVT, InH, MIRBuilder.buildConstant(AmtTy, -Amt + NVTBits));
2613 
2614       Lo = MIRBuilder.buildOr(NVT, OrLHS, OrRHS);
2615       Hi = MIRBuilder.buildAShr(NVT, InH, ShiftAmtConst);
2616     }
2617   }
2618 
2619   MIRBuilder.buildMerge(MI.getOperand(0).getReg(), {Lo.getReg(), Hi.getReg()});
2620   MI.eraseFromParent();
2621 
2622   return Legalized;
2623 }
2624 
2625 // TODO: Optimize if constant shift amount.
2626 LegalizerHelper::LegalizeResult
2627 LegalizerHelper::narrowScalarShift(MachineInstr &MI, unsigned TypeIdx,
2628                                    LLT RequestedTy) {
2629   if (TypeIdx == 1) {
2630     Observer.changingInstr(MI);
2631     narrowScalarSrc(MI, RequestedTy, 2);
2632     Observer.changedInstr(MI);
2633     return Legalized;
2634   }
2635 
2636   Register DstReg = MI.getOperand(0).getReg();
2637   LLT DstTy = MRI.getType(DstReg);
2638   if (DstTy.isVector())
2639     return UnableToLegalize;
2640 
2641   Register Amt = MI.getOperand(2).getReg();
2642   LLT ShiftAmtTy = MRI.getType(Amt);
2643   const unsigned DstEltSize = DstTy.getScalarSizeInBits();
2644   if (DstEltSize % 2 != 0)
2645     return UnableToLegalize;
2646 
2647   // Ignore the input type. We can only go to exactly half the size of the
2648   // input. If that isn't small enough, the resulting pieces will be further
2649   // legalized.
2650   const unsigned NewBitSize = DstEltSize / 2;
2651   const LLT HalfTy = LLT::scalar(NewBitSize);
2652   const LLT CondTy = LLT::scalar(1);
2653 
2654   if (const MachineInstr *KShiftAmt =
2655           getOpcodeDef(TargetOpcode::G_CONSTANT, Amt, MRI)) {
2656     return narrowScalarShiftByConstant(
2657         MI, KShiftAmt->getOperand(1).getCImm()->getValue(), HalfTy, ShiftAmtTy);
2658   }
2659 
2660   // TODO: Expand with known bits.
2661 
2662   // Handle the fully general expansion by an unknown amount.
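  // Both the "short" (Amt < NewBitSize) and "long" (Amt >= NewBitSize)
  // sequences are built and selected between with IsShort. When Amt is zero
  // the short sequence would shift by a full NewBitSize, so the extra IsZero
  // select passes the affected input half through unchanged.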
2663   auto NewBits = MIRBuilder.buildConstant(ShiftAmtTy, NewBitSize);
2664 
2665   Register InL = MRI.createGenericVirtualRegister(HalfTy);
2666   Register InH = MRI.createGenericVirtualRegister(HalfTy);
2667   MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1).getReg());
2668 
2669   auto AmtExcess = MIRBuilder.buildSub(ShiftAmtTy, Amt, NewBits);
2670   auto AmtLack = MIRBuilder.buildSub(ShiftAmtTy, NewBits, Amt);
2671 
2672   auto Zero = MIRBuilder.buildConstant(ShiftAmtTy, 0);
2673   auto IsShort = MIRBuilder.buildICmp(ICmpInst::ICMP_ULT, CondTy, Amt, NewBits);
2674   auto IsZero = MIRBuilder.buildICmp(ICmpInst::ICMP_EQ, CondTy, Amt, Zero);
2675 
2676   Register ResultRegs[2];
2677   switch (MI.getOpcode()) {
2678   case TargetOpcode::G_SHL: {
2679     // Short: ShAmt < NewBitSize
2680     auto LoS = MIRBuilder.buildShl(HalfTy, InL, Amt);
2681 
2682     auto OrLHS = MIRBuilder.buildShl(HalfTy, InH, Amt);
2683     auto OrRHS = MIRBuilder.buildLShr(HalfTy, InL, AmtLack);
2684     auto HiS = MIRBuilder.buildOr(HalfTy, OrLHS, OrRHS);
2685 
2686     // Long: ShAmt >= NewBitSize
2687     auto LoL = MIRBuilder.buildConstant(HalfTy, 0);         // Lo part is zero.
2688     auto HiL = MIRBuilder.buildShl(HalfTy, InL, AmtExcess); // Hi from Lo part.
2689 
2690     auto Lo = MIRBuilder.buildSelect(HalfTy, IsShort, LoS, LoL);
2691     auto Hi = MIRBuilder.buildSelect(
2692         HalfTy, IsZero, InH, MIRBuilder.buildSelect(HalfTy, IsShort, HiS, HiL));
2693 
2694     ResultRegs[0] = Lo.getReg(0);
2695     ResultRegs[1] = Hi.getReg(0);
2696     break;
2697   }
2698   case TargetOpcode::G_LSHR: {
2699     // Short: ShAmt < NewBitSize
2700     auto HiS = MIRBuilder.buildLShr(HalfTy, InH, Amt);
2701 
2702     auto OrLHS = MIRBuilder.buildLShr(HalfTy, InL, Amt);
2703     auto OrRHS = MIRBuilder.buildShl(HalfTy, InH, AmtLack);
2704     auto LoS = MIRBuilder.buildOr(HalfTy, OrLHS, OrRHS);
2705 
2706     // Long: ShAmt >= NewBitSize
2707     auto HiL = MIRBuilder.buildConstant(HalfTy, 0);          // Hi part is zero.
2708     auto LoL = MIRBuilder.buildLShr(HalfTy, InH, AmtExcess); // Lo from Hi part.
2709 
2710     auto Lo = MIRBuilder.buildSelect(
2711         HalfTy, IsZero, InL, MIRBuilder.buildSelect(HalfTy, IsShort, LoS, LoL));
2712     auto Hi = MIRBuilder.buildSelect(HalfTy, IsShort, HiS, HiL);
2713 
2714     ResultRegs[0] = Lo.getReg(0);
2715     ResultRegs[1] = Hi.getReg(0);
2716     break;
2717   }
2718   case TargetOpcode::G_ASHR: {
2719     // Short: ShAmt < NewBitSize
2720     auto HiS = MIRBuilder.buildAShr(HalfTy, InH, Amt);
2721 
2722     auto OrLHS = MIRBuilder.buildLShr(HalfTy, InL, Amt);
2723     auto OrRHS = MIRBuilder.buildShl(HalfTy, InH, AmtLack);
2724     auto LoS = MIRBuilder.buildOr(HalfTy, OrLHS, OrRHS);
2725 
2726     // Long: ShAmt >= NewBitSize
2727 
2728     // Sign of Hi part.
2729     auto HiL = MIRBuilder.buildAShr(
2730         HalfTy, InH, MIRBuilder.buildConstant(ShiftAmtTy, NewBitSize - 1));
2731 
2732     auto LoL = MIRBuilder.buildAShr(HalfTy, InH, AmtExcess); // Lo from Hi part.
2733 
2734     auto Lo = MIRBuilder.buildSelect(
2735         HalfTy, IsZero, InL, MIRBuilder.buildSelect(HalfTy, IsShort, LoS, LoL));
2736 
2737     auto Hi = MIRBuilder.buildSelect(HalfTy, IsShort, HiS, HiL);
2738 
2739     ResultRegs[0] = Lo.getReg(0);
2740     ResultRegs[1] = Hi.getReg(0);
2741     break;
2742   }
2743   default:
2744     llvm_unreachable("not a shift");
2745   }
2746 
2747   MIRBuilder.buildMerge(DstReg, ResultRegs);
2748   MI.eraseFromParent();
2749   return Legalized;
2750 }
2751 
2752 LegalizerHelper::LegalizeResult
2753 LegalizerHelper::moreElementsVectorPhi(MachineInstr &MI, unsigned TypeIdx,
2754                                        LLT MoreTy) {
2755   assert(TypeIdx == 0 && "Expecting only Idx 0");
2756 
2757   Observer.changingInstr(MI);
2758   for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
2759     MachineBasicBlock &OpMBB = *MI.getOperand(I + 1).getMBB();
2760     MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminator());
2761     moreElementsVectorSrc(MI, MoreTy, I);
2762   }
2763 
2764   MachineBasicBlock &MBB = *MI.getParent();
2765   MIRBuilder.setInsertPt(MBB, --MBB.getFirstNonPHI());
2766   moreElementsVectorDst(MI, MoreTy, 0);
2767   Observer.changedInstr(MI);
2768   return Legalized;
2769 }
2770 
2771 LegalizerHelper::LegalizeResult
2772 LegalizerHelper::moreElementsVector(MachineInstr &MI, unsigned TypeIdx,
2773                                     LLT MoreTy) {
2774   MIRBuilder.setInstr(MI);
2775   unsigned Opc = MI.getOpcode();
2776   switch (Opc) {
2777   case TargetOpcode::G_IMPLICIT_DEF:
2778   case TargetOpcode::G_LOAD: {
2779     if (TypeIdx != 0)
2780       return UnableToLegalize;
2781     Observer.changingInstr(MI);
2782     moreElementsVectorDst(MI, MoreTy, 0);
2783     Observer.changedInstr(MI);
2784     return Legalized;
2785   }
2786   case TargetOpcode::G_STORE:
2787     if (TypeIdx != 0)
2788       return UnableToLegalize;
2789     Observer.changingInstr(MI);
2790     moreElementsVectorSrc(MI, MoreTy, 0);
2791     Observer.changedInstr(MI);
2792     return Legalized;
2793   case TargetOpcode::G_AND:
2794   case TargetOpcode::G_OR:
2795   case TargetOpcode::G_XOR:
2796   case TargetOpcode::G_SMIN:
2797   case TargetOpcode::G_SMAX:
2798   case TargetOpcode::G_UMIN:
2799   case TargetOpcode::G_UMAX: {
2800     Observer.changingInstr(MI);
2801     moreElementsVectorSrc(MI, MoreTy, 1);
2802     moreElementsVectorSrc(MI, MoreTy, 2);
2803     moreElementsVectorDst(MI, MoreTy, 0);
2804     Observer.changedInstr(MI);
2805     return Legalized;
2806   }
2807   case TargetOpcode::G_EXTRACT:
2808     if (TypeIdx != 1)
2809       return UnableToLegalize;
2810     Observer.changingInstr(MI);
2811     moreElementsVectorSrc(MI, MoreTy, 1);
2812     Observer.changedInstr(MI);
2813     return Legalized;
2814   case TargetOpcode::G_INSERT:
2815     if (TypeIdx != 0)
2816       return UnableToLegalize;
2817     Observer.changingInstr(MI);
2818     moreElementsVectorSrc(MI, MoreTy, 1);
2819     moreElementsVectorDst(MI, MoreTy, 0);
2820     Observer.changedInstr(MI);
2821     return Legalized;
2822   case TargetOpcode::G_SELECT:
2823     if (TypeIdx != 0)
2824       return UnableToLegalize;
2825     if (MRI.getType(MI.getOperand(1).getReg()).isVector())
2826       return UnableToLegalize;
2827 
2828     Observer.changingInstr(MI);
2829     moreElementsVectorSrc(MI, MoreTy, 2);
2830     moreElementsVectorSrc(MI, MoreTy, 3);
2831     moreElementsVectorDst(MI, MoreTy, 0);
2832     Observer.changedInstr(MI);
2833     return Legalized;
2834   case TargetOpcode::G_PHI:
2835     return moreElementsVectorPhi(MI, TypeIdx, MoreTy);
2836   default:
2837     return UnableToLegalize;
2838   }
2839 }
2840 
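// Multiply the NarrowTy sized pieces of two values using schoolbook (long)
// multiplication: each result part sums the low halves of its column's
// part-products, the G_UMULH high halves from the previous column, and the
// carries accumulated with G_UADDO/G_ZEXT.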
2841 void LegalizerHelper::multiplyRegisters(SmallVectorImpl<Register> &DstRegs,
2842                                         ArrayRef<Register> Src1Regs,
2843                                         ArrayRef<Register> Src2Regs,
2844                                         LLT NarrowTy) {
2845   MachineIRBuilder &B = MIRBuilder;
2846   unsigned SrcParts = Src1Regs.size();
2847   unsigned DstParts = DstRegs.size();
2848 
2849   unsigned DstIdx = 0; // Low bits of the result.
2850   Register FactorSum =
2851       B.buildMul(NarrowTy, Src1Regs[DstIdx], Src2Regs[DstIdx]).getReg(0);
2852   DstRegs[DstIdx] = FactorSum;
2853 
2854   unsigned CarrySumPrevDstIdx;
2855   SmallVector<Register, 4> Factors;
2856 
2857   for (DstIdx = 1; DstIdx < DstParts; DstIdx++) {
2858     // Collect low parts of muls for DstIdx.
2859     for (unsigned i = DstIdx + 1 < SrcParts ? 0 : DstIdx - SrcParts + 1;
2860          i <= std::min(DstIdx, SrcParts - 1); ++i) {
2861       MachineInstrBuilder Mul =
2862           B.buildMul(NarrowTy, Src1Regs[DstIdx - i], Src2Regs[i]);
2863       Factors.push_back(Mul.getReg(0));
2864     }
2865     // Collect high parts of muls from previous DstIdx.
2866     for (unsigned i = DstIdx < SrcParts ? 0 : DstIdx - SrcParts;
2867          i <= std::min(DstIdx - 1, SrcParts - 1); ++i) {
2868       MachineInstrBuilder Umulh =
2869           B.buildUMulH(NarrowTy, Src1Regs[DstIdx - 1 - i], Src2Regs[i]);
2870       Factors.push_back(Umulh.getReg(0));
2871     }
2872     // Add CarrySum from additions calculated for the previous DstIdx.
2873     if (DstIdx != 1) {
2874       Factors.push_back(CarrySumPrevDstIdx);
2875     }
2876 
2877     Register CarrySum;
2878     // Add all factors and accumulate all carries into CarrySum.
2879     if (DstIdx != DstParts - 1) {
2880       MachineInstrBuilder Uaddo =
2881           B.buildUAddo(NarrowTy, LLT::scalar(1), Factors[0], Factors[1]);
2882       FactorSum = Uaddo.getReg(0);
2883       CarrySum = B.buildZExt(NarrowTy, Uaddo.getReg(1)).getReg(0);
2884       for (unsigned i = 2; i < Factors.size(); ++i) {
2885         MachineInstrBuilder Uaddo =
2886             B.buildUAddo(NarrowTy, LLT::scalar(1), FactorSum, Factors[i]);
2887         FactorSum = Uaddo.getReg(0);
2888         MachineInstrBuilder Carry = B.buildZExt(NarrowTy, Uaddo.getReg(1));
2889         CarrySum = B.buildAdd(NarrowTy, CarrySum, Carry).getReg(0);
2890       }
2891     } else {
2892       // Since value for the next index is not calculated, neither is CarrySum.
2893       FactorSum = B.buildAdd(NarrowTy, Factors[0], Factors[1]).getReg(0);
2894       for (unsigned i = 2; i < Factors.size(); ++i)
2895         FactorSum = B.buildAdd(NarrowTy, FactorSum, Factors[i]).getReg(0);
2896     }
2897 
2898     CarrySumPrevDstIdx = CarrySum;
2899     DstRegs[DstIdx] = FactorSum;
2900     Factors.clear();
2901   }
2902 }
2903 
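// Narrow G_MUL and G_UMULH by splitting both sources into NarrowTy pieces,
// computing the product pieces with multiplyRegisters, and merging them back
// into the destination. For G_UMULH, twice as many product pieces are formed
// so that only the high half needs to be kept.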
2904 LegalizerHelper::LegalizeResult
2905 LegalizerHelper::narrowScalarMul(MachineInstr &MI, LLT NarrowTy) {
2906   Register DstReg = MI.getOperand(0).getReg();
2907   Register Src1 = MI.getOperand(1).getReg();
2908   Register Src2 = MI.getOperand(2).getReg();
2909 
2910   LLT Ty = MRI.getType(DstReg);
2911   if (Ty.isVector())
2912     return UnableToLegalize;
2913 
2914   unsigned SrcSize = MRI.getType(Src1).getSizeInBits();
2915   unsigned DstSize = Ty.getSizeInBits();
2916   unsigned NarrowSize = NarrowTy.getSizeInBits();
2917   if (DstSize % NarrowSize != 0 || SrcSize % NarrowSize != 0)
2918     return UnableToLegalize;
2919 
2920   unsigned NumDstParts = DstSize / NarrowSize;
2921   unsigned NumSrcParts = SrcSize / NarrowSize;
2922   bool IsMulHigh = MI.getOpcode() == TargetOpcode::G_UMULH;
2923   unsigned DstTmpParts = NumDstParts * (IsMulHigh ? 2 : 1);
2924 
2925   SmallVector<Register, 2> Src1Parts, Src2Parts, DstTmpRegs;
2926   extractParts(Src1, NarrowTy, NumSrcParts, Src1Parts);
2927   extractParts(Src2, NarrowTy, NumSrcParts, Src2Parts);
2928   DstTmpRegs.resize(DstTmpParts);
2929   multiplyRegisters(DstTmpRegs, Src1Parts, Src2Parts, NarrowTy);
2930 
2931   // Take only high half of registers if this is high mul.
2932   ArrayRef<Register> DstRegs(
2933       IsMulHigh ? &DstTmpRegs[DstTmpParts / 2] : &DstTmpRegs[0], NumDstParts);
2934   MIRBuilder.buildMerge(DstReg, DstRegs);
2935   MI.eraseFromParent();
2936   return Legalized;
2937 }
2938 
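// Narrow the source of a G_EXTRACT by splitting it into NarrowTy pieces and,
// for each piece overlapping the extracted range, taking just the bits it
// contributes; the collected segments are then merged (or assembled into a
// build_vector) to form the destination.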
2939 LegalizerHelper::LegalizeResult
2940 LegalizerHelper::narrowScalarExtract(MachineInstr &MI, unsigned TypeIdx,
2941                                      LLT NarrowTy) {
2942   if (TypeIdx != 1)
2943     return UnableToLegalize;
2944 
2945   uint64_t NarrowSize = NarrowTy.getSizeInBits();
2946 
2947   int64_t SizeOp1 = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
2948   // FIXME: add support for when SizeOp1 isn't an exact multiple of
2949   // NarrowSize.
2950   if (SizeOp1 % NarrowSize != 0)
2951     return UnableToLegalize;
2952   int NumParts = SizeOp1 / NarrowSize;
2953 
2954   SmallVector<Register, 2> SrcRegs, DstRegs;
2955   SmallVector<uint64_t, 2> Indexes;
2956   extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs);
2957 
2958   Register OpReg = MI.getOperand(0).getReg();
2959   uint64_t OpStart = MI.getOperand(2).getImm();
2960   uint64_t OpSize = MRI.getType(OpReg).getSizeInBits();
2961   for (int i = 0; i < NumParts; ++i) {
2962     unsigned SrcStart = i * NarrowSize;
2963 
2964     if (SrcStart + NarrowSize <= OpStart || SrcStart >= OpStart + OpSize) {
2965       // No part of the extract uses this subregister, ignore it.
2966       continue;
2967     } else if (SrcStart == OpStart && NarrowTy == MRI.getType(OpReg)) {
2968       // The entire subregister is extracted, forward the value.
2969       DstRegs.push_back(SrcRegs[i]);
2970       continue;
2971     }
2972 
2973     // Compute which bits of this source part feed the extract: ExtractOffset
2974     // is the offset within the part and SegSize is how many bits to take.
2975     int64_t ExtractOffset;
2976     uint64_t SegSize;
2977     if (OpStart < SrcStart) {
2978       ExtractOffset = 0;
2979       SegSize = std::min(NarrowSize, OpStart + OpSize - SrcStart);
2980     } else {
2981       ExtractOffset = OpStart - SrcStart;
2982       SegSize = std::min(SrcStart + NarrowSize - OpStart, OpSize);
2983     }
2984 
2985     Register SegReg = SrcRegs[i];
2986     if (ExtractOffset != 0 || SegSize != NarrowSize) {
2987       // A genuine extract is needed.
2988       SegReg = MRI.createGenericVirtualRegister(LLT::scalar(SegSize));
2989       MIRBuilder.buildExtract(SegReg, SrcRegs[i], ExtractOffset);
2990     }
2991 
2992     DstRegs.push_back(SegReg);
2993   }
2994 
2995   Register DstReg = MI.getOperand(0).getReg();
2996   if (MRI.getType(DstReg).isVector())
2997     MIRBuilder.buildBuildVector(DstReg, DstRegs);
2998   else
2999     MIRBuilder.buildMerge(DstReg, DstRegs);
3000   MI.eraseFromParent();
3001   return Legalized;
3002 }
3003 
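// Narrow G_INSERT by splitting the wide operand into NarrowTy pieces. Pieces
// the inserted value does not touch are forwarded unchanged; pieces it
// overlaps receive the corresponding slice of the inserted value at the
// appropriate offset, and all pieces are then merged into the destination.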
3004 LegalizerHelper::LegalizeResult
3005 LegalizerHelper::narrowScalarInsert(MachineInstr &MI, unsigned TypeIdx,
3006                                     LLT NarrowTy) {
3007   // FIXME: Don't know how to handle secondary types yet.
3008   if (TypeIdx != 0)
3009     return UnableToLegalize;
3010 
3011   uint64_t SizeOp0 = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
3012   uint64_t NarrowSize = NarrowTy.getSizeInBits();
3013 
3014   // FIXME: add support for when SizeOp0 isn't an exact multiple of
3015   // NarrowSize.
3016   if (SizeOp0 % NarrowSize != 0)
3017     return UnableToLegalize;
3018 
3019   int NumParts = SizeOp0 / NarrowSize;
3020 
3021   SmallVector<Register, 2> SrcRegs, DstRegs;
3022   SmallVector<uint64_t, 2> Indexes;
3023   extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs);
3024 
3025   Register OpReg = MI.getOperand(2).getReg();
3026   uint64_t OpStart = MI.getOperand(3).getImm();
3027   uint64_t OpSize = MRI.getType(OpReg).getSizeInBits();
3028   for (int i = 0; i < NumParts; ++i) {
3029     unsigned DstStart = i * NarrowSize;
3030 
3031     if (DstStart + NarrowSize <= OpStart || DstStart >= OpStart + OpSize) {
3032       // No part of the insert affects this subregister, forward the original.
3033       DstRegs.push_back(SrcRegs[i]);
3034       continue;
3035     } else if (DstStart == OpStart && NarrowTy == MRI.getType(OpReg)) {
3036       // The entire subregister is defined by this insert, forward the new
3037       // value.
3038       DstRegs.push_back(OpReg);
3039       continue;
3040     }
3041 
3042     // Take SegSize bits of OpReg starting at ExtractOffset and insert them
3043     // into this destination part at InsertOffset.
3044     int64_t ExtractOffset, InsertOffset;
3045     uint64_t SegSize;
3046     if (OpStart < DstStart) {
3047       InsertOffset = 0;
3048       ExtractOffset = DstStart - OpStart;
3049       SegSize = std::min(NarrowSize, OpStart + OpSize - DstStart);
3050     } else {
3051       InsertOffset = OpStart - DstStart;
3052       ExtractOffset = 0;
3053       SegSize =
3054         std::min(NarrowSize - InsertOffset, OpStart + OpSize - DstStart);
3055     }
3056 
3057     Register SegReg = OpReg;
3058     if (ExtractOffset != 0 || SegSize != OpSize) {
3059       // A genuine extract is needed.
3060       SegReg = MRI.createGenericVirtualRegister(LLT::scalar(SegSize));
3061       MIRBuilder.buildExtract(SegReg, OpReg, ExtractOffset);
3062     }
3063 
3064     Register DstReg = MRI.createGenericVirtualRegister(NarrowTy);
3065     MIRBuilder.buildInsert(DstReg, SrcRegs[i], SegReg, InsertOffset);
3066     DstRegs.push_back(DstReg);
3067   }
3068 
3069   assert(DstRegs.size() == (unsigned)NumParts && "not all parts covered");
3070   Register DstReg = MI.getOperand(0).getReg();
3071   if (MRI.getType(DstReg).isVector())
3072     MIRBuilder.buildBuildVector(DstReg, DstRegs);
3073   else
3074     MIRBuilder.buildMerge(DstReg, DstRegs);
3075   MI.eraseFromParent();
3076   return Legalized;
3077 }
3078 
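// Narrow an operation that can simply be applied piece by piece (e.g. the
// bitwise logic operations) by splitting both sources into NarrowTy pieces
// plus an optional leftover piece, performing the operation on corresponding
// pieces, and reassembling the result with insertParts.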
3079 LegalizerHelper::LegalizeResult
3080 LegalizerHelper::narrowScalarBasic(MachineInstr &MI, unsigned TypeIdx,
3081                                    LLT NarrowTy) {
3082   Register DstReg = MI.getOperand(0).getReg();
3083   LLT DstTy = MRI.getType(DstReg);
3084 
3085   assert(MI.getNumOperands() == 3 && TypeIdx == 0);
3086 
3087   SmallVector<Register, 4> DstRegs, DstLeftoverRegs;
3088   SmallVector<Register, 4> Src0Regs, Src0LeftoverRegs;
3089   SmallVector<Register, 4> Src1Regs, Src1LeftoverRegs;
3090   LLT LeftoverTy;
3091   if (!extractParts(MI.getOperand(1).getReg(), DstTy, NarrowTy, LeftoverTy,
3092                     Src0Regs, Src0LeftoverRegs))
3093     return UnableToLegalize;
3094 
3095   LLT Unused;
3096   if (!extractParts(MI.getOperand(2).getReg(), DstTy, NarrowTy, Unused,
3097                     Src1Regs, Src1LeftoverRegs))
3098     llvm_unreachable("inconsistent extractParts result");
3099 
3100   for (unsigned I = 0, E = Src1Regs.size(); I != E; ++I) {
3101     auto Inst = MIRBuilder.buildInstr(MI.getOpcode(), {NarrowTy},
3102                                         {Src0Regs[I], Src1Regs[I]});
3103     DstRegs.push_back(Inst->getOperand(0).getReg());
3104   }
3105 
3106   for (unsigned I = 0, E = Src1LeftoverRegs.size(); I != E; ++I) {
3107     auto Inst = MIRBuilder.buildInstr(
3108       MI.getOpcode(),
3109       {LeftoverTy}, {Src0LeftoverRegs[I], Src1LeftoverRegs[I]});
3110     DstLeftoverRegs.push_back(Inst->getOperand(0).getReg());
3111   }
3112 
3113   insertParts(DstReg, DstTy, NarrowTy, DstRegs,
3114               LeftoverTy, DstLeftoverRegs);
3115 
3116   MI.eraseFromParent();
3117   return Legalized;
3118 }
3119 
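// Narrow a G_SELECT with a scalar condition by splitting both value operands
// into NarrowTy pieces (plus leftovers), selecting between corresponding
// pieces under the original condition, and reassembling the result.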
3120 LegalizerHelper::LegalizeResult
3121 LegalizerHelper::narrowScalarSelect(MachineInstr &MI, unsigned TypeIdx,
3122                                     LLT NarrowTy) {
3123   if (TypeIdx != 0)
3124     return UnableToLegalize;
3125 
3126   Register CondReg = MI.getOperand(1).getReg();
3127   LLT CondTy = MRI.getType(CondReg);
3128   if (CondTy.isVector()) // TODO: Handle vselect
3129     return UnableToLegalize;
3130 
3131   Register DstReg = MI.getOperand(0).getReg();
3132   LLT DstTy = MRI.getType(DstReg);
3133 
3134   SmallVector<Register, 4> DstRegs, DstLeftoverRegs;
3135   SmallVector<Register, 4> Src1Regs, Src1LeftoverRegs;
3136   SmallVector<Register, 4> Src2Regs, Src2LeftoverRegs;
3137   LLT LeftoverTy;
3138   if (!extractParts(MI.getOperand(2).getReg(), DstTy, NarrowTy, LeftoverTy,
3139                     Src1Regs, Src1LeftoverRegs))
3140     return UnableToLegalize;
3141 
3142   LLT Unused;
3143   if (!extractParts(MI.getOperand(3).getReg(), DstTy, NarrowTy, Unused,
3144                     Src2Regs, Src2LeftoverRegs))
3145     llvm_unreachable("inconsistent extractParts result");
3146 
3147   for (unsigned I = 0, E = Src1Regs.size(); I != E; ++I) {
3148     auto Select = MIRBuilder.buildSelect(NarrowTy,
3149                                          CondReg, Src1Regs[I], Src2Regs[I]);
3150     DstRegs.push_back(Select->getOperand(0).getReg());
3151   }
3152 
3153   for (unsigned I = 0, E = Src1LeftoverRegs.size(); I != E; ++I) {
3154     auto Select = MIRBuilder.buildSelect(
3155       LeftoverTy, CondReg, Src1LeftoverRegs[I], Src2LeftoverRegs[I]);
3156     DstLeftoverRegs.push_back(Select->getOperand(0).getReg());
3157   }
3158 
3159   insertParts(DstReg, DstTy, NarrowTy, DstRegs,
3160               LeftoverTy, DstLeftoverRegs);
3161 
3162   MI.eraseFromParent();
3163   return Legalized;
3164 }
3165 
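// Expand the bit-counting operations in terms of operations the target does
// support: the ZERO_UNDEF variants become their plain counterparts, while
// G_CTLZ and G_CTTZ either use the corresponding ZERO_UNDEF operation plus a
// select for the zero input, or fall back to the bit-twiddling expansions
// from Hacker's Delight.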
3166 LegalizerHelper::LegalizeResult
3167 LegalizerHelper::lowerBitCount(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
3168   unsigned Opc = MI.getOpcode();
3169   auto &TII = *MI.getMF()->getSubtarget().getInstrInfo();
3170   auto isSupported = [this](const LegalityQuery &Q) {
3171     auto QAction = LI.getAction(Q).Action;
3172     return QAction == Legal || QAction == Libcall || QAction == Custom;
3173   };
3174   switch (Opc) {
3175   default:
3176     return UnableToLegalize;
3177   case TargetOpcode::G_CTLZ_ZERO_UNDEF: {
3178     // This trivially expands to CTLZ.
3179     Observer.changingInstr(MI);
3180     MI.setDesc(TII.get(TargetOpcode::G_CTLZ));
3181     Observer.changedInstr(MI);
3182     return Legalized;
3183   }
3184   case TargetOpcode::G_CTLZ: {
3185     Register SrcReg = MI.getOperand(1).getReg();
3186     unsigned Len = Ty.getSizeInBits();
3187     if (isSupported({TargetOpcode::G_CTLZ_ZERO_UNDEF, {Ty, Ty}})) {
3188       // If CTLZ_ZERO_UNDEF is supported, emit that and a select for zero.
3189       auto MIBCtlzZU = MIRBuilder.buildInstr(TargetOpcode::G_CTLZ_ZERO_UNDEF,
3190                                              {Ty}, {SrcReg});
3191       auto MIBZero = MIRBuilder.buildConstant(Ty, 0);
3192       auto MIBLen = MIRBuilder.buildConstant(Ty, Len);
3193       auto MIBICmp = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, LLT::scalar(1),
3194                                           SrcReg, MIBZero);
3195       MIRBuilder.buildSelect(MI.getOperand(0).getReg(), MIBICmp, MIBLen,
3196                              MIBCtlzZU);
3197       MI.eraseFromParent();
3198       return Legalized;
3199     }
3200     // For now, we do this:
3201     // NewLen = NextPowerOf2(Len);
3202     // x = x | (x >> 1);
3203     // x = x | (x >> 2);
3204     // ...
3205     // x = x | (x >> 16);
3206     // x = x | (x >> 32); // for 64-bit input
3207     // (the shifts continue up to NewLen/2)
3208     // return Len - popcount(x);
3209     //
3210     // Ref: "Hacker's Delight" by Henry Warren
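    // After the or-chain every bit at or below the original leading one is
    // set, so popcount(x) == Len - ctlz(input) and the subtraction below
    // recovers the leading zero count.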
3211     Register Op = SrcReg;
3212     unsigned NewLen = PowerOf2Ceil(Len);
3213     for (unsigned i = 0; (1U << i) <= (NewLen / 2); ++i) {
3214       auto MIBShiftAmt = MIRBuilder.buildConstant(Ty, 1ULL << i);
3215       auto MIBOp = MIRBuilder.buildInstr(
3216           TargetOpcode::G_OR, {Ty},
3217           {Op, MIRBuilder.buildInstr(TargetOpcode::G_LSHR, {Ty},
3218                                      {Op, MIBShiftAmt})});
3219       Op = MIBOp->getOperand(0).getReg();
3220     }
3221     auto MIBPop = MIRBuilder.buildInstr(TargetOpcode::G_CTPOP, {Ty}, {Op});
3222     MIRBuilder.buildInstr(TargetOpcode::G_SUB, {MI.getOperand(0).getReg()},
3223                           {MIRBuilder.buildConstant(Ty, Len), MIBPop});
3224     MI.eraseFromParent();
3225     return Legalized;
3226   }
3227   case TargetOpcode::G_CTTZ_ZERO_UNDEF: {
3228     // This trivially expands to CTTZ.
3229     Observer.changingInstr(MI);
3230     MI.setDesc(TII.get(TargetOpcode::G_CTTZ));
3231     Observer.changedInstr(MI);
3232     return Legalized;
3233   }
3234   case TargetOpcode::G_CTTZ: {
3235     Register SrcReg = MI.getOperand(1).getReg();
3236     unsigned Len = Ty.getSizeInBits();
3237     if (isSupported({TargetOpcode::G_CTTZ_ZERO_UNDEF, {Ty, Ty}})) {
3238       // If CTTZ_ZERO_UNDEF is legal or custom, emit that and a select with
3239       // zero.
3240       auto MIBCttzZU = MIRBuilder.buildInstr(TargetOpcode::G_CTTZ_ZERO_UNDEF,
3241                                              {Ty}, {SrcReg});
3242       auto MIBZero = MIRBuilder.buildConstant(Ty, 0);
3243       auto MIBLen = MIRBuilder.buildConstant(Ty, Len);
3244       auto MIBICmp = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, LLT::scalar(1),
3245                                           SrcReg, MIBZero);
3246       MIRBuilder.buildSelect(MI.getOperand(0).getReg(), MIBICmp, MIBLen,
3247                              MIBCttzZU);
3248       MI.eraseFromParent();
3249       return Legalized;
3250     }
3251     // For now, we use: { return popcount(~x & (x - 1)); }
3252     // unless the target has ctlz but not ctpop, in which case we use:
3253     // { return Len - ctlz(~x & (x - 1)); }
3254     // Ref: "Hacker's Delight" by Henry Warren
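    // Here x - 1 turns the trailing zeros into ones and clears the lowest set
    // bit, so ~x & (x - 1) keeps exactly the trailing-zero positions; its
    // popcount (or Len minus its ctlz) is cttz(x), and x == 0 gives Len.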
3255     auto MIBCstNeg1 = MIRBuilder.buildConstant(Ty, -1);
3256     auto MIBNot =
3257         MIRBuilder.buildInstr(TargetOpcode::G_XOR, {Ty}, {SrcReg, MIBCstNeg1});
3258     auto MIBTmp = MIRBuilder.buildInstr(
3259         TargetOpcode::G_AND, {Ty},
3260         {MIBNot, MIRBuilder.buildInstr(TargetOpcode::G_ADD, {Ty},
3261                                        {SrcReg, MIBCstNeg1})});
3262     if (!isSupported({TargetOpcode::G_CTPOP, {Ty, Ty}}) &&
3263         isSupported({TargetOpcode::G_CTLZ, {Ty, Ty}})) {
3264       auto MIBCstLen = MIRBuilder.buildConstant(Ty, Len);
3265       MIRBuilder.buildInstr(
3266           TargetOpcode::G_SUB, {MI.getOperand(0).getReg()},
3267           {MIBCstLen,
3268            MIRBuilder.buildInstr(TargetOpcode::G_CTLZ, {Ty}, {MIBTmp})});
3269       MI.eraseFromParent();
3270       return Legalized;
3271     }
3272     MI.setDesc(TII.get(TargetOpcode::G_CTPOP));
3273     MI.getOperand(1).setReg(MIBTmp->getOperand(0).getReg());
3274     return Legalized;
3275   }
3276   }
3277 }
3278 
3279 // Expand s32 = G_UITOFP s64 using bit operations to an IEEE float
3280 // representation.
3281 LegalizerHelper::LegalizeResult
3282 LegalizerHelper::lowerU64ToF32BitOps(MachineInstr &MI) {
3283   Register Dst = MI.getOperand(0).getReg();
3284   Register Src = MI.getOperand(1).getReg();
3285   const LLT S64 = LLT::scalar(64);
3286   const LLT S32 = LLT::scalar(32);
3287   const LLT S1 = LLT::scalar(1);
3288 
3289   assert(MRI.getType(Src) == S64 && MRI.getType(Dst) == S32);
3290 
3291   // float cul2f(ulong u) {
3292   //   uint lz = clz(u);
3293   //   uint e = (u != 0) ? 127U + 63U - lz : 0;
3294   //   u = (u << lz) & 0x7fffffffffffffffUL;
3295   //   ulong t = u & 0xffffffffffUL;
3296   //   uint v = (e << 23) | (uint)(u >> 40);
3297   //   uint r = t > 0x8000000000UL ? 1U : (t == 0x8000000000UL ? v & 1U : 0U);
3298   //   return as_float(v + r);
3299   // }
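  // The final addition of r implements round-to-nearest-even on the 40 bits
  // shifted out of the mantissa: round up when they exceed half, and when
  // they equal half round up only if the low bit of v is set.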
3300 
3301   auto Zero32 = MIRBuilder.buildConstant(S32, 0);
3302   auto Zero64 = MIRBuilder.buildConstant(S64, 0);
3303 
3304   auto LZ = MIRBuilder.buildCTLZ_ZERO_UNDEF(S32, Src);
3305 
3306   auto K = MIRBuilder.buildConstant(S32, 127U + 63U);
3307   auto Sub = MIRBuilder.buildSub(S32, K, LZ);
3308 
3309   auto NotZero = MIRBuilder.buildICmp(CmpInst::ICMP_NE, S1, Src, Zero64);
3310   auto E = MIRBuilder.buildSelect(S32, NotZero, Sub, Zero32);
3311 
3312   auto Mask0 = MIRBuilder.buildConstant(S64, (-1ULL) >> 1);
3313   auto ShlLZ = MIRBuilder.buildShl(S64, Src, LZ);
3314 
3315   auto U = MIRBuilder.buildAnd(S64, ShlLZ, Mask0);
3316 
3317   auto Mask1 = MIRBuilder.buildConstant(S64, 0xffffffffffULL);
3318   auto T = MIRBuilder.buildAnd(S64, U, Mask1);
3319 
3320   auto UShl = MIRBuilder.buildLShr(S64, U, MIRBuilder.buildConstant(S64, 40));
3321   auto ShlE = MIRBuilder.buildShl(S32, E, MIRBuilder.buildConstant(S32, 23));
3322   auto V = MIRBuilder.buildOr(S32, ShlE, MIRBuilder.buildTrunc(S32, UShl));
3323 
3324   auto C = MIRBuilder.buildConstant(S64, 0x8000000000ULL);
3325   auto RCmp = MIRBuilder.buildICmp(CmpInst::ICMP_UGT, S1, T, C);
3326   auto TCmp = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, S1, T, C);
3327   auto One = MIRBuilder.buildConstant(S32, 1);
3328 
3329   auto VTrunc1 = MIRBuilder.buildAnd(S32, V, One);
3330   auto Select0 = MIRBuilder.buildSelect(S32, TCmp, VTrunc1, Zero32);
3331   auto R = MIRBuilder.buildSelect(S32, RCmp, One, Select0);
3332   MIRBuilder.buildAdd(Dst, V, R);
  MI.eraseFromParent();
3333 
3334   return Legalized;
3335 }
3336 
3337 LegalizerHelper::LegalizeResult
3338 LegalizerHelper::lowerUITOFP(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
3339   Register Dst = MI.getOperand(0).getReg();
3340   Register Src = MI.getOperand(1).getReg();
3341   LLT DstTy = MRI.getType(Dst);
3342   LLT SrcTy = MRI.getType(Src);
3343 
3344   if (SrcTy != LLT::scalar(64))
3345     return UnableToLegalize;
3346 
3347   if (DstTy == LLT::scalar(32)) {
3348     // TODO: SelectionDAG has several alternative expansions to port which may
3349     // be more reasonable depending on the available instructions. If a target
3350     // has sitofp, does not have CTLZ, or can efficiently use f64 as an
3351     // intermediate type, this is probably worse.
3352     return lowerU64ToF32BitOps(MI);
3353   }
3354 
3355   return UnableToLegalize;
3356 }
3357 
3358 LegalizerHelper::LegalizeResult
3359 LegalizerHelper::lowerSITOFP(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
3360   Register Dst = MI.getOperand(0).getReg();
3361   Register Src = MI.getOperand(1).getReg();
3362   LLT DstTy = MRI.getType(Dst);
3363   LLT SrcTy = MRI.getType(Src);
3364 
3365   const LLT S64 = LLT::scalar(64);
3366   const LLT S32 = LLT::scalar(32);
3367   const LLT S1 = LLT::scalar(1);
3368 
3369   if (SrcTy != S64)
3370     return UnableToLegalize;
3371 
3372   if (DstTy == S32) {
3373     // float cl2f(long l) {
3374     //   long s = l >> 63;
3375     //   float r = cul2f((l + s) ^ s);
3376     //   return s ? -r : r;
3377     // }
3378     Register L = Src;
3379     auto SignBit = MIRBuilder.buildConstant(S64, 63);
3380     auto S = MIRBuilder.buildAShr(S64, L, SignBit);
3381 
3382     auto LPlusS = MIRBuilder.buildAdd(S64, L, S);
3383     auto Xor = MIRBuilder.buildXor(S64, LPlusS, S);
3384     auto R = MIRBuilder.buildUITOFP(S32, Xor);
3385 
3386     auto RNeg = MIRBuilder.buildFNeg(S32, R);
3387     auto SignNotZero = MIRBuilder.buildICmp(CmpInst::ICMP_NE, S1, S,
3388                                             MIRBuilder.buildConstant(S64, 0));
3389     MIRBuilder.buildSelect(Dst, SignNotZero, RNeg, R);
    MI.eraseFromParent();
3390     return Legalized;
3391   }
3392 
3393   return UnableToLegalize;
3394 }
3395 
3396 static CmpInst::Predicate minMaxToCompare(unsigned Opc) {
3397   switch (Opc) {
3398   case TargetOpcode::G_SMIN:
3399     return CmpInst::ICMP_SLT;
3400   case TargetOpcode::G_SMAX:
3401     return CmpInst::ICMP_SGT;
3402   case TargetOpcode::G_UMIN:
3403     return CmpInst::ICMP_ULT;
3404   case TargetOpcode::G_UMAX:
3405     return CmpInst::ICMP_UGT;
3406   default:
3407     llvm_unreachable("not in integer min/max");
3408   }
3409 }
3410 
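// Lower integer min/max to an integer compare with the matching predicate
// followed by a select of the original operands.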
3411 LegalizerHelper::LegalizeResult
3412 LegalizerHelper::lowerMinMax(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
3413   Register Dst = MI.getOperand(0).getReg();
3414   Register Src0 = MI.getOperand(1).getReg();
3415   Register Src1 = MI.getOperand(2).getReg();
3416 
3417   const CmpInst::Predicate Pred = minMaxToCompare(MI.getOpcode());
3418   LLT CmpType = MRI.getType(Dst).changeElementSize(1);
3419 
3420   auto Cmp = MIRBuilder.buildICmp(Pred, CmpType, Src0, Src1);
3421   MIRBuilder.buildSelect(Dst, Cmp, Src0, Src1);
3422 
3423   MI.eraseFromParent();
3424   return Legalized;
3425 }
3426 
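// Lower G_FCOPYSIGN using integer bit operations: clear the sign bit of Src0,
// isolate the sign bit of Src1 (shifting it into position first if the two
// types differ in width), and OR the two together.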
3427 LegalizerHelper::LegalizeResult
3428 LegalizerHelper::lowerFCopySign(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
3429   Register Dst = MI.getOperand(0).getReg();
3430   Register Src0 = MI.getOperand(1).getReg();
3431   Register Src1 = MI.getOperand(2).getReg();
3432 
3433   const LLT Src0Ty = MRI.getType(Src0);
3434   const LLT Src1Ty = MRI.getType(Src1);
3435 
3436   const int Src0Size = Src0Ty.getScalarSizeInBits();
3437   const int Src1Size = Src1Ty.getScalarSizeInBits();
3438 
3439   auto SignBitMask = MIRBuilder.buildConstant(
3440     Src0Ty, APInt::getSignMask(Src0Size));
3441 
3442   auto NotSignBitMask = MIRBuilder.buildConstant(
3443     Src0Ty, APInt::getLowBitsSet(Src0Size, Src0Size - 1));
3444 
3445   auto And0 = MIRBuilder.buildAnd(Src0Ty, Src0, NotSignBitMask);
3446   MachineInstr *Or;
3447 
3448   if (Src0Ty == Src1Ty) {
3449     auto And1 = MIRBuilder.buildAnd(Src1Ty, Src1, SignBitMask);
3450     Or = MIRBuilder.buildOr(Dst, And0, And1);
3451   } else if (Src0Size > Src1Size) {
3452     auto ShiftAmt = MIRBuilder.buildConstant(Src0Ty, Src0Size - Src1Size);
3453     auto Zext = MIRBuilder.buildZExt(Src0Ty, Src1);
3454     auto Shift = MIRBuilder.buildShl(Src0Ty, Zext, ShiftAmt);
3455     auto And1 = MIRBuilder.buildAnd(Src0Ty, Shift, SignBitMask);
3456     Or = MIRBuilder.buildOr(Dst, And0, And1);
3457   } else {
3458     auto ShiftAmt = MIRBuilder.buildConstant(Src1Ty, Src1Size - Src0Size);
3459     auto Shift = MIRBuilder.buildLShr(Src1Ty, Src1, ShiftAmt);
3460     auto Trunc = MIRBuilder.buildTrunc(Src0Ty, Shift);
3461     auto And1 = MIRBuilder.buildAnd(Src0Ty, Trunc, SignBitMask);
3462     Or = MIRBuilder.buildOr(Dst, And0, And1);
3463   }
3464 
3465   // Be careful about setting nsz/nnan/ninf on every instruction, since the
3466   // constants are a nan and -0.0, but the final result should preserve
3467   // everything.
3468   if (unsigned Flags = MI.getFlags())
3469     Or->setFlags(Flags);
3470 
3471   MI.eraseFromParent();
3472   return Legalized;
3473 }
3474 
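// Lower G_FMINNUM/G_FMAXNUM to the IEEE variants, first canonicalizing any
// operand that may be a signaling NaN (unless nnan is set) so the quieting
// behavior is preserved.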
3475 LegalizerHelper::LegalizeResult
3476 LegalizerHelper::lowerFMinNumMaxNum(MachineInstr &MI) {
3477   unsigned NewOp = MI.getOpcode() == TargetOpcode::G_FMINNUM ?
3478     TargetOpcode::G_FMINNUM_IEEE : TargetOpcode::G_FMAXNUM_IEEE;
3479 
3480   Register Dst = MI.getOperand(0).getReg();
3481   Register Src0 = MI.getOperand(1).getReg();
3482   Register Src1 = MI.getOperand(2).getReg();
3483   LLT Ty = MRI.getType(Dst);
3484 
3485   if (!MI.getFlag(MachineInstr::FmNoNans)) {
3486     // Insert canonicalizes if it's possible we need to quiet to get correct
3487     // sNaN behavior.
3488 
3489     // Note this must be done here, and not as an optimization combine in the
3490     // absence of a dedicated quiet-sNaN instruction, as we're using an
3491     // omni-purpose G_FCANONICALIZE.
3492     if (!isKnownNeverSNaN(Src0, MRI))
3493       Src0 = MIRBuilder.buildFCanonicalize(Ty, Src0, MI.getFlags()).getReg(0);
3494 
3495     if (!isKnownNeverSNaN(Src1, MRI))
3496       Src1 = MIRBuilder.buildFCanonicalize(Ty, Src1, MI.getFlags()).getReg(0);
3497   }
3498 
3499   // If there are no nans, it's safe to simply replace this with the non-IEEE
3500   // version.
3501   MIRBuilder.buildInstr(NewOp, {Dst}, {Src0, Src1}, MI.getFlags());
3502   MI.eraseFromParent();
3503   return Legalized;
3504 }
3505 
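// Lower a G_UNMERGE_VALUES that splits a vector into its scalar elements by
// bitcasting the source to one wide integer and producing each element with
// a right shift and a truncate. E.g. unmerging <4 x s8> becomes a bitcast to
// s32 followed by truncs of the value shifted right by 0, 8, 16 and 24.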
3506 LegalizerHelper::LegalizeResult
3507 LegalizerHelper::lowerUnmergeValues(MachineInstr &MI) {
3508   const unsigned NumDst = MI.getNumOperands() - 1;
3509   const Register SrcReg = MI.getOperand(NumDst).getReg();
3510   LLT SrcTy = MRI.getType(SrcReg);
3511 
3512   Register Dst0Reg = MI.getOperand(0).getReg();
3513   LLT DstTy = MRI.getType(Dst0Reg);
3514
3516   // Expand scalarizing unmerge as bitcast to integer and shift.
3517   if (!DstTy.isVector() && SrcTy.isVector() &&
3518       SrcTy.getElementType() == DstTy) {
3519     LLT IntTy = LLT::scalar(SrcTy.getSizeInBits());
3520     Register Cast = MIRBuilder.buildBitcast(IntTy, SrcReg).getReg(0);
3521 
3522     MIRBuilder.buildTrunc(Dst0Reg, Cast);
3523 
3524     const unsigned DstSize = DstTy.getSizeInBits();
3525     unsigned Offset = DstSize;
3526     for (unsigned I = 1; I != NumDst; ++I, Offset += DstSize) {
3527       auto ShiftAmt = MIRBuilder.buildConstant(IntTy, Offset);
3528       auto Shift = MIRBuilder.buildLShr(IntTy, Cast, ShiftAmt);
3529       MIRBuilder.buildTrunc(MI.getOperand(I), Shift);
3530     }
3531 
3532     MI.eraseFromParent();
3533     return Legalized;
3534   }
3535 
3536   return UnableToLegalize;
3537 }
3538