//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfo.h"

using namespace llvm;

void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}

//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  MachineInstrBuilder MIB = BuildMI(getMF(), getDL(), getTII().get(Opcode));
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return buildInstr(TargetOpcode::DBG_VALUE)
      .addFrameIndex(FI)
      .addImm(0)
      .addMetadata(Variable)
      .addMetadata(Expr);
}

MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_VALUE);
  if (auto *CI = dyn_cast<ConstantInt>(&C)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(&C)) {
    MIB.addFPImm(CFP);
  } else {
    // Insert %noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(0U);
  }

  return MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
}

MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Alignment.value());
  return MIB;
}
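
// Illustrative usage sketch (assumes a MachineIRBuilder `B` whose function,
// basic block, and insertion point are already set, on a 64-bit target):
//   LLT P0 = LLT::pointer(0, 64);
//   LLT S64 = LLT::scalar(64);
//   auto Size = B.buildConstant(S64, 128);
//   auto Alloca = B.buildDynStackAlloc(P0, Size, Align(16));
// This emits a G_DYN_STACKALLOC with the size vreg as a use and the
// alignment as an immediate, matching the operand order built above.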

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}

void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

MachineInstrBuilder MachineIRBuilder::buildPtrAdd(const DstOp &Res,
                                                  const SrcOp &Op0,
                                                  const SrcOp &Op1) {
  assert(Res.getLLTTy(*getMRI()).getScalarType().isPointer() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1});
}

Optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return None;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildPtrAdd(Res, Op0, Cst.getReg(0));
}
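
// Illustrative usage sketch (assumes a builder `B`, a pointer vreg `Base`,
// and a byte offset `Off`; all three names are hypothetical):
//   Register NewPtr; // Must be passed in unset; filled in as the result.
//   auto MaybeAdd = B.materializePtrAdd(NewPtr, Base, LLT::scalar(64), Off);
// For Off == 0 this returns None and simply aliases NewPtr to Base;
// otherwise MaybeAdd holds the G_PTR_ADD that defines NewPtr.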

MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
  LLT PtrTy = Res.getLLTTy(*getMRI());
  LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
  buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
  return buildPtrMask(Res, Op0, MaskReg);
}

MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}
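
// Illustrative usage sketch (assumes a builder `B`): a vector destination
// type splats the scalar G_CONSTANT through buildSplatVector, per the
// vector path above.
//   LLT S32 = LLT::scalar(32);
//   LLT V4S32 = LLT::vector(4, 32);
//   auto C = B.buildConstant(S32, 42);   // One G_CONSTANT.
//   auto V = B.buildConstant(V4S32, 42); // G_CONSTANT + G_BUILD_VECTOR splat.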

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics()) ==
             EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(
      Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildBrCond(Register Tst,
                                                  MachineBasicBlock &Dest) {
  assert(getMRI()->getType(Tst).isScalar() && "invalid operand type");

  return buildInstr(TargetOpcode::G_BRCOND).addUse(Tst).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildLoad(const DstOp &Res,
                                                const SrcOp &Addr,
                                                MachineMemOperand &MMO) {
  return buildLoadInstr(TargetOpcode::G_LOAD, Res, Addr, MMO);
}

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
    const DstOp &Dst, const SrcOp &BasePtr, MachineMemOperand &BaseMMO,
    int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(*getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy.getSizeInBytes());

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Dst, BasePtr, *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(*getMRI());
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(OffsetTy, Offset);
  auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
  return buildLoad(Dst, Ptr, *OffsetMMO);
}
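
// Illustrative usage sketch (assumes a builder `B`, a pointer vreg `Base`,
// and a MachineMemOperand `MMO` describing the base access): loading the
// s32 value 8 bytes past `Base` derives an offset MMO and, since the offset
// is non-zero, a constant plus G_PTR_ADD feeding the G_LOAD.
//   auto Hi = B.buildLoadFromOffset(LLT::scalar(32), Base, MMO, 8);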

MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
}

unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}
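
// Illustrative usage sketch: pick the extension that matches the target's
// boolean contents before widening an s1 condition.
//   unsigned Opc = B.getBoolExtOp(/*IsVec=*/false, /*IsFP=*/false);
//   // G_SEXT for ZeroOrNegativeOneBooleanContent targets, G_ZEXT for
//   // ZeroOrOneBooleanContent targets, and G_ANYEXT otherwise.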

MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp =
      getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}
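
// Illustrative usage sketch (assumes a builder `B` and an s32 vreg `X`):
// the emitted opcode depends only on the relative type sizes.
//   auto E = B.buildExtOrTrunc(TargetOpcode::G_ZEXT, LLT::scalar(64), X); // G_ZEXT
//   auto T = B.buildExtOrTrunc(TargetOpcode::G_ZEXT, LLT::scalar(16), X); // G_TRUNC
//   auto C = B.buildExtOrTrunc(TargetOpcode::G_ZEXT, LLT::scalar(32), X); // COPY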

MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointer() && DstTy.isScalar())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointer() && SrcTy.isScalar())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointer() && !DstTy.isPointer() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}
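
// Illustrative usage sketch (assumes a builder `B`, a p0 vreg `Ptr`, and an
// s64 vreg `Int` on a 64-bit target):
//   auto ToInt = B.buildCast(LLT::scalar(64), Ptr);     // G_PTRTOINT
//   auto ToPtr = B.buildCast(LLT::pointer(0, 64), Int); // G_INTTOPTR
//   auto ToVec = B.buildCast(LLT::vector(2, 32), Int);  // G_BITCAST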

MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "extraction past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}

void MachineIRBuilder::buildSequence(Register Res, ArrayRef<Register> Ops,
                                     ArrayRef<uint64_t> Indices) {
#ifndef NDEBUG
  assert(Ops.size() == Indices.size() && "incompatible args");
  assert(!Ops.empty() && "invalid trivial sequence");
  assert(llvm::is_sorted(Indices) &&
         "sequence offsets must be in ascending order");

  assert(getMRI()->getType(Res).isValid() && "invalid operand type");
  for (auto Op : Ops)
    assert(getMRI()->getType(Op).isValid() && "invalid operand type");
#endif

  LLT ResTy = getMRI()->getType(Res);
  LLT OpTy = getMRI()->getType(Ops[0]);
  unsigned OpSize = OpTy.getSizeInBits();
  bool MaybeMerge = true;
  for (unsigned i = 0; i < Ops.size(); ++i) {
    if (getMRI()->getType(Ops[i]) != OpTy || Indices[i] != i * OpSize) {
      MaybeMerge = false;
      break;
    }
  }

  if (MaybeMerge && Ops.size() * OpSize == ResTy.getSizeInBits()) {
    buildMerge(Res, Ops);
    return;
  }

  Register ResIn = getMRI()->createGenericVirtualRegister(ResTy);
  buildUndef(ResIn);

  for (unsigned i = 0; i < Ops.size(); ++i) {
    Register ResOut = i + 1 == Ops.size()
                          ? Res
                          : getMRI()->createGenericVirtualRegister(ResTy);
    buildInsert(ResOut, ResIn, Ops[i], Indices[i]);
    ResIn = ResOut;
  }
}
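
// Illustrative usage sketch (assumes a builder `B`, its MachineRegisterInfo
// `MRI`, and two s32 vregs `Lo`/`Hi`): same-typed pieces at contiguous
// offsets that exactly cover the result collapse to one G_MERGE_VALUES;
// anything else falls back to a G_IMPLICIT_DEF plus a chain of G_INSERTs.
//   Register Res = MRI.createGenericVirtualRegister(LLT::scalar(64));
//   B.buildSequence(Res, {Lo, Hi}, {0, 32}); // => G_MERGE_VALUES Lo, Hi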

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res,
                                                 ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMerge(const DstOp &Res,
                             std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, Ops);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg =
      Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<Register, 8> TmpVec;
  for (unsigned I = 0; I != NumReg; ++I)
    TmpVec.push_back(getMRI()->createGenericVirtualRegister(Res));
  return buildUnmerge(TmpVec, Op);
}
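
// Illustrative usage sketch (assumes a builder `B` and an s64 vreg `X`):
// requesting s16 pieces creates 64/16 = 4 fresh vregs, all defined by a
// single G_UNMERGE_VALUES.
//   auto Unmerge = B.buildUnmerge(LLT::scalar(16), X);
//   Register Piece0 = Unmerge.getReg(0);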

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                  const SrcOp &Src,
                                                  const SrcOp &Op,
                                                  unsigned Index) {
  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
             Res.getLLTTy(*getMRI()).getSizeInBits() &&
         "insertion past the end of a register");

  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
      Op.getLLTTy(*getMRI()).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(
    Intrinsic::ID ID, ArrayRef<Register> ResultRegs, bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (unsigned ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
                                                 const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildFPTrunc(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildSelect(const DstOp &Res,
                                                  const SrcOp &Tst,
                                                  const SrcOp &Op0,
                                                  const SrcOp &Op1,
                                                  Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}

MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal,
    Register NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT SuccessResTy = getMRI()->getType(SuccessRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
      .addDef(OldValRes)
      .addDef(SuccessRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}
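
// Illustrative usage sketch (assumes a builder `B`, vregs matching the
// asserted types, and an atomic MachineMemOperand `MMO`; all names are
// hypothetical):
//   B.buildAtomicCmpXchgWithSuccess(OldVal /*s32*/, Success /*s1*/,
//                                   Addr /*p0*/, Cmp /*s32*/, New /*s32*/,
//                                   MMO);
// OldVal receives the loaded value and Success the s1 comparison result.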

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr,
                                     Register CmpVal, Register NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
      .addDef(OldValRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
    unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr,
    const SrcOp &Val, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
                                                       Register Addr,
                                                       Register Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val,
                                     MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val,
                                     MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
      .addImm(Ordering)
      .addImm(Scope);
}

MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
#ifndef NDEBUG
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
#endif

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}

void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
           "invalid narrowing extend");
  else
    assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
           "invalid widening trunc");
#endif
}

void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getNumElements() == Op0Ty.getNumElements())) &&
           "type mismatch");
#endif
}

MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
                                                 ArrayRef<DstOp> DstOps,
                                                 ArrayRef<SrcOp> SrcOps,
                                                 Optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_USUBSAT:
  case TargetOpcode::G_SSUBSAT: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getNumElements() == Op0Ty.getNumElements();
    }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(std::all_of(DstOps.begin(), DstOps.end(),
                       [&, this](const DstOp &Op) {
                         return Op.getLLTTy(*getMRI()) ==
                                DstOps[0].getLLTTy(*getMRI());
                       }) &&
           "type mismatch in output list");
    assert(DstOps.size() * DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "output operands do not cover input register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(!SrcOps.empty() && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(std::all_of(SrcOps.begin(), SrcOps.end(),
                       [&, this](const SrcOp &Op) {
                         return Op.getLLTTy(*getMRI()) ==
                                SrcOps[0].getLLTTy(*getMRI());
                       }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    if (SrcOps.size() == 1)
      return buildCast(DstOps[0], SrcOps[0]);
    if (DstOps[0].getLLTTy(*getMRI()).isVector()) {
      if (SrcOps[0].getLLTTy(*getMRI()).isVector())
        return buildInstr(TargetOpcode::G_CONCAT_VECTORS, DstOps, SrcOps);
      return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
    }
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
               SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(std::all_of(SrcOps.begin(), SrcOps.end(),
                       [&, this](const SrcOp &Op) {
                         return Op.getLLTTy(*getMRI()) ==
                                SrcOps[0].getLLTTy(*getMRI());
                       }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(std::all_of(SrcOps.begin(), SrcOps.end(),
                       [&, this](const SrcOp &Op) {
                         return Op.getLLTTy(*getMRI()) ==
                                SrcOps[0].getLLTTy(*getMRI());
                       }) &&
           "type mismatch in input list");
    if (SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
        DstOps[0].getLLTTy(*getMRI()).getElementType().getSizeInBits())
      return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(std::all_of(SrcOps.begin(), SrcOps.end(),
                       [&, this](const SrcOp &Op) {
                         return (Op.getLLTTy(*getMRI()).isVector() &&
                                 Op.getLLTTy(*getMRI()) ==
                                     SrcOps[0].getLLTTy(*getMRI()));
                       }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}