//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfo.h"

using namespace llvm;

void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}
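
// Illustrative setup sketch (not code from this file): setMF() only binds the
// function and clears the block/insertion point, so a caller still has to
// pick both before building, e.g. via the setMBB()/setInstr() interface
// declared in MachineIRBuilder.h:
//   MachineIRBuilder B;
//   B.setMF(MF);   // bind function; block and insert point are reset
//   B.setMBB(MBB); // insert at the end of MBB
//   B.buildConstant(LLT::scalar(32), 0);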

//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  MachineInstrBuilder MIB = BuildMI(getMF(), getDL(), getTII().get(Opcode));
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return buildInstr(TargetOpcode::DBG_VALUE)
      .addFrameIndex(FI)
      .addImm(0)
      .addMetadata(Variable)
      .addMetadata(Expr);
}

MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);
  if (auto *CI = dyn_cast<ConstantInt>(&C)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(&C)) {
    MIB.addFPImm(CFP);
  } else {
    // Insert $noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(Register());
  }

  MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
  return insertInstr(MIB);
}
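
// Sketch of what buildConstDbgValue emits for each constant kind (the MIR
// syntax is approximate and the variable metadata is hypothetical):
//   DBG_VALUE 42, 0, !"x", !DIExpression()      ; small ConstantInt -> addImm
//   DBG_VALUE i128 ..., 0, !"y", !DIExpression() ; wide ConstantInt -> addCImm
//   DBG_VALUE $noreg, 0, !"z", !DIExpression()   ; unsupported constant dropped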

MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Alignment.value());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}

void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

MachineInstrBuilder MachineIRBuilder::buildPtrAdd(const DstOp &Res,
                                                  const SrcOp &Op0,
                                                  const SrcOp &Op1) {
  assert(Res.getLLTTy(*getMRI()).getScalarType().isPointer() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1});
}

Optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return None;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildPtrAdd(Res, Op0, Cst.getReg(0));
}
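
// Illustrative use of materializePtrAdd (register names hypothetical): a zero
// offset reuses the base register and emits nothing, while a non-zero offset
// emits a G_CONSTANT plus a G_PTR_ADD:
//   Register NewPtr;
//   auto MIB = B.materializePtrAdd(NewPtr, Base, LLT::scalar(64), 16);
//   // Offset 0: NewPtr == Base and the returned Optional is None.
//   // Otherwise: %c:_(s64) = G_CONSTANT i64 16
//   //            %NewPtr:_(p0) = G_PTR_ADD %Base, %c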

MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
  LLT PtrTy = Res.getLLTTy(*getMRI());
  LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
  buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
  return buildPtrMask(Res, Op0, MaskReg);
}
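
// For example (a sketch, assuming a 64-bit pointer): NumBits == 4 yields the
// mask 0xFFFFFFFFFFFFFFF0, so buildMaskLowPtrBits clears the low four bits of
// Op0 via G_PTRMASK, i.e. it aligns the pointer down to a 16-byte boundary.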

MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAssertSExt(const DstOp &Res,
                                                      const SrcOp &Op,
                                                      unsigned Size) {
  return buildInstr(TargetOpcode::G_ASSERT_SEXT, Res, Op).addImm(Size);
}

MachineInstrBuilder MachineIRBuilder::buildAssertZExt(const DstOp &Res,
                                                      const SrcOp &Op,
                                                      unsigned Size) {
  return buildInstr(TargetOpcode::G_ASSERT_ZEXT, Res, Op).addImm(Size);
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}
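
// Sketch of the vector path above (virtual register names hypothetical):
// requesting a v4s32 constant of 1 first materializes the scalar and then
// splats it with G_BUILD_VECTOR:
//   %e:_(s32) = G_CONSTANT i32 1
//   %v:_(<4 x s32>) = G_BUILD_VECTOR %e, %e, %e, %e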

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics())
         == EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(
      Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
                                                  MachineBasicBlock &Dest) {
  assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_BRCOND);
  Tst.addSrcToMIB(MIB);
  MIB.addMBB(&Dest);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
                            MachinePointerInfo PtrInfo, Align Alignment,
                            MachineMemOperand::Flags MMOFlags,
                            const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);

  uint64_t Size = MemoryLocation::getSizeOrUnknown(
      TypeSize::Fixed(Dst.getLLTTy(*getMRI()).getSizeInBytes()));
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, AAInfo);
  return buildLoad(Dst, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
    const DstOp &Dst, const SrcOp &BasePtr, MachineMemOperand &BaseMMO,
    int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(*getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy.getSizeInBytes());

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Dst, BasePtr, *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(*getMRI());
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(OffsetTy, Offset);
  auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
  return buildLoad(Dst, Ptr, *OffsetMMO);
}
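
// Sketch of the non-zero-offset path above (register names hypothetical,
// assuming a 64-bit pointer): loading an s32 at byte offset 8 expands to
//   %off:_(s64) = G_CONSTANT i64 8
//   %p:_(p0) = G_PTR_ADD %base, %off
//   %val:_(s32) = G_LOAD %p :: (load 4 from <derived MMO>)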

MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
                             MachinePointerInfo PtrInfo, Align Alignment,
                             MachineMemOperand::Flags MMOFlags,
                             const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  uint64_t Size = MemoryLocation::getSizeOrUnknown(
      TypeSize::Fixed(Val.getLLTTy(*getMRI()).getSizeInBytes()));
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, AAInfo);
  return buildStore(Val, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
}

unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}
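
// For example, on a target whose getBooleanContents() reports
// ZeroOrOneBooleanContent this picks G_ZEXT, so extending an s1 comparison
// result to s32 yields 0 or 1; ZeroOrNegativeOneBooleanContent picks G_SEXT
// and yields 0 or -1 instead.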

MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp =
      getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}
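
// Illustration: with an s32 source, buildZExtOrTrunc to s64 emits G_ZEXT, the
// same call with an s16 destination emits G_TRUNC, and matching sizes
// degenerate to a plain COPY, so callers never compare type sizes themselves.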

MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointer() && DstTy.isScalar())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointer() && SrcTy.isScalar())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointer() && !DstTy.isPointer() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}

MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "extraction past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}

void MachineIRBuilder::buildSequence(Register Res, ArrayRef<Register> Ops,
                                     ArrayRef<uint64_t> Indices) {
#ifndef NDEBUG
  assert(Ops.size() == Indices.size() && "incompatible args");
  assert(!Ops.empty() && "invalid trivial sequence");
  assert(llvm::is_sorted(Indices) &&
         "sequence offsets must be in ascending order");

  assert(getMRI()->getType(Res).isValid() && "invalid operand type");
  for (auto Op : Ops)
    assert(getMRI()->getType(Op).isValid() && "invalid operand type");
#endif

  LLT ResTy = getMRI()->getType(Res);
  LLT OpTy = getMRI()->getType(Ops[0]);
  unsigned OpSize = OpTy.getSizeInBits();
  bool MaybeMerge = true;
  for (unsigned i = 0; i < Ops.size(); ++i) {
    if (getMRI()->getType(Ops[i]) != OpTy || Indices[i] != i * OpSize) {
      MaybeMerge = false;
      break;
    }
  }

  if (MaybeMerge && Ops.size() * OpSize == ResTy.getSizeInBits()) {
    buildMerge(Res, Ops);
    return;
  }

  Register ResIn = getMRI()->createGenericVirtualRegister(ResTy);
  buildUndef(ResIn);

  for (unsigned i = 0; i < Ops.size(); ++i) {
    Register ResOut = i + 1 == Ops.size()
                          ? Res
                          : getMRI()->createGenericVirtualRegister(ResTy);
    buildInsert(ResOut, ResIn, Ops[i], Indices[i]);
    ResIn = ResOut;
  }
}
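
// Sketch of the two buildSequence strategies (hypothetical registers): two
// s32 operands at offsets {0, 32} exactly covering an s64 take the fast path,
//   %res:_(s64) = G_MERGE_VALUES %a, %b
// whereas sparse or mixed-type operands fall back to an insert chain:
//   %u:_(s64) = G_IMPLICIT_DEF
//   %t:_(s64) = G_INSERT %u, %a, 0
//   %res:_(s64) = G_INSERT %t, %b, 40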

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res,
                                                 ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMerge(const DstOp &Res,
                             std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, Ops);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg =
      Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<Register, 8> TmpVec;
  for (unsigned I = 0; I != NumReg; ++I)
    TmpVec.push_back(getMRI()->createGenericVirtualRegister(Res));
  return buildUnmerge(TmpVec, Op);
}
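
// For instance (a sketch; register names hypothetical), calling this overload
// with LLT::scalar(32) on an s64 source creates two fresh s32 registers and
// emits
//   %lo:_(s32), %hi:_(s32) = G_UNMERGE_VALUES %src(s64)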

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
                                                        const SrcOp &Src) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
         "Expected Src to match Dst elt ty");
  auto UndefVec = buildUndef(DstTy);
  auto Zero = buildConstant(LLT::scalar(64), 0);
  auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
  SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
  return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
}
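
// The splat idiom above, sketched in MIR for a v4s32 result (register names
// hypothetical): the scalar is inserted into lane 0 of an undef vector and
// then broadcast with an all-zero shuffle mask:
//   %u:_(<4 x s32>) = G_IMPLICIT_DEF
//   %z:_(s64) = G_CONSTANT i64 0
//   %i:_(<4 x s32>) = G_INSERT_VECTOR_ELT %u, %x(s32), %z
//   %s:_(<4 x s32>) = G_SHUFFLE_VECTOR %i, %u, shufflemask(0, 0, 0, 0)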

MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
                                                         const SrcOp &Src1,
                                                         const SrcOp &Src2,
                                                         ArrayRef<int> Mask) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  LLT Src1Ty = Src1.getLLTTy(*getMRI());
  LLT Src2Ty = Src2.getLLTTy(*getMRI());
  assert(Src1Ty.getNumElements() + Src2Ty.getNumElements() >= Mask.size());
  assert(DstTy.getElementType() == Src1Ty.getElementType() &&
         DstTy.getElementType() == Src2Ty.getElementType());
  (void)Src1Ty;
  (void)Src2Ty;
  ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
  return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {DstTy}, {Src1, Src2})
      .addShuffleMask(MaskAlloc);
}

MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                  const SrcOp &Src,
                                                  const SrcOp &Op,
                                                  unsigned Index) {
  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
             Res.getLLTTy(*getMRI()).getSizeInBits() &&
         "insertion past the end of a register");

  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
      Op.getLLTTy(*getMRI()).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(
    Intrinsic::ID ID, ArrayRef<Register> ResultRegs, bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (unsigned ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
                                                 const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildFPTrunc(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildSelect(const DstOp &Res,
                                                  const SrcOp &Tst,
                                                  const SrcOp &Op0,
                                                  const SrcOp &Op1,
                                                  Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}

MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal,
    Register NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT SuccessResTy = getMRI()->getType(SuccessRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
      .addDef(OldValRes)
      .addDef(SuccessRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr,
                                     Register CmpVal, Register NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
      .addDef(OldValRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(unsigned Opcode,
                                                     const DstOp &OldValRes,
                                                     const SrcOp &Addr,
                                                     const SrcOp &Val,
                                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
                                                       Register Addr,
                                                       Register Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
      .addImm(Ordering)
      .addImm(Scope);
}

MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
#ifndef NDEBUG
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
#endif

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}

void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
           "invalid narrowing extend");
  else
    assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
           "invalid widening trunc");
#endif
}

void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getNumElements() == Op0Ty.getNumElements())) &&
           "type mismatch");
#endif
}

MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
                                                 ArrayRef<DstOp> DstOps,
                                                 ArrayRef<SrcOp> SrcOps,
                                                 Optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_ABS:
    // All these are unary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()));
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_USUBSAT:
  case TargetOpcode::G_SSUBSAT: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_USHLSAT:
  case TargetOpcode::G_SSHLSAT: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getNumElements() == Op0Ty.getNumElements();
    }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(llvm::all_of(DstOps,
                        [&, this](const DstOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 DstOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in output list");
    assert(DstOps.size() * DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "output operands do not cover input register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(!SrcOps.empty() && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    if (SrcOps.size() == 1)
      return buildCast(DstOps[0], SrcOps[0]);
    if (DstOps[0].getLLTTy(*getMRI()).isVector()) {
      if (SrcOps[0].getLLTTy(*getMRI()).isVector())
        return buildInstr(TargetOpcode::G_CONCAT_VECTORS, DstOps, SrcOps);
      return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
    }
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
               SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    if (SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
        DstOps[0].getLLTTy(*getMRI()).getElementType().getSizeInBits())
      return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return (Op.getLLTTy(*getMRI()).isVector() &&
                                  Op.getLLTTy(*getMRI()) ==
                                      SrcOps[0].getLLTTy(*getMRI()));
                        }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}