1 //===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
/// This file implements the MachineIRBuilder class.
10 //===----------------------------------------------------------------------===//
11 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
12 #include "llvm/CodeGen/MachineFunction.h"
13 #include "llvm/CodeGen/MachineInstr.h"
14 #include "llvm/CodeGen/MachineInstrBuilder.h"
15 #include "llvm/CodeGen/MachineRegisterInfo.h"
16 #include "llvm/CodeGen/TargetInstrInfo.h"
17 #include "llvm/CodeGen/TargetLowering.h"
18 #include "llvm/CodeGen/TargetOpcodes.h"
19 #include "llvm/CodeGen/TargetSubtargetInfo.h"
20 #include "llvm/IR/DebugInfoMetadata.h"
21 
22 using namespace llvm;
23 
24 void MachineIRBuilder::setMF(MachineFunction &MF) {
25   State.MF = &MF;
26   State.MBB = nullptr;
27   State.MRI = &MF.getRegInfo();
28   State.TII = MF.getSubtarget().getInstrInfo();
29   State.DL = DebugLoc();
30   State.II = MachineBasicBlock::iterator();
31   State.Observer = nullptr;
32 }
33 
34 //------------------------------------------------------------------------------
35 // Build instruction variants.
36 //------------------------------------------------------------------------------
37 
38 MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
39   MachineInstrBuilder MIB = BuildMI(getMF(), getDL(), getTII().get(Opcode));
40   return MIB;
41 }
42 
43 MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
44   getMBB().insert(getInsertPt(), MIB);
45   recordInsertion(MIB);
46   return MIB;
47 }
48 
49 MachineInstrBuilder
50 MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
51                                       const MDNode *Expr) {
52   assert(isa<DILocalVariable>(Variable) && "not a variable");
53   assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
54   assert(
55       cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
56       "Expected inlined-at fields to agree");
57   return insertInstr(BuildMI(getMF(), getDL(),
58                              getTII().get(TargetOpcode::DBG_VALUE),
59                              /*IsIndirect*/ false, Reg, Variable, Expr));
60 }
61 
62 MachineInstrBuilder
63 MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
64                                         const MDNode *Expr) {
65   assert(isa<DILocalVariable>(Variable) && "not a variable");
66   assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
67   assert(
68       cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
69       "Expected inlined-at fields to agree");
70   return insertInstr(BuildMI(getMF(), getDL(),
71                              getTII().get(TargetOpcode::DBG_VALUE),
72                              /*IsIndirect*/ true, Reg, Variable, Expr));
73 }
74 
75 MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
76                                                       const MDNode *Variable,
77                                                       const MDNode *Expr) {
78   assert(isa<DILocalVariable>(Variable) && "not a variable");
79   assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
80   assert(
81       cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
82       "Expected inlined-at fields to agree");
83   return buildInstr(TargetOpcode::DBG_VALUE)
84       .addFrameIndex(FI)
85       .addImm(0)
86       .addMetadata(Variable)
87       .addMetadata(Expr);
88 }
89 
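// Emit a DBG_VALUE describing a constant: ConstantInts wider than 64 bits are
// attached as a CImm, narrower ones as a plain immediate, and ConstantFPs as
// an FPImm. Any other constant is dropped and described as $noreg.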
90 MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
91                                                          const MDNode *Variable,
92                                                          const MDNode *Expr) {
93   assert(isa<DILocalVariable>(Variable) && "not a variable");
94   assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
95   assert(
96       cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
97       "Expected inlined-at fields to agree");
98   auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);
99   if (auto *CI = dyn_cast<ConstantInt>(&C)) {
100     if (CI->getBitWidth() > 64)
101       MIB.addCImm(CI);
102     else
103       MIB.addImm(CI->getZExtValue());
104   } else if (auto *CFP = dyn_cast<ConstantFP>(&C)) {
105     MIB.addFPImm(CFP);
106   } else {
107     // Insert $noreg if we didn't find a usable constant and had to drop it.
108     MIB.addReg(Register());
109   }
110 
111   MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
112   return insertInstr(MIB);
113 }
114 
115 MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
116   assert(isa<DILabel>(Label) && "not a label");
117   assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
118          "Expected inlined-at fields to agree");
119   auto MIB = buildInstr(TargetOpcode::DBG_LABEL);
120 
121   return MIB.addMetadata(Label);
122 }
123 
124 MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
125                                                          const SrcOp &Size,
126                                                          Align Alignment) {
127   assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
128   auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
129   Res.addDefToMIB(*getMRI(), MIB);
130   Size.addSrcToMIB(MIB);
131   MIB.addImm(Alignment.value());
132   return MIB;
133 }
134 
135 MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
136                                                       int Idx) {
137   assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
138   auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
139   Res.addDefToMIB(*getMRI(), MIB);
140   MIB.addFrameIndex(Idx);
141   return MIB;
142 }
143 
144 MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
145                                                        const GlobalValue *GV) {
146   assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
147   assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
148              GV->getType()->getAddressSpace() &&
149          "address space mismatch");
150 
151   auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
152   Res.addDefToMIB(*getMRI(), MIB);
153   MIB.addGlobalAddress(GV);
154   return MIB;
155 }
156 
157 MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
158                                                      unsigned JTI) {
159   return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
160       .addJumpTableIndex(JTI);
161 }
162 
163 void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
164   assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
165   assert((Res == Op0) && "type mismatch");
166 }
167 
168 void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
169                                         const LLT Op1) {
170   assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
171   assert((Res == Op0 && Res == Op1) && "type mismatch");
172 }
173 
174 void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
175                                        const LLT Op1) {
176   assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
177   assert((Res == Op0) && "type mismatch");
178 }
179 
180 MachineInstrBuilder MachineIRBuilder::buildPtrAdd(const DstOp &Res,
181                                                   const SrcOp &Op0,
182                                                   const SrcOp &Op1) {
183   assert(Res.getLLTTy(*getMRI()).getScalarType().isPointer() &&
184          Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");
186 
187   return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1});
188 }
189 
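// Materialize Op0 + Value into Res. A zero offset emits no instruction: Res
// just aliases Op0 and None is returned so callers know nothing was built.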
190 Optional<MachineInstrBuilder>
191 MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
192                                     const LLT ValueTy, uint64_t Value) {
193   assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");
195 
196   if (Value == 0) {
197     Res = Op0;
198     return None;
199   }
200 
201   Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
202   auto Cst = buildConstant(ValueTy, Value);
203   return buildPtrAdd(Res, Op0, Cst.getReg(0));
204 }
205 
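// Clear the low NumBits bits of a pointer by emitting a G_PTRMASK with a
// constant mask whose trailing NumBits bits are zero.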
206 MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
207                                                           const SrcOp &Op0,
208                                                           uint32_t NumBits) {
209   LLT PtrTy = Res.getLLTTy(*getMRI());
210   LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
211   Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
212   buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
213   return buildPtrMask(Res, Op0, MaskReg);
214 }
215 
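// Widen Op0 to the result vector type: unmerge its elements, append undef
// elements until the result element count is reached, then merge them back.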
216 MachineInstrBuilder
217 MachineIRBuilder::buildPadVectorWithUndefElements(const DstOp &Res,
218                                                   const SrcOp &Op0) {
219   LLT ResTy = Res.getLLTTy(*getMRI());
220   LLT Op0Ty = Op0.getLLTTy(*getMRI());
221 
222   assert((ResTy.isVector() && Op0Ty.isVector()) && "Non vector type");
223   assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
224          "Different vector element types");
225   assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
226          "Op0 has more elements");
227 
228   auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
229   SmallVector<Register, 8> Regs;
  for (const MachineOperand &Op : Unmerge.getInstr()->defs())
231     Regs.push_back(Op.getReg());
232   Register Undef = buildUndef(Op0Ty.getElementType()).getReg(0);
233   unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
234   for (unsigned i = 0; i < NumberOfPadElts; ++i)
235     Regs.push_back(Undef);
236   return buildMerge(Res, Regs);
237 }
238 
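// Narrow Op0 to the result vector type: unmerge its elements and merge back
// only the leading ResTy.getNumElements() of them.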
239 MachineInstrBuilder
240 MachineIRBuilder::buildDeleteTrailingVectorElements(const DstOp &Res,
241                                                     const SrcOp &Op0) {
242   LLT ResTy = Res.getLLTTy(*getMRI());
243   LLT Op0Ty = Op0.getLLTTy(*getMRI());
244 
245   assert((ResTy.isVector() && Op0Ty.isVector()) && "Non vector type");
246   assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
247          "Different vector element types");
248   assert((ResTy.getNumElements() < Op0Ty.getNumElements()) &&
249          "Op0 has fewer elements");
250 
251   SmallVector<Register, 8> Regs;
252   auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
253   for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
254     Regs.push_back(Unmerge.getReg(i));
255   return buildMerge(Res, Regs);
256 }
257 
258 MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
259   return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
260 }
261 
262 MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
263   assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
264   return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
265 }
266 
267 MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
268                                                 unsigned JTI,
269                                                 Register IndexReg) {
270   assert(getMRI()->getType(TablePtr).isPointer() &&
271          "Table reg must be a pointer");
272   return buildInstr(TargetOpcode::G_BRJT)
273       .addUse(TablePtr)
274       .addJumpTableIndex(JTI)
275       .addUse(IndexReg);
276 }
277 
278 MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
279                                                 const SrcOp &Op) {
280   return buildInstr(TargetOpcode::COPY, Res, Op);
281 }
282 
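// Materialize an integer constant. Vector destinations build the scalar
// G_CONSTANT first and then splat it with a G_BUILD_VECTOR.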
283 MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
284                                                     const ConstantInt &Val) {
285   LLT Ty = Res.getLLTTy(*getMRI());
286   LLT EltTy = Ty.getScalarType();
287   assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
288          "creating constant with the wrong size");
289 
290   if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
294     return buildSplatVector(Res, Const);
295   }
296 
297   auto Const = buildInstr(TargetOpcode::G_CONSTANT);
298   Const->setDebugLoc(DebugLoc());
299   Res.addDefToMIB(*getMRI(), Const);
300   Const.addCImm(&Val);
301   return Const;
302 }
303 
304 MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
305                                                     int64_t Val) {
306   auto IntN = IntegerType::get(getMF().getFunction().getContext(),
307                                Res.getLLTTy(*getMRI()).getScalarSizeInBits());
308   ConstantInt *CI = ConstantInt::get(IntN, Val, true);
309   return buildConstant(Res, *CI);
310 }
311 
312 MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
313                                                      const ConstantFP &Val) {
314   LLT Ty = Res.getLLTTy(*getMRI());
315   LLT EltTy = Ty.getScalarType();
316 
317   assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics())
318          == EltTy.getSizeInBits() &&
319          "creating fconstant with the wrong size");
320 
321   assert(!Ty.isPointer() && "invalid operand type");
322 
323   if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);
327 
328     return buildSplatVector(Res, Const);
329   }
330 
331   auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
332   Const->setDebugLoc(DebugLoc());
333   Res.addDefToMIB(*getMRI(), Const);
334   Const.addFPImm(&Val);
335   return Const;
336 }
337 
338 MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
339                                                     const APInt &Val) {
340   ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
341   return buildConstant(Res, *CI);
342 }
343 
344 MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
345                                                      double Val) {
346   LLT DstTy = Res.getLLTTy(*getMRI());
347   auto &Ctx = getMF().getFunction().getContext();
348   auto *CFP =
349       ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
350   return buildFConstant(Res, *CFP);
351 }
352 
353 MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
354                                                      const APFloat &Val) {
355   auto &Ctx = getMF().getFunction().getContext();
356   auto *CFP = ConstantFP::get(Ctx, Val);
357   return buildFConstant(Res, *CFP);
358 }
359 
360 MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
361                                                   MachineBasicBlock &Dest) {
362   assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");
363 
364   auto MIB = buildInstr(TargetOpcode::G_BRCOND);
365   Tst.addSrcToMIB(MIB);
366   MIB.addMBB(&Dest);
367   return MIB;
368 }
369 
370 MachineInstrBuilder
371 MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
372                             MachinePointerInfo PtrInfo, Align Alignment,
373                             MachineMemOperand::Flags MMOFlags,
374                             const AAMDNodes &AAInfo) {
375   MMOFlags |= MachineMemOperand::MOLoad;
376   assert((MMOFlags & MachineMemOperand::MOStore) == 0);
377 
378   LLT Ty = Dst.getLLTTy(*getMRI());
379   MachineMemOperand *MMO =
380       getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
381   return buildLoad(Dst, Addr, *MMO);
382 }
383 
384 MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
385                                                      const DstOp &Res,
386                                                      const SrcOp &Addr,
387                                                      MachineMemOperand &MMO) {
388   assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
389   assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
390 
391   auto MIB = buildInstr(Opcode);
392   Res.addDefToMIB(*getMRI(), MIB);
393   Addr.addSrcToMIB(MIB);
394   MIB.addMemOperand(&MMO);
395   return MIB;
396 }
397 
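// Load from BasePtr + Offset, deriving the memory operand from BaseMMO. A zero
// offset reuses BasePtr directly (the load may still change size or type);
// otherwise a constant offset and a G_PTR_ADD are emitted first.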
398 MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
399   const DstOp &Dst, const SrcOp &BasePtr,
400   MachineMemOperand &BaseMMO, int64_t Offset) {
401   LLT LoadTy = Dst.getLLTTy(*getMRI());
402   MachineMemOperand *OffsetMMO =
403       getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);
404 
405   if (Offset == 0) // This may be a size or type changing load.
406     return buildLoad(Dst, BasePtr, *OffsetMMO);
407 
408   LLT PtrTy = BasePtr.getLLTTy(*getMRI());
409   LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
410   auto ConstOffset = buildConstant(OffsetTy, Offset);
411   auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
412   return buildLoad(Dst, Ptr, *OffsetMMO);
413 }
414 
415 MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
416                                                  const SrcOp &Addr,
417                                                  MachineMemOperand &MMO) {
418   assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
419   assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
420 
421   auto MIB = buildInstr(TargetOpcode::G_STORE);
422   Val.addSrcToMIB(MIB);
423   Addr.addSrcToMIB(MIB);
424   MIB.addMemOperand(&MMO);
425   return MIB;
426 }
427 
428 MachineInstrBuilder
429 MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
430                              MachinePointerInfo PtrInfo, Align Alignment,
431                              MachineMemOperand::Flags MMOFlags,
432                              const AAMDNodes &AAInfo) {
433   MMOFlags |= MachineMemOperand::MOStore;
434   assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
435 
436   LLT Ty = Val.getLLTTy(*getMRI());
437   MachineMemOperand *MMO =
438       getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
439   return buildStore(Val, Addr, *MMO);
440 }
441 
442 MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
443                                                   const SrcOp &Op) {
444   return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
445 }
446 
447 MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
448                                                 const SrcOp &Op) {
449   return buildInstr(TargetOpcode::G_SEXT, Res, Op);
450 }
451 
452 MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
453                                                 const SrcOp &Op) {
454   return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
455 }
456 
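// Pick the extension opcode matching the target's boolean representation:
// sign-extend for ZeroOrNegativeOne, zero-extend for ZeroOrOne, and any-extend
// when the upper bits are undefined.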
457 unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
458   const auto *TLI = getMF().getSubtarget().getTargetLowering();
459   switch (TLI->getBooleanContents(IsVec, IsFP)) {
460   case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
461     return TargetOpcode::G_SEXT;
462   case TargetLoweringBase::ZeroOrOneBooleanContent:
463     return TargetOpcode::G_ZEXT;
464   default:
465     return TargetOpcode::G_ANYEXT;
466   }
467 }
468 
469 MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
470                                                    const SrcOp &Op,
471                                                    bool IsFP) {
472   unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
473   return buildInstr(ExtOp, Res, Op);
474 }
475 
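// Emit ExtOpc, G_TRUNC, or a plain COPY depending on whether the destination
// is wider than, narrower than, or the same size as the source.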
476 MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
477                                                       const DstOp &Res,
478                                                       const SrcOp &Op) {
479   assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
480           TargetOpcode::G_SEXT == ExtOpc) &&
481          "Expecting Extending Opc");
482   assert(Res.getLLTTy(*getMRI()).isScalar() ||
483          Res.getLLTTy(*getMRI()).isVector());
484   assert(Res.getLLTTy(*getMRI()).isScalar() ==
485          Op.getLLTTy(*getMRI()).isScalar());
486 
487   unsigned Opcode = TargetOpcode::COPY;
488   if (Res.getLLTTy(*getMRI()).getSizeInBits() >
489       Op.getLLTTy(*getMRI()).getSizeInBits())
490     Opcode = ExtOpc;
491   else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
492            Op.getLLTTy(*getMRI()).getSizeInBits())
493     Opcode = TargetOpcode::G_TRUNC;
494   else
495     assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));
496 
497   return buildInstr(Opcode, Res, Op);
498 }
499 
500 MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
501                                                        const SrcOp &Op) {
502   return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
503 }
504 
505 MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
506                                                        const SrcOp &Op) {
507   return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
508 }
509 
510 MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
511                                                          const SrcOp &Op) {
512   return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
513 }
514 
515 MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
516                                                      const SrcOp &Op,
517                                                      int64_t ImmOp) {
518   LLT ResTy = Res.getLLTTy(*getMRI());
519   auto Mask = buildConstant(
520       ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
521   return buildAnd(Res, Op, Mask);
522 }
523 
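// Build a size-preserving cast: a COPY for identical types, G_PTRTOINT or
// G_INTTOPTR when exactly one side is a pointer, and G_BITCAST otherwise.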
524 MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
525                                                 const SrcOp &Src) {
526   LLT SrcTy = Src.getLLTTy(*getMRI());
527   LLT DstTy = Dst.getLLTTy(*getMRI());
528   if (SrcTy == DstTy)
529     return buildCopy(Dst, Src);
530 
531   unsigned Opcode;
532   if (SrcTy.isPointer() && DstTy.isScalar())
533     Opcode = TargetOpcode::G_PTRTOINT;
534   else if (DstTy.isPointer() && SrcTy.isScalar())
535     Opcode = TargetOpcode::G_INTTOPTR;
536   else {
    assert(!SrcTy.isPointer() && !DstTy.isPointer() && "no G_ADDRCAST yet");
538     Opcode = TargetOpcode::G_BITCAST;
539   }
540 
541   return buildInstr(Opcode, Dst, Src);
542 }
543 
544 MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
545                                                    const SrcOp &Src,
546                                                    uint64_t Index) {
547   LLT SrcTy = Src.getLLTTy(*getMRI());
548   LLT DstTy = Dst.getLLTTy(*getMRI());
549 
550 #ifndef NDEBUG
551   assert(SrcTy.isValid() && "invalid operand type");
552   assert(DstTy.isValid() && "invalid operand type");
553   assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
554          "extracting off end of register");
555 #endif
556 
557   if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "extraction past the end of a register");
559     return buildCast(Dst, Src);
560   }
561 
562   auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
563   Dst.addDefToMIB(*getMRI(), Extract);
564   Src.addSrcToMIB(Extract);
565   Extract.addImm(Index);
566   return Extract;
567 }
568 
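// Assemble Res from the (Op, Index) pairs. If the operands are uniform,
// contiguous, and exactly cover Res, this folds to a single G_MERGE_VALUES;
// otherwise it starts from an undef value and chains G_INSERTs.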
569 void MachineIRBuilder::buildSequence(Register Res, ArrayRef<Register> Ops,
570                                      ArrayRef<uint64_t> Indices) {
571 #ifndef NDEBUG
572   assert(Ops.size() == Indices.size() && "incompatible args");
573   assert(!Ops.empty() && "invalid trivial sequence");
574   assert(llvm::is_sorted(Indices) &&
575          "sequence offsets must be in ascending order");
576 
577   assert(getMRI()->getType(Res).isValid() && "invalid operand type");
578   for (auto Op : Ops)
579     assert(getMRI()->getType(Op).isValid() && "invalid operand type");
580 #endif
581 
582   LLT ResTy = getMRI()->getType(Res);
583   LLT OpTy = getMRI()->getType(Ops[0]);
584   unsigned OpSize = OpTy.getSizeInBits();
585   bool MaybeMerge = true;
586   for (unsigned i = 0; i < Ops.size(); ++i) {
587     if (getMRI()->getType(Ops[i]) != OpTy || Indices[i] != i * OpSize) {
588       MaybeMerge = false;
589       break;
590     }
591   }
592 
593   if (MaybeMerge && Ops.size() * OpSize == ResTy.getSizeInBits()) {
594     buildMerge(Res, Ops);
595     return;
596   }
597 
598   Register ResIn = getMRI()->createGenericVirtualRegister(ResTy);
599   buildUndef(ResIn);
600 
601   for (unsigned i = 0; i < Ops.size(); ++i) {
602     Register ResOut = i + 1 == Ops.size()
603                           ? Res
604                           : getMRI()->createGenericVirtualRegister(ResTy);
605     buildInsert(ResOut, ResIn, Ops[i], Indices[i]);
606     ResIn = ResOut;
607   }
608 }
609 
610 MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
611   return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
612 }
613 
614 MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res,
615                                                  ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
618   // sufficiently large SmallVector to not go through the heap.
619   SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
620   assert(TmpVec.size() > 1);
621   return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
622 }
623 
624 MachineInstrBuilder
625 MachineIRBuilder::buildMerge(const DstOp &Res,
626                              std::initializer_list<SrcOp> Ops) {
627   assert(Ops.size() > 1);
628   return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, Ops);
629 }
630 
631 MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
632                                                    const SrcOp &Op) {
633   // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
634   // we need some temporary storage for the DstOp objects. Here we use a
635   // sufficiently large SmallVector to not go through the heap.
636   SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
637   assert(TmpVec.size() > 1);
638   return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
639 }
640 
641 MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
642                                                    const SrcOp &Op) {
643   unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
644   SmallVector<DstOp, 8> TmpVec(NumReg, Res);
645   return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
646 }
647 
648 MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
649                                                    const SrcOp &Op) {
650   // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
651   // we need some temporary storage for the DstOp objects. Here we use a
652   // sufficiently large SmallVector to not go through the heap.
653   SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
654   assert(TmpVec.size() > 1);
655   return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
656 }
657 
658 MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
659                                                        ArrayRef<Register> Ops) {
660   // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
662   // sufficiently large SmallVector to not go through the heap.
663   SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
664   return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
665 }
666 
667 MachineInstrBuilder
668 MachineIRBuilder::buildBuildVectorConstant(const DstOp &Res,
669                                            ArrayRef<APInt> Ops) {
670   SmallVector<SrcOp> TmpVec;
671   TmpVec.reserve(Ops.size());
672   LLT EltTy = Res.getLLTTy(*getMRI()).getElementType();
673   for (auto &Op : Ops)
674     TmpVec.push_back(buildConstant(EltTy, Op));
675   return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
676 }
677 
678 MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
679                                                        const SrcOp &Src) {
680   SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
681   return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
682 }
683 
684 MachineInstrBuilder
685 MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
686                                         ArrayRef<Register> Ops) {
687   // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
689   // sufficiently large SmallVector to not go through the heap.
690   SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
691   return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
692 }
693 
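// Splat a scalar Src across the result vector: insert it into element zero of
// an undef vector, then shuffle with an all-zeros mask.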
694 MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
695                                                         const SrcOp &Src) {
696   LLT DstTy = Res.getLLTTy(*getMRI());
697   assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
698          "Expected Src to match Dst elt ty");
699   auto UndefVec = buildUndef(DstTy);
700   auto Zero = buildConstant(LLT::scalar(64), 0);
701   auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
702   SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
703   return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
704 }
705 
706 MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
707                                                          const SrcOp &Src1,
708                                                          const SrcOp &Src2,
709                                                          ArrayRef<int> Mask) {
710   LLT DstTy = Res.getLLTTy(*getMRI());
711   LLT Src1Ty = Src1.getLLTTy(*getMRI());
712   LLT Src2Ty = Src2.getLLTTy(*getMRI());
713   assert((size_t)(Src1Ty.getNumElements() + Src2Ty.getNumElements()) >=
714          Mask.size());
715   assert(DstTy.getElementType() == Src1Ty.getElementType() &&
716          DstTy.getElementType() == Src2Ty.getElementType());
717   (void)DstTy;
718   (void)Src1Ty;
719   (void)Src2Ty;
720   ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
721   return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
722       .addShuffleMask(MaskAlloc);
723 }
724 
725 MachineInstrBuilder
726 MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
727   // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
729   // sufficiently large SmallVector to not go through the heap.
730   SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
731   return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
732 }
733 
734 MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
735                                                   const SrcOp &Src,
736                                                   const SrcOp &Op,
737                                                   unsigned Index) {
738   assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
739              Res.getLLTTy(*getMRI()).getSizeInBits() &&
740          "insertion past the end of a register");
741 
742   if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
743       Op.getLLTTy(*getMRI()).getSizeInBits()) {
744     return buildCast(Res, Op);
745   }
746 
747   return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
748 }
749 
750 MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
751                                                      ArrayRef<Register> ResultRegs,
752                                                      bool HasSideEffects) {
753   auto MIB =
754       buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
755                                 : TargetOpcode::G_INTRINSIC);
756   for (unsigned ResultReg : ResultRegs)
757     MIB.addDef(ResultReg);
758   MIB.addIntrinsicID(ID);
759   return MIB;
760 }
761 
762 MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
763                                                      ArrayRef<DstOp> Results,
764                                                      bool HasSideEffects) {
765   auto MIB =
766       buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
767                                 : TargetOpcode::G_INTRINSIC);
768   for (DstOp Result : Results)
769     Result.addDefToMIB(*getMRI(), MIB);
770   MIB.addIntrinsicID(ID);
771   return MIB;
772 }
773 
774 MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
775                                                  const SrcOp &Op) {
776   return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
777 }
778 
779 MachineInstrBuilder MachineIRBuilder::buildFPTrunc(const DstOp &Res,
780                                                    const SrcOp &Op,
781                                                    Optional<unsigned> Flags) {
782   return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
783 }
784 
785 MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
786                                                 const DstOp &Res,
787                                                 const SrcOp &Op0,
788                                                 const SrcOp &Op1) {
789   return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
790 }
791 
792 MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
793                                                 const DstOp &Res,
794                                                 const SrcOp &Op0,
795                                                 const SrcOp &Op1,
796                                                 Optional<unsigned> Flags) {
797 
798   return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
799 }
800 
801 MachineInstrBuilder MachineIRBuilder::buildSelect(const DstOp &Res,
802                                                   const SrcOp &Tst,
803                                                   const SrcOp &Op0,
804                                                   const SrcOp &Op1,
805                                                   Optional<unsigned> Flags) {
806 
807   return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
808 }
809 
810 MachineInstrBuilder
811 MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
812                                            const SrcOp &Elt, const SrcOp &Idx) {
813   return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
814 }
815 
816 MachineInstrBuilder
817 MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
818                                             const SrcOp &Idx) {
819   return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
820 }
821 
822 MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
823     Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal,
824     Register NewVal, MachineMemOperand &MMO) {
825 #ifndef NDEBUG
826   LLT OldValResTy = getMRI()->getType(OldValRes);
827   LLT SuccessResTy = getMRI()->getType(SuccessRes);
828   LLT AddrTy = getMRI()->getType(Addr);
829   LLT CmpValTy = getMRI()->getType(CmpVal);
830   LLT NewValTy = getMRI()->getType(NewVal);
831   assert(OldValResTy.isScalar() && "invalid operand type");
832   assert(SuccessResTy.isScalar() && "invalid operand type");
833   assert(AddrTy.isPointer() && "invalid operand type");
834   assert(CmpValTy.isValid() && "invalid operand type");
835   assert(NewValTy.isValid() && "invalid operand type");
836   assert(OldValResTy == CmpValTy && "type mismatch");
837   assert(OldValResTy == NewValTy && "type mismatch");
838 #endif
839 
840   return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
841       .addDef(OldValRes)
842       .addDef(SuccessRes)
843       .addUse(Addr)
844       .addUse(CmpVal)
845       .addUse(NewVal)
846       .addMemOperand(&MMO);
847 }
848 
849 MachineInstrBuilder
850 MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr,
851                                      Register CmpVal, Register NewVal,
852                                      MachineMemOperand &MMO) {
853 #ifndef NDEBUG
854   LLT OldValResTy = getMRI()->getType(OldValRes);
855   LLT AddrTy = getMRI()->getType(Addr);
856   LLT CmpValTy = getMRI()->getType(CmpVal);
857   LLT NewValTy = getMRI()->getType(NewVal);
858   assert(OldValResTy.isScalar() && "invalid operand type");
859   assert(AddrTy.isPointer() && "invalid operand type");
860   assert(CmpValTy.isValid() && "invalid operand type");
861   assert(NewValTy.isValid() && "invalid operand type");
862   assert(OldValResTy == CmpValTy && "type mismatch");
863   assert(OldValResTy == NewValTy && "type mismatch");
864 #endif
865 
866   return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
867       .addDef(OldValRes)
868       .addUse(Addr)
869       .addUse(CmpVal)
870       .addUse(NewVal)
871       .addMemOperand(&MMO);
872 }
873 
874 MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
875   unsigned Opcode, const DstOp &OldValRes,
876   const SrcOp &Addr, const SrcOp &Val,
877   MachineMemOperand &MMO) {
878 
879 #ifndef NDEBUG
880   LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
881   LLT AddrTy = Addr.getLLTTy(*getMRI());
882   LLT ValTy = Val.getLLTTy(*getMRI());
883   assert(OldValResTy.isScalar() && "invalid operand type");
884   assert(AddrTy.isPointer() && "invalid operand type");
885   assert(ValTy.isValid() && "invalid operand type");
886   assert(OldValResTy == ValTy && "type mismatch");
887   assert(MMO.isAtomic() && "not atomic mem operand");
888 #endif
889 
890   auto MIB = buildInstr(Opcode);
891   OldValRes.addDefToMIB(*getMRI(), MIB);
892   Addr.addSrcToMIB(MIB);
893   Val.addSrcToMIB(MIB);
894   MIB.addMemOperand(&MMO);
895   return MIB;
896 }
897 
898 MachineInstrBuilder
899 MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
900                                      Register Val, MachineMemOperand &MMO) {
901   return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
902                         MMO);
903 }
904 MachineInstrBuilder
905 MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
906                                     Register Val, MachineMemOperand &MMO) {
907   return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
908                         MMO);
909 }
910 MachineInstrBuilder
911 MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
912                                     Register Val, MachineMemOperand &MMO) {
913   return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
914                         MMO);
915 }
916 MachineInstrBuilder
917 MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
918                                     Register Val, MachineMemOperand &MMO) {
919   return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
920                         MMO);
921 }
922 MachineInstrBuilder
923 MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
924                                      Register Val, MachineMemOperand &MMO) {
925   return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
926                         MMO);
927 }
928 MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
929                                                        Register Addr,
930                                                        Register Val,
931                                                        MachineMemOperand &MMO) {
932   return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
933                         MMO);
934 }
935 MachineInstrBuilder
936 MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
937                                     Register Val, MachineMemOperand &MMO) {
938   return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
939                         MMO);
940 }
941 MachineInstrBuilder
942 MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
943                                     Register Val, MachineMemOperand &MMO) {
944   return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
945                         MMO);
946 }
947 MachineInstrBuilder
948 MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
949                                     Register Val, MachineMemOperand &MMO) {
950   return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
951                         MMO);
952 }
953 MachineInstrBuilder
954 MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
955                                      Register Val, MachineMemOperand &MMO) {
956   return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
957                         MMO);
958 }
959 MachineInstrBuilder
960 MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
961                                      Register Val, MachineMemOperand &MMO) {
962   return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
963                         MMO);
964 }
965 
966 MachineInstrBuilder
967 MachineIRBuilder::buildAtomicRMWFAdd(
968   const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
969   MachineMemOperand &MMO) {
970   return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
971                         MMO);
972 }
973 
974 MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val,
                                     MachineMemOperand &MMO) {
977   return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
978                         MMO);
979 }
980 
981 MachineInstrBuilder
982 MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
983   return buildInstr(TargetOpcode::G_FENCE)
984     .addImm(Ordering)
985     .addImm(Scope);
986 }
987 
988 MachineInstrBuilder
989 MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
990 #ifndef NDEBUG
991   assert(getMRI()->getType(Res).isPointer() && "invalid res type");
992 #endif
993 
994   return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
995 }
996 
997 void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
998                                         bool IsExtend) {
999 #ifndef NDEBUG
1000   if (DstTy.isVector()) {
1001     assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
1002     assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
1003            "different number of elements in a trunc/ext");
1004   } else
1005     assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");
1006 
1007   if (IsExtend)
1008     assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
1009            "invalid narrowing extend");
1010   else
1011     assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
1012            "invalid widening trunc");
1013 #endif
1014 }
1015 
1016 void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
1017                                         const LLT Op0Ty, const LLT Op1Ty) {
1018 #ifndef NDEBUG
1019   assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
1020          "invalid operand type");
1021   assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
1022   if (ResTy.isScalar() || ResTy.isPointer())
1023     assert(TstTy.isScalar() && "type mismatch");
1024   else
1025     assert((TstTy.isScalar() ||
1026             (TstTy.isVector() &&
1027              TstTy.getNumElements() == Op0Ty.getNumElements())) &&
1028            "type mismatch");
1029 #endif
1030 }
1031 
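// Generic buildInstr: for opcodes handled below, check operand counts and
// validate the LLT types, folding a few trivial cases (e.g. a single-source
// G_MERGE_VALUES becomes a cast) before emitting the instruction and applying
// any requested flags.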
1032 MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
1033                                                  ArrayRef<DstOp> DstOps,
1034                                                  ArrayRef<SrcOp> SrcOps,
1035                                                  Optional<unsigned> Flags) {
1036   switch (Opc) {
1037   default:
1038     break;
1039   case TargetOpcode::G_SELECT: {
1040     assert(DstOps.size() == 1 && "Invalid select");
1041     assert(SrcOps.size() == 3 && "Invalid select");
1042     validateSelectOp(
1043         DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
1044         SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
1045     break;
1046   }
1047   case TargetOpcode::G_FNEG:
1048   case TargetOpcode::G_ABS:
1049     // All these are unary ops.
1050     assert(DstOps.size() == 1 && "Invalid Dst");
1051     assert(SrcOps.size() == 1 && "Invalid Srcs");
1052     validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
1053                     SrcOps[0].getLLTTy(*getMRI()));
1054     break;
1055   case TargetOpcode::G_ADD:
1056   case TargetOpcode::G_AND:
1057   case TargetOpcode::G_MUL:
1058   case TargetOpcode::G_OR:
1059   case TargetOpcode::G_SUB:
1060   case TargetOpcode::G_XOR:
1061   case TargetOpcode::G_UDIV:
1062   case TargetOpcode::G_SDIV:
1063   case TargetOpcode::G_UREM:
1064   case TargetOpcode::G_SREM:
1065   case TargetOpcode::G_SMIN:
1066   case TargetOpcode::G_SMAX:
1067   case TargetOpcode::G_UMIN:
1068   case TargetOpcode::G_UMAX:
1069   case TargetOpcode::G_UADDSAT:
1070   case TargetOpcode::G_SADDSAT:
1071   case TargetOpcode::G_USUBSAT:
1072   case TargetOpcode::G_SSUBSAT: {
1073     // All these are binary ops.
1074     assert(DstOps.size() == 1 && "Invalid Dst");
1075     assert(SrcOps.size() == 2 && "Invalid Srcs");
1076     validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
1077                      SrcOps[0].getLLTTy(*getMRI()),
1078                      SrcOps[1].getLLTTy(*getMRI()));
1079     break;
1080   }
1081   case TargetOpcode::G_SHL:
1082   case TargetOpcode::G_ASHR:
1083   case TargetOpcode::G_LSHR:
1084   case TargetOpcode::G_USHLSAT:
1085   case TargetOpcode::G_SSHLSAT: {
1086     assert(DstOps.size() == 1 && "Invalid Dst");
1087     assert(SrcOps.size() == 2 && "Invalid Srcs");
1088     validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
1089                     SrcOps[0].getLLTTy(*getMRI()),
1090                     SrcOps[1].getLLTTy(*getMRI()));
1091     break;
1092   }
1093   case TargetOpcode::G_SEXT:
1094   case TargetOpcode::G_ZEXT:
1095   case TargetOpcode::G_ANYEXT:
1096     assert(DstOps.size() == 1 && "Invalid Dst");
1097     assert(SrcOps.size() == 1 && "Invalid Srcs");
1098     validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
1099                      SrcOps[0].getLLTTy(*getMRI()), true);
1100     break;
1101   case TargetOpcode::G_TRUNC:
1102   case TargetOpcode::G_FPTRUNC: {
1103     assert(DstOps.size() == 1 && "Invalid Dst");
1104     assert(SrcOps.size() == 1 && "Invalid Srcs");
1105     validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
1106                      SrcOps[0].getLLTTy(*getMRI()), false);
1107     break;
1108   }
1109   case TargetOpcode::G_BITCAST: {
1110     assert(DstOps.size() == 1 && "Invalid Dst");
1111     assert(SrcOps.size() == 1 && "Invalid Srcs");
1112     assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1113            SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
1114     break;
1115   }
1116   case TargetOpcode::COPY:
1117     assert(DstOps.size() == 1 && "Invalid Dst");
1118     // If the caller wants to add a subreg source it has to be done separately
1119     // so we may not have any SrcOps at this point yet.
1120     break;
1121   case TargetOpcode::G_FCMP:
1122   case TargetOpcode::G_ICMP: {
1123     assert(DstOps.size() == 1 && "Invalid Dst Operands");
1124     assert(SrcOps.size() == 3 && "Invalid Src Operands");
1125     // For F/ICMP, the first src operand is the predicate, followed by
1126     // the two comparands.
1127     assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
1128            "Expecting predicate");
1129     assert([&]() -> bool {
1130       CmpInst::Predicate Pred = SrcOps[0].getPredicate();
1131       return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
1132                                          : CmpInst::isFPPredicate(Pred);
1133     }() && "Invalid predicate");
1134     assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
1135            "Type mismatch");
1136     assert([&]() -> bool {
1137       LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
1138       LLT DstTy = DstOps[0].getLLTTy(*getMRI());
1139       if (Op0Ty.isScalar() || Op0Ty.isPointer())
1140         return DstTy.isScalar();
1141       else
1142         return DstTy.isVector() &&
1143                DstTy.getNumElements() == Op0Ty.getNumElements();
1144     }() && "Type Mismatch");
1145     break;
1146   }
1147   case TargetOpcode::G_UNMERGE_VALUES: {
1148     assert(!DstOps.empty() && "Invalid trivial sequence");
1149     assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
1150     assert(llvm::all_of(DstOps,
1151                         [&, this](const DstOp &Op) {
1152                           return Op.getLLTTy(*getMRI()) ==
1153                                  DstOps[0].getLLTTy(*getMRI());
1154                         }) &&
1155            "type mismatch in output list");
1156     assert((TypeSize::ScalarTy)DstOps.size() *
1157                    DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1158                SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "output operands do not cover input register");
1160     break;
1161   }
1162   case TargetOpcode::G_MERGE_VALUES: {
1163     assert(!SrcOps.empty() && "invalid trivial sequence");
1164     assert(DstOps.size() == 1 && "Invalid Dst");
1165     assert(llvm::all_of(SrcOps,
1166                         [&, this](const SrcOp &Op) {
1167                           return Op.getLLTTy(*getMRI()) ==
1168                                  SrcOps[0].getLLTTy(*getMRI());
1169                         }) &&
1170            "type mismatch in input list");
1171     assert((TypeSize::ScalarTy)SrcOps.size() *
1172                    SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1173                DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1174            "input operands do not cover output register");
1175     if (SrcOps.size() == 1)
1176       return buildCast(DstOps[0], SrcOps[0]);
1177     if (DstOps[0].getLLTTy(*getMRI()).isVector()) {
1178       if (SrcOps[0].getLLTTy(*getMRI()).isVector())
1179         return buildInstr(TargetOpcode::G_CONCAT_VECTORS, DstOps, SrcOps);
1180       return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
1181     }
1182     break;
1183   }
1184   case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1185     assert(DstOps.size() == 1 && "Invalid Dst size");
1186     assert(SrcOps.size() == 2 && "Invalid Src size");
1187     assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
1188     assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
1189             DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
1190            "Invalid operand type");
1191     assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
1192     assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
1193                DstOps[0].getLLTTy(*getMRI()) &&
1194            "Type mismatch");
1195     break;
1196   }
1197   case TargetOpcode::G_INSERT_VECTOR_ELT: {
1198     assert(DstOps.size() == 1 && "Invalid dst size");
1199     assert(SrcOps.size() == 3 && "Invalid src size");
1200     assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1201            SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
1202     assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
1203                SrcOps[1].getLLTTy(*getMRI()) &&
1204            "Type mismatch");
1205     assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
1206     assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
1207                SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
1208            "Type mismatch");
1209     break;
1210   }
1211   case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
1214     assert(DstOps.size() == 1 && "Invalid DstOps");
1215     assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1216            "Res type must be a vector");
1217     assert(llvm::all_of(SrcOps,
1218                         [&, this](const SrcOp &Op) {
1219                           return Op.getLLTTy(*getMRI()) ==
1220                                  SrcOps[0].getLLTTy(*getMRI());
1221                         }) &&
1222            "type mismatch in input list");
1223     assert((TypeSize::ScalarTy)SrcOps.size() *
1224                    SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1225                DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1226            "input scalars do not exactly cover the output vector register");
1227     break;
1228   }
1229   case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
1232     assert(DstOps.size() == 1 && "Invalid DstOps");
1233     assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1234            "Res type must be a vector");
1235     assert(llvm::all_of(SrcOps,
1236                         [&, this](const SrcOp &Op) {
1237                           return Op.getLLTTy(*getMRI()) ==
1238                                  SrcOps[0].getLLTTy(*getMRI());
1239                         }) &&
1240            "type mismatch in input list");
1241     if (SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1242         DstOps[0].getLLTTy(*getMRI()).getElementType().getSizeInBits())
1243       return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
1244     break;
1245   }
1246   case TargetOpcode::G_CONCAT_VECTORS: {
1247     assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
1250     assert(llvm::all_of(SrcOps,
1251                         [&, this](const SrcOp &Op) {
1252                           return (Op.getLLTTy(*getMRI()).isVector() &&
1253                                   Op.getLLTTy(*getMRI()) ==
1254                                       SrcOps[0].getLLTTy(*getMRI()));
1255                         }) &&
1256            "type mismatch in input list");
1257     assert((TypeSize::ScalarTy)SrcOps.size() *
1258                    SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1259                DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1260            "input vectors do not exactly cover the output vector register");
1261     break;
1262   }
1263   case TargetOpcode::G_UADDE: {
1264     assert(DstOps.size() == 2 && "Invalid no of dst operands");
1265     assert(SrcOps.size() == 3 && "Invalid no of src operands");
1266     assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
1267     assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
1268            (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
1269            "Invalid operand");
1270     assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
1271     assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
1272            "type mismatch");
1273     break;
1274   }
1275   }
1276 
1277   auto MIB = buildInstr(Opc);
1278   for (const DstOp &Op : DstOps)
1279     Op.addDefToMIB(*getMRI(), MIB);
1280   for (const SrcOp &Op : SrcOps)
1281     Op.addSrcToMIB(MIB);
1282   if (Flags)
1283     MIB->setFlags(*Flags);
1284   return MIB;
1285 }
1286