//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfo.h"

using namespace llvm;
void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}

void MachineIRBuilder::setMBB(MachineBasicBlock &MBB) {
  State.MBB = &MBB;
  State.II = MBB.end();
  assert(&getMF() == MBB.getParent() &&
         "Basic block is in a different function");
}

void MachineIRBuilder::setInstr(MachineInstr &MI) {
  assert(MI.getParent() && "Instruction is not part of a basic block");
  setMBB(*MI.getParent());
  State.II = MI.getIterator();
}

void MachineIRBuilder::setCSEInfo(GISelCSEInfo *Info) { State.CSEInfo = Info; }

void MachineIRBuilder::setInsertPt(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator II) {
  assert(MBB.getParent() == &getMF() &&
         "Basic block is in a different function");
  State.MBB = &MBB;
  State.II = II;
}

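// Illustrative usage sketch (not part of this file's API): a pass typically
// binds the builder to its function once, then repositions it per block or
// per instruction. `MF`, `MBB` and `MI` are assumed to come from the
// surrounding pass.
//
//   MachineIRBuilder MIRBuilder;
//   MIRBuilder.setMF(MF);                              // bind function state
//   MIRBuilder.setInsertPt(MBB, MBB.getFirstNonPHI()); // build after PHIs
//   MIRBuilder.setInstr(MI);                           // or: build before MI
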
void MachineIRBuilder::recordInsertion(MachineInstr *InsertedInstr) const {
  if (State.Observer)
    State.Observer->createdInstr(*InsertedInstr);
}

void MachineIRBuilder::setChangeObserver(GISelChangeObserver &Observer) {
  State.Observer = &Observer;
}

void MachineIRBuilder::stopObservingChanges() { State.Observer = nullptr; }

//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opcode) {
  return insertInstr(buildInstrNoInsert(Opcode));
}

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  MachineInstrBuilder MIB = BuildMI(getMF(), getDL(), getTII().get(Opcode));
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}

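// Sketch of the deferred-insertion pattern these hooks enable (names assumed
// from context): build an instruction without placing it, finish its
// operands, then insert it so the change observer is notified exactly once.
//
//   auto MIB = MIRBuilder.buildInstrNoInsert(TargetOpcode::G_BR);
//   MIB.addMBB(&TargetMBB);      // flesh out operands first
//   MIRBuilder.insertInstr(MIB); // placed at the insert point; observer fires
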
MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(unsigned Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(unsigned Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return buildInstr(TargetOpcode::DBG_VALUE)
      .addFrameIndex(FI)
      .addImm(0)
      .addMetadata(Variable)
      .addMetadata(Expr);
}

MachineInstrBuilder
MachineIRBuilder::buildConstDbgValue(const Constant &C, const MDNode *Variable,
                                     const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_VALUE);
  if (auto *CI = dyn_cast<ConstantInt>(&C)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(&C)) {
    MIB.addFPImm(CFP);
  } else {
    // Insert %noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(0U);
  }

  return MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
}

MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

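// Rough sketch of how a caller such as the IRTranslator might drive these
// builders when lowering a dbg.value (`DI`, the IR intrinsic being
// translated, and `ValReg` are assumptions; the exact plumbing lives in the
// caller, not here):
//
//   MIRBuilder.setDebugLoc(DI.getDebugLoc());
//   MIRBuilder.buildDirectDbgValue(ValReg, DI.getVariable(),
//                                  DI.getExpression());
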
MachineInstrBuilder MachineIRBuilder::buildFrameIndex(unsigned Res, int Idx) {
  assert(getMRI()->getType(Res).isPointer() && "invalid operand type");
  return buildInstr(TargetOpcode::G_FRAME_INDEX)
      .addDef(Res)
      .addFrameIndex(Idx);
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(unsigned Res,
                                                       const GlobalValue *GV) {
  assert(getMRI()->getType(Res).isPointer() && "invalid operand type");
  assert(getMRI()->getType(Res).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  return buildInstr(TargetOpcode::G_GLOBAL_VALUE)
      .addDef(Res)
      .addGlobalAddress(GV);
}

void MachineIRBuilder::validateBinaryOp(const LLT &Res, const LLT &Op0,
                                        const LLT &Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

MachineInstrBuilder MachineIRBuilder::buildGEP(unsigned Res, unsigned Op0,
                                               unsigned Op1) {
  assert(getMRI()->getType(Res).isPointer() &&
         getMRI()->getType(Res) == getMRI()->getType(Op0) && "type mismatch");
  assert(getMRI()->getType(Op1).isScalar() && "invalid offset type");

  return buildInstr(TargetOpcode::G_GEP)
      .addDef(Res)
      .addUse(Op0)
      .addUse(Op1);
}

Optional<MachineInstrBuilder>
MachineIRBuilder::materializeGEP(unsigned &Res, unsigned Op0,
                                 const LLT &ValueTy, uint64_t Value) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return None;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  unsigned TmpReg = getMRI()->createGenericVirtualRegister(ValueTy);

  buildConstant(TmpReg, Value);
  return buildGEP(Res, Op0, TmpReg);
}

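// Sketch of the materializeGEP calling convention (register names assumed):
// `Res` is an out-parameter that must come in as 0; on a zero offset it
// aliases `Op0` and no instruction is emitted, hence the Optional<> result.
//
//   unsigned DstPtr = 0;
//   if (auto GEPInst = MIRBuilder.materializeGEP(DstPtr, BasePtr,
//                                                LLT::scalar(64), Offset)) {
//     // *GEPInst is the freshly built G_GEP defining DstPtr.
//   } // else DstPtr == BasePtr and nothing was inserted.
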
MachineInstrBuilder MachineIRBuilder::buildPtrMask(unsigned Res, unsigned Op0,
                                                   uint32_t NumBits) {
  assert(getMRI()->getType(Res).isPointer() &&
         getMRI()->getType(Res) == getMRI()->getType(Op0) && "type mismatch");

  return buildInstr(TargetOpcode::G_PTR_MASK)
      .addDef(Res)
      .addUse(Op0)
      .addImm(NumBits);
}

MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(unsigned Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());

  assert((Ty.isScalar() || Ty.isPointer()) && "invalid operand type");

  const ConstantInt *NewVal = &Val;
  if (Ty.getSizeInBits() != Val.getBitWidth())
    NewVal = ConstantInt::get(getMF().getFunction().getContext(),
                              Val.getValue().sextOrTrunc(Ty.getSizeInBits()));

  auto MIB = buildInstr(TargetOpcode::G_CONSTANT);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addCImm(NewVal);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}

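// Example (illustrative): requesting a constant by LLT lets the builder
// create the destination register itself; the value is sign-extended or
// truncated to the requested width by the ConstantInt overload above.
//
//   auto Cst = MIRBuilder.buildConstant(LLT::scalar(32), -1); // G_CONSTANT
//   unsigned CstReg = Cst->getOperand(0).getReg();            // the def
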
MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  assert(Res.getLLTTy(*getMRI()).isScalar() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_FCONSTANT);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFPImm(&Val);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP =
      ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildBrCond(unsigned Tst,
                                                  MachineBasicBlock &Dest) {
  assert(getMRI()->getType(Tst).isScalar() && "invalid operand type");

  return buildInstr(TargetOpcode::G_BRCOND).addUse(Tst).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildLoad(unsigned Res, unsigned Addr,
                                                MachineMemOperand &MMO) {
  return buildLoadInstr(TargetOpcode::G_LOAD, Res, Addr, MMO);
}

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     unsigned Res,
                                                     unsigned Addr,
                                                     MachineMemOperand &MMO) {
  assert(getMRI()->getType(Res).isValid() && "invalid operand type");
  assert(getMRI()->getType(Addr).isPointer() && "invalid operand type");

  return buildInstr(Opcode)
      .addDef(Res)
      .addUse(Addr)
      .addMemOperand(&MMO);
}

MachineInstrBuilder MachineIRBuilder::buildStore(unsigned Val, unsigned Addr,
                                                 MachineMemOperand &MMO) {
  assert(getMRI()->getType(Val).isValid() && "invalid operand type");
  assert(getMRI()->getType(Addr).isPointer() && "invalid operand type");

  return buildInstr(TargetOpcode::G_STORE)
      .addUse(Val)
      .addUse(Addr)
      .addMemOperand(&MMO);
}

MachineInstrBuilder MachineIRBuilder::buildUAdde(const DstOp &Res,
                                                 const DstOp &CarryOut,
                                                 const SrcOp &Op0,
                                                 const SrcOp &Op1,
                                                 const SrcOp &CarryIn) {
  return buildInstr(TargetOpcode::G_UADDE, {Res, CarryOut},
                    {Op0, Op1, CarryIn});
}

MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}

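// buildExtOrTrunc picks the opcode from the relative sizes, e.g. (sketch,
// register names assumed):
//
//   MIRBuilder.buildExtOrTrunc(TargetOpcode::G_SEXT, Dst32, Src16); // G_SEXT
//   MIRBuilder.buildExtOrTrunc(TargetOpcode::G_SEXT, Dst32, Src64); // G_TRUNC
//   MIRBuilder.buildExtOrTrunc(TargetOpcode::G_SEXT, Dst32, Src32); // COPY
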
MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointer() && DstTy.isScalar())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointer() && SrcTy.isScalar())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointer() && !DstTy.isPointer() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}

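// The cast opcode is inferred from the operand kinds, e.g. (sketch, with
// P0 a pointer-typed register, S64 an s64 register, and V2S32 a <2 x s32>
// register):
//
//   MIRBuilder.buildCast(S64, P0);    // pointer -> scalar: G_PTRTOINT
//   MIRBuilder.buildCast(P0, S64);    // scalar -> pointer: G_INTTOPTR
//   MIRBuilder.buildCast(S64, V2S32); // same-sized non-pointers: G_BITCAST
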
MachineInstrBuilder MachineIRBuilder::buildExtract(unsigned Res, unsigned Src,
                                                   uint64_t Index) {
#ifndef NDEBUG
  assert(getMRI()->getType(Src).isValid() && "invalid operand type");
  assert(getMRI()->getType(Res).isValid() && "invalid operand type");
  assert(Index + getMRI()->getType(Res).getSizeInBits() <=
             getMRI()->getType(Src).getSizeInBits() &&
         "extracting off end of register");
#endif

  if (getMRI()->getType(Res).getSizeInBits() ==
      getMRI()->getType(Src).getSizeInBits()) {
    assert(Index == 0 && "extraction past the end of a register");
    return buildCast(Res, Src);
  }

  return buildInstr(TargetOpcode::G_EXTRACT)
      .addDef(Res)
      .addUse(Src)
      .addImm(Index);
}

void MachineIRBuilder::buildSequence(unsigned Res, ArrayRef<unsigned> Ops,
                                     ArrayRef<uint64_t> Indices) {
#ifndef NDEBUG
  assert(Ops.size() == Indices.size() && "incompatible args");
  assert(!Ops.empty() && "invalid trivial sequence");
  assert(std::is_sorted(Indices.begin(), Indices.end()) &&
         "sequence offsets must be in ascending order");

  assert(getMRI()->getType(Res).isValid() && "invalid operand type");
  for (auto Op : Ops)
    assert(getMRI()->getType(Op).isValid() && "invalid operand type");
#endif

  LLT ResTy = getMRI()->getType(Res);
  LLT OpTy = getMRI()->getType(Ops[0]);
  unsigned OpSize = OpTy.getSizeInBits();
  bool MaybeMerge = true;
  for (unsigned i = 0; i < Ops.size(); ++i) {
    if (getMRI()->getType(Ops[i]) != OpTy || Indices[i] != i * OpSize) {
      MaybeMerge = false;
      break;
    }
  }

  if (MaybeMerge && Ops.size() * OpSize == ResTy.getSizeInBits()) {
    buildMerge(Res, Ops);
    return;
  }

  unsigned ResIn = getMRI()->createGenericVirtualRegister(ResTy);
  buildUndef(ResIn);

  for (unsigned i = 0; i < Ops.size(); ++i) {
    unsigned ResOut = i + 1 == Ops.size()
                          ? Res
                          : getMRI()->createGenericVirtualRegister(ResTy);
    buildInsert(ResOut, ResIn, Ops[i], Indices[i]);
    ResIn = ResOut;
  }
}

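// Sketch of the two buildSequence strategies (register names assumed): equal
// pieces laid out back-to-back that exactly cover the result collapse to a
// single G_MERGE_VALUES; anything sparser becomes a G_IMPLICIT_DEF followed
// by a chain of G_INSERTs.
//
//   MIRBuilder.buildSequence(Dst64, {Lo32, Hi32}, {0, 32}); // -> buildMerge
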
MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res,
                                                 ArrayRef<unsigned> Ops) {
  // Unfortunately to convert from ArrayRef<unsigned> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<unsigned> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<unsigned> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

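// Round-trip example (sketch; registers assumed to have the s32/s64 types
// their names suggest): G_MERGE_VALUES glues scalar pieces together and
// G_UNMERGE_VALUES splits them back apart.
//
//   MIRBuilder.buildMerge(Full64, {Lo32, Hi32});          // 2 x s32 -> s64
//   MIRBuilder.buildUnmerge({NewLo32, NewHi32}, Full64);  // s64 -> 2 x s32
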
MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<unsigned> Ops) {
  // Unfortunately to convert from ArrayRef<unsigned> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<unsigned> Ops) {
  // Unfortunately to convert from ArrayRef<unsigned> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<unsigned> Ops) {
  // Unfortunately to convert from ArrayRef<unsigned> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(unsigned Res, unsigned Src,
                                                  unsigned Op, unsigned Index) {
  assert(Index + getMRI()->getType(Op).getSizeInBits() <=
             getMRI()->getType(Res).getSizeInBits() &&
         "insertion past the end of a register");

  if (getMRI()->getType(Res).getSizeInBits() ==
      getMRI()->getType(Op).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT)
      .addDef(Res)
      .addUse(Src)
      .addUse(Op)
      .addImm(Index);
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     unsigned Res,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  if (Res)
    MIB.addDef(Res);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
                                                 const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildFPTrunc(const DstOp &Res,
                                                   const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
}

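// Example (illustrative): a scalar integer compare typically produces an
// s1-typed result; the G_ICMP case of buildInstr below asserts that the
// destination shape matches the operands.
//
//   unsigned Dst = MRI.createGenericVirtualRegister(LLT::scalar(1));
//   MIRBuilder.buildICmp(CmpInst::ICMP_ULT, Dst, LHS, RHS);
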
MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildSelect(const DstOp &Res,
                                                  const SrcOp &Tst,
                                                  const SrcOp &Op0,
                                                  const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1});
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}

MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    unsigned OldValRes, unsigned SuccessRes, unsigned Addr, unsigned CmpVal,
    unsigned NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT SuccessResTy = getMRI()->getType(SuccessRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
      .addDef(OldValRes)
      .addDef(SuccessRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

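// Usage sketch (operand names assumed; the MMO flag/size/alignment values
// are illustrative): a cmpxchg needs a MachineMemOperand marked as both a
// load and a store.
//
//   MachineMemOperand *MMO = MF.getMachineMemOperand(
//       MachinePointerInfo(),
//       MachineMemOperand::MOLoad | MachineMemOperand::MOStore, 8, 8);
//   MIRBuilder.buildAtomicCmpXchgWithSuccess(OldVal, Success, Addr, CmpVal,
//                                            NewVal, *MMO);
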
MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(unsigned OldValRes, unsigned Addr,
                                     unsigned CmpVal, unsigned NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
      .addDef(OldValRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(unsigned Opcode,
                                                     unsigned OldValRes,
                                                     unsigned Addr,
                                                     unsigned Val,
                                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT ValTy = getMRI()->getType(Val);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
#endif

  return buildInstr(Opcode)
      .addDef(OldValRes)
      .addUse(Addr)
      .addUse(Val)
      .addMemOperand(&MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(unsigned OldValRes, unsigned Addr,
                                     unsigned Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(unsigned OldValRes, unsigned Addr,
                                    unsigned Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(unsigned OldValRes, unsigned Addr,
                                    unsigned Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(unsigned OldValRes, unsigned Addr,
                                    unsigned Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(unsigned OldValRes, unsigned Addr,
                                     unsigned Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(unsigned OldValRes,
                                                       unsigned Addr,
                                                       unsigned Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(unsigned OldValRes, unsigned Addr,
                                    unsigned Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(unsigned OldValRes, unsigned Addr,
                                    unsigned Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(unsigned OldValRes, unsigned Addr,
                                    unsigned Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(unsigned OldValRes, unsigned Addr,
                                     unsigned Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(unsigned OldValRes, unsigned Addr,
                                     unsigned Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}

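// Example (sketch, operands assumed to be set up as for cmpxchg above): all
// of the G_ATOMICRMW_* helpers funnel through buildAtomicRMW and share its
// operand layout.
//
//   MIRBuilder.buildAtomicRMWAdd(OldVal, Addr, IncrReg, *MMO); // fetch-add
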
MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(unsigned Res, const BlockAddress *BA) {
#ifndef NDEBUG
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
#endif

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}

void MachineIRBuilder::validateTruncExt(const LLT &DstTy, const LLT &SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
           "invalid narrowing extend");
  else
    assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
           "invalid widening trunc");
#endif
}

void MachineIRBuilder::validateSelectOp(const LLT &ResTy, const LLT &TstTy,
                                        const LLT &Op0Ty, const LLT &Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getNumElements() == Op0Ty.getNumElements())) &&
           "type mismatch");
#endif
}

MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
                                                 ArrayRef<DstOp> DstOps,
                                                 ArrayRef<SrcOp> SrcOps,
                                                 Optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()) == LLT() ||
           SrcOps[0].getLLTTy(*getMRI()) == LLT() ||
           DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI()));
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getNumElements() == Op0Ty.getNumElements();
    }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(std::all_of(DstOps.begin(), DstOps.end(),
                       [&, this](const DstOp &Op) {
                         return Op.getLLTTy(*getMRI()) ==
                                DstOps[0].getLLTTy(*getMRI());
                       }) &&
           "type mismatch in output list");
    assert(DstOps.size() * DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "output operands do not cover input register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(!SrcOps.empty() && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(std::all_of(SrcOps.begin(), SrcOps.end(),
                       [&, this](const SrcOp &Op) {
                         return Op.getLLTTy(*getMRI()) ==
                                SrcOps[0].getLLTTy(*getMRI());
                       }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    if (SrcOps.size() == 1)
      return buildCast(DstOps[0], SrcOps[0]);
    if (DstOps[0].getLLTTy(*getMRI()).isVector())
      return buildInstr(TargetOpcode::G_CONCAT_VECTORS, DstOps, SrcOps);
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
               SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(std::all_of(SrcOps.begin(), SrcOps.end(),
                       [&, this](const SrcOp &Op) {
                         return Op.getLLTTy(*getMRI()) ==
                                SrcOps[0].getLLTTy(*getMRI());
                       }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(std::all_of(SrcOps.begin(), SrcOps.end(),
                       [&, this](const SrcOp &Op) {
                         return Op.getLLTTy(*getMRI()) ==
                                SrcOps[0].getLLTTy(*getMRI());
                       }) &&
           "type mismatch in input list");
    if (SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
        DstOps[0].getLLTTy(*getMRI()).getElementType().getSizeInBits())
      return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(std::all_of(SrcOps.begin(), SrcOps.end(),
                       [&, this](const SrcOp &Op) {
                         return (Op.getLLTTy(*getMRI()).isVector() &&
                                 Op.getLLTTy(*getMRI()) ==
                                     SrcOps[0].getLLTTy(*getMRI()));
                       }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}

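// Example (illustrative): this variadic overload is the common entry point
// for simple generic instructions; the per-opcode cases above only validate
// operand types in asserts builds before the instruction is emitted.
//
//   MIRBuilder.buildInstr(TargetOpcode::G_AND, {DstReg}, {LHSReg, RHSReg});
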