1 //===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 /// \file 9 /// This file implements the MachineIRBuidler class. 10 //===----------------------------------------------------------------------===// 11 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" 12 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" 13 14 #include "llvm/CodeGen/MachineFunction.h" 15 #include "llvm/CodeGen/MachineInstr.h" 16 #include "llvm/CodeGen/MachineInstrBuilder.h" 17 #include "llvm/CodeGen/MachineRegisterInfo.h" 18 #include "llvm/CodeGen/TargetInstrInfo.h" 19 #include "llvm/CodeGen/TargetLowering.h" 20 #include "llvm/CodeGen/TargetOpcodes.h" 21 #include "llvm/CodeGen/TargetSubtargetInfo.h" 22 #include "llvm/IR/DebugInfo.h" 23 24 using namespace llvm; 25 26 void MachineIRBuilder::setMF(MachineFunction &MF) { 27 State.MF = &MF; 28 State.MBB = nullptr; 29 State.MRI = &MF.getRegInfo(); 30 State.TII = MF.getSubtarget().getInstrInfo(); 31 State.DL = DebugLoc(); 32 State.II = MachineBasicBlock::iterator(); 33 State.Observer = nullptr; 34 } 35 36 void MachineIRBuilder::setMBB(MachineBasicBlock &MBB) { 37 State.MBB = &MBB; 38 State.II = MBB.end(); 39 assert(&getMF() == MBB.getParent() && 40 "Basic block is in a different function"); 41 } 42 43 void MachineIRBuilder::setInstr(MachineInstr &MI) { 44 assert(MI.getParent() && "Instruction is not part of a basic block"); 45 setMBB(*MI.getParent()); 46 State.II = MI.getIterator(); 47 } 48 49 void MachineIRBuilder::setCSEInfo(GISelCSEInfo *Info) { State.CSEInfo = Info; } 50 51 void MachineIRBuilder::setInsertPt(MachineBasicBlock &MBB, 52 MachineBasicBlock::iterator II) { 53 assert(MBB.getParent() == &getMF() && 54 "Basic block is in a different function"); 55 State.MBB = &MBB; 56 State.II = II; 57 } 58 59 void MachineIRBuilder::recordInsertion(MachineInstr *InsertedInstr) const { 60 if (State.Observer) 61 State.Observer->createdInstr(*InsertedInstr); 62 } 63 64 void MachineIRBuilder::setChangeObserver(GISelChangeObserver &Observer) { 65 State.Observer = &Observer; 66 } 67 68 void MachineIRBuilder::stopObservingChanges() { State.Observer = nullptr; } 69 70 //------------------------------------------------------------------------------ 71 // Build instruction variants. 72 //------------------------------------------------------------------------------ 73 74 MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opcode) { 75 return insertInstr(buildInstrNoInsert(Opcode)); 76 } 77 78 MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) { 79 MachineInstrBuilder MIB = BuildMI(getMF(), getDL(), getTII().get(Opcode)); 80 return MIB; 81 } 82 83 MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) { 84 getMBB().insert(getInsertPt(), MIB); 85 recordInsertion(MIB); 86 return MIB; 87 } 88 89 MachineInstrBuilder 90 MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable, 91 const MDNode *Expr) { 92 assert(isa<DILocalVariable>(Variable) && "not a variable"); 93 assert(cast<DIExpression>(Expr)->isValid() && "not an expression"); 94 assert( 95 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) && 96 "Expected inlined-at fields to agree"); 97 return insertInstr(BuildMI(getMF(), getDL(), 98 getTII().get(TargetOpcode::DBG_VALUE), 99 /*IsIndirect*/ false, Reg, Variable, Expr)); 100 } 101 102 MachineInstrBuilder 103 MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable, 104 const MDNode *Expr) { 105 assert(isa<DILocalVariable>(Variable) && "not a variable"); 106 assert(cast<DIExpression>(Expr)->isValid() && "not an expression"); 107 assert( 108 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) && 109 "Expected inlined-at fields to agree"); 110 return insertInstr(BuildMI(getMF(), getDL(), 111 getTII().get(TargetOpcode::DBG_VALUE), 112 /*IsIndirect*/ true, Reg, Variable, Expr)); 113 } 114 115 MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI, 116 const MDNode *Variable, 117 const MDNode *Expr) { 118 assert(isa<DILocalVariable>(Variable) && "not a variable"); 119 assert(cast<DIExpression>(Expr)->isValid() && "not an expression"); 120 assert( 121 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) && 122 "Expected inlined-at fields to agree"); 123 return buildInstr(TargetOpcode::DBG_VALUE) 124 .addFrameIndex(FI) 125 .addImm(0) 126 .addMetadata(Variable) 127 .addMetadata(Expr); 128 } 129 130 MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C, 131 const MDNode *Variable, 132 const MDNode *Expr) { 133 assert(isa<DILocalVariable>(Variable) && "not a variable"); 134 assert(cast<DIExpression>(Expr)->isValid() && "not an expression"); 135 assert( 136 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) && 137 "Expected inlined-at fields to agree"); 138 auto MIB = buildInstr(TargetOpcode::DBG_VALUE); 139 if (auto *CI = dyn_cast<ConstantInt>(&C)) { 140 if (CI->getBitWidth() > 64) 141 MIB.addCImm(CI); 142 else 143 MIB.addImm(CI->getZExtValue()); 144 } else if (auto *CFP = dyn_cast<ConstantFP>(&C)) { 145 MIB.addFPImm(CFP); 146 } else { 147 // Insert %noreg if we didn't find a usable constant and had to drop it. 148 MIB.addReg(0U); 149 } 150 151 return MIB.addImm(0).addMetadata(Variable).addMetadata(Expr); 152 } 153 154 MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) { 155 assert(isa<DILabel>(Label) && "not a label"); 156 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) && 157 "Expected inlined-at fields to agree"); 158 auto MIB = buildInstr(TargetOpcode::DBG_LABEL); 159 160 return MIB.addMetadata(Label); 161 } 162 163 MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res, 164 const SrcOp &Size, 165 unsigned Align) { 166 assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type"); 167 auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC); 168 Res.addDefToMIB(*getMRI(), MIB); 169 Size.addSrcToMIB(MIB); 170 MIB.addImm(Align); 171 return MIB; 172 } 173 174 MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res, 175 int Idx) { 176 assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type"); 177 auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX); 178 Res.addDefToMIB(*getMRI(), MIB); 179 MIB.addFrameIndex(Idx); 180 return MIB; 181 } 182 183 MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res, 184 const GlobalValue *GV) { 185 assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type"); 186 assert(Res.getLLTTy(*getMRI()).getAddressSpace() == 187 GV->getType()->getAddressSpace() && 188 "address space mismatch"); 189 190 auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE); 191 Res.addDefToMIB(*getMRI(), MIB); 192 MIB.addGlobalAddress(GV); 193 return MIB; 194 } 195 196 MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy, 197 unsigned JTI) { 198 return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {}) 199 .addJumpTableIndex(JTI); 200 } 201 202 void MachineIRBuilder::validateBinaryOp(const LLT &Res, const LLT &Op0, 203 const LLT &Op1) { 204 assert((Res.isScalar() || Res.isVector()) && "invalid operand type"); 205 assert((Res == Op0 && Res == Op1) && "type mismatch"); 206 } 207 208 void MachineIRBuilder::validateShiftOp(const LLT &Res, const LLT &Op0, 209 const LLT &Op1) { 210 assert((Res.isScalar() || Res.isVector()) && "invalid operand type"); 211 assert((Res == Op0) && "type mismatch"); 212 } 213 214 MachineInstrBuilder MachineIRBuilder::buildGEP(const DstOp &Res, 215 const SrcOp &Op0, 216 const SrcOp &Op1) { 217 assert(Res.getLLTTy(*getMRI()).isPointer() && 218 Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch"); 219 assert(Op1.getLLTTy(*getMRI()).isScalar() && "invalid offset type"); 220 221 return buildInstr(TargetOpcode::G_GEP, {Res}, {Op0, Op1}); 222 } 223 224 Optional<MachineInstrBuilder> 225 MachineIRBuilder::materializeGEP(Register &Res, Register Op0, 226 const LLT &ValueTy, uint64_t Value) { 227 assert(Res == 0 && "Res is a result argument"); 228 assert(ValueTy.isScalar() && "invalid offset type"); 229 230 if (Value == 0) { 231 Res = Op0; 232 return None; 233 } 234 235 Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0)); 236 auto Cst = buildConstant(ValueTy, Value); 237 return buildGEP(Res, Op0, Cst.getReg(0)); 238 } 239 240 MachineInstrBuilder MachineIRBuilder::buildPtrMask(const DstOp &Res, 241 const SrcOp &Op0, 242 uint32_t NumBits) { 243 assert(Res.getLLTTy(*getMRI()).isPointer() && 244 Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch"); 245 246 auto MIB = buildInstr(TargetOpcode::G_PTR_MASK); 247 Res.addDefToMIB(*getMRI(), MIB); 248 Op0.addSrcToMIB(MIB); 249 MIB.addImm(NumBits); 250 return MIB; 251 } 252 253 MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) { 254 return buildInstr(TargetOpcode::G_BR).addMBB(&Dest); 255 } 256 257 MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) { 258 assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination"); 259 return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt); 260 } 261 262 MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr, 263 unsigned JTI, 264 Register IndexReg) { 265 assert(getMRI()->getType(TablePtr).isPointer() && 266 "Table reg must be a pointer"); 267 return buildInstr(TargetOpcode::G_BRJT) 268 .addUse(TablePtr) 269 .addJumpTableIndex(JTI) 270 .addUse(IndexReg); 271 } 272 273 MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res, 274 const SrcOp &Op) { 275 return buildInstr(TargetOpcode::COPY, Res, Op); 276 } 277 278 MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res, 279 const ConstantInt &Val) { 280 LLT Ty = Res.getLLTTy(*getMRI()); 281 LLT EltTy = Ty.getScalarType(); 282 assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() && 283 "creating constant with the wrong size"); 284 285 if (Ty.isVector()) { 286 auto Const = buildInstr(TargetOpcode::G_CONSTANT) 287 .addDef(getMRI()->createGenericVirtualRegister(EltTy)) 288 .addCImm(&Val); 289 return buildSplatVector(Res, Const); 290 } 291 292 auto Const = buildInstr(TargetOpcode::G_CONSTANT); 293 Res.addDefToMIB(*getMRI(), Const); 294 Const.addCImm(&Val); 295 return Const; 296 } 297 298 MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res, 299 int64_t Val) { 300 auto IntN = IntegerType::get(getMF().getFunction().getContext(), 301 Res.getLLTTy(*getMRI()).getScalarSizeInBits()); 302 ConstantInt *CI = ConstantInt::get(IntN, Val, true); 303 return buildConstant(Res, *CI); 304 } 305 306 MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res, 307 const ConstantFP &Val) { 308 LLT Ty = Res.getLLTTy(*getMRI()); 309 LLT EltTy = Ty.getScalarType(); 310 311 assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics()) 312 == EltTy.getSizeInBits() && 313 "creating fconstant with the wrong size"); 314 315 assert(!Ty.isPointer() && "invalid operand type"); 316 317 if (Ty.isVector()) { 318 auto Const = buildInstr(TargetOpcode::G_FCONSTANT) 319 .addDef(getMRI()->createGenericVirtualRegister(EltTy)) 320 .addFPImm(&Val); 321 322 return buildSplatVector(Res, Const); 323 } 324 325 auto Const = buildInstr(TargetOpcode::G_FCONSTANT); 326 Res.addDefToMIB(*getMRI(), Const); 327 Const.addFPImm(&Val); 328 return Const; 329 } 330 331 MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res, 332 const APInt &Val) { 333 ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val); 334 return buildConstant(Res, *CI); 335 } 336 337 MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res, 338 double Val) { 339 LLT DstTy = Res.getLLTTy(*getMRI()); 340 auto &Ctx = getMF().getFunction().getContext(); 341 auto *CFP = 342 ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits())); 343 return buildFConstant(Res, *CFP); 344 } 345 346 MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res, 347 const APFloat &Val) { 348 auto &Ctx = getMF().getFunction().getContext(); 349 auto *CFP = ConstantFP::get(Ctx, Val); 350 return buildFConstant(Res, *CFP); 351 } 352 353 MachineInstrBuilder MachineIRBuilder::buildBrCond(Register Tst, 354 MachineBasicBlock &Dest) { 355 assert(getMRI()->getType(Tst).isScalar() && "invalid operand type"); 356 357 return buildInstr(TargetOpcode::G_BRCOND).addUse(Tst).addMBB(&Dest); 358 } 359 360 MachineInstrBuilder MachineIRBuilder::buildLoad(const DstOp &Res, 361 const SrcOp &Addr, 362 MachineMemOperand &MMO) { 363 return buildLoadInstr(TargetOpcode::G_LOAD, Res, Addr, MMO); 364 } 365 366 MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode, 367 const DstOp &Res, 368 const SrcOp &Addr, 369 MachineMemOperand &MMO) { 370 assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type"); 371 assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type"); 372 373 auto MIB = buildInstr(Opcode); 374 Res.addDefToMIB(*getMRI(), MIB); 375 Addr.addSrcToMIB(MIB); 376 MIB.addMemOperand(&MMO); 377 return MIB; 378 } 379 380 MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val, 381 const SrcOp &Addr, 382 MachineMemOperand &MMO) { 383 assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type"); 384 assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type"); 385 386 auto MIB = buildInstr(TargetOpcode::G_STORE); 387 Val.addSrcToMIB(MIB); 388 Addr.addSrcToMIB(MIB); 389 MIB.addMemOperand(&MMO); 390 return MIB; 391 } 392 393 MachineInstrBuilder MachineIRBuilder::buildUAddo(const DstOp &Res, 394 const DstOp &CarryOut, 395 const SrcOp &Op0, 396 const SrcOp &Op1) { 397 return buildInstr(TargetOpcode::G_UADDO, {Res, CarryOut}, {Op0, Op1}); 398 } 399 400 MachineInstrBuilder MachineIRBuilder::buildUAdde(const DstOp &Res, 401 const DstOp &CarryOut, 402 const SrcOp &Op0, 403 const SrcOp &Op1, 404 const SrcOp &CarryIn) { 405 return buildInstr(TargetOpcode::G_UADDE, {Res, CarryOut}, 406 {Op0, Op1, CarryIn}); 407 } 408 409 MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res, 410 const SrcOp &Op) { 411 return buildInstr(TargetOpcode::G_ANYEXT, Res, Op); 412 } 413 414 MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res, 415 const SrcOp &Op) { 416 return buildInstr(TargetOpcode::G_SEXT, Res, Op); 417 } 418 419 MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res, 420 const SrcOp &Op) { 421 return buildInstr(TargetOpcode::G_ZEXT, Res, Op); 422 } 423 424 unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const { 425 const auto *TLI = getMF().getSubtarget().getTargetLowering(); 426 switch (TLI->getBooleanContents(IsVec, IsFP)) { 427 case TargetLoweringBase::ZeroOrNegativeOneBooleanContent: 428 return TargetOpcode::G_SEXT; 429 case TargetLoweringBase::ZeroOrOneBooleanContent: 430 return TargetOpcode::G_ZEXT; 431 default: 432 return TargetOpcode::G_ANYEXT; 433 } 434 } 435 436 MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res, 437 const SrcOp &Op, 438 bool IsFP) { 439 unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP); 440 return buildInstr(ExtOp, Res, Op); 441 } 442 443 MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc, 444 const DstOp &Res, 445 const SrcOp &Op) { 446 assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc || 447 TargetOpcode::G_SEXT == ExtOpc) && 448 "Expecting Extending Opc"); 449 assert(Res.getLLTTy(*getMRI()).isScalar() || 450 Res.getLLTTy(*getMRI()).isVector()); 451 assert(Res.getLLTTy(*getMRI()).isScalar() == 452 Op.getLLTTy(*getMRI()).isScalar()); 453 454 unsigned Opcode = TargetOpcode::COPY; 455 if (Res.getLLTTy(*getMRI()).getSizeInBits() > 456 Op.getLLTTy(*getMRI()).getSizeInBits()) 457 Opcode = ExtOpc; 458 else if (Res.getLLTTy(*getMRI()).getSizeInBits() < 459 Op.getLLTTy(*getMRI()).getSizeInBits()) 460 Opcode = TargetOpcode::G_TRUNC; 461 else 462 assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI())); 463 464 return buildInstr(Opcode, Res, Op); 465 } 466 467 MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res, 468 const SrcOp &Op) { 469 return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op); 470 } 471 472 MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res, 473 const SrcOp &Op) { 474 return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op); 475 } 476 477 MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res, 478 const SrcOp &Op) { 479 return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op); 480 } 481 482 MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst, 483 const SrcOp &Src) { 484 LLT SrcTy = Src.getLLTTy(*getMRI()); 485 LLT DstTy = Dst.getLLTTy(*getMRI()); 486 if (SrcTy == DstTy) 487 return buildCopy(Dst, Src); 488 489 unsigned Opcode; 490 if (SrcTy.isPointer() && DstTy.isScalar()) 491 Opcode = TargetOpcode::G_PTRTOINT; 492 else if (DstTy.isPointer() && SrcTy.isScalar()) 493 Opcode = TargetOpcode::G_INTTOPTR; 494 else { 495 assert(!SrcTy.isPointer() && !DstTy.isPointer() && "n G_ADDRCAST yet"); 496 Opcode = TargetOpcode::G_BITCAST; 497 } 498 499 return buildInstr(Opcode, Dst, Src); 500 } 501 502 MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst, 503 const SrcOp &Src, 504 uint64_t Index) { 505 LLT SrcTy = Src.getLLTTy(*getMRI()); 506 LLT DstTy = Dst.getLLTTy(*getMRI()); 507 508 #ifndef NDEBUG 509 assert(SrcTy.isValid() && "invalid operand type"); 510 assert(DstTy.isValid() && "invalid operand type"); 511 assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() && 512 "extracting off end of register"); 513 #endif 514 515 if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) { 516 assert(Index == 0 && "insertion past the end of a register"); 517 return buildCast(Dst, Src); 518 } 519 520 auto Extract = buildInstr(TargetOpcode::G_EXTRACT); 521 Dst.addDefToMIB(*getMRI(), Extract); 522 Src.addSrcToMIB(Extract); 523 Extract.addImm(Index); 524 return Extract; 525 } 526 527 void MachineIRBuilder::buildSequence(Register Res, ArrayRef<Register> Ops, 528 ArrayRef<uint64_t> Indices) { 529 #ifndef NDEBUG 530 assert(Ops.size() == Indices.size() && "incompatible args"); 531 assert(!Ops.empty() && "invalid trivial sequence"); 532 assert(std::is_sorted(Indices.begin(), Indices.end()) && 533 "sequence offsets must be in ascending order"); 534 535 assert(getMRI()->getType(Res).isValid() && "invalid operand type"); 536 for (auto Op : Ops) 537 assert(getMRI()->getType(Op).isValid() && "invalid operand type"); 538 #endif 539 540 LLT ResTy = getMRI()->getType(Res); 541 LLT OpTy = getMRI()->getType(Ops[0]); 542 unsigned OpSize = OpTy.getSizeInBits(); 543 bool MaybeMerge = true; 544 for (unsigned i = 0; i < Ops.size(); ++i) { 545 if (getMRI()->getType(Ops[i]) != OpTy || Indices[i] != i * OpSize) { 546 MaybeMerge = false; 547 break; 548 } 549 } 550 551 if (MaybeMerge && Ops.size() * OpSize == ResTy.getSizeInBits()) { 552 buildMerge(Res, Ops); 553 return; 554 } 555 556 Register ResIn = getMRI()->createGenericVirtualRegister(ResTy); 557 buildUndef(ResIn); 558 559 for (unsigned i = 0; i < Ops.size(); ++i) { 560 Register ResOut = i + 1 == Ops.size() 561 ? Res 562 : getMRI()->createGenericVirtualRegister(ResTy); 563 buildInsert(ResOut, ResIn, Ops[i], Indices[i]); 564 ResIn = ResOut; 565 } 566 } 567 568 MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) { 569 return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {}); 570 } 571 572 MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res, 573 ArrayRef<Register> Ops) { 574 // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<SrcOp>, 575 // we need some temporary storage for the DstOp objects. Here we use a 576 // sufficiently large SmallVector to not go through the heap. 577 SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end()); 578 assert(TmpVec.size() > 1); 579 return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec); 580 } 581 582 MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res, 583 const SrcOp &Op) { 584 // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>, 585 // we need some temporary storage for the DstOp objects. Here we use a 586 // sufficiently large SmallVector to not go through the heap. 587 SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end()); 588 assert(TmpVec.size() > 1); 589 return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op); 590 } 591 592 MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res, 593 const SrcOp &Op) { 594 unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits(); 595 SmallVector<Register, 8> TmpVec; 596 for (unsigned I = 0; I != NumReg; ++I) 597 TmpVec.push_back(getMRI()->createGenericVirtualRegister(Res)); 598 return buildUnmerge(TmpVec, Op); 599 } 600 601 MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res, 602 const SrcOp &Op) { 603 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>, 604 // we need some temporary storage for the DstOp objects. Here we use a 605 // sufficiently large SmallVector to not go through the heap. 606 SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end()); 607 assert(TmpVec.size() > 1); 608 return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op); 609 } 610 611 MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res, 612 ArrayRef<Register> Ops) { 613 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>, 614 // we need some temporary storage for the DstOp objects. Here we use a 615 // sufficiently large SmallVector to not go through the heap. 616 SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end()); 617 return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec); 618 } 619 620 MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res, 621 const SrcOp &Src) { 622 SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src); 623 return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec); 624 } 625 626 MachineInstrBuilder 627 MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res, 628 ArrayRef<Register> Ops) { 629 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>, 630 // we need some temporary storage for the DstOp objects. Here we use a 631 // sufficiently large SmallVector to not go through the heap. 632 SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end()); 633 return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec); 634 } 635 636 MachineInstrBuilder 637 MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) { 638 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>, 639 // we need some temporary storage for the DstOp objects. Here we use a 640 // sufficiently large SmallVector to not go through the heap. 641 SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end()); 642 return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec); 643 } 644 645 MachineInstrBuilder MachineIRBuilder::buildInsert(Register Res, Register Src, 646 Register Op, unsigned Index) { 647 assert(Index + getMRI()->getType(Op).getSizeInBits() <= 648 getMRI()->getType(Res).getSizeInBits() && 649 "insertion past the end of a register"); 650 651 if (getMRI()->getType(Res).getSizeInBits() == 652 getMRI()->getType(Op).getSizeInBits()) { 653 return buildCast(Res, Op); 654 } 655 656 return buildInstr(TargetOpcode::G_INSERT) 657 .addDef(Res) 658 .addUse(Src) 659 .addUse(Op) 660 .addImm(Index); 661 } 662 663 MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID, 664 ArrayRef<Register> ResultRegs, 665 bool HasSideEffects) { 666 auto MIB = 667 buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS 668 : TargetOpcode::G_INTRINSIC); 669 for (unsigned ResultReg : ResultRegs) 670 MIB.addDef(ResultReg); 671 MIB.addIntrinsicID(ID); 672 return MIB; 673 } 674 675 MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID, 676 ArrayRef<DstOp> Results, 677 bool HasSideEffects) { 678 auto MIB = 679 buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS 680 : TargetOpcode::G_INTRINSIC); 681 for (DstOp Result : Results) 682 Result.addDefToMIB(*getMRI(), MIB); 683 MIB.addIntrinsicID(ID); 684 return MIB; 685 } 686 687 MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res, 688 const SrcOp &Op) { 689 return buildInstr(TargetOpcode::G_TRUNC, Res, Op); 690 } 691 692 MachineInstrBuilder MachineIRBuilder::buildFPTrunc(const DstOp &Res, 693 const SrcOp &Op) { 694 return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op); 695 } 696 697 MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred, 698 const DstOp &Res, 699 const SrcOp &Op0, 700 const SrcOp &Op1) { 701 return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1}); 702 } 703 704 MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred, 705 const DstOp &Res, 706 const SrcOp &Op0, 707 const SrcOp &Op1, 708 Optional<unsigned> Flags) { 709 710 return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags); 711 } 712 713 MachineInstrBuilder MachineIRBuilder::buildSelect(const DstOp &Res, 714 const SrcOp &Tst, 715 const SrcOp &Op0, 716 const SrcOp &Op1, 717 Optional<unsigned> Flags) { 718 719 return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags); 720 } 721 722 MachineInstrBuilder 723 MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, 724 const SrcOp &Elt, const SrcOp &Idx) { 725 return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx}); 726 } 727 728 MachineInstrBuilder 729 MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, 730 const SrcOp &Idx) { 731 return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx}); 732 } 733 734 MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess( 735 Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal, 736 Register NewVal, MachineMemOperand &MMO) { 737 #ifndef NDEBUG 738 LLT OldValResTy = getMRI()->getType(OldValRes); 739 LLT SuccessResTy = getMRI()->getType(SuccessRes); 740 LLT AddrTy = getMRI()->getType(Addr); 741 LLT CmpValTy = getMRI()->getType(CmpVal); 742 LLT NewValTy = getMRI()->getType(NewVal); 743 assert(OldValResTy.isScalar() && "invalid operand type"); 744 assert(SuccessResTy.isScalar() && "invalid operand type"); 745 assert(AddrTy.isPointer() && "invalid operand type"); 746 assert(CmpValTy.isValid() && "invalid operand type"); 747 assert(NewValTy.isValid() && "invalid operand type"); 748 assert(OldValResTy == CmpValTy && "type mismatch"); 749 assert(OldValResTy == NewValTy && "type mismatch"); 750 #endif 751 752 return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS) 753 .addDef(OldValRes) 754 .addDef(SuccessRes) 755 .addUse(Addr) 756 .addUse(CmpVal) 757 .addUse(NewVal) 758 .addMemOperand(&MMO); 759 } 760 761 MachineInstrBuilder 762 MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr, 763 Register CmpVal, Register NewVal, 764 MachineMemOperand &MMO) { 765 #ifndef NDEBUG 766 LLT OldValResTy = getMRI()->getType(OldValRes); 767 LLT AddrTy = getMRI()->getType(Addr); 768 LLT CmpValTy = getMRI()->getType(CmpVal); 769 LLT NewValTy = getMRI()->getType(NewVal); 770 assert(OldValResTy.isScalar() && "invalid operand type"); 771 assert(AddrTy.isPointer() && "invalid operand type"); 772 assert(CmpValTy.isValid() && "invalid operand type"); 773 assert(NewValTy.isValid() && "invalid operand type"); 774 assert(OldValResTy == CmpValTy && "type mismatch"); 775 assert(OldValResTy == NewValTy && "type mismatch"); 776 #endif 777 778 return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG) 779 .addDef(OldValRes) 780 .addUse(Addr) 781 .addUse(CmpVal) 782 .addUse(NewVal) 783 .addMemOperand(&MMO); 784 } 785 786 MachineInstrBuilder MachineIRBuilder::buildAtomicRMW( 787 unsigned Opcode, const DstOp &OldValRes, 788 const SrcOp &Addr, const SrcOp &Val, 789 MachineMemOperand &MMO) { 790 791 #ifndef NDEBUG 792 LLT OldValResTy = OldValRes.getLLTTy(*getMRI()); 793 LLT AddrTy = Addr.getLLTTy(*getMRI()); 794 LLT ValTy = Val.getLLTTy(*getMRI()); 795 assert(OldValResTy.isScalar() && "invalid operand type"); 796 assert(AddrTy.isPointer() && "invalid operand type"); 797 assert(ValTy.isValid() && "invalid operand type"); 798 assert(OldValResTy == ValTy && "type mismatch"); 799 assert(MMO.isAtomic() && "not atomic mem operand"); 800 #endif 801 802 auto MIB = buildInstr(Opcode); 803 OldValRes.addDefToMIB(*getMRI(), MIB); 804 Addr.addSrcToMIB(MIB); 805 Val.addSrcToMIB(MIB); 806 MIB.addMemOperand(&MMO); 807 return MIB; 808 } 809 810 MachineInstrBuilder 811 MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr, 812 Register Val, MachineMemOperand &MMO) { 813 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val, 814 MMO); 815 } 816 MachineInstrBuilder 817 MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr, 818 Register Val, MachineMemOperand &MMO) { 819 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val, 820 MMO); 821 } 822 MachineInstrBuilder 823 MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr, 824 Register Val, MachineMemOperand &MMO) { 825 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val, 826 MMO); 827 } 828 MachineInstrBuilder 829 MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr, 830 Register Val, MachineMemOperand &MMO) { 831 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val, 832 MMO); 833 } 834 MachineInstrBuilder 835 MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr, 836 Register Val, MachineMemOperand &MMO) { 837 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val, 838 MMO); 839 } 840 MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes, 841 Register Addr, 842 Register Val, 843 MachineMemOperand &MMO) { 844 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val, 845 MMO); 846 } 847 MachineInstrBuilder 848 MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr, 849 Register Val, MachineMemOperand &MMO) { 850 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val, 851 MMO); 852 } 853 MachineInstrBuilder 854 MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr, 855 Register Val, MachineMemOperand &MMO) { 856 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val, 857 MMO); 858 } 859 MachineInstrBuilder 860 MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr, 861 Register Val, MachineMemOperand &MMO) { 862 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val, 863 MMO); 864 } 865 MachineInstrBuilder 866 MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr, 867 Register Val, MachineMemOperand &MMO) { 868 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val, 869 MMO); 870 } 871 MachineInstrBuilder 872 MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr, 873 Register Val, MachineMemOperand &MMO) { 874 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val, 875 MMO); 876 } 877 878 MachineInstrBuilder 879 MachineIRBuilder::buildAtomicRMWFAdd( 880 const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, 881 MachineMemOperand &MMO) { 882 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val, 883 MMO); 884 } 885 886 MachineInstrBuilder 887 MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, 888 MachineMemOperand &MMO) { 889 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val, 890 MMO); 891 } 892 893 MachineInstrBuilder 894 MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) { 895 return buildInstr(TargetOpcode::G_FENCE) 896 .addImm(Ordering) 897 .addImm(Scope); 898 } 899 900 MachineInstrBuilder 901 MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) { 902 #ifndef NDEBUG 903 assert(getMRI()->getType(Res).isPointer() && "invalid res type"); 904 #endif 905 906 return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA); 907 } 908 909 void MachineIRBuilder::validateTruncExt(const LLT &DstTy, const LLT &SrcTy, 910 bool IsExtend) { 911 #ifndef NDEBUG 912 if (DstTy.isVector()) { 913 assert(SrcTy.isVector() && "mismatched cast between vector and non-vector"); 914 assert(SrcTy.getNumElements() == DstTy.getNumElements() && 915 "different number of elements in a trunc/ext"); 916 } else 917 assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc"); 918 919 if (IsExtend) 920 assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() && 921 "invalid narrowing extend"); 922 else 923 assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() && 924 "invalid widening trunc"); 925 #endif 926 } 927 928 void MachineIRBuilder::validateSelectOp(const LLT &ResTy, const LLT &TstTy, 929 const LLT &Op0Ty, const LLT &Op1Ty) { 930 #ifndef NDEBUG 931 assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) && 932 "invalid operand type"); 933 assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch"); 934 if (ResTy.isScalar() || ResTy.isPointer()) 935 assert(TstTy.isScalar() && "type mismatch"); 936 else 937 assert((TstTy.isScalar() || 938 (TstTy.isVector() && 939 TstTy.getNumElements() == Op0Ty.getNumElements())) && 940 "type mismatch"); 941 #endif 942 } 943 944 MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc, 945 ArrayRef<DstOp> DstOps, 946 ArrayRef<SrcOp> SrcOps, 947 Optional<unsigned> Flags) { 948 switch (Opc) { 949 default: 950 break; 951 case TargetOpcode::G_SELECT: { 952 assert(DstOps.size() == 1 && "Invalid select"); 953 assert(SrcOps.size() == 3 && "Invalid select"); 954 validateSelectOp( 955 DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()), 956 SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI())); 957 break; 958 } 959 case TargetOpcode::G_ADD: 960 case TargetOpcode::G_AND: 961 case TargetOpcode::G_MUL: 962 case TargetOpcode::G_OR: 963 case TargetOpcode::G_SUB: 964 case TargetOpcode::G_XOR: 965 case TargetOpcode::G_UDIV: 966 case TargetOpcode::G_SDIV: 967 case TargetOpcode::G_UREM: 968 case TargetOpcode::G_SREM: 969 case TargetOpcode::G_SMIN: 970 case TargetOpcode::G_SMAX: 971 case TargetOpcode::G_UMIN: 972 case TargetOpcode::G_UMAX: { 973 // All these are binary ops. 974 assert(DstOps.size() == 1 && "Invalid Dst"); 975 assert(SrcOps.size() == 2 && "Invalid Srcs"); 976 validateBinaryOp(DstOps[0].getLLTTy(*getMRI()), 977 SrcOps[0].getLLTTy(*getMRI()), 978 SrcOps[1].getLLTTy(*getMRI())); 979 break; 980 } 981 case TargetOpcode::G_SHL: 982 case TargetOpcode::G_ASHR: 983 case TargetOpcode::G_LSHR: { 984 assert(DstOps.size() == 1 && "Invalid Dst"); 985 assert(SrcOps.size() == 2 && "Invalid Srcs"); 986 validateShiftOp(DstOps[0].getLLTTy(*getMRI()), 987 SrcOps[0].getLLTTy(*getMRI()), 988 SrcOps[1].getLLTTy(*getMRI())); 989 break; 990 } 991 case TargetOpcode::G_SEXT: 992 case TargetOpcode::G_ZEXT: 993 case TargetOpcode::G_ANYEXT: 994 assert(DstOps.size() == 1 && "Invalid Dst"); 995 assert(SrcOps.size() == 1 && "Invalid Srcs"); 996 validateTruncExt(DstOps[0].getLLTTy(*getMRI()), 997 SrcOps[0].getLLTTy(*getMRI()), true); 998 break; 999 case TargetOpcode::G_TRUNC: 1000 case TargetOpcode::G_FPTRUNC: { 1001 assert(DstOps.size() == 1 && "Invalid Dst"); 1002 assert(SrcOps.size() == 1 && "Invalid Srcs"); 1003 validateTruncExt(DstOps[0].getLLTTy(*getMRI()), 1004 SrcOps[0].getLLTTy(*getMRI()), false); 1005 break; 1006 } 1007 case TargetOpcode::COPY: 1008 assert(DstOps.size() == 1 && "Invalid Dst"); 1009 // If the caller wants to add a subreg source it has to be done separately 1010 // so we may not have any SrcOps at this point yet. 1011 break; 1012 case TargetOpcode::G_FCMP: 1013 case TargetOpcode::G_ICMP: { 1014 assert(DstOps.size() == 1 && "Invalid Dst Operands"); 1015 assert(SrcOps.size() == 3 && "Invalid Src Operands"); 1016 // For F/ICMP, the first src operand is the predicate, followed by 1017 // the two comparands. 1018 assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate && 1019 "Expecting predicate"); 1020 assert([&]() -> bool { 1021 CmpInst::Predicate Pred = SrcOps[0].getPredicate(); 1022 return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred) 1023 : CmpInst::isFPPredicate(Pred); 1024 }() && "Invalid predicate"); 1025 assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) && 1026 "Type mismatch"); 1027 assert([&]() -> bool { 1028 LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI()); 1029 LLT DstTy = DstOps[0].getLLTTy(*getMRI()); 1030 if (Op0Ty.isScalar() || Op0Ty.isPointer()) 1031 return DstTy.isScalar(); 1032 else 1033 return DstTy.isVector() && 1034 DstTy.getNumElements() == Op0Ty.getNumElements(); 1035 }() && "Type Mismatch"); 1036 break; 1037 } 1038 case TargetOpcode::G_UNMERGE_VALUES: { 1039 assert(!DstOps.empty() && "Invalid trivial sequence"); 1040 assert(SrcOps.size() == 1 && "Invalid src for Unmerge"); 1041 assert(std::all_of(DstOps.begin(), DstOps.end(), 1042 [&, this](const DstOp &Op) { 1043 return Op.getLLTTy(*getMRI()) == 1044 DstOps[0].getLLTTy(*getMRI()); 1045 }) && 1046 "type mismatch in output list"); 1047 assert(DstOps.size() * DstOps[0].getLLTTy(*getMRI()).getSizeInBits() == 1048 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && 1049 "input operands do not cover output register"); 1050 break; 1051 } 1052 case TargetOpcode::G_MERGE_VALUES: { 1053 assert(!SrcOps.empty() && "invalid trivial sequence"); 1054 assert(DstOps.size() == 1 && "Invalid Dst"); 1055 assert(std::all_of(SrcOps.begin(), SrcOps.end(), 1056 [&, this](const SrcOp &Op) { 1057 return Op.getLLTTy(*getMRI()) == 1058 SrcOps[0].getLLTTy(*getMRI()); 1059 }) && 1060 "type mismatch in input list"); 1061 assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() == 1062 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() && 1063 "input operands do not cover output register"); 1064 if (SrcOps.size() == 1) 1065 return buildCast(DstOps[0], SrcOps[0]); 1066 if (DstOps[0].getLLTTy(*getMRI()).isVector()) { 1067 if (SrcOps[0].getLLTTy(*getMRI()).isVector()) 1068 return buildInstr(TargetOpcode::G_CONCAT_VECTORS, DstOps, SrcOps); 1069 return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps); 1070 } 1071 break; 1072 } 1073 case TargetOpcode::G_EXTRACT_VECTOR_ELT: { 1074 assert(DstOps.size() == 1 && "Invalid Dst size"); 1075 assert(SrcOps.size() == 2 && "Invalid Src size"); 1076 assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type"); 1077 assert((DstOps[0].getLLTTy(*getMRI()).isScalar() || 1078 DstOps[0].getLLTTy(*getMRI()).isPointer()) && 1079 "Invalid operand type"); 1080 assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type"); 1081 assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() == 1082 DstOps[0].getLLTTy(*getMRI()) && 1083 "Type mismatch"); 1084 break; 1085 } 1086 case TargetOpcode::G_INSERT_VECTOR_ELT: { 1087 assert(DstOps.size() == 1 && "Invalid dst size"); 1088 assert(SrcOps.size() == 3 && "Invalid src size"); 1089 assert(DstOps[0].getLLTTy(*getMRI()).isVector() && 1090 SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type"); 1091 assert(DstOps[0].getLLTTy(*getMRI()).getElementType() == 1092 SrcOps[1].getLLTTy(*getMRI()) && 1093 "Type mismatch"); 1094 assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index"); 1095 assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() == 1096 SrcOps[0].getLLTTy(*getMRI()).getNumElements() && 1097 "Type mismatch"); 1098 break; 1099 } 1100 case TargetOpcode::G_BUILD_VECTOR: { 1101 assert((!SrcOps.empty() || SrcOps.size() < 2) && 1102 "Must have at least 2 operands"); 1103 assert(DstOps.size() == 1 && "Invalid DstOps"); 1104 assert(DstOps[0].getLLTTy(*getMRI()).isVector() && 1105 "Res type must be a vector"); 1106 assert(std::all_of(SrcOps.begin(), SrcOps.end(), 1107 [&, this](const SrcOp &Op) { 1108 return Op.getLLTTy(*getMRI()) == 1109 SrcOps[0].getLLTTy(*getMRI()); 1110 }) && 1111 "type mismatch in input list"); 1112 assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() == 1113 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() && 1114 "input scalars do not exactly cover the output vector register"); 1115 break; 1116 } 1117 case TargetOpcode::G_BUILD_VECTOR_TRUNC: { 1118 assert((!SrcOps.empty() || SrcOps.size() < 2) && 1119 "Must have at least 2 operands"); 1120 assert(DstOps.size() == 1 && "Invalid DstOps"); 1121 assert(DstOps[0].getLLTTy(*getMRI()).isVector() && 1122 "Res type must be a vector"); 1123 assert(std::all_of(SrcOps.begin(), SrcOps.end(), 1124 [&, this](const SrcOp &Op) { 1125 return Op.getLLTTy(*getMRI()) == 1126 SrcOps[0].getLLTTy(*getMRI()); 1127 }) && 1128 "type mismatch in input list"); 1129 if (SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() == 1130 DstOps[0].getLLTTy(*getMRI()).getElementType().getSizeInBits()) 1131 return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps); 1132 break; 1133 } 1134 case TargetOpcode::G_CONCAT_VECTORS: { 1135 assert(DstOps.size() == 1 && "Invalid DstOps"); 1136 assert((!SrcOps.empty() || SrcOps.size() < 2) && 1137 "Must have at least 2 operands"); 1138 assert(std::all_of(SrcOps.begin(), SrcOps.end(), 1139 [&, this](const SrcOp &Op) { 1140 return (Op.getLLTTy(*getMRI()).isVector() && 1141 Op.getLLTTy(*getMRI()) == 1142 SrcOps[0].getLLTTy(*getMRI())); 1143 }) && 1144 "type mismatch in input list"); 1145 assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() == 1146 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() && 1147 "input vectors do not exactly cover the output vector register"); 1148 break; 1149 } 1150 case TargetOpcode::G_UADDE: { 1151 assert(DstOps.size() == 2 && "Invalid no of dst operands"); 1152 assert(SrcOps.size() == 3 && "Invalid no of src operands"); 1153 assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand"); 1154 assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) && 1155 (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) && 1156 "Invalid operand"); 1157 assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand"); 1158 assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) && 1159 "type mismatch"); 1160 break; 1161 } 1162 } 1163 1164 auto MIB = buildInstr(Opc); 1165 for (const DstOp &Op : DstOps) 1166 Op.addDefToMIB(*getMRI(), MIB); 1167 for (const SrcOp &Op : SrcOps) 1168 Op.addSrcToMIB(MIB); 1169 if (Flags) 1170 MIB->setFlags(*Flags); 1171 return MIB; 1172 } 1173