1 //===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 /// \file 9 /// This file implements the MachineIRBuidler class. 10 //===----------------------------------------------------------------------===// 11 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" 12 #include "llvm/Analysis/MemoryLocation.h" 13 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" 14 #include "llvm/CodeGen/MachineFunction.h" 15 #include "llvm/CodeGen/MachineInstr.h" 16 #include "llvm/CodeGen/MachineInstrBuilder.h" 17 #include "llvm/CodeGen/MachineRegisterInfo.h" 18 #include "llvm/CodeGen/TargetInstrInfo.h" 19 #include "llvm/CodeGen/TargetLowering.h" 20 #include "llvm/CodeGen/TargetOpcodes.h" 21 #include "llvm/CodeGen/TargetSubtargetInfo.h" 22 #include "llvm/IR/DebugInfo.h" 23 24 using namespace llvm; 25 26 void MachineIRBuilder::setMF(MachineFunction &MF) { 27 State.MF = &MF; 28 State.MBB = nullptr; 29 State.MRI = &MF.getRegInfo(); 30 State.TII = MF.getSubtarget().getInstrInfo(); 31 State.DL = DebugLoc(); 32 State.II = MachineBasicBlock::iterator(); 33 State.Observer = nullptr; 34 } 35 36 //------------------------------------------------------------------------------ 37 // Build instruction variants. 38 //------------------------------------------------------------------------------ 39 40 MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) { 41 MachineInstrBuilder MIB = BuildMI(getMF(), getDL(), getTII().get(Opcode)); 42 return MIB; 43 } 44 45 MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) { 46 getMBB().insert(getInsertPt(), MIB); 47 recordInsertion(MIB); 48 return MIB; 49 } 50 51 MachineInstrBuilder 52 MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable, 53 const MDNode *Expr) { 54 assert(isa<DILocalVariable>(Variable) && "not a variable"); 55 assert(cast<DIExpression>(Expr)->isValid() && "not an expression"); 56 assert( 57 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) && 58 "Expected inlined-at fields to agree"); 59 return insertInstr(BuildMI(getMF(), getDL(), 60 getTII().get(TargetOpcode::DBG_VALUE), 61 /*IsIndirect*/ false, Reg, Variable, Expr)); 62 } 63 64 MachineInstrBuilder 65 MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable, 66 const MDNode *Expr) { 67 assert(isa<DILocalVariable>(Variable) && "not a variable"); 68 assert(cast<DIExpression>(Expr)->isValid() && "not an expression"); 69 assert( 70 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) && 71 "Expected inlined-at fields to agree"); 72 return insertInstr(BuildMI(getMF(), getDL(), 73 getTII().get(TargetOpcode::DBG_VALUE), 74 /*IsIndirect*/ true, Reg, Variable, Expr)); 75 } 76 77 MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI, 78 const MDNode *Variable, 79 const MDNode *Expr) { 80 assert(isa<DILocalVariable>(Variable) && "not a variable"); 81 assert(cast<DIExpression>(Expr)->isValid() && "not an expression"); 82 assert( 83 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) && 84 "Expected inlined-at fields to agree"); 85 return buildInstr(TargetOpcode::DBG_VALUE) 86 .addFrameIndex(FI) 87 .addImm(0) 88 .addMetadata(Variable) 89 .addMetadata(Expr); 90 } 91 92 MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C, 93 const MDNode *Variable, 94 const MDNode *Expr) { 95 assert(isa<DILocalVariable>(Variable) && "not a variable"); 96 assert(cast<DIExpression>(Expr)->isValid() && "not an expression"); 97 assert( 98 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) && 99 "Expected inlined-at fields to agree"); 100 auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE); 101 if (auto *CI = dyn_cast<ConstantInt>(&C)) { 102 if (CI->getBitWidth() > 64) 103 MIB.addCImm(CI); 104 else 105 MIB.addImm(CI->getZExtValue()); 106 } else if (auto *CFP = dyn_cast<ConstantFP>(&C)) { 107 MIB.addFPImm(CFP); 108 } else { 109 // Insert $noreg if we didn't find a usable constant and had to drop it. 110 MIB.addReg(Register()); 111 } 112 113 MIB.addImm(0).addMetadata(Variable).addMetadata(Expr); 114 return insertInstr(MIB); 115 } 116 117 MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) { 118 assert(isa<DILabel>(Label) && "not a label"); 119 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) && 120 "Expected inlined-at fields to agree"); 121 auto MIB = buildInstr(TargetOpcode::DBG_LABEL); 122 123 return MIB.addMetadata(Label); 124 } 125 126 MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res, 127 const SrcOp &Size, 128 Align Alignment) { 129 assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type"); 130 auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC); 131 Res.addDefToMIB(*getMRI(), MIB); 132 Size.addSrcToMIB(MIB); 133 MIB.addImm(Alignment.value()); 134 return MIB; 135 } 136 137 MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res, 138 int Idx) { 139 assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type"); 140 auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX); 141 Res.addDefToMIB(*getMRI(), MIB); 142 MIB.addFrameIndex(Idx); 143 return MIB; 144 } 145 146 MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res, 147 const GlobalValue *GV) { 148 assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type"); 149 assert(Res.getLLTTy(*getMRI()).getAddressSpace() == 150 GV->getType()->getAddressSpace() && 151 "address space mismatch"); 152 153 auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE); 154 Res.addDefToMIB(*getMRI(), MIB); 155 MIB.addGlobalAddress(GV); 156 return MIB; 157 } 158 159 MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy, 160 unsigned JTI) { 161 return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {}) 162 .addJumpTableIndex(JTI); 163 } 164 165 void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0, 166 const LLT Op1) { 167 assert((Res.isScalar() || Res.isVector()) && "invalid operand type"); 168 assert((Res == Op0 && Res == Op1) && "type mismatch"); 169 } 170 171 void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0, 172 const LLT Op1) { 173 assert((Res.isScalar() || Res.isVector()) && "invalid operand type"); 174 assert((Res == Op0) && "type mismatch"); 175 } 176 177 MachineInstrBuilder MachineIRBuilder::buildPtrAdd(const DstOp &Res, 178 const SrcOp &Op0, 179 const SrcOp &Op1) { 180 assert(Res.getLLTTy(*getMRI()).getScalarType().isPointer() && 181 Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch"); 182 assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() && "invalid offset type"); 183 184 return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1}); 185 } 186 187 Optional<MachineInstrBuilder> 188 MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0, 189 const LLT ValueTy, uint64_t Value) { 190 assert(Res == 0 && "Res is a result argument"); 191 assert(ValueTy.isScalar() && "invalid offset type"); 192 193 if (Value == 0) { 194 Res = Op0; 195 return None; 196 } 197 198 Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0)); 199 auto Cst = buildConstant(ValueTy, Value); 200 return buildPtrAdd(Res, Op0, Cst.getReg(0)); 201 } 202 203 MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res, 204 const SrcOp &Op0, 205 uint32_t NumBits) { 206 LLT PtrTy = Res.getLLTTy(*getMRI()); 207 LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits()); 208 Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy); 209 buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits)); 210 return buildPtrMask(Res, Op0, MaskReg); 211 } 212 213 MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) { 214 return buildInstr(TargetOpcode::G_BR).addMBB(&Dest); 215 } 216 217 MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) { 218 assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination"); 219 return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt); 220 } 221 222 MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr, 223 unsigned JTI, 224 Register IndexReg) { 225 assert(getMRI()->getType(TablePtr).isPointer() && 226 "Table reg must be a pointer"); 227 return buildInstr(TargetOpcode::G_BRJT) 228 .addUse(TablePtr) 229 .addJumpTableIndex(JTI) 230 .addUse(IndexReg); 231 } 232 233 MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res, 234 const SrcOp &Op) { 235 return buildInstr(TargetOpcode::COPY, Res, Op); 236 } 237 238 MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res, 239 const ConstantInt &Val) { 240 LLT Ty = Res.getLLTTy(*getMRI()); 241 LLT EltTy = Ty.getScalarType(); 242 assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() && 243 "creating constant with the wrong size"); 244 245 if (Ty.isVector()) { 246 auto Const = buildInstr(TargetOpcode::G_CONSTANT) 247 .addDef(getMRI()->createGenericVirtualRegister(EltTy)) 248 .addCImm(&Val); 249 return buildSplatVector(Res, Const); 250 } 251 252 auto Const = buildInstr(TargetOpcode::G_CONSTANT); 253 Const->setDebugLoc(DebugLoc()); 254 Res.addDefToMIB(*getMRI(), Const); 255 Const.addCImm(&Val); 256 return Const; 257 } 258 259 MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res, 260 int64_t Val) { 261 auto IntN = IntegerType::get(getMF().getFunction().getContext(), 262 Res.getLLTTy(*getMRI()).getScalarSizeInBits()); 263 ConstantInt *CI = ConstantInt::get(IntN, Val, true); 264 return buildConstant(Res, *CI); 265 } 266 267 MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res, 268 const ConstantFP &Val) { 269 LLT Ty = Res.getLLTTy(*getMRI()); 270 LLT EltTy = Ty.getScalarType(); 271 272 assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics()) 273 == EltTy.getSizeInBits() && 274 "creating fconstant with the wrong size"); 275 276 assert(!Ty.isPointer() && "invalid operand type"); 277 278 if (Ty.isVector()) { 279 auto Const = buildInstr(TargetOpcode::G_FCONSTANT) 280 .addDef(getMRI()->createGenericVirtualRegister(EltTy)) 281 .addFPImm(&Val); 282 283 return buildSplatVector(Res, Const); 284 } 285 286 auto Const = buildInstr(TargetOpcode::G_FCONSTANT); 287 Const->setDebugLoc(DebugLoc()); 288 Res.addDefToMIB(*getMRI(), Const); 289 Const.addFPImm(&Val); 290 return Const; 291 } 292 293 MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res, 294 const APInt &Val) { 295 ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val); 296 return buildConstant(Res, *CI); 297 } 298 299 MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res, 300 double Val) { 301 LLT DstTy = Res.getLLTTy(*getMRI()); 302 auto &Ctx = getMF().getFunction().getContext(); 303 auto *CFP = 304 ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits())); 305 return buildFConstant(Res, *CFP); 306 } 307 308 MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res, 309 const APFloat &Val) { 310 auto &Ctx = getMF().getFunction().getContext(); 311 auto *CFP = ConstantFP::get(Ctx, Val); 312 return buildFConstant(Res, *CFP); 313 } 314 315 MachineInstrBuilder MachineIRBuilder::buildBrCond(Register Tst, 316 MachineBasicBlock &Dest) { 317 assert(getMRI()->getType(Tst).isScalar() && "invalid operand type"); 318 319 return buildInstr(TargetOpcode::G_BRCOND).addUse(Tst).addMBB(&Dest); 320 } 321 322 MachineInstrBuilder 323 MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr, 324 MachinePointerInfo PtrInfo, Align Alignment, 325 MachineMemOperand::Flags MMOFlags, 326 const AAMDNodes &AAInfo) { 327 MMOFlags |= MachineMemOperand::MOLoad; 328 assert((MMOFlags & MachineMemOperand::MOStore) == 0); 329 330 uint64_t Size = MemoryLocation::getSizeOrUnknown( 331 TypeSize::Fixed(Dst.getLLTTy(*getMRI()).getSizeInBytes())); 332 MachineMemOperand *MMO = 333 getMF().getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, AAInfo); 334 return buildLoad(Dst, Addr, *MMO); 335 } 336 337 MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode, 338 const DstOp &Res, 339 const SrcOp &Addr, 340 MachineMemOperand &MMO) { 341 assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type"); 342 assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type"); 343 344 auto MIB = buildInstr(Opcode); 345 Res.addDefToMIB(*getMRI(), MIB); 346 Addr.addSrcToMIB(MIB); 347 MIB.addMemOperand(&MMO); 348 return MIB; 349 } 350 351 MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset( 352 const DstOp &Dst, const SrcOp &BasePtr, 353 MachineMemOperand &BaseMMO, int64_t Offset) { 354 LLT LoadTy = Dst.getLLTTy(*getMRI()); 355 MachineMemOperand *OffsetMMO = 356 getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy.getSizeInBytes()); 357 358 if (Offset == 0) // This may be a size or type changing load. 359 return buildLoad(Dst, BasePtr, *OffsetMMO); 360 361 LLT PtrTy = BasePtr.getLLTTy(*getMRI()); 362 LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits()); 363 auto ConstOffset = buildConstant(OffsetTy, Offset); 364 auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset); 365 return buildLoad(Dst, Ptr, *OffsetMMO); 366 } 367 368 MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val, 369 const SrcOp &Addr, 370 MachineMemOperand &MMO) { 371 assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type"); 372 assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type"); 373 374 auto MIB = buildInstr(TargetOpcode::G_STORE); 375 Val.addSrcToMIB(MIB); 376 Addr.addSrcToMIB(MIB); 377 MIB.addMemOperand(&MMO); 378 return MIB; 379 } 380 381 MachineInstrBuilder 382 MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr, 383 MachinePointerInfo PtrInfo, Align Alignment, 384 MachineMemOperand::Flags MMOFlags, 385 const AAMDNodes &AAInfo) { 386 MMOFlags |= MachineMemOperand::MOStore; 387 assert((MMOFlags & MachineMemOperand::MOLoad) == 0); 388 389 uint64_t Size = MemoryLocation::getSizeOrUnknown( 390 TypeSize::Fixed(Val.getLLTTy(*getMRI()).getSizeInBytes())); 391 MachineMemOperand *MMO = 392 getMF().getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, AAInfo); 393 return buildStore(Val, Addr, *MMO); 394 } 395 396 MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res, 397 const SrcOp &Op) { 398 return buildInstr(TargetOpcode::G_ANYEXT, Res, Op); 399 } 400 401 MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res, 402 const SrcOp &Op) { 403 return buildInstr(TargetOpcode::G_SEXT, Res, Op); 404 } 405 406 MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res, 407 const SrcOp &Op) { 408 return buildInstr(TargetOpcode::G_ZEXT, Res, Op); 409 } 410 411 unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const { 412 const auto *TLI = getMF().getSubtarget().getTargetLowering(); 413 switch (TLI->getBooleanContents(IsVec, IsFP)) { 414 case TargetLoweringBase::ZeroOrNegativeOneBooleanContent: 415 return TargetOpcode::G_SEXT; 416 case TargetLoweringBase::ZeroOrOneBooleanContent: 417 return TargetOpcode::G_ZEXT; 418 default: 419 return TargetOpcode::G_ANYEXT; 420 } 421 } 422 423 MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res, 424 const SrcOp &Op, 425 bool IsFP) { 426 unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP); 427 return buildInstr(ExtOp, Res, Op); 428 } 429 430 MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc, 431 const DstOp &Res, 432 const SrcOp &Op) { 433 assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc || 434 TargetOpcode::G_SEXT == ExtOpc) && 435 "Expecting Extending Opc"); 436 assert(Res.getLLTTy(*getMRI()).isScalar() || 437 Res.getLLTTy(*getMRI()).isVector()); 438 assert(Res.getLLTTy(*getMRI()).isScalar() == 439 Op.getLLTTy(*getMRI()).isScalar()); 440 441 unsigned Opcode = TargetOpcode::COPY; 442 if (Res.getLLTTy(*getMRI()).getSizeInBits() > 443 Op.getLLTTy(*getMRI()).getSizeInBits()) 444 Opcode = ExtOpc; 445 else if (Res.getLLTTy(*getMRI()).getSizeInBits() < 446 Op.getLLTTy(*getMRI()).getSizeInBits()) 447 Opcode = TargetOpcode::G_TRUNC; 448 else 449 assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI())); 450 451 return buildInstr(Opcode, Res, Op); 452 } 453 454 MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res, 455 const SrcOp &Op) { 456 return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op); 457 } 458 459 MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res, 460 const SrcOp &Op) { 461 return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op); 462 } 463 464 MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res, 465 const SrcOp &Op) { 466 return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op); 467 } 468 469 MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst, 470 const SrcOp &Src) { 471 LLT SrcTy = Src.getLLTTy(*getMRI()); 472 LLT DstTy = Dst.getLLTTy(*getMRI()); 473 if (SrcTy == DstTy) 474 return buildCopy(Dst, Src); 475 476 unsigned Opcode; 477 if (SrcTy.isPointer() && DstTy.isScalar()) 478 Opcode = TargetOpcode::G_PTRTOINT; 479 else if (DstTy.isPointer() && SrcTy.isScalar()) 480 Opcode = TargetOpcode::G_INTTOPTR; 481 else { 482 assert(!SrcTy.isPointer() && !DstTy.isPointer() && "n G_ADDRCAST yet"); 483 Opcode = TargetOpcode::G_BITCAST; 484 } 485 486 return buildInstr(Opcode, Dst, Src); 487 } 488 489 MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst, 490 const SrcOp &Src, 491 uint64_t Index) { 492 LLT SrcTy = Src.getLLTTy(*getMRI()); 493 LLT DstTy = Dst.getLLTTy(*getMRI()); 494 495 #ifndef NDEBUG 496 assert(SrcTy.isValid() && "invalid operand type"); 497 assert(DstTy.isValid() && "invalid operand type"); 498 assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() && 499 "extracting off end of register"); 500 #endif 501 502 if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) { 503 assert(Index == 0 && "insertion past the end of a register"); 504 return buildCast(Dst, Src); 505 } 506 507 auto Extract = buildInstr(TargetOpcode::G_EXTRACT); 508 Dst.addDefToMIB(*getMRI(), Extract); 509 Src.addSrcToMIB(Extract); 510 Extract.addImm(Index); 511 return Extract; 512 } 513 514 void MachineIRBuilder::buildSequence(Register Res, ArrayRef<Register> Ops, 515 ArrayRef<uint64_t> Indices) { 516 #ifndef NDEBUG 517 assert(Ops.size() == Indices.size() && "incompatible args"); 518 assert(!Ops.empty() && "invalid trivial sequence"); 519 assert(llvm::is_sorted(Indices) && 520 "sequence offsets must be in ascending order"); 521 522 assert(getMRI()->getType(Res).isValid() && "invalid operand type"); 523 for (auto Op : Ops) 524 assert(getMRI()->getType(Op).isValid() && "invalid operand type"); 525 #endif 526 527 LLT ResTy = getMRI()->getType(Res); 528 LLT OpTy = getMRI()->getType(Ops[0]); 529 unsigned OpSize = OpTy.getSizeInBits(); 530 bool MaybeMerge = true; 531 for (unsigned i = 0; i < Ops.size(); ++i) { 532 if (getMRI()->getType(Ops[i]) != OpTy || Indices[i] != i * OpSize) { 533 MaybeMerge = false; 534 break; 535 } 536 } 537 538 if (MaybeMerge && Ops.size() * OpSize == ResTy.getSizeInBits()) { 539 buildMerge(Res, Ops); 540 return; 541 } 542 543 Register ResIn = getMRI()->createGenericVirtualRegister(ResTy); 544 buildUndef(ResIn); 545 546 for (unsigned i = 0; i < Ops.size(); ++i) { 547 Register ResOut = i + 1 == Ops.size() 548 ? Res 549 : getMRI()->createGenericVirtualRegister(ResTy); 550 buildInsert(ResOut, ResIn, Ops[i], Indices[i]); 551 ResIn = ResOut; 552 } 553 } 554 555 MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) { 556 return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {}); 557 } 558 559 MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res, 560 ArrayRef<Register> Ops) { 561 // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<SrcOp>, 562 // we need some temporary storage for the DstOp objects. Here we use a 563 // sufficiently large SmallVector to not go through the heap. 564 SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end()); 565 assert(TmpVec.size() > 1); 566 return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec); 567 } 568 569 MachineInstrBuilder 570 MachineIRBuilder::buildMerge(const DstOp &Res, 571 std::initializer_list<SrcOp> Ops) { 572 assert(Ops.size() > 1); 573 return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, Ops); 574 } 575 576 MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res, 577 const SrcOp &Op) { 578 // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>, 579 // we need some temporary storage for the DstOp objects. Here we use a 580 // sufficiently large SmallVector to not go through the heap. 581 SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end()); 582 assert(TmpVec.size() > 1); 583 return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op); 584 } 585 586 MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res, 587 const SrcOp &Op) { 588 unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits(); 589 SmallVector<Register, 8> TmpVec; 590 for (unsigned I = 0; I != NumReg; ++I) 591 TmpVec.push_back(getMRI()->createGenericVirtualRegister(Res)); 592 return buildUnmerge(TmpVec, Op); 593 } 594 595 MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res, 596 const SrcOp &Op) { 597 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>, 598 // we need some temporary storage for the DstOp objects. Here we use a 599 // sufficiently large SmallVector to not go through the heap. 600 SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end()); 601 assert(TmpVec.size() > 1); 602 return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op); 603 } 604 605 MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res, 606 ArrayRef<Register> Ops) { 607 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>, 608 // we need some temporary storage for the DstOp objects. Here we use a 609 // sufficiently large SmallVector to not go through the heap. 610 SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end()); 611 return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec); 612 } 613 614 MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res, 615 const SrcOp &Src) { 616 SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src); 617 return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec); 618 } 619 620 MachineInstrBuilder 621 MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res, 622 ArrayRef<Register> Ops) { 623 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>, 624 // we need some temporary storage for the DstOp objects. Here we use a 625 // sufficiently large SmallVector to not go through the heap. 626 SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end()); 627 return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec); 628 } 629 630 MachineInstrBuilder 631 MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) { 632 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>, 633 // we need some temporary storage for the DstOp objects. Here we use a 634 // sufficiently large SmallVector to not go through the heap. 635 SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end()); 636 return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec); 637 } 638 639 MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res, 640 const SrcOp &Src, 641 const SrcOp &Op, 642 unsigned Index) { 643 assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <= 644 Res.getLLTTy(*getMRI()).getSizeInBits() && 645 "insertion past the end of a register"); 646 647 if (Res.getLLTTy(*getMRI()).getSizeInBits() == 648 Op.getLLTTy(*getMRI()).getSizeInBits()) { 649 return buildCast(Res, Op); 650 } 651 652 return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)}); 653 } 654 655 MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID, 656 ArrayRef<Register> ResultRegs, 657 bool HasSideEffects) { 658 auto MIB = 659 buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS 660 : TargetOpcode::G_INTRINSIC); 661 for (unsigned ResultReg : ResultRegs) 662 MIB.addDef(ResultReg); 663 MIB.addIntrinsicID(ID); 664 return MIB; 665 } 666 667 MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID, 668 ArrayRef<DstOp> Results, 669 bool HasSideEffects) { 670 auto MIB = 671 buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS 672 : TargetOpcode::G_INTRINSIC); 673 for (DstOp Result : Results) 674 Result.addDefToMIB(*getMRI(), MIB); 675 MIB.addIntrinsicID(ID); 676 return MIB; 677 } 678 679 MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res, 680 const SrcOp &Op) { 681 return buildInstr(TargetOpcode::G_TRUNC, Res, Op); 682 } 683 684 MachineInstrBuilder MachineIRBuilder::buildFPTrunc(const DstOp &Res, 685 const SrcOp &Op, 686 Optional<unsigned> Flags) { 687 return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags); 688 } 689 690 MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred, 691 const DstOp &Res, 692 const SrcOp &Op0, 693 const SrcOp &Op1) { 694 return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1}); 695 } 696 697 MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred, 698 const DstOp &Res, 699 const SrcOp &Op0, 700 const SrcOp &Op1, 701 Optional<unsigned> Flags) { 702 703 return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags); 704 } 705 706 MachineInstrBuilder MachineIRBuilder::buildSelect(const DstOp &Res, 707 const SrcOp &Tst, 708 const SrcOp &Op0, 709 const SrcOp &Op1, 710 Optional<unsigned> Flags) { 711 712 return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags); 713 } 714 715 MachineInstrBuilder 716 MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, 717 const SrcOp &Elt, const SrcOp &Idx) { 718 return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx}); 719 } 720 721 MachineInstrBuilder 722 MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, 723 const SrcOp &Idx) { 724 return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx}); 725 } 726 727 MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess( 728 Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal, 729 Register NewVal, MachineMemOperand &MMO) { 730 #ifndef NDEBUG 731 LLT OldValResTy = getMRI()->getType(OldValRes); 732 LLT SuccessResTy = getMRI()->getType(SuccessRes); 733 LLT AddrTy = getMRI()->getType(Addr); 734 LLT CmpValTy = getMRI()->getType(CmpVal); 735 LLT NewValTy = getMRI()->getType(NewVal); 736 assert(OldValResTy.isScalar() && "invalid operand type"); 737 assert(SuccessResTy.isScalar() && "invalid operand type"); 738 assert(AddrTy.isPointer() && "invalid operand type"); 739 assert(CmpValTy.isValid() && "invalid operand type"); 740 assert(NewValTy.isValid() && "invalid operand type"); 741 assert(OldValResTy == CmpValTy && "type mismatch"); 742 assert(OldValResTy == NewValTy && "type mismatch"); 743 #endif 744 745 return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS) 746 .addDef(OldValRes) 747 .addDef(SuccessRes) 748 .addUse(Addr) 749 .addUse(CmpVal) 750 .addUse(NewVal) 751 .addMemOperand(&MMO); 752 } 753 754 MachineInstrBuilder 755 MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr, 756 Register CmpVal, Register NewVal, 757 MachineMemOperand &MMO) { 758 #ifndef NDEBUG 759 LLT OldValResTy = getMRI()->getType(OldValRes); 760 LLT AddrTy = getMRI()->getType(Addr); 761 LLT CmpValTy = getMRI()->getType(CmpVal); 762 LLT NewValTy = getMRI()->getType(NewVal); 763 assert(OldValResTy.isScalar() && "invalid operand type"); 764 assert(AddrTy.isPointer() && "invalid operand type"); 765 assert(CmpValTy.isValid() && "invalid operand type"); 766 assert(NewValTy.isValid() && "invalid operand type"); 767 assert(OldValResTy == CmpValTy && "type mismatch"); 768 assert(OldValResTy == NewValTy && "type mismatch"); 769 #endif 770 771 return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG) 772 .addDef(OldValRes) 773 .addUse(Addr) 774 .addUse(CmpVal) 775 .addUse(NewVal) 776 .addMemOperand(&MMO); 777 } 778 779 MachineInstrBuilder MachineIRBuilder::buildAtomicRMW( 780 unsigned Opcode, const DstOp &OldValRes, 781 const SrcOp &Addr, const SrcOp &Val, 782 MachineMemOperand &MMO) { 783 784 #ifndef NDEBUG 785 LLT OldValResTy = OldValRes.getLLTTy(*getMRI()); 786 LLT AddrTy = Addr.getLLTTy(*getMRI()); 787 LLT ValTy = Val.getLLTTy(*getMRI()); 788 assert(OldValResTy.isScalar() && "invalid operand type"); 789 assert(AddrTy.isPointer() && "invalid operand type"); 790 assert(ValTy.isValid() && "invalid operand type"); 791 assert(OldValResTy == ValTy && "type mismatch"); 792 assert(MMO.isAtomic() && "not atomic mem operand"); 793 #endif 794 795 auto MIB = buildInstr(Opcode); 796 OldValRes.addDefToMIB(*getMRI(), MIB); 797 Addr.addSrcToMIB(MIB); 798 Val.addSrcToMIB(MIB); 799 MIB.addMemOperand(&MMO); 800 return MIB; 801 } 802 803 MachineInstrBuilder 804 MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr, 805 Register Val, MachineMemOperand &MMO) { 806 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val, 807 MMO); 808 } 809 MachineInstrBuilder 810 MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr, 811 Register Val, MachineMemOperand &MMO) { 812 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val, 813 MMO); 814 } 815 MachineInstrBuilder 816 MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr, 817 Register Val, MachineMemOperand &MMO) { 818 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val, 819 MMO); 820 } 821 MachineInstrBuilder 822 MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr, 823 Register Val, MachineMemOperand &MMO) { 824 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val, 825 MMO); 826 } 827 MachineInstrBuilder 828 MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr, 829 Register Val, MachineMemOperand &MMO) { 830 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val, 831 MMO); 832 } 833 MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes, 834 Register Addr, 835 Register Val, 836 MachineMemOperand &MMO) { 837 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val, 838 MMO); 839 } 840 MachineInstrBuilder 841 MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr, 842 Register Val, MachineMemOperand &MMO) { 843 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val, 844 MMO); 845 } 846 MachineInstrBuilder 847 MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr, 848 Register Val, MachineMemOperand &MMO) { 849 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val, 850 MMO); 851 } 852 MachineInstrBuilder 853 MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr, 854 Register Val, MachineMemOperand &MMO) { 855 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val, 856 MMO); 857 } 858 MachineInstrBuilder 859 MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr, 860 Register Val, MachineMemOperand &MMO) { 861 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val, 862 MMO); 863 } 864 MachineInstrBuilder 865 MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr, 866 Register Val, MachineMemOperand &MMO) { 867 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val, 868 MMO); 869 } 870 871 MachineInstrBuilder 872 MachineIRBuilder::buildAtomicRMWFAdd( 873 const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, 874 MachineMemOperand &MMO) { 875 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val, 876 MMO); 877 } 878 879 MachineInstrBuilder 880 MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, 881 MachineMemOperand &MMO) { 882 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val, 883 MMO); 884 } 885 886 MachineInstrBuilder 887 MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) { 888 return buildInstr(TargetOpcode::G_FENCE) 889 .addImm(Ordering) 890 .addImm(Scope); 891 } 892 893 MachineInstrBuilder 894 MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) { 895 #ifndef NDEBUG 896 assert(getMRI()->getType(Res).isPointer() && "invalid res type"); 897 #endif 898 899 return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA); 900 } 901 902 void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy, 903 bool IsExtend) { 904 #ifndef NDEBUG 905 if (DstTy.isVector()) { 906 assert(SrcTy.isVector() && "mismatched cast between vector and non-vector"); 907 assert(SrcTy.getNumElements() == DstTy.getNumElements() && 908 "different number of elements in a trunc/ext"); 909 } else 910 assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc"); 911 912 if (IsExtend) 913 assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() && 914 "invalid narrowing extend"); 915 else 916 assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() && 917 "invalid widening trunc"); 918 #endif 919 } 920 921 void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy, 922 const LLT Op0Ty, const LLT Op1Ty) { 923 #ifndef NDEBUG 924 assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) && 925 "invalid operand type"); 926 assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch"); 927 if (ResTy.isScalar() || ResTy.isPointer()) 928 assert(TstTy.isScalar() && "type mismatch"); 929 else 930 assert((TstTy.isScalar() || 931 (TstTy.isVector() && 932 TstTy.getNumElements() == Op0Ty.getNumElements())) && 933 "type mismatch"); 934 #endif 935 } 936 937 MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc, 938 ArrayRef<DstOp> DstOps, 939 ArrayRef<SrcOp> SrcOps, 940 Optional<unsigned> Flags) { 941 switch (Opc) { 942 default: 943 break; 944 case TargetOpcode::G_SELECT: { 945 assert(DstOps.size() == 1 && "Invalid select"); 946 assert(SrcOps.size() == 3 && "Invalid select"); 947 validateSelectOp( 948 DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()), 949 SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI())); 950 break; 951 } 952 case TargetOpcode::G_ADD: 953 case TargetOpcode::G_AND: 954 case TargetOpcode::G_MUL: 955 case TargetOpcode::G_OR: 956 case TargetOpcode::G_SUB: 957 case TargetOpcode::G_XOR: 958 case TargetOpcode::G_UDIV: 959 case TargetOpcode::G_SDIV: 960 case TargetOpcode::G_UREM: 961 case TargetOpcode::G_SREM: 962 case TargetOpcode::G_SMIN: 963 case TargetOpcode::G_SMAX: 964 case TargetOpcode::G_UMIN: 965 case TargetOpcode::G_UMAX: 966 case TargetOpcode::G_UADDSAT: 967 case TargetOpcode::G_SADDSAT: 968 case TargetOpcode::G_USUBSAT: 969 case TargetOpcode::G_SSUBSAT: { 970 // All these are binary ops. 971 assert(DstOps.size() == 1 && "Invalid Dst"); 972 assert(SrcOps.size() == 2 && "Invalid Srcs"); 973 validateBinaryOp(DstOps[0].getLLTTy(*getMRI()), 974 SrcOps[0].getLLTTy(*getMRI()), 975 SrcOps[1].getLLTTy(*getMRI())); 976 break; 977 } 978 case TargetOpcode::G_SHL: 979 case TargetOpcode::G_ASHR: 980 case TargetOpcode::G_LSHR: { 981 assert(DstOps.size() == 1 && "Invalid Dst"); 982 assert(SrcOps.size() == 2 && "Invalid Srcs"); 983 validateShiftOp(DstOps[0].getLLTTy(*getMRI()), 984 SrcOps[0].getLLTTy(*getMRI()), 985 SrcOps[1].getLLTTy(*getMRI())); 986 break; 987 } 988 case TargetOpcode::G_SEXT: 989 case TargetOpcode::G_ZEXT: 990 case TargetOpcode::G_ANYEXT: 991 assert(DstOps.size() == 1 && "Invalid Dst"); 992 assert(SrcOps.size() == 1 && "Invalid Srcs"); 993 validateTruncExt(DstOps[0].getLLTTy(*getMRI()), 994 SrcOps[0].getLLTTy(*getMRI()), true); 995 break; 996 case TargetOpcode::G_TRUNC: 997 case TargetOpcode::G_FPTRUNC: { 998 assert(DstOps.size() == 1 && "Invalid Dst"); 999 assert(SrcOps.size() == 1 && "Invalid Srcs"); 1000 validateTruncExt(DstOps[0].getLLTTy(*getMRI()), 1001 SrcOps[0].getLLTTy(*getMRI()), false); 1002 break; 1003 } 1004 case TargetOpcode::G_BITCAST: { 1005 assert(DstOps.size() == 1 && "Invalid Dst"); 1006 assert(SrcOps.size() == 1 && "Invalid Srcs"); 1007 assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() == 1008 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast"); 1009 break; 1010 } 1011 case TargetOpcode::COPY: 1012 assert(DstOps.size() == 1 && "Invalid Dst"); 1013 // If the caller wants to add a subreg source it has to be done separately 1014 // so we may not have any SrcOps at this point yet. 1015 break; 1016 case TargetOpcode::G_FCMP: 1017 case TargetOpcode::G_ICMP: { 1018 assert(DstOps.size() == 1 && "Invalid Dst Operands"); 1019 assert(SrcOps.size() == 3 && "Invalid Src Operands"); 1020 // For F/ICMP, the first src operand is the predicate, followed by 1021 // the two comparands. 1022 assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate && 1023 "Expecting predicate"); 1024 assert([&]() -> bool { 1025 CmpInst::Predicate Pred = SrcOps[0].getPredicate(); 1026 return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred) 1027 : CmpInst::isFPPredicate(Pred); 1028 }() && "Invalid predicate"); 1029 assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) && 1030 "Type mismatch"); 1031 assert([&]() -> bool { 1032 LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI()); 1033 LLT DstTy = DstOps[0].getLLTTy(*getMRI()); 1034 if (Op0Ty.isScalar() || Op0Ty.isPointer()) 1035 return DstTy.isScalar(); 1036 else 1037 return DstTy.isVector() && 1038 DstTy.getNumElements() == Op0Ty.getNumElements(); 1039 }() && "Type Mismatch"); 1040 break; 1041 } 1042 case TargetOpcode::G_UNMERGE_VALUES: { 1043 assert(!DstOps.empty() && "Invalid trivial sequence"); 1044 assert(SrcOps.size() == 1 && "Invalid src for Unmerge"); 1045 assert(std::all_of(DstOps.begin(), DstOps.end(), 1046 [&, this](const DstOp &Op) { 1047 return Op.getLLTTy(*getMRI()) == 1048 DstOps[0].getLLTTy(*getMRI()); 1049 }) && 1050 "type mismatch in output list"); 1051 assert(DstOps.size() * DstOps[0].getLLTTy(*getMRI()).getSizeInBits() == 1052 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && 1053 "input operands do not cover output register"); 1054 break; 1055 } 1056 case TargetOpcode::G_MERGE_VALUES: { 1057 assert(!SrcOps.empty() && "invalid trivial sequence"); 1058 assert(DstOps.size() == 1 && "Invalid Dst"); 1059 assert(std::all_of(SrcOps.begin(), SrcOps.end(), 1060 [&, this](const SrcOp &Op) { 1061 return Op.getLLTTy(*getMRI()) == 1062 SrcOps[0].getLLTTy(*getMRI()); 1063 }) && 1064 "type mismatch in input list"); 1065 assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() == 1066 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() && 1067 "input operands do not cover output register"); 1068 if (SrcOps.size() == 1) 1069 return buildCast(DstOps[0], SrcOps[0]); 1070 if (DstOps[0].getLLTTy(*getMRI()).isVector()) { 1071 if (SrcOps[0].getLLTTy(*getMRI()).isVector()) 1072 return buildInstr(TargetOpcode::G_CONCAT_VECTORS, DstOps, SrcOps); 1073 return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps); 1074 } 1075 break; 1076 } 1077 case TargetOpcode::G_EXTRACT_VECTOR_ELT: { 1078 assert(DstOps.size() == 1 && "Invalid Dst size"); 1079 assert(SrcOps.size() == 2 && "Invalid Src size"); 1080 assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type"); 1081 assert((DstOps[0].getLLTTy(*getMRI()).isScalar() || 1082 DstOps[0].getLLTTy(*getMRI()).isPointer()) && 1083 "Invalid operand type"); 1084 assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type"); 1085 assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() == 1086 DstOps[0].getLLTTy(*getMRI()) && 1087 "Type mismatch"); 1088 break; 1089 } 1090 case TargetOpcode::G_INSERT_VECTOR_ELT: { 1091 assert(DstOps.size() == 1 && "Invalid dst size"); 1092 assert(SrcOps.size() == 3 && "Invalid src size"); 1093 assert(DstOps[0].getLLTTy(*getMRI()).isVector() && 1094 SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type"); 1095 assert(DstOps[0].getLLTTy(*getMRI()).getElementType() == 1096 SrcOps[1].getLLTTy(*getMRI()) && 1097 "Type mismatch"); 1098 assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index"); 1099 assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() == 1100 SrcOps[0].getLLTTy(*getMRI()).getNumElements() && 1101 "Type mismatch"); 1102 break; 1103 } 1104 case TargetOpcode::G_BUILD_VECTOR: { 1105 assert((!SrcOps.empty() || SrcOps.size() < 2) && 1106 "Must have at least 2 operands"); 1107 assert(DstOps.size() == 1 && "Invalid DstOps"); 1108 assert(DstOps[0].getLLTTy(*getMRI()).isVector() && 1109 "Res type must be a vector"); 1110 assert(std::all_of(SrcOps.begin(), SrcOps.end(), 1111 [&, this](const SrcOp &Op) { 1112 return Op.getLLTTy(*getMRI()) == 1113 SrcOps[0].getLLTTy(*getMRI()); 1114 }) && 1115 "type mismatch in input list"); 1116 assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() == 1117 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() && 1118 "input scalars do not exactly cover the output vector register"); 1119 break; 1120 } 1121 case TargetOpcode::G_BUILD_VECTOR_TRUNC: { 1122 assert((!SrcOps.empty() || SrcOps.size() < 2) && 1123 "Must have at least 2 operands"); 1124 assert(DstOps.size() == 1 && "Invalid DstOps"); 1125 assert(DstOps[0].getLLTTy(*getMRI()).isVector() && 1126 "Res type must be a vector"); 1127 assert(std::all_of(SrcOps.begin(), SrcOps.end(), 1128 [&, this](const SrcOp &Op) { 1129 return Op.getLLTTy(*getMRI()) == 1130 SrcOps[0].getLLTTy(*getMRI()); 1131 }) && 1132 "type mismatch in input list"); 1133 if (SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() == 1134 DstOps[0].getLLTTy(*getMRI()).getElementType().getSizeInBits()) 1135 return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps); 1136 break; 1137 } 1138 case TargetOpcode::G_CONCAT_VECTORS: { 1139 assert(DstOps.size() == 1 && "Invalid DstOps"); 1140 assert((!SrcOps.empty() || SrcOps.size() < 2) && 1141 "Must have at least 2 operands"); 1142 assert(std::all_of(SrcOps.begin(), SrcOps.end(), 1143 [&, this](const SrcOp &Op) { 1144 return (Op.getLLTTy(*getMRI()).isVector() && 1145 Op.getLLTTy(*getMRI()) == 1146 SrcOps[0].getLLTTy(*getMRI())); 1147 }) && 1148 "type mismatch in input list"); 1149 assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() == 1150 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() && 1151 "input vectors do not exactly cover the output vector register"); 1152 break; 1153 } 1154 case TargetOpcode::G_UADDE: { 1155 assert(DstOps.size() == 2 && "Invalid no of dst operands"); 1156 assert(SrcOps.size() == 3 && "Invalid no of src operands"); 1157 assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand"); 1158 assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) && 1159 (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) && 1160 "Invalid operand"); 1161 assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand"); 1162 assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) && 1163 "type mismatch"); 1164 break; 1165 } 1166 } 1167 1168 auto MIB = buildInstr(Opc); 1169 for (const DstOp &Op : DstOps) 1170 Op.addDefToMIB(*getMRI(), MIB); 1171 for (const SrcOp &Op : SrcOps) 1172 Op.addSrcToMIB(MIB); 1173 if (Flags) 1174 MIB->setFlags(*Flags); 1175 return MIB; 1176 } 1177