//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfo.h"

using namespace llvm;

void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}

//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  MachineInstrBuilder MIB = BuildMI(getMF(), getDL(), getTII().get(Opcode));
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return buildInstr(TargetOpcode::DBG_VALUE)
      .addFrameIndex(FI)
      .addImm(0)
      .addMetadata(Variable)
      .addMetadata(Expr);
}
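
// Example (illustrative, not part of this file): with a MachineIRBuilder
// `MIRBuilder` positioned at an insertion point, and hypothetical `Reg`,
// `Variable` (a DILocalVariable) and `Expr` (a DIExpression), a direct debug
// value could be emitted as:
//   MIRBuilder.buildDirectDbgValue(Reg, Variable, Expr);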

MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);
  if (auto *CI = dyn_cast<ConstantInt>(&C)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(&C)) {
    MIB.addFPImm(CFP);
  } else {
    // Insert $noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(Register());
  }

  MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
  return insertInstr(MIB);
}

MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Alignment.value());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}

void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}
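
// Example (illustrative) for the pointer-add builder that follows: the base
// must be pointer-typed and the offset scalar. With hypothetical registers
// `DstReg` (p0), `BaseReg` (p0) and `OffReg` (s64):
//   MIRBuilder.buildPtrAdd(DstReg, BaseReg, OffReg);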

MachineInstrBuilder MachineIRBuilder::buildPtrAdd(const DstOp &Res,
                                                  const SrcOp &Op0,
                                                  const SrcOp &Op1) {
  assert(Res.getLLTTy(*getMRI()).getScalarType().isPointer() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1});
}

Optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return None;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildPtrAdd(Res, Op0, Cst.getReg(0));
}

MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
  LLT PtrTy = Res.getLLTTy(*getMRI());
  LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
  buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
  return buildPtrMask(Res, Op0, MaskReg);
}

MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAssertSExt(const DstOp &Res,
                                                      const SrcOp &Op,
                                                      unsigned Size) {
  return buildInstr(TargetOpcode::G_ASSERT_SEXT, Res, Op).addImm(Size);
}

MachineInstrBuilder MachineIRBuilder::buildAssertZExt(const DstOp &Res,
                                                      const SrcOp &Op,
                                                      unsigned Size) {
  return buildInstr(TargetOpcode::G_ASSERT_ZEXT, Res, Op).addImm(Size);
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}
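
// Example (illustrative): for vector destinations, buildConstant emits one
// scalar G_CONSTANT and splats it through G_BUILD_VECTOR. With a hypothetical
// <4 x s32> LLT `V4S32`:
//   auto Splat = MIRBuilder.buildConstant(V4S32, 42);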

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics()) ==
             EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(
      Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
                                                  MachineBasicBlock &Dest) {
  assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_BRCOND);
  Tst.addSrcToMIB(MIB);
  MIB.addMBB(&Dest);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
                            MachinePointerInfo PtrInfo, Align Alignment,
                            MachineMemOperand::Flags MMOFlags,
                            const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);

  uint64_t Size = MemoryLocation::getSizeOrUnknown(
      TypeSize::Fixed(Dst.getLLTTy(*getMRI()).getSizeInBytes()));
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, AAInfo);
  return buildLoad(Dst, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}
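
// Example (illustrative): a stack reload via the convenience buildLoad
// overload above, with hypothetical `S32` (s32 LLT), `AddrReg`, and frame
// index `FI`:
//   MIRBuilder.buildLoad(S32, AddrReg,
//                        MachinePointerInfo::getFixedStack(MF, FI), Align(4));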

MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
    const DstOp &Dst, const SrcOp &BasePtr, MachineMemOperand &BaseMMO,
    int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(*getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy.getSizeInBytes());

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Dst, BasePtr, *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(*getMRI());
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(OffsetTy, Offset);
  auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
  return buildLoad(Dst, Ptr, *OffsetMMO);
}

MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
                             MachinePointerInfo PtrInfo, Align Alignment,
                             MachineMemOperand::Flags MMOFlags,
                             const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  uint64_t Size = MemoryLocation::getSizeOrUnknown(
      TypeSize::Fixed(Val.getLLTTy(*getMRI()).getSizeInBytes()));
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, AAInfo);
  return buildStore(Val, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
}

unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}

MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp =
      getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}
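
// Example (illustrative): buildSExtOrTrunc picks the opcode from the relative
// sizes, so with a hypothetical s32 destination `Dst`:
//   MIRBuilder.buildSExtOrTrunc(Dst, Src64); // emits G_TRUNC
//   MIRBuilder.buildSExtOrTrunc(Dst, Src16); // emits G_SEXT
//   MIRBuilder.buildSExtOrTrunc(Dst, Src32); // emits COPY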

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
                                                     const SrcOp &Op,
                                                     int64_t ImmOp) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  auto Mask = buildConstant(
      ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
  return buildAnd(Res, Op, Mask);
}

MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointer() && DstTy.isScalar())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointer() && SrcTy.isScalar())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointer() && !DstTy.isPointer() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}

MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "extraction past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}
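
// Example (illustrative): buildCast selects the cast opcode from the operand
// kinds; with hypothetical `S64` (s64) and `P0` (p0) types:
//   MIRBuilder.buildCast(S64, PtrReg);    // G_PTRTOINT
//   MIRBuilder.buildCast(P0, IntReg);     // G_INTTOPTR
//   MIRBuilder.buildCast(S64, OtherS64);  // COPY (same type)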

void MachineIRBuilder::buildSequence(Register Res, ArrayRef<Register> Ops,
                                     ArrayRef<uint64_t> Indices) {
#ifndef NDEBUG
  assert(Ops.size() == Indices.size() && "incompatible args");
  assert(!Ops.empty() && "invalid trivial sequence");
  assert(llvm::is_sorted(Indices) &&
         "sequence offsets must be in ascending order");

  assert(getMRI()->getType(Res).isValid() && "invalid operand type");
  for (auto Op : Ops)
    assert(getMRI()->getType(Op).isValid() && "invalid operand type");
#endif

  LLT ResTy = getMRI()->getType(Res);
  LLT OpTy = getMRI()->getType(Ops[0]);
  unsigned OpSize = OpTy.getSizeInBits();
  bool MaybeMerge = true;
  for (unsigned i = 0; i < Ops.size(); ++i) {
    if (getMRI()->getType(Ops[i]) != OpTy || Indices[i] != i * OpSize) {
      MaybeMerge = false;
      break;
    }
  }

  if (MaybeMerge && Ops.size() * OpSize == ResTy.getSizeInBits()) {
    buildMerge(Res, Ops);
    return;
  }

  Register ResIn = getMRI()->createGenericVirtualRegister(ResTy);
  buildUndef(ResIn);

  for (unsigned i = 0; i < Ops.size(); ++i) {
    Register ResOut = i + 1 == Ops.size()
                          ? Res
                          : getMRI()->createGenericVirtualRegister(ResTy);
    buildInsert(ResOut, ResIn, Ops[i], Indices[i]);
    ResIn = ResOut;
  }
}

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res,
                                                 ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMerge(const DstOp &Res,
                             std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, Ops);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg =
      Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<Register, 8> TmpVec;
  for (unsigned I = 0; I != NumReg; ++I)
    TmpVec.push_back(getMRI()->createGenericVirtualRegister(Res));
  return buildUnmerge(TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}
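
// Example (illustrative): splitting and rejoining a value with the merge and
// unmerge builders above, using hypothetical `S32` (s32) and s64 registers:
//   auto Unmerge = MIRBuilder.buildUnmerge(S32, Reg64); // two s32 pieces
//   MIRBuilder.buildMerge(Dst64, {Unmerge.getReg(0), Unmerge.getReg(1)});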

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
                                                        const SrcOp &Src) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
         "Expected Src to match Dst elt ty");
  auto UndefVec = buildUndef(DstTy);
  auto Zero = buildConstant(LLT::scalar(64), 0);
  auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
  SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
  return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
                                                         const SrcOp &Src1,
                                                         const SrcOp &Src2,
                                                         ArrayRef<int> Mask) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  LLT Src1Ty = Src1.getLLTTy(*getMRI());
  LLT Src2Ty = Src2.getLLTTy(*getMRI());
  assert(Src1Ty.getNumElements() + Src2Ty.getNumElements() >= Mask.size());
  assert(DstTy.getElementType() == Src1Ty.getElementType() &&
         DstTy.getElementType() == Src2Ty.getElementType());
  (void)DstTy;
  (void)Src1Ty;
  (void)Src2Ty;
  ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
  return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
      .addShuffleMask(MaskAlloc);
}

MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                  const SrcOp &Src,
                                                  const SrcOp &Op,
                                                  unsigned Index) {
  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
             Res.getLLTTy(*getMRI()).getSizeInBits() &&
         "insertion past the end of a register");

  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
      Op.getLLTTy(*getMRI()).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID, ArrayRef<Register> ResultRegs,
                                 bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (unsigned ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}
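
// Example (illustrative): intrinsic source operands are appended by the
// caller after the ID; with hypothetical registers `DstReg` and `SrcReg`:
//   MIRBuilder.buildIntrinsic(Intrinsic::bswap, {DstReg},
//                             /*HasSideEffects=*/false)
//       .addUse(SrcReg);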

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
                                                 const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildFPTrunc(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildSelect(const DstOp &Res,
                                                  const SrcOp &Tst,
                                                  const SrcOp &Op0,
                                                  const SrcOp &Op1,
                                                  Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}

MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal,
    Register NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT SuccessResTy = getMRI()->getType(SuccessRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
      .addDef(OldValRes)
      .addDef(SuccessRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}
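
// Example (illustrative): a compare feeding a select, with a hypothetical s1
// type `S1` and s32 registers `LHS`, `RHS`, `Dst`:
//   auto Cmp = MIRBuilder.buildICmp(CmpInst::ICMP_SLT, S1, LHS, RHS);
//   MIRBuilder.buildSelect(Dst, Cmp, LHS, RHS); // i.e. smin(LHS, RHS)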

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr,
                                     Register CmpVal, Register NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
      .addDef(OldValRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
    unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr,
    const SrcOp &Val, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
                                                       Register Addr,
                                                       Register Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}
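
// Example (illustrative): an atomic increment through the helpers above,
// assuming `MMO` is a MachineMemOperand* created with MOLoad | MOStore and an
// atomic ordering, and `OneReg` holds the constant 1:
//   MIRBuilder.buildAtomicRMWAdd(OldValReg, AddrReg, OneReg, *MMO);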

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val,
                                     MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val,
                                     MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
      .addImm(Ordering)
      .addImm(Scope);
}

MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
#ifndef NDEBUG
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
#endif

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}

void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
           "invalid narrowing extend");
  else
    assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
           "invalid widening trunc");
#endif
}

void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getNumElements() == Op0Ty.getNumElements())) &&
           "type mismatch");
#endif
}
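
// Example (illustrative): G_FENCE takes the ordering and sync scope as plain
// immediates; a sequentially consistent, system-scope fence might be built
// as follows (the scope encoding of 0 is an assumption, not defined here):
//   MIRBuilder.buildFence(unsigned(AtomicOrdering::SequentiallyConsistent),
//                         0 /*assumed system scope*/);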

MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
                                                 ArrayRef<DstOp> DstOps,
                                                 ArrayRef<SrcOp> SrcOps,
                                                 Optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_ABS:
    // All these are unary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()));
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_USUBSAT:
  case TargetOpcode::G_SSUBSAT: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_USHLSAT:
  case TargetOpcode::G_SSHLSAT: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getNumElements() == Op0Ty.getNumElements();
    }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(llvm::all_of(DstOps,
                        [&, this](const DstOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 DstOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in output list");
    assert(DstOps.size() * DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(!SrcOps.empty() && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    if (SrcOps.size() == 1)
      return buildCast(DstOps[0], SrcOps[0]);
    if (DstOps[0].getLLTTy(*getMRI()).isVector()) {
      if (SrcOps[0].getLLTTy(*getMRI()).isVector())
        return buildInstr(TargetOpcode::G_CONCAT_VECTORS, DstOps, SrcOps);
      return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
    }
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
               SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    if (SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
        DstOps[0].getLLTTy(*getMRI()).getElementType().getSizeInBits())
      return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return (Op.getLLTTy(*getMRI()).isVector() &&
                                  Op.getLLTTy(*getMRI()) ==
                                      SrcOps[0].getLLTTy(*getMRI()));
                        }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}