//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfo.h"

using namespace llvm;

void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}

//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  MachineInstrBuilder MIB = BuildMI(getMF(), getDL(), getTII().get(Opcode));
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return buildInstr(TargetOpcode::DBG_VALUE)
      .addFrameIndex(FI)
      .addImm(0)
      .addMetadata(Variable)
      .addMetadata(Expr);
}
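
// Illustrative usage, not part of the file itself (MIRBuilder, Reg, FI and
// the debug metadata variables are hypothetical):
//
//   MIRBuilder.buildDirectDbgValue(Reg, DIVar, DIExpr);   // value lives in Reg
//   MIRBuilder.buildIndirectDbgValue(Reg, DIVar, DIExpr); // Reg points to it
//   MIRBuilder.buildFIDbgValue(FI, DIVar, DIExpr);        // value in stack slot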

MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);
  if (auto *CI = dyn_cast<ConstantInt>(&C)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(&C)) {
    MIB.addFPImm(CFP);
  } else {
    // Insert $noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(Register());
  }

  MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
  return insertInstr(MIB);
}

MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Alignment.value());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}

void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

MachineInstrBuilder MachineIRBuilder::buildPtrAdd(const DstOp &Res,
                                                  const SrcOp &Op0,
                                                  const SrcOp &Op1) {
  assert(Res.getLLTTy(*getMRI()).getScalarType().isPointer() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1});
}

Optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return None;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildPtrAdd(Res, Op0, Cst.getReg(0));
}

MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
  LLT PtrTy = Res.getLLTTy(*getMRI());
  LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
  buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
  return buildPtrMask(Res, Op0, MaskReg);
}

MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAssertZExt(const DstOp &Res,
                                                      const SrcOp &Op,
                                                      unsigned Size) {
  return buildInstr(TargetOpcode::G_ASSERT_ZEXT, Res, Op).addImm(Size);
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}
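
// Illustrative usage (Ptr and Off are hypothetical): materializePtrAdd folds
// a zero offset away instead of emitting a no-op G_PTR_ADD:
//
//   Register Sum;
//   if (auto MIB = MIRBuilder.materializePtrAdd(Sum, Ptr, LLT::scalar(64), Off))
//     ; // Sum is defined by the returned G_PTR_ADD.
//   else
//     ; // Off was 0; Sum simply aliases Ptr and nothing was emitted.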

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics())
         == EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP =
      ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
                                                  MachineBasicBlock &Dest) {
  assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_BRCOND);
  Tst.addSrcToMIB(MIB);
  MIB.addMBB(&Dest);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
                            MachinePointerInfo PtrInfo, Align Alignment,
                            MachineMemOperand::Flags MMOFlags,
                            const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);

  uint64_t Size = MemoryLocation::getSizeOrUnknown(
      TypeSize::Fixed(Dst.getLLTTy(*getMRI()).getSizeInBytes()));
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, AAInfo);
  return buildLoad(Dst, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}
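
// Illustrative usage (hypothetical operands; assumes the declaration's
// defaults for the trailing flags/AA arguments): the MachinePointerInfo
// overload synthesizes the MachineMemOperand before delegating to
// buildLoad(Dst, Addr, MMO):
//
//   auto Val = MIRBuilder.buildLoad(LLT::scalar(32), Ptr,
//                                   MachinePointerInfo(), Align(4));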

MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
    const DstOp &Dst, const SrcOp &BasePtr, MachineMemOperand &BaseMMO,
    int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(*getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy.getSizeInBytes());

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Dst, BasePtr, *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(*getMRI());
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(OffsetTy, Offset);
  auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
  return buildLoad(Dst, Ptr, *OffsetMMO);
}

MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
                             MachinePointerInfo PtrInfo, Align Alignment,
                             MachineMemOperand::Flags MMOFlags,
                             const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  uint64_t Size = MemoryLocation::getSizeOrUnknown(
      TypeSize::Fixed(Val.getLLTTy(*getMRI()).getSizeInBytes()));
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, AAInfo);
  return buildStore(Val, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
}

unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}

MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}
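
// Illustrative behavior (hypothetical s16/s32/s64 operands): the *OrTrunc
// helpers pick the opcode from the relative type sizes:
//
//   MIRBuilder.buildSExtOrTrunc(S64Dst, S32Src); // emits G_SEXT
//   MIRBuilder.buildSExtOrTrunc(S16Dst, S32Src); // emits G_TRUNC
//   MIRBuilder.buildSExtOrTrunc(S32Dst, S32Src); // degenerates to COPY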

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointer() && DstTy.isScalar())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointer() && SrcTy.isScalar())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointer() && !DstTy.isPointer() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}

MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "extraction past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}

void MachineIRBuilder::buildSequence(Register Res, ArrayRef<Register> Ops,
                                     ArrayRef<uint64_t> Indices) {
#ifndef NDEBUG
  assert(Ops.size() == Indices.size() && "incompatible args");
  assert(!Ops.empty() && "invalid trivial sequence");
  assert(llvm::is_sorted(Indices) &&
         "sequence offsets must be in ascending order");

  assert(getMRI()->getType(Res).isValid() && "invalid operand type");
  for (auto Op : Ops)
    assert(getMRI()->getType(Op).isValid() && "invalid operand type");
#endif

  LLT ResTy = getMRI()->getType(Res);
  LLT OpTy = getMRI()->getType(Ops[0]);
  unsigned OpSize = OpTy.getSizeInBits();
  bool MaybeMerge = true;
  for (unsigned i = 0; i < Ops.size(); ++i) {
    if (getMRI()->getType(Ops[i]) != OpTy || Indices[i] != i * OpSize) {
      MaybeMerge = false;
      break;
    }
  }

  if (MaybeMerge && Ops.size() * OpSize == ResTy.getSizeInBits()) {
    buildMerge(Res, Ops);
    return;
  }

  Register ResIn = getMRI()->createGenericVirtualRegister(ResTy);
  buildUndef(ResIn);

  for (unsigned i = 0; i < Ops.size(); ++i) {
    Register ResOut = i + 1 == Ops.size()
                          ? Res
                          : getMRI()->createGenericVirtualRegister(ResTy);
    buildInsert(ResOut, ResIn, Ops[i], Indices[i]);
    ResIn = ResOut;
  }
}

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res,
                                                 ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMerge(const DstOp &Res,
                             std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, Ops);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<Register, 8> TmpVec;
  for (unsigned I = 0; I != NumReg; ++I)
    TmpVec.push_back(getMRI()->createGenericVirtualRegister(Res));
  return buildUnmerge(TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}
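
// Illustrative round trip (Reg64 is hypothetical): split an s64 into two s32
// halves and reassemble it:
//
//   auto Halves = MIRBuilder.buildUnmerge(LLT::scalar(32), Reg64);
//   Register Parts[] = {Halves.getReg(0), Halves.getReg(1)};
//   auto Whole = MIRBuilder.buildMerge(LLT::scalar(64), Parts);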

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
                                                        const SrcOp &Src) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
         "Expected Src to match Dst elt ty");
  auto UndefVec = buildUndef(DstTy);
  auto Zero = buildConstant(LLT::scalar(64), 0);
  auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
  SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
  return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
                                                         const SrcOp &Src1,
                                                         const SrcOp &Src2,
                                                         ArrayRef<int> Mask) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  LLT Src1Ty = Src1.getLLTTy(*getMRI());
  LLT Src2Ty = Src2.getLLTTy(*getMRI());
  assert(Src1Ty.getNumElements() + Src2Ty.getNumElements() >= Mask.size());
  assert(DstTy.getElementType() == Src1Ty.getElementType() &&
         DstTy.getElementType() == Src2Ty.getElementType());
  (void)Src1Ty;
  (void)Src2Ty;
  ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
  return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {DstTy}, {Src1, Src2})
      .addShuffleMask(MaskAlloc);
}

MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                  const SrcOp &Src,
                                                  const SrcOp &Op,
                                                  unsigned Index) {
  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
             Res.getLLTTy(*getMRI()).getSizeInBits() &&
         "insertion past the end of a register");

  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
      Op.getLLTTy(*getMRI()).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<Register> ResultRegs,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (unsigned ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}
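
// Illustrative usage (hypothetical registers): results are added as defs,
// then the intrinsic ID, and the caller appends the intrinsic's arguments
// as uses afterwards:
//
//   MIRBuilder.buildIntrinsic(Intrinsic::fabs, {DstReg},
//                             /*HasSideEffects=*/false)
//       .addUse(SrcReg);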

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
                                                 const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildFPTrunc(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildSelect(const DstOp &Res,
                                                  const SrcOp &Tst,
                                                  const SrcOp &Op0,
                                                  const SrcOp &Op1,
                                                  Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}

MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal,
    Register NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT SuccessResTy = getMRI()->getType(SuccessRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
      .addDef(OldValRes)
      .addDef(SuccessRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}
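
// Illustrative usage (hypothetical operands; the MMO must be atomic):
//
//   // OldVal receives the loaded value, Success the boolean comparison
//   // result of the G_ATOMIC_CMPXCHG_WITH_SUCCESS.
//   MIRBuilder.buildAtomicCmpXchgWithSuccess(OldVal, Success, Addr, CmpVal,
//                                            NewVal, *MMO);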

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr,
                                     Register CmpVal, Register NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
      .addDef(OldValRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
    unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr,
    const SrcOp &Val, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
                                                       Register Addr,
                                                       Register Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}
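
// Illustrative usage (hypothetical operands): each helper above simply fixes
// the opcode for buildAtomicRMW, e.g.
//
//   MIRBuilder.buildAtomicRMWAdd(OldVal, Addr, Val, *MMO); // G_ATOMICRMW_ADD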

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
      .addImm(Ordering)
      .addImm(Scope);
}

MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
#ifndef NDEBUG
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
#endif

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}

void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
           "invalid narrowing extend");
  else
    assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
           "invalid widening trunc");
#endif
}

void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getNumElements() == Op0Ty.getNumElements())) &&
           "type mismatch");
#endif
}
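
// Illustrative usage of the generic builder below (hypothetical operands):
// most of the named helpers in this file funnel into it, and callers can use
// it directly for opcodes without a dedicated helper:
//
//   auto Add = MIRBuilder.buildInstr(TargetOpcode::G_ADD, {LLT::scalar(32)},
//                                    {LHS, RHS});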

MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
                                                 ArrayRef<DstOp> DstOps,
                                                 ArrayRef<SrcOp> SrcOps,
                                                 Optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_ABS:
    // All these are unary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()));
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_USUBSAT:
  case TargetOpcode::G_SSUBSAT: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_USHLSAT:
  case TargetOpcode::G_SSHLSAT: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getNumElements() == Op0Ty.getNumElements();
    }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(llvm::all_of(DstOps,
                        [&, this](const DstOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 DstOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in output list");
    assert(DstOps.size() * DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(!SrcOps.empty() && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    if (SrcOps.size() == 1)
      return buildCast(DstOps[0], SrcOps[0]);
    if (DstOps[0].getLLTTy(*getMRI()).isVector()) {
      if (SrcOps[0].getLLTTy(*getMRI()).isVector())
        return buildInstr(TargetOpcode::G_CONCAT_VECTORS, DstOps, SrcOps);
      return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
    }
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
           DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
           SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
           SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    if (SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
        DstOps[0].getLLTTy(*getMRI()).getElementType().getSizeInBits())
      return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return (Op.getLLTTy(*getMRI()).isVector() &&
                                  Op.getLLTTy(*getMRI()) ==
                                      SrcOps[0].getLLTTy(*getMRI()));
                        }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}