//===- SPIRVInstructionSelector.cpp ------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the targeting of the InstructionSelector class for
// SPIRV.
// TODO: This should be generated by TableGen.
//
//===----------------------------------------------------------------------===//

#include "SPIRV.h"
#include "SPIRVGlobalRegistry.h"
#include "SPIRVInstrInfo.h"
#include "SPIRVRegisterBankInfo.h"
#include "SPIRVRegisterInfo.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "spirv-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class SPIRVInstructionSelector : public InstructionSelector {
  const SPIRVSubtarget &STI;
  const SPIRVInstrInfo &TII;
  const SPIRVRegisterInfo &TRI;
  const RegisterBankInfo &RBI;
  SPIRVGlobalRegistry &GR;
  MachineRegisterInfo *MRI;

public:
  SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                           const SPIRVSubtarget &ST,
                           const RegisterBankInfo &RBI);
  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage &CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override;
  // Common selection code. Instruction-specific selection occurs in spvSelect.
  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

private:
  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // All instruction-specific selection that didn't happen in "select()".
  // This is basically a large switch/case delegating to all the other
  // select* methods.
  bool spvSelect(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectGlobalValue(Register ResVReg, MachineInstr &I,
                         const MachineInstr *Init = nullptr) const;

  bool selectUnOpWithSrc(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I, Register SrcReg,
                         unsigned Opcode) const;
  bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  unsigned Opcode) const;

  bool selectLoad(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectStore(MachineInstr &I) const;

  bool selectMemOperation(Register ResVReg, MachineInstr &I) const;

  bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I, unsigned NewOpcode) const;

  bool selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectFence(MachineInstr &I) const;

  bool selectAddrSpaceCast(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectBitreverse(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectConstVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectCmp(Register ResVReg, const SPIRVType *ResType,
                 unsigned CmpOpc, MachineInstr &I) const;

  bool selectICmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectFCmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  void renderImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                   int OpIdx) const;
  void renderFImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                    int OpIdx) const;

  bool selectConst(Register ResVReg, const SPIRVType *ResType,
                   const APInt &Imm, MachineInstr &I) const;

  bool selectSelect(Register ResVReg, const SPIRVType *ResType,
                    MachineInstr &I, bool IsSigned) const;
  bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  bool IsSigned, unsigned Opcode) const;
  bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                 bool IsSigned) const;

  bool selectTrunc(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectIntToBool(Register IntReg, Register ResVReg,
                       const SPIRVType *IntTy, const SPIRVType *BoolTy,
                       MachineInstr &I) const;

  bool selectOpUndef(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;
  bool selectIntrinsic(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractVal(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertVal(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractElt(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertElt(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectGEP(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectFrameIndex(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectBranch(MachineInstr &I) const;
  bool selectBranchCond(MachineInstr &I) const;

  bool selectPhi(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  Register buildI32Constant(uint32_t Val, MachineInstr &I,
                            const SPIRVType *ResType = nullptr) const;

  Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildOnesVal(bool AllOnes, const SPIRVType *ResType,
                        MachineInstr &I) const;
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                                                   const SPIRVSubtarget &ST,
                                                   const RegisterBankInfo &RBI)
    : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                       CodeGenCoverage &CoverageInfo,
                                       ProfileSummaryInfo *PSI,
                                       BlockFrequencyInfo *BFI) {
  MRI = &MF.getRegInfo();
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}

// Defined in SPIRVLegalizerInfo.cpp.
extern bool isTypeFoldingSupported(unsigned Opcode);

bool SPIRVInstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  unsigned Opcode = I.getOpcode();
  // If it's not a GMIR instruction, we've selected it already.
  if (!isPreISelGenericOpcode(Opcode)) {
    if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed any more.
      auto *Def = MRI->getVRegDef(I.getOperand(1).getReg());
      if (isTypeFoldingSupported(Def->getOpcode())) {
        auto Res = selectImpl(I, *CoverageInfo);
        assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
        if (Res)
          return Res;
      }
      MRI->replaceRegWith(I.getOperand(1).getReg(), I.getOperand(0).getReg());
      I.removeFromParent();
    } else if (I.getNumDefs() == 1) {
      // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(I.getOperand(0).getReg(), LLT::scalar(32));
    }
    return true;
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
    return false;
  }

  // Common code for getting return reg+type, and removing selected instr
  // from parent occurs here. Instr-specific selection happens in spvSelect().
  bool HasDefs = I.getNumDefs() > 0;
  Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
  assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  if (spvSelect(ResVReg, ResType, I)) {
    if (HasDefs) // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(ResVReg, LLT::scalar(32));
    I.removeFromParent();
    return true;
  }
  return false;
}

bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  assert(!isTypeFoldingSupported(I.getOpcode()) ||
         I.getOpcode() == TargetOpcode::G_CONSTANT);
  const unsigned Opcode = I.getOpcode();
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectConstVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);

  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);
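
  // All the atomic read-modify-write cases below follow one pattern: each
  // G_ATOMICRMW_* opcode maps onto the matching OpAtomic* instruction, with
  // the memory-scope and memory-semantics operands synthesized as constants
  // by selectAtomicRMW.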
  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  default:
    return false;
  }
}

bool SPIRVInstructionSelector::selectUnOpWithSrc(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I,
                                                 Register SrcReg,
                                                 unsigned Opcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I,
                                          unsigned Opcode) const {
  return selectUnOpWithSrc(ResVReg, ResType, I, I.getOperand(1).getReg(),
                           Opcode);
}

static SPIRV::MemorySemantics getMemSemantics(AtomicOrdering Ord) {
  switch (Ord) {
  case AtomicOrdering::Acquire:
    return SPIRV::MemorySemantics::Acquire;
  case AtomicOrdering::Release:
    return SPIRV::MemorySemantics::Release;
  case AtomicOrdering::AcquireRelease:
    return SPIRV::MemorySemantics::AcquireRelease;
  case AtomicOrdering::SequentiallyConsistent:
    return SPIRV::MemorySemantics::SequentiallyConsistent;
  case AtomicOrdering::Unordered:
  case AtomicOrdering::Monotonic:
  case AtomicOrdering::NotAtomic:
    return SPIRV::MemorySemantics::None;
  }
  llvm_unreachable("Unknown atomic ordering");
}

static SPIRV::Scope getScope(SyncScope::ID Ord) {
  switch (Ord) {
  case SyncScope::SingleThread:
    return SPIRV::Scope::Invocation;
  case SyncScope::System:
    return SPIRV::Scope::Device;
  default:
    llvm_unreachable("Unsupported synchronization Scope ID.");
  }
}

static void addMemoryOperands(MachineMemOperand *MemOp,
                              MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    MIB.addImm(SpvMemOp);
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
      MIB.addImm(MemOp->getAlign().value());
  }
}
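
// This overload handles the raw MachineMemOperand::Flags immediate that the
// spv_load/spv_store intrinsics carry instead of a real memory operand; only
// the Volatile and Nontemporal bits can be recovered from it, as there is no
// alignment value to forward.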
static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (Flags & MachineMemOperand::Flags::MOVolatile)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (Flags & MachineMemOperand::Flags::MONonTemporal)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
    MIB.addImm(SpvMemOp);
}

bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned OpOffset =
      I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ? 1 : 0;
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(Ptr);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
  unsigned OpOffset =
      I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ? 1 : 0;
  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
                 .addUse(Ptr)
                 .addUse(StoreVal);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addDef(I.getOperand(0).getReg())
                 .addUse(I.getOperand(1).getReg())
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
    addMemoryOperands(*I.memoperands_begin(), MIB);
  bool Result = MIB.constrainAllUses(TII, TRI, RBI);
  if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg()) {
    BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
        .addUse(MIB->getOperand(0).getReg());
  }
  return Result;
}
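
// A sketch of the mapping performed below (assuming an i32 operand): an LLVM
//   %old = atomicrmw add i32* %p, i32 %v seq_cst
// is expected to come out as
//   %old = OpAtomicIAdd %i32 %p %scope %semantics %v
// where %scope and %semantics are OpConstant ids built by buildI32Constant.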
bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(1).getReg();
  // TODO: Changed as it's implemented in the translator. See test/atomicrmw.ll
  // auto ScSem =
  //     getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr));
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);

  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Ptr)
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
  AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem, I);
  SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
  uint32_t Scope = static_cast<uint32_t>(getScope(Ord));
  Register ScopeReg = buildI32Constant(Scope, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(2).getReg();
  Register Cmp = I.getOperand(3).getReg();
  Register Val = I.getOperand(4).getReg();

  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  SPIRV::StorageClass SC = GR.getPointerStorageClass(Ptr);
  uint32_t ScSem = static_cast<uint32_t>(getMemSemanticsForStorageClass(SC));
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
  Register MemSemEqReg = buildI32Constant(MemSemEq, I);
  AtomicOrdering FO = MemOp->getFailureOrdering();
  uint32_t MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
  Register MemSemNeqReg =
      MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
  const DebugLoc &DL = I.getDebugLoc();
  return BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(SpvValTy))
      .addUse(Ptr)
      .addUse(ScopeReg)
      .addUse(MemSemEqReg)
      .addUse(MemSemNeqReg)
      .addUse(Val)
      .addUse(Cmp)
      .constrainAllUses(TII, TRI, RBI);
}

static bool isGenericCastablePtr(SPIRV::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Workgroup:
  case SPIRV::StorageClass::CrossWorkgroup:
  case SPIRV::StorageClass::Function:
    return true;
  default:
    return false;
  }
}

// In SPIR-V address space casting can only happen to and from the Generic
// storage class. We can also only cast Workgroup, CrossWorkgroup, or Function
// pointers to and from Generic pointers. As such, we can convert e.g. from
// Workgroup to Function by going via a Generic pointer as an intermediary. All
// other combinations can only be done by a bitcast, and are probably not safe.
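// For example, casting Workgroup to Function (a sketch; %src is the source
// pointer) is expected to produce:
//   %tmp = OpPtrCastToGeneric %generic_ptr_ty %src
//   %dst = OpGenericCastToPtr %function_ptr_ty %tmp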
bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  Register SrcPtr = I.getOperand(1).getReg();
  SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
  SPIRV::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);
  SPIRV::StorageClass DstSC = GR.getPointerStorageClass(ResVReg);

  // Casting from an eligible pointer to Generic.
  if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  // Casting from Generic to an eligible pointer.
  if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
  // Casting between 2 eligible pointers using Generic as an intermediary.
  if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
    Register Tmp = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
        SrcPtrTy, I, TII, SPIRV::StorageClass::Generic);
    MachineBasicBlock &BB = *I.getParent();
    const DebugLoc &DL = I.getDebugLoc();
    bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
                       .addDef(Tmp)
                       .addUse(GR.getSPIRVTypeID(GenericPtrTy))
                       .addUse(SrcPtr)
                       .constrainAllUses(TII, TRI, RBI);
    return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
                          .addDef(ResVReg)
                          .addUse(GR.getSPIRVTypeID(ResType))
                          .addUse(Tmp)
                          .constrainAllUses(TII, TRI, RBI);
  }
  // TODO: Should this case just be disallowed completely?
  // We're casting 2 other arbitrary address spaces, so have to bitcast.
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}

static unsigned getFCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::FCMP_OEQ:
    return SPIRV::OpFOrdEqual;
  case CmpInst::FCMP_OGE:
    return SPIRV::OpFOrdGreaterThanEqual;
  case CmpInst::FCMP_OGT:
    return SPIRV::OpFOrdGreaterThan;
  case CmpInst::FCMP_OLE:
    return SPIRV::OpFOrdLessThanEqual;
  case CmpInst::FCMP_OLT:
    return SPIRV::OpFOrdLessThan;
  case CmpInst::FCMP_ONE:
    return SPIRV::OpFOrdNotEqual;
  case CmpInst::FCMP_ORD:
    return SPIRV::OpOrdered;
  case CmpInst::FCMP_UEQ:
    return SPIRV::OpFUnordEqual;
  case CmpInst::FCMP_UGE:
    return SPIRV::OpFUnordGreaterThanEqual;
  case CmpInst::FCMP_UGT:
    return SPIRV::OpFUnordGreaterThan;
  case CmpInst::FCMP_ULE:
    return SPIRV::OpFUnordLessThanEqual;
  case CmpInst::FCMP_ULT:
    return SPIRV::OpFUnordLessThan;
  case CmpInst::FCMP_UNE:
    return SPIRV::OpFUnordNotEqual;
  case CmpInst::FCMP_UNO:
    return SPIRV::OpUnordered;
  default:
    llvm_unreachable("Unknown predicate type for FCmp");
  }
}

static unsigned getICmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpIEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpINotEqual;
  case CmpInst::ICMP_SGE:
    return SPIRV::OpSGreaterThanEqual;
  case CmpInst::ICMP_SGT:
    return SPIRV::OpSGreaterThan;
  case CmpInst::ICMP_SLE:
    return SPIRV::OpSLessThanEqual;
  case CmpInst::ICMP_SLT:
    return SPIRV::OpSLessThan;
  case CmpInst::ICMP_UGE:
    return SPIRV::OpUGreaterThanEqual;
  case CmpInst::ICMP_UGT:
    return SPIRV::OpUGreaterThan;
  case CmpInst::ICMP_ULE:
    return SPIRV::OpULessThanEqual;
  case CmpInst::ICMP_ULT:
    return SPIRV::OpULessThan;
  default:
    llvm_unreachable("Unknown predicate type for ICmp");
  }
}

static unsigned getPtrCmpOpcode(unsigned Pred) {
  switch (static_cast<CmpInst::Predicate>(Pred)) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpPtrEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpPtrNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for pointer comparison");
  }
}

// Return the logical operation, or abort if none exists.
static unsigned getBoolCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpLogicalEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpLogicalNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for Bool comparison");
  }
}

bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectConstVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // TODO: only const case is supported for now.
  assert(std::all_of(
      I.operands_begin(), I.operands_end(), [this](const MachineOperand &MO) {
        if (MO.isDef())
          return true;
        if (!MO.isReg())
          return false;
        SPIRVType *ConstTy = this->MRI->getVRegDef(MO.getReg());
        assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
               ConstTy->getOperand(1).isReg());
        Register ConstReg = ConstTy->getOperand(1).getReg();
        const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
        assert(Const);
        return (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
                Const->getOpcode() == TargetOpcode::G_FCONSTANT);
      }));

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(SPIRV::OpConstantComposite))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
                                         const SPIRVType *ResType,
                                         unsigned CmpOpc,
                                         MachineInstr &I) const {
  Register Cmp0 = I.getOperand(2).getReg();
  Register Cmp1 = I.getOperand(3).getReg();
  assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
             GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
         "CMP operands should have the same type");
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Cmp0)
      .addUse(Cmp1)
      .constrainAllUses(TII, TRI, RBI);
}
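
// G_ICMP covers integer, pointer, and bool compares in GMIR, while SPIR-V
// splits them into separate opcodes, so dispatch on the type of the first
// comparison operand: e.g. "icmp eq" becomes OpIEqual for integers,
// OpPtrEqual for pointers, and OpLogicalEqual for bools.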
bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  auto Pred = I.getOperand(1).getPredicate();
  unsigned CmpOpc;

  Register CmpOperand = I.getOperand(2).getReg();
  if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
    CmpOpc = getPtrCmpOpcode(Pred);
  else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
    CmpOpc = getBoolCmpOpcode(Pred);
  else
    CmpOpc = getICmpOpcode(Pred);
  return selectCmp(ResVReg, ResType, CmpOpc, I);
}

void SPIRVInstructionSelector::renderFImm32(MachineInstrBuilder &MIB,
                                            const MachineInstr &I,
                                            int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
         "Expected G_FCONSTANT");
  const ConstantFP *FPImm = I.getOperand(1).getFPImm();
  addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
}

void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
                                           const MachineInstr &I,
                                           int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
}

Register
SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
                                           const SPIRVType *ResType) const {
  const SPIRVType *SpvI32Ty =
      ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
  Register NewReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
  MachineInstr *MI;
  MachineBasicBlock &BB = *I.getParent();
  if (Val == 0) {
    MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
             .addDef(NewReg)
             .addUse(GR.getSPIRVTypeID(SpvI32Ty));
  } else {
    MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
             .addDef(NewReg)
             .addUse(GR.getSPIRVTypeID(SpvI32Ty))
             .addImm(APInt(32, Val).getZExtValue());
  }
  constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
  return NewReg;
}

bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
  return selectCmp(ResVReg, ResType, CmpOp, I);
}

Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  return buildI32Constant(0, I, ResType);
}

Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
  APInt One = AllOnes ? APInt::getAllOnesValue(BitWidth)
                      : APInt::getOneBitSet(BitWidth, 0);
  Register OneReg = buildI32Constant(One.getZExtValue(), I, ResType);
  if (ResType->getOpcode() == SPIRV::OpTypeVector) {
    const unsigned NumEles = ResType->getOperand(2).getImm();
    Register OneVec = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    unsigned Opcode = SPIRV::OpConstantComposite;
    auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
                   .addDef(OneVec)
                   .addUse(GR.getSPIRVTypeID(ResType));
    for (unsigned i = 0; i < NumEles; ++i)
      MIB.addUse(OneReg);
    constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
    return OneVec;
  }
  return OneReg;
}
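
// Since bools have no arithmetic representation in SPIR-V, extending an i1 is
// done as a select between two constants. A sketch for a scalar i32 result:
//   %res = OpSelect %i32_ty %cond %one_or_all_ones %zero
// where sext picks all-ones and zext/anyext pick 1 as the "true" value.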
bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I,
                                            bool IsSigned) const {
  // To extend a bool, we need to use OpSelect between constants.
  Register ZeroReg = buildZerosVal(ResType, I);
  Register OneReg = buildOnesVal(IsSigned, ResType, I);
  bool IsScalarBool =
      GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
  unsigned Opcode =
      IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .addUse(OneReg)
      .addUse(ZeroReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I, bool IsSigned,
                                          unsigned Opcode) const {
  Register SrcReg = I.getOperand(1).getReg();
  // We can convert bool value directly to float type without OpConvert*ToF,
  // however the translator generates OpSelect+OpConvert*ToF, so we do the same.
  if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
    unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
    SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
    if (ResType->getOpcode() == SPIRV::OpTypeVector) {
      const unsigned NumElts = ResType->getOperand(2).getImm();
      TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
    }
    SrcReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    selectSelect(SrcReg, TmpType, I, false);
  }
  return selectUnOpWithSrc(ResVReg, ResType, I, SrcReg, Opcode);
}

bool SPIRVInstructionSelector::selectExt(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I, bool IsSigned) const {
  if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool))
    return selectSelect(ResVReg, ResType, I, IsSigned);
  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}

bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
                                               Register ResVReg,
                                               const SPIRVType *IntTy,
                                               const SPIRVType *BoolTy,
                                               MachineInstr &I) const {
  // To truncate to a bool, we use OpBitwiseAnd 1 and OpINotEqual to zero.
  Register BitIntReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
  unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
  Register Zero = buildZerosVal(IntTy, I);
  Register One = buildOnesVal(false, IntTy, I);
  MachineBasicBlock &BB = *I.getParent();
  BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(BitIntReg)
      .addUse(GR.getSPIRVTypeID(IntTy))
      .addUse(IntReg)
      .addUse(One)
      .constrainAllUses(TII, TRI, RBI);
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(BoolTy))
      .addUse(BitIntReg)
      .addUse(Zero)
      .constrainAllUses(TII, TRI, RBI);
}
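
// A sketch of the truncation-to-bool pattern built by selectIntToBool above,
// for a scalar i32 source:
//   %masked = OpBitwiseAndS %i32_ty %src %one
//   %res    = OpINotEqual %bool_ty %masked %zero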
bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool)) {
    Register IntReg = I.getOperand(1).getReg();
    const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
    return selectIntToBool(IntReg, ResVReg, ArgType, ResType, I);
  }
  bool IsSigned = GR.isScalarOrVectorSigned(ResType);
  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}

bool SPIRVInstructionSelector::selectConst(Register ResVReg,
                                           const SPIRVType *ResType,
                                           const APInt &Imm,
                                           MachineInstr &I) const {
  assert(ResType->getOpcode() != SPIRV::OpTypePointer || Imm.isNullValue());
  MachineBasicBlock &BB = *I.getParent();
  if (ResType->getOpcode() == SPIRV::OpTypePointer && Imm.isNullValue()) {
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
        .addDef(ResVReg)
        .addUse(GR.getSPIRVTypeID(ResType))
        .constrainAllUses(TII, TRI, RBI);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  // <= 32-bit integers should be caught by the sdag pattern.
  assert(Imm.getBitWidth() > 32);
  addNumImm(Imm, MIB);
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}

static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
  assert(MO.isReg());
  const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
  if (TypeInst->getOpcode() != SPIRV::ASSIGN_TYPE)
    return false;
  assert(TypeInst->getOperand(1).isReg());
  MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
  return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
}

static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
  const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
  MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
  assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT);
  return ImmInst->getOperand(1).getCImm()->getZExtValue();
}

bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      // Object to insert.
      .addUse(I.getOperand(3).getReg())
      // Composite to insert into.
      .addUse(I.getOperand(2).getReg())
      // TODO: support arbitrary number of indices.
      .addImm(foldImm(I.getOperand(4), MRI))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      // TODO: support arbitrary number of indices.
      .addImm(foldImm(I.getOperand(3), MRI))
      .constrainAllUses(TII, TRI, RBI);
}
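
// For insertelement/extractelement, a compile-time-constant index lets us
// reuse the composite forms above (OpCompositeInsert/OpCompositeExtract with
// an immediate literal index); otherwise we must emit the dynamic vector
// forms, which take the index as an ordinary id operand.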
bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (isImm(I.getOperand(4), MRI))
    return selectInsertVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .addUse(I.getOperand(4).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  if (isImm(I.getOperand(3), MRI))
    return selectExtractVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  // In general we should also support OpAccessChain instrs here (i.e. not
  // PtrAccessChain), but the SPIRV-LLVM Translator doesn't emit them at all,
  // so neither do we, to stay compatible with its tests and, more
  // importantly, its consumers.
  unsigned Opcode = I.getOperand(2).getImm() ? SPIRV::OpInBoundsPtrAccessChain
                                             : SPIRV::OpPtrAccessChain;
  auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 // Object to get a pointer to.
                 .addUse(I.getOperand(3).getReg());
  // Adding indices.
  for (unsigned i = 4; i < I.getNumExplicitOperands(); ++i)
    Res.addUse(I.getOperand(i).getReg());
  return Res.constrainAllUses(TII, TRI, RBI);
}
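
// A sketch of the mapping selectGEP performs above: an spv_gep intrinsic
// carrying (inbounds flag, base pointer, indices...) as operands (operand
// spelling hypothetical) is expected to become
//   %res = OpInBoundsPtrAccessChain %ptr_ty %base %idx0 %idx1 ...
// or the plain OpPtrAccessChain form when the inbounds flag is 0.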
bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  switch (I.getIntrinsicID()) {
  case Intrinsic::spv_load:
    return selectLoad(ResVReg, ResType, I);
  case Intrinsic::spv_store:
    return selectStore(I);
  case Intrinsic::spv_extractv:
    return selectExtractVal(ResVReg, ResType, I);
  case Intrinsic::spv_insertv:
    return selectInsertVal(ResVReg, ResType, I);
  case Intrinsic::spv_extractelt:
    return selectExtractElt(ResVReg, ResType, I);
  case Intrinsic::spv_insertelt:
    return selectInsertElt(ResVReg, ResType, I);
  case Intrinsic::spv_gep:
    return selectGEP(ResVReg, ResType, I);
  case Intrinsic::spv_unref_global:
  case Intrinsic::spv_init_global: {
    MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
    MachineInstr *Init = I.getNumExplicitOperands() > 2
                             ? MRI->getVRegDef(I.getOperand(2).getReg())
                             : nullptr;
    assert(MI);
    return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
  }
  case Intrinsic::spv_const_composite: {
    // If no values are attached, the composite is a null constant.
    bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
    unsigned Opcode =
        IsNull ? SPIRV::OpConstantNull : SPIRV::OpConstantComposite;
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType));
    // Skip the type MD node we already used when generating assign.type for
    // this composite.
    if (!IsNull) {
      for (unsigned i = I.getNumExplicitDefs() + 1;
           i < I.getNumExplicitOperands(); ++i) {
        MIB.addUse(I.getOperand(i).getReg());
      }
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_assign_name: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
    MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
    for (unsigned i = I.getNumExplicitDefs() + 2;
         i < I.getNumExplicitOperands(); ++i) {
      MIB.addImm(I.getOperand(i).getImm());
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_switch: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
    for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
      if (I.getOperand(i).isReg())
        MIB.addReg(I.getOperand(i).getReg());
      else if (I.getOperand(i).isCImm())
        addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
      else if (I.getOperand(i).isMBB())
        MIB.addMBB(I.getOperand(i).getMBB());
      else
        llvm_unreachable("Unexpected OpSwitch operand");
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  default:
    llvm_unreachable("Intrinsic selection not implemented");
  }
  return true;
}

bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. We can use
  // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
  // first, so can generate an OpBranchConditional here. If there is no
  // G_BRCOND, we just use OpBranch for a regular unconditional branch.
  const MachineInstr *PrevI = I.getPrevNode();
  MachineBasicBlock &MBB = *I.getParent();
  if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
    return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
        .addUse(PrevI->getOperand(0).getReg())
        .addMBB(PrevI->getOperand(1).getMBB())
        .addMBB(I.getOperand(0).getMBB())
        .constrainAllUses(TII, TRI, RBI);
  }
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
      .addMBB(I.getOperand(0).getMBB())
      .constrainAllUses(TII, TRI, RBI);
}
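
// A sketch of the conditional-branch folding shared with selectBranch above:
//   G_BRCOND %cond, %bb.true
//   G_BR %bb.false
// is expected to fold into a single
//   OpBranchConditional %cond %bb.true %bb.false
// with selectBranchCond below handling the implicit-fallthrough case where
// no G_BR follows the G_BRCOND.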
bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. For an
  // explicit conditional branch with no fallthrough, we use both a G_BR and a
  // G_BRCOND to create an OpBranchConditional. We should hit G_BR first, and
  // generate the OpBranchConditional in selectBranch above.
  //
  // If an OpBranchConditional has been generated, we simply return, as the
  // work is already done. If there is no OpBranchConditional, LLVM must be
  // relying on implicit fallthrough to the next basic block, so we need to
  // create an OpBranchConditional with an explicit "false" argument pointing
  // to the next basic block that LLVM would fall through to.
  const MachineInstr *NextI = I.getNextNode();
  // Check if this has already been successfully selected.
  if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
    return true;
  // Must be relying on implicit block fallthrough, so generate an
  // OpBranchConditional with the "next" basic block as the "false" target.
  MachineBasicBlock &MBB = *I.getParent();
  unsigned NextMBBNum = MBB.getNextNode()->getNumber();
  MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
      .addUse(I.getOperand(0).getReg())
      .addMBB(I.getOperand(1).getMBB())
      .addMBB(NextMBB)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  const unsigned NumOps = I.getNumOperands();
  for (unsigned i = 1; i < NumOps; i += 2) {
    MIB.addUse(I.getOperand(i + 0).getReg());
    MIB.addMBB(I.getOperand(i + 1).getMBB());
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectGlobalValue(
    Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
  // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
  MachineIRBuilder MIRBuilder(I);
  const GlobalValue *GV = I.getOperand(1).getGlobal();
  SPIRVType *ResType = GR.getOrCreateSPIRVType(
      GV->getType(), MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);

  std::string GlobalIdent = GV->getGlobalIdentifier();
  // TODO: support @llvm.global.annotations.
  auto GlobalVar = cast<GlobalVariable>(GV);

  bool HasInit = GlobalVar->hasInitializer() &&
                 !isa<UndefValue>(GlobalVar->getInitializer());
  // Skip empty declaration for GVs with initializers until we get the decl
  // with the passed initializer.
  if (HasInit && !Init)
    return true;

  unsigned AddrSpace = GV->getAddressSpace();
  SPIRV::StorageClass Storage = addressSpaceToStorageClass(AddrSpace);
  bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage &&
                  Storage != SPIRV::StorageClass::Function;
  SPIRV::LinkageType LnkType =
      (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
          ? SPIRV::LinkageType::Import
          : SPIRV::LinkageType::Export;

  Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
                                        Storage, Init, GlobalVar->isConstant(),
                                        HasLnkTy, LnkType, MIRBuilder, true);
  return Reg.isValid();
}

namespace llvm {
InstructionSelector *
createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                               const SPIRVSubtarget &Subtarget,
                               const RegisterBankInfo &RBI) {
  return new SPIRVInstructionSelector(TM, Subtarget, RBI);
}
} // namespace llvm