1 //===- SPIRVInstructionSelector.cpp ------------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the targeting of the InstructionSelector class for
10 // SPIRV.
11 // TODO: This should be generated by TableGen.
12 //
13 //===----------------------------------------------------------------------===//
14
15 #include "SPIRV.h"
16 #include "SPIRVGlobalRegistry.h"
17 #include "SPIRVInstrInfo.h"
18 #include "SPIRVRegisterBankInfo.h"
19 #include "SPIRVRegisterInfo.h"
20 #include "SPIRVTargetMachine.h"
21 #include "SPIRVUtils.h"
22 #include "llvm/ADT/APFloat.h"
23 #include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
24 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/IR/IntrinsicsSPIRV.h"
28 #include "llvm/Support/Debug.h"
29
30 #define DEBUG_TYPE "spirv-isel"
31
32 using namespace llvm;
33
34 namespace {
35
36 #define GET_GLOBALISEL_PREDICATE_BITSET
37 #include "SPIRVGenGlobalISel.inc"
38 #undef GET_GLOBALISEL_PREDICATE_BITSET
39
// Selects SPIR-V instructions for generic (GMIR) opcodes. Most selection is
// hand-written in spvSelect() and the select* helpers below; only a small set
// of patterns comes from the tblgen-erated selectImpl().
class SPIRVInstructionSelector : public InstructionSelector {
  const SPIRVSubtarget &STI;
  const SPIRVInstrInfo &TII;
  const SPIRVRegisterInfo &TRI;
  const RegisterBankInfo &RBI;
  // Registry mapping vregs to their SPIR-V types; shared across passes.
  SPIRVGlobalRegistry &GR;
  // Cached per-function register info; set in setupMF().
  MachineRegisterInfo *MRI;

public:
  SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                           const SPIRVSubtarget &ST,
                           const RegisterBankInfo &RBI);
  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage &CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override;
  // Common selection code. Instruction-specific selection occurs in spvSelect.
  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

private:
  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // All instruction-specific selection that didn't happen in "select()".
  // Is basically a large Switch/Case delegating to all other select method.
  bool spvSelect(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectGlobalValue(Register ResVReg, MachineInstr &I,
                         const MachineInstr *Init = nullptr) const;

  // Emit a unary op with an explicitly supplied source register.
  bool selectUnOpWithSrc(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I, Register SrcReg,
                         unsigned Opcode) const;
  // Emit a unary op whose source is operand #1 of I.
  bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  unsigned Opcode) const;

  bool selectLoad(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectStore(MachineInstr &I) const;

  bool selectMemOperation(Register ResVReg, MachineInstr &I) const;

  bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I, unsigned NewOpcode) const;

  bool selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectFence(MachineInstr &I) const;

  bool selectAddrSpaceCast(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectBitreverse(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectConstVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  // Shared comparison emission; comparisonOpcode is a SPIRV::Op* compare.
  bool selectCmp(Register ResVReg, const SPIRVType *ResType,
                 unsigned comparisonOpcode, MachineInstr &I) const;

  bool selectICmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectFCmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  // Custom renderers referenced from tblgen patterns.
  void renderImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                   int OpIdx) const;
  void renderFImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                    int OpIdx) const;

  bool selectConst(Register ResVReg, const SPIRVType *ResType, const APInt &Imm,
                   MachineInstr &I) const;

  bool selectSelect(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                    bool IsSigned) const;
  bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  bool IsSigned, unsigned Opcode) const;
  bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                 bool IsSigned) const;

  bool selectTrunc(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectIntToBool(Register IntReg, Register ResVReg,
                       const SPIRVType *intTy, const SPIRVType *boolTy,
                       MachineInstr &I) const;

  bool selectOpUndef(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;
  bool selectIntrinsic(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractVal(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertVal(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractElt(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertElt(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectGEP(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectFrameIndex(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectBranch(MachineInstr &I) const;
  bool selectBranchCond(MachineInstr &I) const;

  bool selectPhi(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  // Materialize an i32 OpConstant and return the vreg holding it.
  Register buildI32Constant(uint32_t Val, MachineInstr &I,
                            const SPIRVType *ResType = nullptr) const;

  Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildOnesVal(bool AllOnes, const SPIRVType *ResType,
                        MachineInstr &I) const;
};
170
171 } // end anonymous namespace
172
173 #define GET_GLOBALISEL_IMPL
174 #include "SPIRVGenGlobalISel.inc"
175 #undef GET_GLOBALISEL_IMPL
176
// The tblgen-erated predicate/temporary initializers must appear in the
// constructor's init list, hence the #include blocks below.
SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                                                   const SPIRVSubtarget &ST,
                                                   const RegisterBankInfo &RBI)
    : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}
190
// Per-function setup: cache this function's MachineRegisterInfo and point the
// global registry at the new function before the base class runs its setup.
void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                       CodeGenCoverage &CoverageInfo,
                                       ProfileSummaryInfo *PSI,
                                       BlockFrequencyInfo *BFI) {
  MRI = &MF.getRegInfo();
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}
199
200 static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI);
201
202 // Defined in SPIRVLegalizerInfo.cpp.
203 extern bool isTypeFoldingSupported(unsigned Opcode);
204
select(MachineInstr & I)205 bool SPIRVInstructionSelector::select(MachineInstr &I) {
206 assert(I.getParent() && "Instruction should be in a basic block!");
207 assert(I.getParent()->getParent() && "Instruction should be in a function!");
208
209 Register Opcode = I.getOpcode();
210 // If it's not a GMIR instruction, we've selected it already.
211 if (!isPreISelGenericOpcode(Opcode)) {
212 if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed any more.
213 auto *Def = MRI->getVRegDef(I.getOperand(1).getReg());
214 if (isTypeFoldingSupported(Def->getOpcode())) {
215 auto Res = selectImpl(I, *CoverageInfo);
216 assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
217 if (Res)
218 return Res;
219 }
220 MRI->replaceRegWith(I.getOperand(1).getReg(), I.getOperand(0).getReg());
221 I.removeFromParent();
222 } else if (I.getNumDefs() == 1) {
223 // Make all vregs 32 bits (for SPIR-V IDs).
224 MRI->setType(I.getOperand(0).getReg(), LLT::scalar(32));
225 }
226 return true;
227 }
228
229 if (I.getNumOperands() != I.getNumExplicitOperands()) {
230 LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
231 return false;
232 }
233
234 // Common code for getting return reg+type, and removing selected instr
235 // from parent occurs here. Instr-specific selection happens in spvSelect().
236 bool HasDefs = I.getNumDefs() > 0;
237 Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
238 SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
239 assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
240 if (spvSelect(ResVReg, ResType, I)) {
241 if (HasDefs) // Make all vregs 32 bits (for SPIR-V IDs).
242 MRI->setType(ResVReg, LLT::scalar(32));
243 I.removeFromParent();
244 return true;
245 }
246 return false;
247 }
248
// Central dispatch: maps each generic opcode to its dedicated select* helper
// or emits the SPIR-V instruction inline for trivial cases.
bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  // Type-foldable opcodes (except G_CONSTANT) are handled via ASSIGN_TYPE in
  // select() and must not reach this switch.
  assert(!isTypeFoldingSupported(I.getOpcode()) ||
         I.getOpcode() == TargetOpcode::G_CONSTANT);
  const unsigned Opcode = I.getOpcode();
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectConstVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    // OpVectorShuffle takes two vector operands followed by literal component
    // indices, so the shuffle mask is appended as immediates.
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);

  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);
  case TargetOpcode::G_PTR_ADD: {
    // Currently, we get G_PTR_ADD only as a result of translating
    // global variables, initialized with constant expressions like GV + Const
    // (see test opencl/basic/progvar_prog_scope_init.ll).
    // TODO: extend the handler once we have other cases.
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    Register GV = I.getOperand(1).getReg();
    MachineRegisterInfo::def_instr_iterator II = MRI->def_instr_begin(GV);
    assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
            (*II).getOpcode() == TargetOpcode::COPY ||
            (*II).getOpcode() == SPIRV::OpVariable) &&
           isImm(I.getOperand(2), MRI));
    // Lower to OpSpecConstantOp(InBoundsPtrAccessChain GV, 0, Const) so the
    // result stays a constant expression.
    Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addImm(static_cast<uint32_t>(
                       SPIRV::Opcode::InBoundsPtrAccessChain))
                   .addUse(GV)
                   .addUse(Idx)
                   .addUse(I.getOperand(2).getReg());
    return MIB.constrainAllUses(TII, TRI, RBI);
  }

  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  default:
    return false;
  }
}
395
selectUnOpWithSrc(Register ResVReg,const SPIRVType * ResType,MachineInstr & I,Register SrcReg,unsigned Opcode) const396 bool SPIRVInstructionSelector::selectUnOpWithSrc(Register ResVReg,
397 const SPIRVType *ResType,
398 MachineInstr &I,
399 Register SrcReg,
400 unsigned Opcode) const {
401 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
402 .addDef(ResVReg)
403 .addUse(GR.getSPIRVTypeID(ResType))
404 .addUse(SrcReg)
405 .constrainAllUses(TII, TRI, RBI);
406 }
407
selectUnOp(Register ResVReg,const SPIRVType * ResType,MachineInstr & I,unsigned Opcode) const408 bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
409 const SPIRVType *ResType,
410 MachineInstr &I,
411 unsigned Opcode) const {
412 return selectUnOpWithSrc(ResVReg, ResType, I, I.getOperand(1).getReg(),
413 Opcode);
414 }
415
getScope(SyncScope::ID Ord)416 static SPIRV::Scope getScope(SyncScope::ID Ord) {
417 switch (Ord) {
418 case SyncScope::SingleThread:
419 return SPIRV::Scope::Invocation;
420 case SyncScope::System:
421 return SPIRV::Scope::Device;
422 default:
423 llvm_unreachable("Unsupported synchronization Scope ID.");
424 }
425 }
426
addMemoryOperands(MachineMemOperand * MemOp,MachineInstrBuilder & MIB)427 static void addMemoryOperands(MachineMemOperand *MemOp,
428 MachineInstrBuilder &MIB) {
429 uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
430 if (MemOp->isVolatile())
431 SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
432 if (MemOp->isNonTemporal())
433 SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
434 if (MemOp->getAlign().value())
435 SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);
436
437 if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
438 MIB.addImm(SpvMemOp);
439 if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
440 MIB.addImm(MemOp->getAlign().value());
441 }
442 }
443
addMemoryOperands(uint64_t Flags,MachineInstrBuilder & MIB)444 static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
445 uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
446 if (Flags & MachineMemOperand::Flags::MOVolatile)
447 SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
448 if (Flags & MachineMemOperand::Flags::MONonTemporal)
449 SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
450
451 if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
452 MIB.addImm(SpvMemOp);
453 }
454
// Lower G_LOAD (or a load-like intrinsic) to OpLoad. When I is an intrinsic,
// operand #0 is the intrinsic ID, so all operand indices shift by one.
bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned OpOffset =
      I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ? 1 : 0;
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(Ptr);
  if (!I.getNumMemOperands()) {
    // Intrinsic form: memory-operand flags are carried as an immediate.
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}
473
// Lower G_STORE (or a store-like intrinsic) to OpStore. When I is an
// intrinsic, operand #0 is the intrinsic ID, so all operand indices shift by
// one. Note OpStore has no result.
bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
  unsigned OpOffset =
      I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ? 1 : 0;
  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
                 .addUse(Ptr)
                 .addUse(StoreVal);
  if (!I.getNumMemOperands()) {
    // Intrinsic form: memory-operand flags are carried as an immediate.
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}
491
// Lower G_MEMCPY / G_MEMMOVE to OpCopyMemorySized(dst, src, size).
// If callers expect a result register (memcpy returns its dest in IR), copy
// the destination pointer into ResVReg afterwards.
bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addUse(I.getOperand(0).getReg())
                 .addUse(I.getOperand(1).getReg())
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
    addMemoryOperands(*I.memoperands_begin(), MIB);
  bool Result = MIB.constrainAllUses(TII, TRI, RBI);
  // NOTE(review): the trailing COPY's success is not folded into Result —
  // presumably constraining a plain COPY cannot fail; confirm.
  if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg())
    BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
        .addUse(MIB->getOperand(0).getReg());
  return Result;
}
507
// Lower a G_ATOMICRMW_* instruction to the corresponding SPIR-V atomic op
// (NewOpcode). Scope and memory-semantics operands are materialized as i32
// constants derived from the instruction's memory operand.
bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(1).getReg();
  // TODO: Changed as it's implemented in the translator. See test/atomicrmw.ll
  // auto ScSem =
  // getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr));
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);

  // Operand order: ptr, scope, semantics, value.
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Ptr)
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
534
selectFence(MachineInstr & I) const535 bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
536 AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
537 uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
538 Register MemSemReg = buildI32Constant(MemSem, I);
539 SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
540 uint32_t Scope = static_cast<uint32_t>(getScope(Ord));
541 Register ScopeReg = buildI32Constant(Scope, I);
542 MachineBasicBlock &BB = *I.getParent();
543 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
544 .addUse(ScopeReg)
545 .addUse(MemSemReg)
546 .constrainAllUses(TII, TRI, RBI);
547 }
548
// Lower G_ATOMIC_CMPXCHG (or its intrinsic form, which carries pre-built
// scope/semantics registers) into:
//   %res  = OpAtomicCompareExchange ptr, scope, semEq, semNeq, val, cmp
//   %succ = OpIEqual %res, cmp
//   ResVReg = { %res, %succ }  (built via two OpCompositeInserts)
// matching LLVM's {value, success} cmpxchg result struct.
bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  Register ScopeReg;
  Register MemSemEqReg;
  Register MemSemNeqReg;
  Register Ptr = I.getOperand(2).getReg();
  if (I.getOpcode() != TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS) {
    assert(I.hasOneMemOperand());
    const MachineMemOperand *MemOp = *I.memoperands_begin();
    unsigned Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
    ScopeReg = buildI32Constant(Scope, I);

    unsigned ScSem = static_cast<uint32_t>(
        getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
    AtomicOrdering AO = MemOp->getSuccessOrdering();
    unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
    MemSemEqReg = buildI32Constant(MemSemEq, I);
    AtomicOrdering FO = MemOp->getFailureOrdering();
    unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
    // Reuse the equal-semantics constant when both orderings coincide.
    MemSemNeqReg =
        MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
  } else {
    // Intrinsic form: scope and semantics are explicit operands.
    ScopeReg = I.getOperand(5).getReg();
    MemSemEqReg = I.getOperand(6).getReg();
    MemSemNeqReg = I.getOperand(7).getReg();
  }

  Register Cmp = I.getOperand(3).getReg();
  Register Val = I.getOperand(4).getReg();
  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  Register ACmpRes = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  const DebugLoc &DL = I.getDebugLoc();
  bool Result =
      BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
          .addDef(ACmpRes)
          .addUse(GR.getSPIRVTypeID(SpvValTy))
          .addUse(Ptr)
          .addUse(ScopeReg)
          .addUse(MemSemEqReg)
          .addUse(MemSemNeqReg)
          .addUse(Val)
          .addUse(Cmp)
          .constrainAllUses(TII, TRI, RBI);
  // Recompute the "success" flag: SPIR-V returns only the original value.
  Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
                .addDef(CmpSuccReg)
                .addUse(GR.getSPIRVTypeID(BoolTy))
                .addUse(ACmpRes)
                .addUse(Cmp)
                .constrainAllUses(TII, TRI, RBI);
  // Pack {value, success} into the result struct, field by field.
  Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(TmpReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(ACmpRes)
                .addUse(GR.getOrCreateUndef(I, ResType, TII))
                .addImm(0)
                .constrainAllUses(TII, TRI, RBI);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(CmpSuccReg)
                .addUse(TmpReg)
                .addImm(1)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}
618
isGenericCastablePtr(SPIRV::StorageClass SC)619 static bool isGenericCastablePtr(SPIRV::StorageClass SC) {
620 switch (SC) {
621 case SPIRV::StorageClass::Workgroup:
622 case SPIRV::StorageClass::CrossWorkgroup:
623 case SPIRV::StorageClass::Function:
624 return true;
625 default:
626 return false;
627 }
628 }
629
// In SPIR-V address space casting can only happen to and from the Generic
// storage class. We can also only cast Workgroup, CrossWorkgroup, or Function
// pointers to and from Generic pointers. As such, we can convert e.g. from
// Workgroup to Function by going via a Generic pointer as an intermediary. All
// other combinations can only be done by a bitcast, and are probably not safe.
bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  // If the AddrSpaceCast user is single and in OpConstantComposite or
  // OpVariable, we should select OpSpecConstantOp.
  auto UIs = MRI->use_instructions(ResVReg);
  // "++UIs.begin() == UIs.end()" checks for exactly one use.
  if (!UIs.empty() && ++UIs.begin() == UIs.end() &&
      (UIs.begin()->getOpcode() == SPIRV::OpConstantComposite ||
       UIs.begin()->getOpcode() == SPIRV::OpVariable ||
       isSpvIntrinsic(*UIs.begin(), Intrinsic::spv_init_global))) {
    Register NewReg = I.getOperand(1).getReg();
    MachineBasicBlock &BB = *I.getParent();
    // Result becomes an i8* in the Generic storage class.
    SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII,
                                             SPIRV::StorageClass::Generic);
    bool Result =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric))
            .addUse(NewReg)
            .constrainAllUses(TII, TRI, RBI);
    return Result;
  }
  Register SrcPtr = I.getOperand(1).getReg();
  SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
  SPIRV::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);
  SPIRV::StorageClass DstSC = GR.getPointerStorageClass(ResVReg);

  // Casting from an eligible pointer to Generic.
  if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  // Casting from Generic to an eligible pointer.
  if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
  // Casting between 2 eligible pointers using Generic as an intermediary.
  if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
    Register Tmp = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
        SrcPtrTy, I, TII, SPIRV::StorageClass::Generic);
    MachineBasicBlock &BB = *I.getParent();
    const DebugLoc &DL = I.getDebugLoc();
    bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
                       .addDef(Tmp)
                       .addUse(GR.getSPIRVTypeID(GenericPtrTy))
                       .addUse(SrcPtr)
                       .constrainAllUses(TII, TRI, RBI);
    return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
                          .addDef(ResVReg)
                          .addUse(GR.getSPIRVTypeID(ResType))
                          .addUse(Tmp)
                          .constrainAllUses(TII, TRI, RBI);
  }
  // TODO Should this case just be disallowed completely?
  // We're casting 2 other arbitrary address spaces, so have to bitcast.
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}
692
// Map an LLVM FCmp predicate to the matching SPIR-V float-compare opcode.
// O* predicates map to OpFOrd*, U* predicates to OpFUnord*; ORD/UNO map to
// OpOrdered/OpUnordered.
static unsigned getFCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::FCMP_OEQ:
    return SPIRV::OpFOrdEqual;
  case CmpInst::FCMP_OGE:
    return SPIRV::OpFOrdGreaterThanEqual;
  case CmpInst::FCMP_OGT:
    return SPIRV::OpFOrdGreaterThan;
  case CmpInst::FCMP_OLE:
    return SPIRV::OpFOrdLessThanEqual;
  case CmpInst::FCMP_OLT:
    return SPIRV::OpFOrdLessThan;
  case CmpInst::FCMP_ONE:
    return SPIRV::OpFOrdNotEqual;
  case CmpInst::FCMP_ORD:
    return SPIRV::OpOrdered;
  case CmpInst::FCMP_UEQ:
    return SPIRV::OpFUnordEqual;
  case CmpInst::FCMP_UGE:
    return SPIRV::OpFUnordGreaterThanEqual;
  case CmpInst::FCMP_UGT:
    return SPIRV::OpFUnordGreaterThan;
  case CmpInst::FCMP_ULE:
    return SPIRV::OpFUnordLessThanEqual;
  case CmpInst::FCMP_ULT:
    return SPIRV::OpFUnordLessThan;
  case CmpInst::FCMP_UNE:
    return SPIRV::OpFUnordNotEqual;
  case CmpInst::FCMP_UNO:
    return SPIRV::OpUnordered;
  default:
    llvm_unreachable("Unknown predicate type for FCmp");
  }
}
728
// Map an LLVM ICmp predicate to the matching SPIR-V integer-compare opcode
// (signed predicates map to OpS*, unsigned to OpU*).
static unsigned getICmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpIEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpINotEqual;
  case CmpInst::ICMP_SGE:
    return SPIRV::OpSGreaterThanEqual;
  case CmpInst::ICMP_SGT:
    return SPIRV::OpSGreaterThan;
  case CmpInst::ICMP_SLE:
    return SPIRV::OpSLessThanEqual;
  case CmpInst::ICMP_SLT:
    return SPIRV::OpSLessThan;
  case CmpInst::ICMP_UGE:
    return SPIRV::OpUGreaterThanEqual;
  case CmpInst::ICMP_UGT:
    return SPIRV::OpUGreaterThan;
  case CmpInst::ICMP_ULE:
    return SPIRV::OpULessThanEqual;
  case CmpInst::ICMP_ULT:
    return SPIRV::OpULessThan;
  default:
    llvm_unreachable("Unknown predicate type for ICmp");
  }
}
756
getPtrCmpOpcode(unsigned Pred)757 static unsigned getPtrCmpOpcode(unsigned Pred) {
758 switch (static_cast<CmpInst::Predicate>(Pred)) {
759 case CmpInst::ICMP_EQ:
760 return SPIRV::OpPtrEqual;
761 case CmpInst::ICMP_NE:
762 return SPIRV::OpPtrNotEqual;
763 default:
764 llvm_unreachable("Unknown predicate type for pointer comparison");
765 }
766 }
767
768 // Return the logical operation, or abort if none exists.
getBoolCmpOpcode(unsigned PredNum)769 static unsigned getBoolCmpOpcode(unsigned PredNum) {
770 auto Pred = static_cast<CmpInst::Predicate>(PredNum);
771 switch (Pred) {
772 case CmpInst::ICMP_EQ:
773 return SPIRV::OpLogicalEqual;
774 case CmpInst::ICMP_NE:
775 return SPIRV::OpLogicalNotEqual;
776 default:
777 llvm_unreachable("Unknown predicate type for Bool comparison");
778 }
779 }
780
selectBitreverse(Register ResVReg,const SPIRVType * ResType,MachineInstr & I) const781 bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
782 const SPIRVType *ResType,
783 MachineInstr &I) const {
784 MachineBasicBlock &BB = *I.getParent();
785 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
786 .addDef(ResVReg)
787 .addUse(GR.getSPIRVTypeID(ResType))
788 .addUse(I.getOperand(1).getReg())
789 .constrainAllUses(TII, TRI, RBI);
790 }
791
// Lower G_BUILD_VECTOR to OpConstantComposite. Only supported when every
// element traces back (through ASSIGN_TYPE) to a G_CONSTANT/G_FCONSTANT.
bool SPIRVInstructionSelector::selectConstVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // TODO: only const case is supported for now.
  // Debug-build sanity check: every source operand must be a constant fed
  // through an ASSIGN_TYPE pseudo.
  assert(std::all_of(
      I.operands_begin(), I.operands_end(), [this](const MachineOperand &MO) {
        if (MO.isDef())
          return true;
        if (!MO.isReg())
          return false;
        SPIRVType *ConstTy = this->MRI->getVRegDef(MO.getReg());
        assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
               ConstTy->getOperand(1).isReg());
        Register ConstReg = ConstTy->getOperand(1).getReg();
        const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
        assert(Const);
        return (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
                Const->getOpcode() == TargetOpcode::G_FCONSTANT);
      }));

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(SPIRV::OpConstantComposite))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  // Append every non-def operand as a composite constituent.
  for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  return MIB.constrainAllUses(TII, TRI, RBI);
}
820
selectCmp(Register ResVReg,const SPIRVType * ResType,unsigned CmpOpc,MachineInstr & I) const821 bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
822 const SPIRVType *ResType,
823 unsigned CmpOpc,
824 MachineInstr &I) const {
825 Register Cmp0 = I.getOperand(2).getReg();
826 Register Cmp1 = I.getOperand(3).getReg();
827 assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
828 GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
829 "CMP operands should have the same type");
830 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
831 .addDef(ResVReg)
832 .addUse(GR.getSPIRVTypeID(ResType))
833 .addUse(Cmp0)
834 .addUse(Cmp1)
835 .constrainAllUses(TII, TRI, RBI);
836 }
837
selectICmp(Register ResVReg,const SPIRVType * ResType,MachineInstr & I) const838 bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
839 const SPIRVType *ResType,
840 MachineInstr &I) const {
841 auto Pred = I.getOperand(1).getPredicate();
842 unsigned CmpOpc;
843
844 Register CmpOperand = I.getOperand(2).getReg();
845 if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
846 CmpOpc = getPtrCmpOpcode(Pred);
847 else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
848 CmpOpc = getBoolCmpOpcode(Pred);
849 else
850 CmpOpc = getICmpOpcode(Pred);
851 return selectCmp(ResVReg, ResType, CmpOpc, I);
852 }
853
renderFImm32(MachineInstrBuilder & MIB,const MachineInstr & I,int OpIdx) const854 void SPIRVInstructionSelector::renderFImm32(MachineInstrBuilder &MIB,
855 const MachineInstr &I,
856 int OpIdx) const {
857 assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
858 "Expected G_FCONSTANT");
859 const ConstantFP *FPImm = I.getOperand(1).getFPImm();
860 addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
861 }
862
renderImm32(MachineInstrBuilder & MIB,const MachineInstr & I,int OpIdx) const863 void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
864 const MachineInstr &I,
865 int OpIdx) const {
866 assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
867 "Expected G_CONSTANT");
868 addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
869 }
870
871 Register
buildI32Constant(uint32_t Val,MachineInstr & I,const SPIRVType * ResType) const872 SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
873 const SPIRVType *ResType) const {
874 Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
875 const SPIRVType *SpvI32Ty =
876 ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
877 // Find a constant in DT or build a new one.
878 auto ConstInt = ConstantInt::get(LLVMTy, Val);
879 Register NewReg = GR.find(ConstInt, GR.CurMF);
880 if (!NewReg.isValid()) {
881 NewReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
882 GR.add(ConstInt, GR.CurMF, NewReg);
883 MachineInstr *MI;
884 MachineBasicBlock &BB = *I.getParent();
885 if (Val == 0) {
886 MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
887 .addDef(NewReg)
888 .addUse(GR.getSPIRVTypeID(SpvI32Ty));
889 } else {
890 MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
891 .addDef(NewReg)
892 .addUse(GR.getSPIRVTypeID(SpvI32Ty))
893 .addImm(APInt(32, Val).getZExtValue());
894 }
895 constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
896 }
897 return NewReg;
898 }
899
selectFCmp(Register ResVReg,const SPIRVType * ResType,MachineInstr & I) const900 bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
901 const SPIRVType *ResType,
902 MachineInstr &I) const {
903 unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
904 return selectCmp(ResVReg, ResType, CmpOp, I);
905 }
906
buildZerosVal(const SPIRVType * ResType,MachineInstr & I) const907 Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
908 MachineInstr &I) const {
909 if (ResType->getOpcode() == SPIRV::OpTypeVector)
910 return GR.getOrCreateConsIntVector(0, I, ResType, TII);
911 return GR.getOrCreateConstInt(0, I, ResType, TII);
912 }
913
buildOnesVal(bool AllOnes,const SPIRVType * ResType,MachineInstr & I) const914 Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
915 const SPIRVType *ResType,
916 MachineInstr &I) const {
917 unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
918 APInt One = AllOnes ? APInt::getAllOnesValue(BitWidth)
919 : APInt::getOneBitSet(BitWidth, 0);
920 if (ResType->getOpcode() == SPIRV::OpTypeVector)
921 return GR.getOrCreateConsIntVector(One.getZExtValue(), I, ResType, TII);
922 return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII);
923 }
924
selectSelect(Register ResVReg,const SPIRVType * ResType,MachineInstr & I,bool IsSigned) const925 bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
926 const SPIRVType *ResType,
927 MachineInstr &I,
928 bool IsSigned) const {
929 // To extend a bool, we need to use OpSelect between constants.
930 Register ZeroReg = buildZerosVal(ResType, I);
931 Register OneReg = buildOnesVal(IsSigned, ResType, I);
932 bool IsScalarBool =
933 GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
934 unsigned Opcode =
935 IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
936 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
937 .addDef(ResVReg)
938 .addUse(GR.getSPIRVTypeID(ResType))
939 .addUse(I.getOperand(1).getReg())
940 .addUse(OneReg)
941 .addUse(ZeroReg)
942 .constrainAllUses(TII, TRI, RBI);
943 }
944
selectIToF(Register ResVReg,const SPIRVType * ResType,MachineInstr & I,bool IsSigned,unsigned Opcode) const945 bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
946 const SPIRVType *ResType,
947 MachineInstr &I, bool IsSigned,
948 unsigned Opcode) const {
949 Register SrcReg = I.getOperand(1).getReg();
950 // We can convert bool value directly to float type without OpConvert*ToF,
951 // however the translator generates OpSelect+OpConvert*ToF, so we do the same.
952 if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
953 unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
954 SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
955 if (ResType->getOpcode() == SPIRV::OpTypeVector) {
956 const unsigned NumElts = ResType->getOperand(2).getImm();
957 TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
958 }
959 SrcReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
960 selectSelect(SrcReg, TmpType, I, false);
961 }
962 return selectUnOpWithSrc(ResVReg, ResType, I, SrcReg, Opcode);
963 }
964
selectExt(Register ResVReg,const SPIRVType * ResType,MachineInstr & I,bool IsSigned) const965 bool SPIRVInstructionSelector::selectExt(Register ResVReg,
966 const SPIRVType *ResType,
967 MachineInstr &I, bool IsSigned) const {
968 if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool))
969 return selectSelect(ResVReg, ResType, I, IsSigned);
970 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
971 return selectUnOp(ResVReg, ResType, I, Opcode);
972 }
973
selectIntToBool(Register IntReg,Register ResVReg,const SPIRVType * IntTy,const SPIRVType * BoolTy,MachineInstr & I) const974 bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
975 Register ResVReg,
976 const SPIRVType *IntTy,
977 const SPIRVType *BoolTy,
978 MachineInstr &I) const {
979 // To truncate to a bool, we use OpBitwiseAnd 1 and OpINotEqual to zero.
980 Register BitIntReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
981 bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
982 unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
983 Register Zero = buildZerosVal(IntTy, I);
984 Register One = buildOnesVal(false, IntTy, I);
985 MachineBasicBlock &BB = *I.getParent();
986 BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
987 .addDef(BitIntReg)
988 .addUse(GR.getSPIRVTypeID(IntTy))
989 .addUse(IntReg)
990 .addUse(One)
991 .constrainAllUses(TII, TRI, RBI);
992 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
993 .addDef(ResVReg)
994 .addUse(GR.getSPIRVTypeID(BoolTy))
995 .addUse(BitIntReg)
996 .addUse(Zero)
997 .constrainAllUses(TII, TRI, RBI);
998 }
999
selectTrunc(Register ResVReg,const SPIRVType * ResType,MachineInstr & I) const1000 bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
1001 const SPIRVType *ResType,
1002 MachineInstr &I) const {
1003 if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool)) {
1004 Register IntReg = I.getOperand(1).getReg();
1005 const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
1006 return selectIntToBool(IntReg, ResVReg, ArgType, ResType, I);
1007 }
1008 bool IsSigned = GR.isScalarOrVectorSigned(ResType);
1009 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
1010 return selectUnOp(ResVReg, ResType, I, Opcode);
1011 }
1012
selectConst(Register ResVReg,const SPIRVType * ResType,const APInt & Imm,MachineInstr & I) const1013 bool SPIRVInstructionSelector::selectConst(Register ResVReg,
1014 const SPIRVType *ResType,
1015 const APInt &Imm,
1016 MachineInstr &I) const {
1017 unsigned TyOpcode = ResType->getOpcode();
1018 assert(TyOpcode != SPIRV::OpTypePointer || Imm.isNullValue());
1019 MachineBasicBlock &BB = *I.getParent();
1020 if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
1021 Imm.isNullValue())
1022 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1023 .addDef(ResVReg)
1024 .addUse(GR.getSPIRVTypeID(ResType))
1025 .constrainAllUses(TII, TRI, RBI);
1026 if (TyOpcode == SPIRV::OpTypeInt) {
1027 Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII);
1028 if (Reg == ResVReg)
1029 return true;
1030 return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
1031 .addDef(ResVReg)
1032 .addUse(Reg)
1033 .constrainAllUses(TII, TRI, RBI);
1034 }
1035 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
1036 .addDef(ResVReg)
1037 .addUse(GR.getSPIRVTypeID(ResType));
1038 // <=32-bit integers should be caught by the sdag pattern.
1039 assert(Imm.getBitWidth() > 32);
1040 addNumImm(Imm, MIB);
1041 return MIB.constrainAllUses(TII, TRI, RBI);
1042 }
1043
selectOpUndef(Register ResVReg,const SPIRVType * ResType,MachineInstr & I) const1044 bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
1045 const SPIRVType *ResType,
1046 MachineInstr &I) const {
1047 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
1048 .addDef(ResVReg)
1049 .addUse(GR.getSPIRVTypeID(ResType))
1050 .constrainAllUses(TII, TRI, RBI);
1051 }
1052
isImm(const MachineOperand & MO,MachineRegisterInfo * MRI)1053 static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
1054 assert(MO.isReg());
1055 const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
1056 if (TypeInst->getOpcode() != SPIRV::ASSIGN_TYPE)
1057 return false;
1058 assert(TypeInst->getOperand(1).isReg());
1059 MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
1060 return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
1061 }
1062
foldImm(const MachineOperand & MO,MachineRegisterInfo * MRI)1063 static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
1064 const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
1065 MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
1066 assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT);
1067 return ImmInst->getOperand(1).getCImm()->getZExtValue();
1068 }
1069
selectInsertVal(Register ResVReg,const SPIRVType * ResType,MachineInstr & I) const1070 bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
1071 const SPIRVType *ResType,
1072 MachineInstr &I) const {
1073 MachineBasicBlock &BB = *I.getParent();
1074 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
1075 .addDef(ResVReg)
1076 .addUse(GR.getSPIRVTypeID(ResType))
1077 // object to insert
1078 .addUse(I.getOperand(3).getReg())
1079 // composite to insert into
1080 .addUse(I.getOperand(2).getReg());
1081 for (unsigned i = 4; i < I.getNumOperands(); i++)
1082 MIB.addImm(foldImm(I.getOperand(i), MRI));
1083 return MIB.constrainAllUses(TII, TRI, RBI);
1084 }
1085
selectExtractVal(Register ResVReg,const SPIRVType * ResType,MachineInstr & I) const1086 bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
1087 const SPIRVType *ResType,
1088 MachineInstr &I) const {
1089 MachineBasicBlock &BB = *I.getParent();
1090 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
1091 .addDef(ResVReg)
1092 .addUse(GR.getSPIRVTypeID(ResType))
1093 .addUse(I.getOperand(2).getReg());
1094 for (unsigned i = 3; i < I.getNumOperands(); i++)
1095 MIB.addImm(foldImm(I.getOperand(i), MRI));
1096 return MIB.constrainAllUses(TII, TRI, RBI);
1097 }
1098
selectInsertElt(Register ResVReg,const SPIRVType * ResType,MachineInstr & I) const1099 bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
1100 const SPIRVType *ResType,
1101 MachineInstr &I) const {
1102 if (isImm(I.getOperand(4), MRI))
1103 return selectInsertVal(ResVReg, ResType, I);
1104 MachineBasicBlock &BB = *I.getParent();
1105 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
1106 .addDef(ResVReg)
1107 .addUse(GR.getSPIRVTypeID(ResType))
1108 .addUse(I.getOperand(2).getReg())
1109 .addUse(I.getOperand(3).getReg())
1110 .addUse(I.getOperand(4).getReg())
1111 .constrainAllUses(TII, TRI, RBI);
1112 }
1113
selectExtractElt(Register ResVReg,const SPIRVType * ResType,MachineInstr & I) const1114 bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
1115 const SPIRVType *ResType,
1116 MachineInstr &I) const {
1117 if (isImm(I.getOperand(3), MRI))
1118 return selectExtractVal(ResVReg, ResType, I);
1119 MachineBasicBlock &BB = *I.getParent();
1120 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
1121 .addDef(ResVReg)
1122 .addUse(GR.getSPIRVTypeID(ResType))
1123 .addUse(I.getOperand(2).getReg())
1124 .addUse(I.getOperand(3).getReg())
1125 .constrainAllUses(TII, TRI, RBI);
1126 }
1127
selectGEP(Register ResVReg,const SPIRVType * ResType,MachineInstr & I) const1128 bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
1129 const SPIRVType *ResType,
1130 MachineInstr &I) const {
1131 // In general we should also support OpAccessChain instrs here (i.e. not
1132 // PtrAccessChain) but SPIRV-LLVM Translator doesn't emit them at all and so
1133 // do we to stay compliant with its test and more importantly consumers.
1134 unsigned Opcode = I.getOperand(2).getImm() ? SPIRV::OpInBoundsPtrAccessChain
1135 : SPIRV::OpPtrAccessChain;
1136 auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
1137 .addDef(ResVReg)
1138 .addUse(GR.getSPIRVTypeID(ResType))
1139 // Object to get a pointer to.
1140 .addUse(I.getOperand(3).getReg());
1141 // Adding indices.
1142 for (unsigned i = 4; i < I.getNumExplicitOperands(); ++i)
1143 Res.addUse(I.getOperand(i).getReg());
1144 return Res.constrainAllUses(TII, TRI, RBI);
1145 }
1146
selectIntrinsic(Register ResVReg,const SPIRVType * ResType,MachineInstr & I) const1147 bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
1148 const SPIRVType *ResType,
1149 MachineInstr &I) const {
1150 MachineBasicBlock &BB = *I.getParent();
1151 switch (I.getIntrinsicID()) {
1152 case Intrinsic::spv_load:
1153 return selectLoad(ResVReg, ResType, I);
1154 break;
1155 case Intrinsic::spv_store:
1156 return selectStore(I);
1157 break;
1158 case Intrinsic::spv_extractv:
1159 return selectExtractVal(ResVReg, ResType, I);
1160 break;
1161 case Intrinsic::spv_insertv:
1162 return selectInsertVal(ResVReg, ResType, I);
1163 break;
1164 case Intrinsic::spv_extractelt:
1165 return selectExtractElt(ResVReg, ResType, I);
1166 break;
1167 case Intrinsic::spv_insertelt:
1168 return selectInsertElt(ResVReg, ResType, I);
1169 break;
1170 case Intrinsic::spv_gep:
1171 return selectGEP(ResVReg, ResType, I);
1172 break;
1173 case Intrinsic::spv_unref_global:
1174 case Intrinsic::spv_init_global: {
1175 MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
1176 MachineInstr *Init = I.getNumExplicitOperands() > 2
1177 ? MRI->getVRegDef(I.getOperand(2).getReg())
1178 : nullptr;
1179 assert(MI);
1180 return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
1181 } break;
1182 case Intrinsic::spv_const_composite: {
1183 // If no values are attached, the composite is null constant.
1184 bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
1185 unsigned Opcode =
1186 IsNull ? SPIRV::OpConstantNull : SPIRV::OpConstantComposite;
1187 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
1188 .addDef(ResVReg)
1189 .addUse(GR.getSPIRVTypeID(ResType));
1190 // skip type MD node we already used when generated assign.type for this
1191 if (!IsNull) {
1192 for (unsigned i = I.getNumExplicitDefs() + 1;
1193 i < I.getNumExplicitOperands(); ++i) {
1194 MIB.addUse(I.getOperand(i).getReg());
1195 }
1196 }
1197 return MIB.constrainAllUses(TII, TRI, RBI);
1198 } break;
1199 case Intrinsic::spv_assign_name: {
1200 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
1201 MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
1202 for (unsigned i = I.getNumExplicitDefs() + 2;
1203 i < I.getNumExplicitOperands(); ++i) {
1204 MIB.addImm(I.getOperand(i).getImm());
1205 }
1206 return MIB.constrainAllUses(TII, TRI, RBI);
1207 } break;
1208 case Intrinsic::spv_switch: {
1209 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
1210 for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
1211 if (I.getOperand(i).isReg())
1212 MIB.addReg(I.getOperand(i).getReg());
1213 else if (I.getOperand(i).isCImm())
1214 addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
1215 else if (I.getOperand(i).isMBB())
1216 MIB.addMBB(I.getOperand(i).getMBB());
1217 else
1218 llvm_unreachable("Unexpected OpSwitch operand");
1219 }
1220 return MIB.constrainAllUses(TII, TRI, RBI);
1221 } break;
1222 case Intrinsic::spv_cmpxchg:
1223 return selectAtomicCmpXchg(ResVReg, ResType, I);
1224 break;
1225 default:
1226 llvm_unreachable("Intrinsic selection not implemented");
1227 }
1228 return true;
1229 }
1230
selectFrameIndex(Register ResVReg,const SPIRVType * ResType,MachineInstr & I) const1231 bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
1232 const SPIRVType *ResType,
1233 MachineInstr &I) const {
1234 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
1235 .addDef(ResVReg)
1236 .addUse(GR.getSPIRVTypeID(ResType))
1237 .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
1238 .constrainAllUses(TII, TRI, RBI);
1239 }
1240
selectBranch(MachineInstr & I) const1241 bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
1242 // InstructionSelector walks backwards through the instructions. We can use
1243 // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
1244 // first, so can generate an OpBranchConditional here. If there is no
1245 // G_BRCOND, we just use OpBranch for a regular unconditional branch.
1246 const MachineInstr *PrevI = I.getPrevNode();
1247 MachineBasicBlock &MBB = *I.getParent();
1248 if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
1249 return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
1250 .addUse(PrevI->getOperand(0).getReg())
1251 .addMBB(PrevI->getOperand(1).getMBB())
1252 .addMBB(I.getOperand(0).getMBB())
1253 .constrainAllUses(TII, TRI, RBI);
1254 }
1255 return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
1256 .addMBB(I.getOperand(0).getMBB())
1257 .constrainAllUses(TII, TRI, RBI);
1258 }
1259
selectBranchCond(MachineInstr & I) const1260 bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
1261 // InstructionSelector walks backwards through the instructions. For an
1262 // explicit conditional branch with no fallthrough, we use both a G_BR and a
1263 // G_BRCOND to create an OpBranchConditional. We should hit G_BR first, and
1264 // generate the OpBranchConditional in selectBranch above.
1265 //
1266 // If an OpBranchConditional has been generated, we simply return, as the work
1267 // is alread done. If there is no OpBranchConditional, LLVM must be relying on
1268 // implicit fallthrough to the next basic block, so we need to create an
1269 // OpBranchConditional with an explicit "false" argument pointing to the next
1270 // basic block that LLVM would fall through to.
1271 const MachineInstr *NextI = I.getNextNode();
1272 // Check if this has already been successfully selected.
1273 if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
1274 return true;
1275 // Must be relying on implicit block fallthrough, so generate an
1276 // OpBranchConditional with the "next" basic block as the "false" target.
1277 MachineBasicBlock &MBB = *I.getParent();
1278 unsigned NextMBBNum = MBB.getNextNode()->getNumber();
1279 MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
1280 return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
1281 .addUse(I.getOperand(0).getReg())
1282 .addMBB(I.getOperand(1).getMBB())
1283 .addMBB(NextMBB)
1284 .constrainAllUses(TII, TRI, RBI);
1285 }
1286
selectPhi(Register ResVReg,const SPIRVType * ResType,MachineInstr & I) const1287 bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
1288 const SPIRVType *ResType,
1289 MachineInstr &I) const {
1290 auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
1291 .addDef(ResVReg)
1292 .addUse(GR.getSPIRVTypeID(ResType));
1293 const unsigned NumOps = I.getNumOperands();
1294 for (unsigned i = 1; i < NumOps; i += 2) {
1295 MIB.addUse(I.getOperand(i + 0).getReg());
1296 MIB.addMBB(I.getOperand(i + 1).getMBB());
1297 }
1298 return MIB.constrainAllUses(TII, TRI, RBI);
1299 }
1300
selectGlobalValue(Register ResVReg,MachineInstr & I,const MachineInstr * Init) const1301 bool SPIRVInstructionSelector::selectGlobalValue(
1302 Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
1303 // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
1304 MachineIRBuilder MIRBuilder(I);
1305 const GlobalValue *GV = I.getOperand(1).getGlobal();
1306 SPIRVType *ResType = GR.getOrCreateSPIRVType(
1307 GV->getType(), MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
1308
1309 std::string GlobalIdent = GV->getGlobalIdentifier();
1310 // We have functions as operands in tests with blocks of instruction e.g. in
1311 // transcoding/global_block.ll. These operands are not used and should be
1312 // substituted by zero constants. Their type is expected to be always
1313 // OpTypePointer Function %uchar.
1314 if (isa<Function>(GV)) {
1315 const Constant *ConstVal = GV;
1316 MachineBasicBlock &BB = *I.getParent();
1317 Register NewReg = GR.find(ConstVal, GR.CurMF);
1318 if (!NewReg.isValid()) {
1319 SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
1320 ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII);
1321 Register NewReg = ResVReg;
1322 GR.add(ConstVal, GR.CurMF, NewReg);
1323 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1324 .addDef(NewReg)
1325 .addUse(GR.getSPIRVTypeID(ResType))
1326 .constrainAllUses(TII, TRI, RBI);
1327 }
1328 assert(NewReg != ResVReg);
1329 return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
1330 .addDef(ResVReg)
1331 .addUse(NewReg)
1332 .constrainAllUses(TII, TRI, RBI);
1333 }
1334 auto GlobalVar = cast<GlobalVariable>(GV);
1335 assert(GlobalVar->getName() != "llvm.global.annotations");
1336
1337 bool HasInit = GlobalVar->hasInitializer() &&
1338 !isa<UndefValue>(GlobalVar->getInitializer());
1339 // Skip empty declaration for GVs with initilaizers till we get the decl with
1340 // passed initializer.
1341 if (HasInit && !Init)
1342 return true;
1343
1344 unsigned AddrSpace = GV->getAddressSpace();
1345 SPIRV::StorageClass Storage = addressSpaceToStorageClass(AddrSpace);
1346 bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage &&
1347 Storage != SPIRV::StorageClass::Function;
1348 SPIRV::LinkageType LnkType =
1349 (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
1350 ? SPIRV::LinkageType::Import
1351 : SPIRV::LinkageType::Export;
1352
1353 Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
1354 Storage, Init, GlobalVar->isConstant(),
1355 HasLnkTy, LnkType, MIRBuilder, true);
1356 return Reg.isValid();
1357 }
1358
1359 namespace llvm {
1360 InstructionSelector *
createSPIRVInstructionSelector(const SPIRVTargetMachine & TM,const SPIRVSubtarget & Subtarget,const RegisterBankInfo & RBI)1361 createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
1362 const SPIRVSubtarget &Subtarget,
1363 const RegisterBankInfo &RBI) {
1364 return new SPIRVInstructionSelector(TM, Subtarget, RBI);
1365 }
1366 } // namespace llvm
1367