1 //===- SPIRVInstructionSelector.cpp ------------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the targeting of the InstructionSelector class for
10 // SPIRV.
11 // TODO: This should be generated by TableGen.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "SPIRV.h"
16 #include "SPIRVGlobalRegistry.h"
17 #include "SPIRVInstrInfo.h"
18 #include "SPIRVRegisterBankInfo.h"
19 #include "SPIRVRegisterInfo.h"
20 #include "SPIRVTargetMachine.h"
21 #include "SPIRVUtils.h"
22 #include "llvm/ADT/APFloat.h"
23 #include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
24 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/IR/IntrinsicsSPIRV.h"
28 #include "llvm/Support/Debug.h"
29 
30 #define DEBUG_TYPE "spirv-isel"
31 
32 using namespace llvm;
33 
34 namespace {
35 
36 #define GET_GLOBALISEL_PREDICATE_BITSET
37 #include "SPIRVGenGlobalISel.inc"
38 #undef GET_GLOBALISEL_PREDICATE_BITSET
39 
// SPIR-V instruction selector: maps generic machine IR produced by the
// GlobalISel pipeline onto concrete SPIR-V instructions. Simple patterns go
// through the tblgen-erated selectImpl(); everything else is handled by the
// hand-written select* helpers declared below.
class SPIRVInstructionSelector : public InstructionSelector {
  const SPIRVSubtarget &STI;
  const SPIRVInstrInfo &TII;
  const SPIRVRegisterInfo &TRI;
  const RegisterBankInfo &RBI;
  SPIRVGlobalRegistry &GR;
  MachineRegisterInfo *MRI; // Cached per-function in setupMF().

public:
  SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                           const SPIRVSubtarget &ST,
                           const RegisterBankInfo &RBI);
  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage &CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override;
  // Common selection code. Instruction-specific selection occurs in spvSelect.
  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

private:
  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // All instruction-specific selection that didn't happen in "select()".
  // Is basically a large Switch/Case delegating to all other select method.
  bool spvSelect(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectGlobalValue(Register ResVReg, MachineInstr &I,
                         const MachineInstr *Init = nullptr) const;

  // Generic single-source instructions (conversions, bitcasts, ...): emit
  // `Opcode ResVReg <ResType> SrcReg`.
  bool selectUnOpWithSrc(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I, Register SrcReg,
                         unsigned Opcode) const;
  bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  unsigned Opcode) const;

  // Memory accesses (OpLoad / OpStore / OpCopyMemorySized).
  bool selectLoad(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectStore(MachineInstr &I) const;

  bool selectMemOperation(Register ResVReg, MachineInstr &I) const;

  // Atomics and synchronization.
  bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I, unsigned NewOpcode) const;

  bool selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectFence(MachineInstr &I) const;

  bool selectAddrSpaceCast(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectBitreverse(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectConstVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  // Comparisons: selectICmp/selectFCmp pick the SPIR-V opcode, selectCmp
  // emits the actual instruction.
  bool selectCmp(Register ResVReg, const SPIRVType *ResType,
                 unsigned comparisonOpcode, MachineInstr &I) const;

  bool selectICmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectFCmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  // Operand renderers used by tblgen-erated patterns.
  void renderImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                   int OpIdx) const;
  void renderFImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                    int OpIdx) const;

  bool selectConst(Register ResVReg, const SPIRVType *ResType, const APInt &Imm,
                   MachineInstr &I) const;

  // Integer extensions/truncations and int<->float conversions.
  bool selectSelect(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                    bool IsSigned) const;
  bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  bool IsSigned, unsigned Opcode) const;
  bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                 bool IsSigned) const;

  bool selectTrunc(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectIntToBool(Register IntReg, Register ResVReg,
                       const SPIRVType *intTy, const SPIRVType *boolTy,
                       MachineInstr &I) const;

  bool selectOpUndef(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;
  bool selectIntrinsic(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  // Aggregate and vector element access.
  bool selectExtractVal(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertVal(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractElt(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertElt(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectGEP(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectFrameIndex(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  // Control flow.
  bool selectBranch(MachineInstr &I) const;
  bool selectBranchCond(MachineInstr &I) const;

  bool selectPhi(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  // Helpers to materialize constants used as scope/semantics operands etc.
  Register buildI32Constant(uint32_t Val, MachineInstr &I,
                            const SPIRVType *ResType = nullptr) const;

  Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildOnesVal(bool AllOnes, const SPIRVType *ResType,
                        MachineInstr &I) const;
};
170 
171 } // end anonymous namespace
172 
173 #define GET_GLOBALISEL_IMPL
174 #include "SPIRVGenGlobalISel.inc"
175 #undef GET_GLOBALISEL_IMPL
176 
// The GET_GLOBALISEL_* sections splice the tblgen-erated predicate and
// temporary member initializers into this constructor's initializer list.
SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                                                   const SPIRVSubtarget &ST,
                                                   const RegisterBankInfo &RBI)
    : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}
190 
191 void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
192                                        CodeGenCoverage &CoverageInfo,
193                                        ProfileSummaryInfo *PSI,
194                                        BlockFrequencyInfo *BFI) {
195   MRI = &MF.getRegInfo();
196   GR.setCurrentFunc(MF);
197   InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
198 }
199 
// Defined in SPIRVLegalizerInfo.cpp; queried below to decide whether an
// ASSIGN_TYPE's defining opcode can go through the tblgen-erated selector.
extern bool isTypeFoldingSupported(unsigned Opcode);
202 
203 bool SPIRVInstructionSelector::select(MachineInstr &I) {
204   assert(I.getParent() && "Instruction should be in a basic block!");
205   assert(I.getParent()->getParent() && "Instruction should be in a function!");
206 
207   Register Opcode = I.getOpcode();
208   // If it's not a GMIR instruction, we've selected it already.
209   if (!isPreISelGenericOpcode(Opcode)) {
210     if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed any more.
211       auto *Def = MRI->getVRegDef(I.getOperand(1).getReg());
212       if (isTypeFoldingSupported(Def->getOpcode())) {
213         auto Res = selectImpl(I, *CoverageInfo);
214         assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
215         if (Res)
216           return Res;
217       }
218       MRI->replaceRegWith(I.getOperand(1).getReg(), I.getOperand(0).getReg());
219       I.removeFromParent();
220     } else if (I.getNumDefs() == 1) {
221       // Make all vregs 32 bits (for SPIR-V IDs).
222       MRI->setType(I.getOperand(0).getReg(), LLT::scalar(32));
223     }
224     return true;
225   }
226 
227   if (I.getNumOperands() != I.getNumExplicitOperands()) {
228     LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
229     return false;
230   }
231 
232   // Common code for getting return reg+type, and removing selected instr
233   // from parent occurs here. Instr-specific selection happens in spvSelect().
234   bool HasDefs = I.getNumDefs() > 0;
235   Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
236   SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
237   assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
238   if (spvSelect(ResVReg, ResType, I)) {
239     if (HasDefs) // Make all vregs 32 bits (for SPIR-V IDs).
240       MRI->setType(ResVReg, LLT::scalar(32));
241     I.removeFromParent();
242     return true;
243   }
244   return false;
245 }
246 
// Dispatch table for instruction-specific selection: one case per generic
// opcode, each delegating to the dedicated select* helper (or emitting the
// instruction inline for trivial cases like G_SHUFFLE_VECTOR).
bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  // Foldable opcodes should already have been handled by selectImpl() in
  // select(); G_CONSTANT is the one exception that still reaches here.
  assert(!isTypeFoldingSupported(I.getOpcode()) ||
         I.getOpcode() == TargetOpcode::G_CONSTANT);
  const unsigned Opcode = I.getOpcode();
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectConstVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    // OpVectorShuffle: two source vectors followed by the literal mask.
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  // Memory operations.
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  // Control flow.
  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  // Conversions between integer and floating point.
  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);

  // Integer width changes; G_ANYEXT is treated as a zero-extension.
  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  // Pointer conversions and casts.
  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);

  // Atomic read-modify-write: each maps to the corresponding OpAtomic* op.
  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  default:
    return false;
  }
}
369 
370 bool SPIRVInstructionSelector::selectUnOpWithSrc(Register ResVReg,
371                                                  const SPIRVType *ResType,
372                                                  MachineInstr &I,
373                                                  Register SrcReg,
374                                                  unsigned Opcode) const {
375   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
376       .addDef(ResVReg)
377       .addUse(GR.getSPIRVTypeID(ResType))
378       .addUse(SrcReg)
379       .constrainAllUses(TII, TRI, RBI);
380 }
381 
382 bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
383                                           const SPIRVType *ResType,
384                                           MachineInstr &I,
385                                           unsigned Opcode) const {
386   return selectUnOpWithSrc(ResVReg, ResType, I, I.getOperand(1).getReg(),
387                            Opcode);
388 }
389 
390 static SPIRV::MemorySemantics getMemSemantics(AtomicOrdering Ord) {
391   switch (Ord) {
392   case AtomicOrdering::Acquire:
393     return SPIRV::MemorySemantics::Acquire;
394   case AtomicOrdering::Release:
395     return SPIRV::MemorySemantics::Release;
396   case AtomicOrdering::AcquireRelease:
397     return SPIRV::MemorySemantics::AcquireRelease;
398   case AtomicOrdering::SequentiallyConsistent:
399     return SPIRV::MemorySemantics::SequentiallyConsistent;
400   case AtomicOrdering::Unordered:
401   case AtomicOrdering::Monotonic:
402   case AtomicOrdering::NotAtomic:
403     return SPIRV::MemorySemantics::None;
404   }
405 }
406 
407 static SPIRV::Scope getScope(SyncScope::ID Ord) {
408   switch (Ord) {
409   case SyncScope::SingleThread:
410     return SPIRV::Scope::Invocation;
411   case SyncScope::System:
412     return SPIRV::Scope::Device;
413   default:
414     llvm_unreachable("Unsupported synchronization Scope ID.");
415   }
416 }
417 
// Appends SPIR-V memory-operand immediates for MemOp to MIB: a bitmask of
// Volatile/Nontemporal/Aligned, followed by the literal alignment when the
// Aligned bit is set. Nothing is appended when no bits apply.
static void addMemoryOperands(MachineMemOperand *MemOp,
                              MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  // NOTE(review): Align::value() is always >= 1, so this condition is always
  // true and every access gets the Aligned operand — confirm that is intended.
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    MIB.addImm(SpvMemOp);
    // The Aligned bit requires the literal alignment to follow the mask.
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
      MIB.addImm(MemOp->getAlign().value());
  }
}
434 
435 static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
436   uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
437   if (Flags & MachineMemOperand::Flags::MOVolatile)
438     SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
439   if (Flags & MachineMemOperand::Flags::MONonTemporal)
440     SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
441 
442   if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
443     MIB.addImm(SpvMemOp);
444 }
445 
446 bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
447                                           const SPIRVType *ResType,
448                                           MachineInstr &I) const {
449   unsigned OpOffset =
450       I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ? 1 : 0;
451   Register Ptr = I.getOperand(1 + OpOffset).getReg();
452   auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
453                  .addDef(ResVReg)
454                  .addUse(GR.getSPIRVTypeID(ResType))
455                  .addUse(Ptr);
456   if (!I.getNumMemOperands()) {
457     assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
458     addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
459   } else {
460     addMemoryOperands(*I.memoperands_begin(), MIB);
461   }
462   return MIB.constrainAllUses(TII, TRI, RBI);
463 }
464 
465 bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
466   unsigned OpOffset =
467       I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ? 1 : 0;
468   Register StoreVal = I.getOperand(0 + OpOffset).getReg();
469   Register Ptr = I.getOperand(1 + OpOffset).getReg();
470   MachineBasicBlock &BB = *I.getParent();
471   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
472                  .addUse(Ptr)
473                  .addUse(StoreVal);
474   if (!I.getNumMemOperands()) {
475     assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
476     addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
477   } else {
478     addMemoryOperands(*I.memoperands_begin(), MIB);
479   }
480   return MIB.constrainAllUses(TII, TRI, RBI);
481 }
482 
483 bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
484                                                   MachineInstr &I) const {
485   MachineBasicBlock &BB = *I.getParent();
486   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
487                  .addDef(I.getOperand(0).getReg())
488                  .addUse(I.getOperand(1).getReg())
489                  .addUse(I.getOperand(2).getReg());
490   if (I.getNumMemOperands())
491     addMemoryOperands(*I.memoperands_begin(), MIB);
492   bool Result = MIB.constrainAllUses(TII, TRI, RBI);
493   if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg()) {
494     BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
495         .addUse(MIB->getOperand(0).getReg());
496   }
497   return Result;
498 }
499 
500 bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
501                                                const SPIRVType *ResType,
502                                                MachineInstr &I,
503                                                unsigned NewOpcode) const {
504   assert(I.hasOneMemOperand());
505   const MachineMemOperand *MemOp = *I.memoperands_begin();
506   uint32_t Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
507   Register ScopeReg = buildI32Constant(Scope, I);
508 
509   Register Ptr = I.getOperand(1).getReg();
510   // TODO: Changed as it's implemented in the translator. See test/atomicrmw.ll
511   // auto ScSem =
512   // getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr));
513   AtomicOrdering AO = MemOp->getSuccessOrdering();
514   uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
515   Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);
516 
517   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
518       .addDef(ResVReg)
519       .addUse(GR.getSPIRVTypeID(ResType))
520       .addUse(Ptr)
521       .addUse(ScopeReg)
522       .addUse(MemSemReg)
523       .addUse(I.getOperand(2).getReg())
524       .constrainAllUses(TII, TRI, RBI);
525 }
526 
527 bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
528   AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
529   uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
530   Register MemSemReg = buildI32Constant(MemSem, I);
531   SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
532   uint32_t Scope = static_cast<uint32_t>(getScope(Ord));
533   Register ScopeReg = buildI32Constant(Scope, I);
534   MachineBasicBlock &BB = *I.getParent();
535   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
536       .addUse(ScopeReg)
537       .addUse(MemSemReg)
538       .constrainAllUses(TII, TRI, RBI);
539 }
540 
// Lowers G_ATOMIC_CMPXCHG to OpAtomicCompareExchange, building i32 constants
// for the scope and the equal/unequal memory semantics. Note SPIR-V operand
// order: Value before Comparator.
bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(2).getReg();
  Register Cmp = I.getOperand(3).getReg();
  Register Val = I.getOperand(4).getReg();

  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  SPIRV::StorageClass SC = GR.getPointerStorageClass(Ptr);
  // Fold the pointer's storage-class semantics into both orderings.
  uint32_t ScSem = static_cast<uint32_t>(getMemSemanticsForStorageClass(SC));
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
  Register MemSemEqReg = buildI32Constant(MemSemEq, I);
  AtomicOrdering FO = MemOp->getFailureOrdering();
  uint32_t MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
  // Reuse the success-semantics constant when both orderings coincide.
  Register MemSemNeqReg =
      MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
  const DebugLoc &DL = I.getDebugLoc();
  // NOTE(review): the result type is taken from the value operand (SpvValTy)
  // rather than ResType — confirm the two always agree for cmpxchg.
  return BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(SpvValTy))
      .addUse(Ptr)
      .addUse(ScopeReg)
      .addUse(MemSemEqReg)
      .addUse(MemSemNeqReg)
      .addUse(Val)
      .addUse(Cmp)
      .constrainAllUses(TII, TRI, RBI);
}
575 
576 static bool isGenericCastablePtr(SPIRV::StorageClass SC) {
577   switch (SC) {
578   case SPIRV::StorageClass::Workgroup:
579   case SPIRV::StorageClass::CrossWorkgroup:
580   case SPIRV::StorageClass::Function:
581     return true;
582   default:
583     return false;
584   }
585 }
586 
// In SPIR-V address space casting can only happen to and from the Generic
// storage class. We can also only cast Workgroup, CrossWorkgroup, or Function
// pointers to and from Generic pointers. As such, we can convert e.g. from
// Workgroup to Function by going via a Generic pointer as an intermediary. All
// other combinations can only be done by a bitcast, and are probably not safe.
592 bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
593                                                    const SPIRVType *ResType,
594                                                    MachineInstr &I) const {
595   Register SrcPtr = I.getOperand(1).getReg();
596   SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
597   SPIRV::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);
598   SPIRV::StorageClass DstSC = GR.getPointerStorageClass(ResVReg);
599 
600   // Casting from an eligable pointer to Generic.
601   if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
602     return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
603   // Casting from Generic to an eligable pointer.
604   if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
605     return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
606   // Casting between 2 eligable pointers using Generic as an intermediary.
607   if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
608     Register Tmp = MRI->createVirtualRegister(&SPIRV::IDRegClass);
609     SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
610         SrcPtrTy, I, TII, SPIRV::StorageClass::Generic);
611     MachineBasicBlock &BB = *I.getParent();
612     const DebugLoc &DL = I.getDebugLoc();
613     bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
614                        .addDef(Tmp)
615                        .addUse(GR.getSPIRVTypeID(GenericPtrTy))
616                        .addUse(SrcPtr)
617                        .constrainAllUses(TII, TRI, RBI);
618     return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
619                           .addDef(ResVReg)
620                           .addUse(GR.getSPIRVTypeID(ResType))
621                           .addUse(Tmp)
622                           .constrainAllUses(TII, TRI, RBI);
623   }
624   // TODO Should this case just be disallowed completely?
625   // We're casting 2 other arbitrary address spaces, so have to bitcast.
626   return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
627 }
628 
629 static unsigned getFCmpOpcode(unsigned PredNum) {
630   auto Pred = static_cast<CmpInst::Predicate>(PredNum);
631   switch (Pred) {
632   case CmpInst::FCMP_OEQ:
633     return SPIRV::OpFOrdEqual;
634   case CmpInst::FCMP_OGE:
635     return SPIRV::OpFOrdGreaterThanEqual;
636   case CmpInst::FCMP_OGT:
637     return SPIRV::OpFOrdGreaterThan;
638   case CmpInst::FCMP_OLE:
639     return SPIRV::OpFOrdLessThanEqual;
640   case CmpInst::FCMP_OLT:
641     return SPIRV::OpFOrdLessThan;
642   case CmpInst::FCMP_ONE:
643     return SPIRV::OpFOrdNotEqual;
644   case CmpInst::FCMP_ORD:
645     return SPIRV::OpOrdered;
646   case CmpInst::FCMP_UEQ:
647     return SPIRV::OpFUnordEqual;
648   case CmpInst::FCMP_UGE:
649     return SPIRV::OpFUnordGreaterThanEqual;
650   case CmpInst::FCMP_UGT:
651     return SPIRV::OpFUnordGreaterThan;
652   case CmpInst::FCMP_ULE:
653     return SPIRV::OpFUnordLessThanEqual;
654   case CmpInst::FCMP_ULT:
655     return SPIRV::OpFUnordLessThan;
656   case CmpInst::FCMP_UNE:
657     return SPIRV::OpFUnordNotEqual;
658   case CmpInst::FCMP_UNO:
659     return SPIRV::OpUnordered;
660   default:
661     llvm_unreachable("Unknown predicate type for FCmp");
662   }
663 }
664 
665 static unsigned getICmpOpcode(unsigned PredNum) {
666   auto Pred = static_cast<CmpInst::Predicate>(PredNum);
667   switch (Pred) {
668   case CmpInst::ICMP_EQ:
669     return SPIRV::OpIEqual;
670   case CmpInst::ICMP_NE:
671     return SPIRV::OpINotEqual;
672   case CmpInst::ICMP_SGE:
673     return SPIRV::OpSGreaterThanEqual;
674   case CmpInst::ICMP_SGT:
675     return SPIRV::OpSGreaterThan;
676   case CmpInst::ICMP_SLE:
677     return SPIRV::OpSLessThanEqual;
678   case CmpInst::ICMP_SLT:
679     return SPIRV::OpSLessThan;
680   case CmpInst::ICMP_UGE:
681     return SPIRV::OpUGreaterThanEqual;
682   case CmpInst::ICMP_UGT:
683     return SPIRV::OpUGreaterThan;
684   case CmpInst::ICMP_ULE:
685     return SPIRV::OpULessThanEqual;
686   case CmpInst::ICMP_ULT:
687     return SPIRV::OpULessThan;
688   default:
689     llvm_unreachable("Unknown predicate type for ICmp");
690   }
691 }
692 
693 static unsigned getPtrCmpOpcode(unsigned Pred) {
694   switch (static_cast<CmpInst::Predicate>(Pred)) {
695   case CmpInst::ICMP_EQ:
696     return SPIRV::OpPtrEqual;
697   case CmpInst::ICMP_NE:
698     return SPIRV::OpPtrNotEqual;
699   default:
700     llvm_unreachable("Unknown predicate type for pointer comparison");
701   }
702 }
703 
704 // Return the logical operation, or abort if none exists.
705 static unsigned getBoolCmpOpcode(unsigned PredNum) {
706   auto Pred = static_cast<CmpInst::Predicate>(PredNum);
707   switch (Pred) {
708   case CmpInst::ICMP_EQ:
709     return SPIRV::OpLogicalEqual;
710   case CmpInst::ICMP_NE:
711     return SPIRV::OpLogicalNotEqual;
712   default:
713     llvm_unreachable("Unknown predicate type for Bool comparison");
714   }
715 }
716 
717 bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
718                                                 const SPIRVType *ResType,
719                                                 MachineInstr &I) const {
720   MachineBasicBlock &BB = *I.getParent();
721   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
722       .addDef(ResVReg)
723       .addUse(GR.getSPIRVTypeID(ResType))
724       .addUse(I.getOperand(1).getReg())
725       .constrainAllUses(TII, TRI, RBI);
726 }
727 
// Lowers G_BUILD_VECTOR to OpConstantComposite. Every source operand must be
// a constant (reached through its ASSIGN_TYPE pseudo); non-constant vectors
// are not handled yet.
bool SPIRVInstructionSelector::selectConstVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // TODO: only const case is supported for now.
  // Debug-only check: every use operand's def chain must be
  // ASSIGN_TYPE -> G_CONSTANT/G_FCONSTANT.
  assert(std::all_of(
      I.operands_begin(), I.operands_end(), [this](const MachineOperand &MO) {
        if (MO.isDef())
          return true;
        if (!MO.isReg())
          return false;
        SPIRVType *ConstTy = this->MRI->getVRegDef(MO.getReg());
        assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
               ConstTy->getOperand(1).isReg());
        Register ConstReg = ConstTy->getOperand(1).getReg();
        const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
        assert(Const);
        return (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
                Const->getOpcode() == TargetOpcode::G_FCONSTANT);
      }));

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(SPIRV::OpConstantComposite))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  // Append every non-def operand as a composite constituent.
  for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  return MIB.constrainAllUses(TII, TRI, RBI);
}
756 
757 bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
758                                          const SPIRVType *ResType,
759                                          unsigned CmpOpc,
760                                          MachineInstr &I) const {
761   Register Cmp0 = I.getOperand(2).getReg();
762   Register Cmp1 = I.getOperand(3).getReg();
763   assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
764              GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
765          "CMP operands should have the same type");
766   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
767       .addDef(ResVReg)
768       .addUse(GR.getSPIRVTypeID(ResType))
769       .addUse(Cmp0)
770       .addUse(Cmp1)
771       .constrainAllUses(TII, TRI, RBI);
772 }
773 
774 bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
775                                           const SPIRVType *ResType,
776                                           MachineInstr &I) const {
777   auto Pred = I.getOperand(1).getPredicate();
778   unsigned CmpOpc;
779 
780   Register CmpOperand = I.getOperand(2).getReg();
781   if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
782     CmpOpc = getPtrCmpOpcode(Pred);
783   else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
784     CmpOpc = getBoolCmpOpcode(Pred);
785   else
786     CmpOpc = getICmpOpcode(Pred);
787   return selectCmp(ResVReg, ResType, CmpOpc, I);
788 }
789 
// TableGen render hook: append the raw bit pattern of a G_FCONSTANT's float
// immediate to the instruction being built.
void SPIRVInstructionSelector::renderFImm32(MachineInstrBuilder &MIB,
                                            const MachineInstr &I,
                                            int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
         "Expected G_FCONSTANT");
  const ConstantFP *FPImm = I.getOperand(1).getFPImm();
  // Encode the float as its integer bit pattern, as SPIR-V literals require.
  addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
}
798 
// TableGen render hook: append a G_CONSTANT's integer immediate to the
// instruction being built.
void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
                                           const MachineInstr &I,
                                           int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
}
806 
807 Register
808 SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
809                                            const SPIRVType *ResType) const {
810   Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
811   const SPIRVType *SpvI32Ty =
812       ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
813   // Find a constant in DT or build a new one.
814   auto ConstInt = ConstantInt::get(LLVMTy, Val);
815   Register NewReg = GR.find(ConstInt, GR.CurMF);
816   if (!NewReg.isValid()) {
817     NewReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
818     GR.add(ConstInt, GR.CurMF, NewReg);
819     MachineInstr *MI;
820     MachineBasicBlock &BB = *I.getParent();
821     if (Val == 0) {
822       MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
823                .addDef(NewReg)
824                .addUse(GR.getSPIRVTypeID(SpvI32Ty));
825     } else {
826       MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
827                .addDef(NewReg)
828                .addUse(GR.getSPIRVTypeID(SpvI32Ty))
829                .addImm(APInt(32, Val).getZExtValue());
830     }
831     constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
832   }
833   return NewReg;
834 }
835 
836 bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
837                                           const SPIRVType *ResType,
838                                           MachineInstr &I) const {
839   unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
840   return selectCmp(ResVReg, ResType, CmpOp, I);
841 }
842 
// Build (or reuse) a zero constant of the given type.
Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  return buildI32Constant(0, I, ResType);
}
847 
// Build a constant of the given type holding either 1 (AllOnes == false) or
// an all-ones bit pattern (AllOnes == true, i.e. -1 for signed use). For
// vector types the scalar constant is splatted via OpConstantComposite.
Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
  APInt One = AllOnes ? APInt::getAllOnesValue(BitWidth)
                      : APInt::getOneBitSet(BitWidth, 0);
  // NOTE(review): buildI32Constant takes a uint32_t, so for BitWidth > 32 the
  // all-ones value is truncated here — presumably only <=32-bit (or splat-safe)
  // types reach this path; confirm with callers.
  Register OneReg = buildI32Constant(One.getZExtValue(), I, ResType);
  if (ResType->getOpcode() == SPIRV::OpTypeVector) {
    // Operand 2 of OpTypeVector is the element count; splat the scalar.
    const unsigned NumEles = ResType->getOperand(2).getImm();
    Register OneVec = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    unsigned Opcode = SPIRV::OpConstantComposite;
    auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
                   .addDef(OneVec)
                   .addUse(GR.getSPIRVTypeID(ResType));
    for (unsigned i = 0; i < NumEles; ++i)
      MIB.addUse(OneReg);
    constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
    return OneVec;
  }
  return OneReg;
}
869 
// Extend a bool (scalar or vector) to an integer type by selecting between
// constants: true -> 1 (or -1 when IsSigned), false -> 0.
bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I,
                                            bool IsSigned) const {
  // To extend a bool, we need to use OpSelect between constants.
  Register ZeroReg = buildZerosVal(ResType, I);
  Register OneReg = buildOnesVal(IsSigned, ResType, I);
  // Scalar and vector conditions use distinct OpSelect pseudo-opcodes.
  bool IsScalarBool =
      GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
  unsigned Opcode =
      IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .addUse(OneReg)
      .addUse(ZeroReg)
      .constrainAllUses(TII, TRI, RBI);
}
889 
// Select an int-to-float conversion (OpConvertSToF/OpConvertUToF, passed in
// as Opcode). Bool sources are first widened to an integer of the result's
// bit width via OpSelect, matching the SPIRV-LLVM translator's output.
bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I, bool IsSigned,
                                          unsigned Opcode) const {
  Register SrcReg = I.getOperand(1).getReg();
  // We can convert bool value directly to float type without OpConvert*ToF,
  // however the translator generates OpSelect+OpConvert*ToF, so we do the same.
  if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
    unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
    SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
    if (ResType->getOpcode() == SPIRV::OpTypeVector) {
      // Operand 2 of OpTypeVector is the element count; mirror it in the
      // intermediate integer type.
      const unsigned NumElts = ResType->getOperand(2).getImm();
      TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
    }
    // Materialize the bool as 0/1 in a fresh register and convert that.
    SrcReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    selectSelect(SrcReg, TmpType, I, false);
  }
  return selectUnOpWithSrc(ResVReg, ResType, I, SrcReg, Opcode);
}
909 
910 bool SPIRVInstructionSelector::selectExt(Register ResVReg,
911                                          const SPIRVType *ResType,
912                                          MachineInstr &I, bool IsSigned) const {
913   if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool))
914     return selectSelect(ResVReg, ResType, I, IsSigned);
915   unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
916   return selectUnOp(ResVReg, ResType, I, Opcode);
917 }
918 
919 bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
920                                                Register ResVReg,
921                                                const SPIRVType *IntTy,
922                                                const SPIRVType *BoolTy,
923                                                MachineInstr &I) const {
924   // To truncate to a bool, we use OpBitwiseAnd 1 and OpINotEqual to zero.
925   Register BitIntReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
926   bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
927   unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
928   Register Zero = buildZerosVal(IntTy, I);
929   Register One = buildOnesVal(false, IntTy, I);
930   MachineBasicBlock &BB = *I.getParent();
931   BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
932       .addDef(BitIntReg)
933       .addUse(GR.getSPIRVTypeID(IntTy))
934       .addUse(IntReg)
935       .addUse(One)
936       .constrainAllUses(TII, TRI, RBI);
937   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
938       .addDef(ResVReg)
939       .addUse(GR.getSPIRVTypeID(BoolTy))
940       .addUse(BitIntReg)
941       .addUse(Zero)
942       .constrainAllUses(TII, TRI, RBI);
943 }
944 
945 bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
946                                            const SPIRVType *ResType,
947                                            MachineInstr &I) const {
948   if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool)) {
949     Register IntReg = I.getOperand(1).getReg();
950     const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
951     return selectIntToBool(IntReg, ResVReg, ArgType, ResType, I);
952   }
953   bool IsSigned = GR.isScalarOrVectorSigned(ResType);
954   unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
955   return selectUnOp(ResVReg, ResType, I, Opcode);
956 }
957 
958 bool SPIRVInstructionSelector::selectConst(Register ResVReg,
959                                            const SPIRVType *ResType,
960                                            const APInt &Imm,
961                                            MachineInstr &I) const {
962   assert(ResType->getOpcode() != SPIRV::OpTypePointer || Imm.isNullValue());
963   MachineBasicBlock &BB = *I.getParent();
964   if (ResType->getOpcode() == SPIRV::OpTypePointer && Imm.isNullValue()) {
965     return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
966         .addDef(ResVReg)
967         .addUse(GR.getSPIRVTypeID(ResType))
968         .constrainAllUses(TII, TRI, RBI);
969   }
970   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
971                  .addDef(ResVReg)
972                  .addUse(GR.getSPIRVTypeID(ResType));
973   // <=32-bit integers should be caught by the sdag pattern.
974   assert(Imm.getBitWidth() > 32);
975   addNumImm(Imm, MIB);
976   return MIB.constrainAllUses(TII, TRI, RBI);
977 }
978 
979 bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
980                                              const SPIRVType *ResType,
981                                              MachineInstr &I) const {
982   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
983       .addDef(ResVReg)
984       .addUse(GR.getSPIRVTypeID(ResType))
985       .constrainAllUses(TII, TRI, RBI);
986 }
987 
// Return true if the register operand, unwrapped through its ASSIGN_TYPE
// pseudo, is defined by a G_CONSTANT (i.e. it is a compile-time immediate).
static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
  assert(MO.isReg());
  const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
  if (TypeInst->getOpcode() != SPIRV::ASSIGN_TYPE)
    return false;
  assert(TypeInst->getOperand(1).isReg());
  // Operand 1 of ASSIGN_TYPE is the register carrying the wrapped value.
  MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
  return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
}
997 
// Extract the constant value behind an ASSIGN_TYPE-wrapped G_CONSTANT
// operand. Callers must have checked isImm() first.
static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
  const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
  MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
  assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT);
  return ImmInst->getOperand(1).getCImm()->getZExtValue();
}
1004 
// Select an insert-value with a constant index into OpCompositeInsert.
// Note the operand order swap: SPIR-V takes (object, composite) while the
// intrinsic carries (composite, object).
bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      // object to insert
      .addUse(I.getOperand(3).getReg())
      // composite to insert into
      .addUse(I.getOperand(2).getReg())
      // TODO: support arbitrary number of indices
      .addImm(foldImm(I.getOperand(4), MRI))
      .constrainAllUses(TII, TRI, RBI);
}
1020 
1021 bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
1022                                                 const SPIRVType *ResType,
1023                                                 MachineInstr &I) const {
1024   MachineBasicBlock &BB = *I.getParent();
1025   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
1026       .addDef(ResVReg)
1027       .addUse(GR.getSPIRVTypeID(ResType))
1028       .addUse(I.getOperand(2).getReg())
1029       // TODO: support arbitrary number of indices
1030       .addImm(foldImm(I.getOperand(3), MRI))
1031       .constrainAllUses(TII, TRI, RBI);
1032 }
1033 
// Select an insert-element: constant indices fold to OpCompositeInsert,
// dynamic indices use OpVectorInsertDynamic.
bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (isImm(I.getOperand(4), MRI))
    return selectInsertVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  // Operands: vector, component to insert, dynamic index.
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .addUse(I.getOperand(4).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
1048 
// Select an extract-element: constant indices fold to OpCompositeExtract,
// dynamic indices use OpVectorExtractDynamic.
bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  if (isImm(I.getOperand(3), MRI))
    return selectExtractVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  // Operands: vector, dynamic index.
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
1062 
// Select a spv_gep intrinsic into a (possibly in-bounds) OpPtrAccessChain.
// Operand 2 is the in-bounds flag, operand 3 the base pointer, and the rest
// are the chain indices.
bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  // In general we should also support OpAccessChain instrs here (i.e. not
  // PtrAccessChain) but SPIRV-LLVM Translator doesn't emit them at all and so
  // do we to stay compliant with its test and more importantly consumers.
  unsigned Opcode = I.getOperand(2).getImm() ? SPIRV::OpInBoundsPtrAccessChain
                                             : SPIRV::OpPtrAccessChain;
  auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 // Object to get a pointer to.
                 .addUse(I.getOperand(3).getReg());
  // Adding indices.
  for (unsigned i = 4; i < I.getNumExplicitOperands(); ++i)
    Res.addUse(I.getOperand(i).getReg());
  return Res.constrainAllUses(TII, TRI, RBI);
}
1081 
1082 bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
1083                                                const SPIRVType *ResType,
1084                                                MachineInstr &I) const {
1085   MachineBasicBlock &BB = *I.getParent();
1086   switch (I.getIntrinsicID()) {
1087   case Intrinsic::spv_load:
1088     return selectLoad(ResVReg, ResType, I);
1089     break;
1090   case Intrinsic::spv_store:
1091     return selectStore(I);
1092     break;
1093   case Intrinsic::spv_extractv:
1094     return selectExtractVal(ResVReg, ResType, I);
1095     break;
1096   case Intrinsic::spv_insertv:
1097     return selectInsertVal(ResVReg, ResType, I);
1098     break;
1099   case Intrinsic::spv_extractelt:
1100     return selectExtractElt(ResVReg, ResType, I);
1101     break;
1102   case Intrinsic::spv_insertelt:
1103     return selectInsertElt(ResVReg, ResType, I);
1104     break;
1105   case Intrinsic::spv_gep:
1106     return selectGEP(ResVReg, ResType, I);
1107     break;
1108   case Intrinsic::spv_unref_global:
1109   case Intrinsic::spv_init_global: {
1110     MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
1111     MachineInstr *Init = I.getNumExplicitOperands() > 2
1112                              ? MRI->getVRegDef(I.getOperand(2).getReg())
1113                              : nullptr;
1114     assert(MI);
1115     return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
1116   } break;
1117   case Intrinsic::spv_const_composite: {
1118     // If no values are attached, the composite is null constant.
1119     bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
1120     unsigned Opcode =
1121         IsNull ? SPIRV::OpConstantNull : SPIRV::OpConstantComposite;
1122     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
1123                    .addDef(ResVReg)
1124                    .addUse(GR.getSPIRVTypeID(ResType));
1125     // skip type MD node we already used when generated assign.type for this
1126     if (!IsNull) {
1127       for (unsigned i = I.getNumExplicitDefs() + 1;
1128            i < I.getNumExplicitOperands(); ++i) {
1129         MIB.addUse(I.getOperand(i).getReg());
1130       }
1131     }
1132     return MIB.constrainAllUses(TII, TRI, RBI);
1133   } break;
1134   case Intrinsic::spv_assign_name: {
1135     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
1136     MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
1137     for (unsigned i = I.getNumExplicitDefs() + 2;
1138          i < I.getNumExplicitOperands(); ++i) {
1139       MIB.addImm(I.getOperand(i).getImm());
1140     }
1141     return MIB.constrainAllUses(TII, TRI, RBI);
1142   } break;
1143   case Intrinsic::spv_switch: {
1144     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
1145     for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
1146       if (I.getOperand(i).isReg())
1147         MIB.addReg(I.getOperand(i).getReg());
1148       else if (I.getOperand(i).isCImm())
1149         addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
1150       else if (I.getOperand(i).isMBB())
1151         MIB.addMBB(I.getOperand(i).getMBB());
1152       else
1153         llvm_unreachable("Unexpected OpSwitch operand");
1154     }
1155     return MIB.constrainAllUses(TII, TRI, RBI);
1156   } break;
1157   default:
1158     llvm_unreachable("Intrinsic selection not implemented");
1159   }
1160   return true;
1161 }
1162 
1163 bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
1164                                                 const SPIRVType *ResType,
1165                                                 MachineInstr &I) const {
1166   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
1167       .addDef(ResVReg)
1168       .addUse(GR.getSPIRVTypeID(ResType))
1169       .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
1170       .constrainAllUses(TII, TRI, RBI);
1171 }
1172 
// Select G_BR. If the immediately preceding instruction is a G_BRCOND, the
// pair is fused into one OpBranchConditional here.
bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. We can use
  // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
  // first, so can generate an OpBranchConditional here. If there is no
  // G_BRCOND, we just use OpBranch for a regular unconditional branch.
  const MachineInstr *PrevI = I.getPrevNode();
  MachineBasicBlock &MBB = *I.getParent();
  if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
    // Condition and true-target come from the G_BRCOND; the false-target is
    // this G_BR's destination.
    return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
        .addUse(PrevI->getOperand(0).getReg())
        .addMBB(PrevI->getOperand(1).getMBB())
        .addMBB(I.getOperand(0).getMBB())
        .constrainAllUses(TII, TRI, RBI);
  }
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
      .addMBB(I.getOperand(0).getMBB())
      .constrainAllUses(TII, TRI, RBI);
}
1191 
// Select a G_BRCOND that was not already fused into an OpBranchConditional
// by selectBranch (i.e. the false edge is an implicit fallthrough).
bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. For an
  // explicit conditional branch with no fallthrough, we use both a G_BR and a
  // G_BRCOND to create an OpBranchConditional. We should hit G_BR first, and
  // generate the OpBranchConditional in selectBranch above.
  //
  // If an OpBranchConditional has been generated, we simply return, as the work
  // is already done. If there is no OpBranchConditional, LLVM must be relying
  // on implicit fallthrough to the next basic block, so we need to create an
  // OpBranchConditional with an explicit "false" argument pointing to the next
  // basic block that LLVM would fall through to.
  const MachineInstr *NextI = I.getNextNode();
  // Check if this has already been successfully selected.
  if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
    return true;
  // Must be relying on implicit block fallthrough, so generate an
  // OpBranchConditional with the "next" basic block as the "false" target.
  // NOTE(review): assumes MBB has a next block — true whenever fallthrough is
  // possible, but worth confirming for the last block of a function.
  MachineBasicBlock &MBB = *I.getParent();
  unsigned NextMBBNum = MBB.getNextNode()->getNumber();
  MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
      .addUse(I.getOperand(0).getReg())
      .addMBB(I.getOperand(1).getMBB())
      .addMBB(NextMBB)
      .constrainAllUses(TII, TRI, RBI);
}
1218 
1219 bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
1220                                          const SPIRVType *ResType,
1221                                          MachineInstr &I) const {
1222   auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
1223                  .addDef(ResVReg)
1224                  .addUse(GR.getSPIRVTypeID(ResType));
1225   const unsigned NumOps = I.getNumOperands();
1226   for (unsigned i = 1; i < NumOps; i += 2) {
1227     MIB.addUse(I.getOperand(i + 0).getReg());
1228     MIB.addMBB(I.getOperand(i + 1).getMBB());
1229   }
1230   return MIB.constrainAllUses(TII, TRI, RBI);
1231 }
1232 
// Select a G_GLOBAL_VALUE into an OpVariable via the global registry.
// Init, when non-null, is the initializer instruction forwarded from the
// spv_init_global intrinsic.
bool SPIRVInstructionSelector::selectGlobalValue(
    Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
  // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
  MachineIRBuilder MIRBuilder(I);
  const GlobalValue *GV = I.getOperand(1).getGlobal();
  SPIRVType *ResType = GR.getOrCreateSPIRVType(
      GV->getType(), MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);

  std::string GlobalIdent = GV->getGlobalIdentifier();
  // TODO: support @llvm.global.annotations.
  auto GlobalVar = cast<GlobalVariable>(GV);

  bool HasInit = GlobalVar->hasInitializer() &&
                 !isa<UndefValue>(GlobalVar->getInitializer());
  // Skip empty declaration for GVs with initializers till we get the decl with
  // passed initializer.
  if (HasInit && !Init)
    return true;

  unsigned AddrSpace = GV->getAddressSpace();
  SPIRV::StorageClass Storage = addressSpaceToStorageClass(AddrSpace);
  // Only externally visible, non-Function-storage variables get a Linkage
  // decoration.
  bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage &&
                  Storage != SPIRV::StorageClass::Function;
  SPIRV::LinkageType LnkType =
      (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
          ? SPIRV::LinkageType::Import
          : SPIRV::LinkageType::Export;

  Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
                                        Storage, Init, GlobalVar->isConstant(),
                                        HasLnkTy, LnkType, MIRBuilder, true);
  return Reg.isValid();
}
1266 
namespace llvm {
// Factory entry point used by the SPIRV target to register this selector
// with the GlobalISel pipeline; caller takes ownership of the result.
InstructionSelector *
createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                               const SPIRVSubtarget &Subtarget,
                               const RegisterBankInfo &RBI) {
  return new SPIRVInstructionSelector(TM, Subtarget, RBI);
}
} // namespace llvm
1275