xref: /llvm-project/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp (revision 540d255167742b6464a01f052d31c704fc4dd5aa)
//===- SPIRVInstructionSelector.cpp ------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the targeting of the InstructionSelector class for
// SPIRV.
// TODO: This should be generated by TableGen.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/SPIRVBaseInfo.h"
#include "MCTargetDesc/SPIRVMCTargetDesc.h"
#include "SPIRV.h"
#include "SPIRVGlobalRegistry.h"
#include "SPIRVInstrInfo.h"
#include "SPIRVRegisterBankInfo.h"
#include "SPIRVRegisterInfo.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "spirv-isel"

using namespace llvm;
namespace CL = SPIRV::OpenCLExtInst;
namespace GL = SPIRV::GLSLExtInst;

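// A list of (instruction set, extended-instruction opcode) candidates;
// selectExtInst() walks such a list in order and emits the first entry whose
// instruction set is available on the current subtarget.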
using ExtInstList =
    std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class SPIRVInstructionSelector : public InstructionSelector {
  const SPIRVSubtarget &STI;
  const SPIRVInstrInfo &TII;
  const SPIRVRegisterInfo &TRI;
  const RegisterBankInfo &RBI;
  SPIRVGlobalRegistry &GR;
  MachineRegisterInfo *MRI;

  /// We need to keep track of the number we give to anonymous global values to
  /// generate the same name every time this is needed.
  mutable DenseMap<const GlobalValue *, unsigned> UnnamedGlobalIDs;

public:
  SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                           const SPIRVSubtarget &ST,
                           const RegisterBankInfo &RBI);
  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override;
  // Common selection code. Instruction-specific selection occurs in spvSelect.
  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

private:
  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // All instruction-specific selection that didn't happen in select().
  // It is basically a large switch/case delegating to all the other select
  // methods.
  bool spvSelect(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectGlobalValue(Register ResVReg, MachineInstr &I,
                         const MachineInstr *Init = nullptr) const;

  bool selectUnOpWithSrc(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I, Register SrcReg,
                         unsigned Opcode) const;
  bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  unsigned Opcode) const;

  bool selectLoad(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectStore(MachineInstr &I) const;

  bool selectStackSave(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectStackRestore(MachineInstr &I) const;

  bool selectMemOperation(Register ResVReg, MachineInstr &I) const;

  bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I, unsigned NewOpcode,
                       unsigned NegateOpcode = 0) const;

  bool selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectFence(MachineInstr &I) const;

  bool selectAddrSpaceCast(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectBitreverse(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectConstVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectCmp(Register ResVReg, const SPIRVType *ResType,
                 unsigned comparisonOpcode, MachineInstr &I) const;

  bool selectICmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectFCmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  void renderImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                   int OpIdx) const;
  void renderFImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                    int OpIdx) const;

  bool selectConst(Register ResVReg, const SPIRVType *ResType, const APInt &Imm,
                   MachineInstr &I) const;

  bool selectSelect(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                    bool IsSigned) const;
  bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  bool IsSigned, unsigned Opcode) const;
  bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                 bool IsSigned) const;

  bool selectTrunc(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectIntToBool(Register IntReg, Register ResVReg, MachineInstr &I,
                       const SPIRVType *intTy, const SPIRVType *boolTy) const;

  bool selectOpUndef(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;
  bool selectFreeze(Register ResVReg, const SPIRVType *ResType,
                    MachineInstr &I) const;
  bool selectIntrinsic(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractVal(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertVal(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractElt(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertElt(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectGEP(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectFrameIndex(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectAllocaArray(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectBranch(MachineInstr &I) const;
  bool selectBranchCond(MachineInstr &I) const;

  bool selectPhi(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst,
                     GL::GLSLExtInst GLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, const ExtInstList &ExtInsts) const;

  bool selectLog10(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectUnmergeValues(MachineInstr &I) const;

  Register buildI32Constant(uint32_t Val, MachineInstr &I,
                            const SPIRVType *ResType = nullptr) const;

  Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildOnesVal(bool AllOnes, const SPIRVType *ResType,
                        MachineInstr &I) const;
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                                                   const SPIRVSubtarget &ST,
                                                   const RegisterBankInfo &RBI)
    : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                       CodeGenCoverage *CoverageInfo,
                                       ProfileSummaryInfo *PSI,
                                       BlockFrequencyInfo *BFI) {
  MRI = &MF.getRegInfo();
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}

static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI);

// Defined in SPIRVLegalizerInfo.cpp.
extern bool isTypeFoldingSupported(unsigned Opcode);

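// ASSIGN_TYPE is a SPIR-V pseudo tying a value vreg to a SPIRV type vreg,
// roughly (illustrative MIR; register names and classes are hypothetical):
//   %val = G_CONSTANT i32 42
//   %dst = ASSIGN_TYPE %val, %int32_type
// select() either folds the assignment through the TableGen'ed selectImpl()
// or just rewires %val to %dst and erases the pseudo.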
bool SPIRVInstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  unsigned Opcode = I.getOpcode();
  // If it's not a GMIR instruction, we've selected it already.
  if (!isPreISelGenericOpcode(Opcode)) {
    if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed any more.
      auto *Def = MRI->getVRegDef(I.getOperand(1).getReg());
      if (isTypeFoldingSupported(Def->getOpcode())) {
        bool Res = selectImpl(I, *CoverageInfo);
        assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
        if (Res)
          return Res;
      }
      MRI->replaceRegWith(I.getOperand(1).getReg(), I.getOperand(0).getReg());
      I.removeFromParent();
      return true;
    } else if (I.getNumDefs() == 1) {
      // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(I.getOperand(0).getReg(), LLT::scalar(32));
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
    return false;
  }

  // Common code for getting return reg+type, and removing selected instr
  // from parent occurs here. Instr-specific selection happens in spvSelect().
  bool HasDefs = I.getNumDefs() > 0;
  Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
  assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  if (spvSelect(ResVReg, ResType, I)) {
    if (HasDefs) // Make all vregs 32 bits (for SPIR-V IDs).
      for (unsigned i = 0; i < I.getNumDefs(); ++i)
        MRI->setType(I.getOperand(i).getReg(), LLT::scalar(32));
    I.removeFromParent();
    return true;
  }
  return false;
}

bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const unsigned Opcode = I.getOpcode();
  if (isTypeFoldingSupported(Opcode) && Opcode != TargetOpcode::G_CONSTANT)
    return selectImpl(I, *CoverageInfo);
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);
  case TargetOpcode::G_FREEZE:
    return selectFreeze(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectConstVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMSET:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
  case TargetOpcode::G_SMIN:
    return selectExtInst(ResVReg, ResType, I, CL::s_min, GL::SMin);
  case TargetOpcode::G_UMIN:
    return selectExtInst(ResVReg, ResType, I, CL::u_min, GL::UMin);

  case TargetOpcode::G_SMAX:
    return selectExtInst(ResVReg, ResType, I, CL::s_max, GL::SMax);
  case TargetOpcode::G_UMAX:
    return selectExtInst(ResVReg, ResType, I, CL::u_max, GL::UMax);

  case TargetOpcode::G_FMA:
    return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma);

  case TargetOpcode::G_FPOW:
    return selectExtInst(ResVReg, ResType, I, CL::pow, GL::Pow);
  case TargetOpcode::G_FPOWI:
    return selectExtInst(ResVReg, ResType, I, CL::pown);

  case TargetOpcode::G_FEXP:
    return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
  case TargetOpcode::G_FEXP2:
    return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);

  case TargetOpcode::G_FLOG:
    return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
  case TargetOpcode::G_FLOG2:
    return selectExtInst(ResVReg, ResType, I, CL::log2, GL::Log2);
  case TargetOpcode::G_FLOG10:
    return selectLog10(ResVReg, ResType, I);

  case TargetOpcode::G_FABS:
    return selectExtInst(ResVReg, ResType, I, CL::fabs, GL::FAbs);
  case TargetOpcode::G_ABS:
    return selectExtInst(ResVReg, ResType, I, CL::s_abs, GL::SAbs);

  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmin, GL::FMin);
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMAXIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmax, GL::FMax);

  case TargetOpcode::G_FCOPYSIGN:
    return selectExtInst(ResVReg, ResType, I, CL::copysign);

  case TargetOpcode::G_FCEIL:
    return selectExtInst(ResVReg, ResType, I, CL::ceil, GL::Ceil);
  case TargetOpcode::G_FFLOOR:
    return selectExtInst(ResVReg, ResType, I, CL::floor, GL::Floor);

  case TargetOpcode::G_FCOS:
    return selectExtInst(ResVReg, ResType, I, CL::cos, GL::Cos);
  case TargetOpcode::G_FSIN:
    return selectExtInst(ResVReg, ResType, I, CL::sin, GL::Sin);

  case TargetOpcode::G_FSQRT:
    return selectExtInst(ResVReg, ResType, I, CL::sqrt, GL::Sqrt);

  case TargetOpcode::G_CTTZ:
  case TargetOpcode::G_CTTZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::ctz);
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTLZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::clz);

  case TargetOpcode::G_INTRINSIC_ROUND:
    return selectExtInst(ResVReg, ResType, I, CL::round, GL::Round);
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return selectExtInst(ResVReg, ResType, I, CL::trunc, GL::Trunc);
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FNEARBYINT:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);

  case TargetOpcode::G_SMULH:
    return selectExtInst(ResVReg, ResType, I, CL::s_mul_hi);
  case TargetOpcode::G_UMULH:
    return selectExtInst(ResVReg, ResType, I, CL::u_mul_hi);

  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);
  case TargetOpcode::G_PTR_ADD: {
    // Currently, we get G_PTR_ADD only as a result of translating global
    // variables initialized with constant expressions like GV + Const
    // (see test opencl/basic/progvar_prog_scope_init.ll).
    // TODO: extend the handler once we have other cases.
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    Register GV = I.getOperand(1).getReg();
    MachineRegisterInfo::def_instr_iterator II = MRI->def_instr_begin(GV);
    assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
            (*II).getOpcode() == TargetOpcode::COPY ||
            (*II).getOpcode() == SPIRV::OpVariable) &&
           isImm(I.getOperand(2), MRI));
    Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addImm(static_cast<uint32_t>(
                       SPIRV::Opcode::InBoundsPtrAccessChain))
                   .addUse(GV)
                   .addUse(Idx)
                   .addUse(I.getOperand(2).getReg());
    return MIB.constrainAllUses(TII, TRI, RBI);
  }

  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_ATOMICRMW_FADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT);
  case TargetOpcode::G_ATOMICRMW_FSUB:
    // Translate G_ATOMICRMW_FSUB to OpAtomicFAddEXT with negative value operand
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT,
                           SPIRV::OpFNegate);
  case TargetOpcode::G_ATOMICRMW_FMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMinEXT);
  case TargetOpcode::G_ATOMICRMW_FMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMaxEXT);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  case TargetOpcode::G_STACKSAVE:
    return selectStackSave(ResVReg, ResType, I);
  case TargetOpcode::G_STACKRESTORE:
    return selectStackRestore(I);

  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I);

  default:
    return false;
  }
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst) const {
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst,
                                             GL::GLSLExtInst GLInst) const {
  ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
                          {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
  return selectExtInst(ResVReg, ResType, I, ExtInsts);
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             const ExtInstList &Insts) const {
  for (const auto &Ex : Insts) {
    SPIRV::InstructionSet::InstructionSet Set = Ex.first;
    uint32_t Opcode = Ex.second;
    if (STI.canUseExtInstSet(Set)) {
      MachineBasicBlock &BB = *I.getParent();
      auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
                     .addDef(ResVReg)
                     .addUse(GR.getSPIRVTypeID(ResType))
                     .addImm(static_cast<uint32_t>(Set))
                     .addImm(Opcode);
      const unsigned NumOps = I.getNumOperands();
      for (unsigned i = 1; i < NumOps; ++i)
        MIB.add(I.getOperand(i));
      return MIB.constrainAllUses(TII, TRI, RBI);
    }
  }
  return false;
}

bool SPIRVInstructionSelector::selectUnOpWithSrc(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I,
                                                 Register SrcReg,
                                                 unsigned Opcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I,
                                          unsigned Opcode) const {
  return selectUnOpWithSrc(ResVReg, ResType, I, I.getOperand(1).getReg(),
                           Opcode);
}

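// Map an LLVM synchronization scope to a SPIR-V scope. Only the two sync
// scopes that LLVM defines by default are handled here; anything else aborts.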
static SPIRV::Scope::Scope getScope(SyncScope::ID Ord) {
  switch (Ord) {
  case SyncScope::SingleThread:
    return SPIRV::Scope::Invocation;
  case SyncScope::System:
    return SPIRV::Scope::Device;
  default:
    llvm_unreachable("Unsupported synchronization Scope ID.");
  }
}

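// Encode a machine memory operand as a SPIR-V MemoryOperand bitmask. Per the
// SPIR-V spec, the Aligned flag must be followed by an extra literal holding
// the alignment value, which is why it is appended separately below.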
static void addMemoryOperands(MachineMemOperand *MemOp,
                              MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    MIB.addImm(SpvMemOp);
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
      MIB.addImm(MemOp->getAlign().value());
  }
}

static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (Flags & MachineMemOperand::Flags::MOVolatile)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (Flags & MachineMemOperand::Flags::MONonTemporal)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
    MIB.addImm(SpvMemOp);
}

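// Loads and stores coming from spv intrinsics carry no MachineMemOperand;
// their memory-operand flags arrive as an immediate instead, which is handled
// by the Flags-based addMemoryOperands overload above.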
bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(Ptr);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
                 .addUse(Ptr)
                 .addUse(StoreVal);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackSave(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stacksave intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSaveMemoryINTEL))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stackrestore intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  if (!I.getOperand(0).isReg())
    return false;
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpRestoreMemoryINTEL))
      .addUse(I.getOperand(0).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

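// G_MEMSET is lowered by materializing the fill pattern as a constant i8
// array in UniformConstant storage and bitcasting a pointer to it, so that
// all three mem operations share the OpCopyMemorySized tail below.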
bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Register SrcReg = I.getOperand(1).getReg();
  if (I.getOpcode() == TargetOpcode::G_MEMSET) {
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI);
    unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI);
    SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num, I, TII);
    Register Const = GR.getOrCreateConsIntArray(Val, I, ArrTy, TII);
    SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
        ArrTy, I, TII, SPIRV::StorageClass::UniformConstant);
    // TODO: check if we have such GV, add init, use buildGlobalVariable.
    Type *LLVMArrTy = ArrayType::get(
        IntegerType::get(GR.CurMF->getFunction().getContext(), 8), Num);
    GlobalVariable *GV =
        new GlobalVariable(LLVMArrTy, true, GlobalValue::InternalLinkage);
    Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    GR.add(GV, GR.CurMF, VarReg);

    buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {});
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
        .addDef(VarReg)
        .addUse(GR.getSPIRVTypeID(VarTy))
        .addImm(SPIRV::StorageClass::UniformConstant)
        .addUse(Const)
        .constrainAllUses(TII, TRI, RBI);
    SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
        ValTy, I, TII, SPIRV::StorageClass::UniformConstant);
    SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    selectUnOpWithSrc(SrcReg, SourceTy, I, VarReg, SPIRV::OpBitcast);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addUse(I.getOperand(0).getReg())
                 .addUse(SrcReg)
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
    addMemoryOperands(*I.memoperands_begin(), MIB);
  bool Result = MIB.constrainAllUses(TII, TRI, RBI);
  if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg())
    BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
        .addUse(MIB->getOperand(0).getReg());
  return Result;
}

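// Atomic RMW operations take explicit scope and memory-semantics operands in
// SPIR-V, materialized here as i32 constants. A nonzero NegateOpcode is first
// applied to the value operand (used to express G_ATOMICRMW_FSUB via
// OpAtomicFAddEXT).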
bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode,
                                               unsigned NegateOpcode) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(1).getReg();
  // TODO: Changed to match the translator's implementation. See
  // test/atomicrmw.ll.
  // auto ScSem =
  // getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr));
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);

  bool Result = false;
  Register ValueReg = I.getOperand(2).getReg();
  if (NegateOpcode != 0) {
    // Translation with a negated value operand is requested.
    Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    Result |= selectUnOpWithSrc(TmpReg, ResType, I, ValueReg, NegateOpcode);
    ValueReg = TmpReg;
  }

  Result |= BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(Ptr)
                .addUse(ScopeReg)
                .addUse(MemSemReg)
                .addUse(ValueReg)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}

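// G_UNMERGE_VALUES of a vector is selected as one OpCompositeExtract per
// scalar result; results that never got an "assign type" action inherit the
// source vector's element type below.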
bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
  unsigned ArgI = I.getNumOperands() - 1;
  Register SrcReg =
      I.getOperand(ArgI).isReg() ? I.getOperand(ArgI).getReg() : Register(0);
  SPIRVType *DefType =
      SrcReg.isValid() ? GR.getSPIRVTypeForVReg(SrcReg) : nullptr;
  if (!DefType || DefType->getOpcode() != SPIRV::OpTypeVector)
    report_fatal_error(
        "cannot select G_UNMERGE_VALUES with a non-vector argument");

  SPIRVType *ScalarType =
      GR.getSPIRVTypeForVReg(DefType->getOperand(1).getReg());
  MachineBasicBlock &BB = *I.getParent();
  bool Res = false;
  for (unsigned i = 0; i < I.getNumDefs(); ++i) {
    Register ResVReg = I.getOperand(i).getReg();
    SPIRVType *ResType = GR.getSPIRVTypeForVReg(ResVReg);
    if (!ResType) {
      // There were no "assign type" actions, so let's fix this now.
      ResType = ScalarType;
      MRI->setRegClass(ResVReg, &SPIRV::IDRegClass);
      MRI->setType(ResVReg, LLT::scalar(GR.getScalarOrVectorBitWidth(ResType)));
      GR.assignSPIRVTypeToVReg(ResType, ResVReg, *GR.CurMF);
    }
    auto MIB =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(SrcReg)
            .addImm(static_cast<int64_t>(i));
    Res |= MIB.constrainAllUses(TII, TRI, RBI);
  }
  return Res;
}

bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
  AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem, I);
  SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
  uint32_t Scope = static_cast<uint32_t>(getScope(Ord));
  Register ScopeReg = buildI32Constant(Scope, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .constrainAllUses(TII, TRI, RBI);
}

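// G_ATOMIC_CMPXCHG produces a {value, success} pair, which has no single
// SPIR-V equivalent: OpAtomicCompareExchange yields only the old value, so
// the success flag is recomputed with OpIEqual and the pair is assembled via
// two OpCompositeInserts.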
bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  Register ScopeReg;
  Register MemSemEqReg;
  Register MemSemNeqReg;
  Register Ptr = I.getOperand(2).getReg();
  if (!isa<GIntrinsic>(I)) {
    assert(I.hasOneMemOperand());
    const MachineMemOperand *MemOp = *I.memoperands_begin();
    unsigned Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
    ScopeReg = buildI32Constant(Scope, I);

    unsigned ScSem = static_cast<uint32_t>(
        getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
    AtomicOrdering AO = MemOp->getSuccessOrdering();
    unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
    MemSemEqReg = buildI32Constant(MemSemEq, I);
    AtomicOrdering FO = MemOp->getFailureOrdering();
    unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
    MemSemNeqReg =
        MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
  } else {
    ScopeReg = I.getOperand(5).getReg();
    MemSemEqReg = I.getOperand(6).getReg();
    MemSemNeqReg = I.getOperand(7).getReg();
  }

  Register Cmp = I.getOperand(3).getReg();
  Register Val = I.getOperand(4).getReg();
  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  Register ACmpRes = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  const DebugLoc &DL = I.getDebugLoc();
  bool Result =
      BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
          .addDef(ACmpRes)
          .addUse(GR.getSPIRVTypeID(SpvValTy))
          .addUse(Ptr)
          .addUse(ScopeReg)
          .addUse(MemSemEqReg)
          .addUse(MemSemNeqReg)
          .addUse(Val)
          .addUse(Cmp)
          .constrainAllUses(TII, TRI, RBI);
  Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
                .addDef(CmpSuccReg)
                .addUse(GR.getSPIRVTypeID(BoolTy))
                .addUse(ACmpRes)
                .addUse(Cmp)
                .constrainAllUses(TII, TRI, RBI);
  Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(TmpReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(ACmpRes)
                .addUse(GR.getOrCreateUndef(I, ResType, TII))
                .addImm(0)
                .constrainAllUses(TII, TRI, RBI);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(CmpSuccReg)
                .addUse(TmpReg)
                .addImm(1)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}

static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Workgroup:
  case SPIRV::StorageClass::CrossWorkgroup:
  case SPIRV::StorageClass::Function:
    return true;
  default:
    return false;
  }
}

static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::DeviceOnlyINTEL:
  case SPIRV::StorageClass::HostOnlyINTEL:
    return true;
  default:
    return false;
  }
}

// In SPIR-V address space casting can only happen to and from the Generic
// storage class. We can also only cast Workgroup, CrossWorkgroup, or Function
// pointers to and from Generic pointers. As such, we can convert e.g. from
// Workgroup to Function by going via a Generic pointer as an intermediary. All
// other combinations can only be done by a bitcast, and are probably not safe.
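// For example, a Workgroup-to-Function cast is emitted as (illustrative
// SPIR-V, result names are hypothetical):
//   %tmp = OpPtrCastToGeneric %GenericPtrTy %src
//   %dst = OpGenericCastToPtr %FunctionPtrTy %tmp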
bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  // If the AddrSpaceCast has a single user and that user is an
  // OpConstantComposite, an OpVariable, or an spv_init_global intrinsic, we
  // should select OpSpecConstantOp.
  auto UIs = MRI->use_instructions(ResVReg);
  if (!UIs.empty() && ++UIs.begin() == UIs.end() &&
      (UIs.begin()->getOpcode() == SPIRV::OpConstantComposite ||
       UIs.begin()->getOpcode() == SPIRV::OpVariable ||
       isSpvIntrinsic(*UIs.begin(), Intrinsic::spv_init_global))) {
    Register NewReg = I.getOperand(1).getReg();
    MachineBasicBlock &BB = *I.getParent();
    SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII,
                                             SPIRV::StorageClass::Generic);
    bool Result =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric))
            .addUse(NewReg)
            .constrainAllUses(TII, TRI, RBI);
    return Result;
  }
  Register SrcPtr = I.getOperand(1).getReg();
  SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
  SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);
  SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResVReg);

  // Don't generate a cast between identical storage classes.
  if (SrcSC == DstSC)
    return true;

  // Casting from an eligible pointer to Generic.
  if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  // Casting from Generic to an eligible pointer.
  if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
  // Casting between 2 eligible pointers using Generic as an intermediary.
  if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
    Register Tmp = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
        SrcPtrTy, I, TII, SPIRV::StorageClass::Generic);
    MachineBasicBlock &BB = *I.getParent();
    const DebugLoc &DL = I.getDebugLoc();
    bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
                       .addDef(Tmp)
                       .addUse(GR.getSPIRVTypeID(GenericPtrTy))
                       .addUse(SrcPtr)
                       .constrainAllUses(TII, TRI, RBI);
    return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
                          .addDef(ResVReg)
                          .addUse(GR.getSPIRVTypeID(ResType))
                          .addUse(Tmp)
                          .constrainAllUses(TII, TRI, RBI);
  }

  // Check if instructions from the SPV_INTEL_usm_storage_classes extension may
  // be applied.
  if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::CrossWorkgroup)
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpPtrCastToCrossWorkgroupINTEL);
  if (SrcSC == SPIRV::StorageClass::CrossWorkgroup && isUSMStorageClass(DstSC))
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpCrossWorkgroupCastToPtrINTEL);

  // TODO: Should this case just be disallowed completely?
  // We're casting 2 other arbitrary address spaces, so have to bitcast.
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}

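// Maps FCMP_O* predicates to OpFOrd* instructions (which yield false when
// either operand is NaN) and FCMP_U* predicates to OpFUnord* instructions
// (which yield true when either operand is NaN).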
static unsigned getFCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::FCMP_OEQ:
    return SPIRV::OpFOrdEqual;
  case CmpInst::FCMP_OGE:
    return SPIRV::OpFOrdGreaterThanEqual;
  case CmpInst::FCMP_OGT:
    return SPIRV::OpFOrdGreaterThan;
  case CmpInst::FCMP_OLE:
    return SPIRV::OpFOrdLessThanEqual;
  case CmpInst::FCMP_OLT:
    return SPIRV::OpFOrdLessThan;
  case CmpInst::FCMP_ONE:
    return SPIRV::OpFOrdNotEqual;
  case CmpInst::FCMP_ORD:
    return SPIRV::OpOrdered;
  case CmpInst::FCMP_UEQ:
    return SPIRV::OpFUnordEqual;
  case CmpInst::FCMP_UGE:
    return SPIRV::OpFUnordGreaterThanEqual;
  case CmpInst::FCMP_UGT:
    return SPIRV::OpFUnordGreaterThan;
  case CmpInst::FCMP_ULE:
    return SPIRV::OpFUnordLessThanEqual;
  case CmpInst::FCMP_ULT:
    return SPIRV::OpFUnordLessThan;
  case CmpInst::FCMP_UNE:
    return SPIRV::OpFUnordNotEqual;
  case CmpInst::FCMP_UNO:
    return SPIRV::OpUnordered;
  default:
    llvm_unreachable("Unknown predicate type for FCmp");
  }
}

static unsigned getICmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpIEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpINotEqual;
  case CmpInst::ICMP_SGE:
    return SPIRV::OpSGreaterThanEqual;
  case CmpInst::ICMP_SGT:
    return SPIRV::OpSGreaterThan;
  case CmpInst::ICMP_SLE:
    return SPIRV::OpSLessThanEqual;
  case CmpInst::ICMP_SLT:
    return SPIRV::OpSLessThan;
  case CmpInst::ICMP_UGE:
    return SPIRV::OpUGreaterThanEqual;
  case CmpInst::ICMP_UGT:
    return SPIRV::OpUGreaterThan;
  case CmpInst::ICMP_ULE:
    return SPIRV::OpULessThanEqual;
  case CmpInst::ICMP_ULT:
    return SPIRV::OpULessThan;
  default:
    llvm_unreachable("Unknown predicate type for ICmp");
  }
}

static unsigned getPtrCmpOpcode(unsigned Pred) {
  switch (static_cast<CmpInst::Predicate>(Pred)) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpPtrEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpPtrNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for pointer comparison");
  }
}

// Return the logical operation, or abort if none exists.
static unsigned getBoolCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpLogicalEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpLogicalNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for Bool comparison");
  }
}

bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectFreeze(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I) const {
  // There is no way to implement `freeze` correctly without support on the
  // SPIR-V standard side, but we may at least address a simple (static) case
  // when the presence of an undef/poison value is obvious. The main benefit of
  // even incomplete `freeze` support is preventing the translation from
  // crashing due to lack of support at the legalization and instruction
  // selection steps.
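  // Concretely: if the operand is known to come from OpUndef/G_IMPLICIT_DEF,
  // the freeze is selected as OpConstantNull of the frozen type; any other
  // operand is simply copied.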
  if (!I.getOperand(0).isReg() || !I.getOperand(1).isReg())
    return false;
  Register OpReg = I.getOperand(1).getReg();
  if (MachineInstr *Def = MRI->getVRegDef(OpReg)) {
    Register Reg;
    switch (Def->getOpcode()) {
    case SPIRV::ASSIGN_TYPE:
      if (MachineInstr *AssignToDef =
              MRI->getVRegDef(Def->getOperand(1).getReg())) {
        if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
          Reg = Def->getOperand(2).getReg();
      }
      break;
    case SPIRV::OpUndef:
      Reg = Def->getOperand(1).getReg();
      break;
    }
    unsigned DestOpCode;
    if (Reg.isValid()) {
      DestOpCode = SPIRV::OpConstantNull;
    } else {
      DestOpCode = TargetOpcode::COPY;
      Reg = OpReg;
    }
    return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(DestOpCode))
        .addDef(I.getOperand(0).getReg())
        .addUse(Reg)
        .constrainAllUses(TII, TRI, RBI);
  }
  return false;
}

bool SPIRVInstructionSelector::selectConstVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // TODO: only const case is supported for now.
  assert(std::all_of(
      I.operands_begin(), I.operands_end(), [this](const MachineOperand &MO) {
        if (MO.isDef())
          return true;
        if (!MO.isReg())
          return false;
        SPIRVType *ConstTy = this->MRI->getVRegDef(MO.getReg());
        assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
               ConstTy->getOperand(1).isReg());
        Register ConstReg = ConstTy->getOperand(1).getReg();
        const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
        assert(Const);
        return (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
                Const->getOpcode() == TargetOpcode::G_FCONSTANT);
      }));

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(SPIRV::OpConstantComposite))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
                                         const SPIRVType *ResType,
                                         unsigned CmpOpc,
                                         MachineInstr &I) const {
  Register Cmp0 = I.getOperand(2).getReg();
  Register Cmp1 = I.getOperand(3).getReg();
  assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
             GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
         "CMP operands should have the same type");
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Cmp0)
      .addUse(Cmp1)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  auto Pred = I.getOperand(1).getPredicate();
  unsigned CmpOpc;

  Register CmpOperand = I.getOperand(2).getReg();
  if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
    CmpOpc = getPtrCmpOpcode(Pred);
  else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
    CmpOpc = getBoolCmpOpcode(Pred);
  else
    CmpOpc = getICmpOpcode(Pred);
  return selectCmp(ResVReg, ResType, CmpOpc, I);
}

void SPIRVInstructionSelector::renderFImm32(MachineInstrBuilder &MIB,
                                            const MachineInstr &I,
                                            int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
         "Expected G_FCONSTANT");
  const ConstantFP *FPImm = I.getOperand(1).getFPImm();
  addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
}

void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
                                           const MachineInstr &I,
                                           int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
}

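// Materialize Val as a 32-bit SPIR-V constant (OpConstantNull when Val is 0),
// reusing an existing constant vreg from the global registry when one has
// already been created for the same value.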
1221 Register
1222 SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
1223                                            const SPIRVType *ResType) const {
1224   Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
1225   const SPIRVType *SpvI32Ty =
1226       ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
1227   // Find a constant in DT or build a new one.
1228   auto ConstInt = ConstantInt::get(LLVMTy, Val);
1229   Register NewReg = GR.find(ConstInt, GR.CurMF);
1230   if (!NewReg.isValid()) {
1231     NewReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
1232     GR.add(ConstInt, GR.CurMF, NewReg);
1233     MachineInstr *MI;
1234     MachineBasicBlock &BB = *I.getParent();
1235     if (Val == 0) {
1236       MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1237                .addDef(NewReg)
1238                .addUse(GR.getSPIRVTypeID(SpvI32Ty));
1239     } else {
1240       MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
1241                .addDef(NewReg)
1242                .addUse(GR.getSPIRVTypeID(SpvI32Ty))
1243                .addImm(APInt(32, Val).getZExtValue());
1244     }
1245     constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
1246   }
1247   return NewReg;
1248 }
1249 
1250 bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
1251                                           const SPIRVType *ResType,
1252                                           MachineInstr &I) const {
1253   unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
1254   return selectCmp(ResVReg, ResType, CmpOp, I);
1255 }
1256 
1257 Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
1258                                                  MachineInstr &I) const {
1259   if (ResType->getOpcode() == SPIRV::OpTypeVector)
1260     return GR.getOrCreateConsIntVector(0, I, ResType, TII);
1261   return GR.getOrCreateConstInt(0, I, ResType, TII);
1262 }
1263 
1264 Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
1265                                                 const SPIRVType *ResType,
1266                                                 MachineInstr &I) const {
1267   unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
1268   APInt One =
1269       AllOnes ? APInt::getAllOnes(BitWidth) : APInt::getOneBitSet(BitWidth, 0);
1270   if (ResType->getOpcode() == SPIRV::OpTypeVector)
1271     return GR.getOrCreateConsIntVector(One.getZExtValue(), I, ResType, TII);
1272   return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII);
1273 }
1274 
1275 bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
1276                                             const SPIRVType *ResType,
1277                                             MachineInstr &I,
1278                                             bool IsSigned) const {
1279   // To extend a bool, we need to use OpSelect between constants.
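       // An illustrative sketch of the emitted code (IDs are placeholders, not
       // actual SPIR-V names):
       //   sext: %res = OpSelect %int %cond %all_ones %zero   ; -1 / 0
       //   zext: %res = OpSelect %int %cond %one %zero        ;  1 / 0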
1280   Register ZeroReg = buildZerosVal(ResType, I);
1281   Register OneReg = buildOnesVal(IsSigned, ResType, I);
1282   bool IsScalarBool =
1283       GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
1284   unsigned Opcode =
1285       IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
1286   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
1287       .addDef(ResVReg)
1288       .addUse(GR.getSPIRVTypeID(ResType))
1289       .addUse(I.getOperand(1).getReg())
1290       .addUse(OneReg)
1291       .addUse(ZeroReg)
1292       .constrainAllUses(TII, TRI, RBI);
1293 }
1294 
1295 bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
1296                                           const SPIRVType *ResType,
1297                                           MachineInstr &I, bool IsSigned,
1298                                           unsigned Opcode) const {
1299   Register SrcReg = I.getOperand(1).getReg();
1300   // We could convert a bool value directly to a float type without OpConvert*ToF;
1301   // however, the translator generates OpSelect+OpConvert*ToF, so we do the same.
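       // For a scalar bool source this emits, roughly (IDs are placeholders):
       //   %tmp = OpSelect %int %b %one %zero
       //   %res = OpConvertUToF %float %tmp   ; or OpConvertSToF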
1302   if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
1303     unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
1304     SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
1305     if (ResType->getOpcode() == SPIRV::OpTypeVector) {
1306       const unsigned NumElts = ResType->getOperand(2).getImm();
1307       TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
1308     }
1309     SrcReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1310     selectSelect(SrcReg, TmpType, I, false);
1311   }
1312   return selectUnOpWithSrc(ResVReg, ResType, I, SrcReg, Opcode);
1313 }
1314 
1315 bool SPIRVInstructionSelector::selectExt(Register ResVReg,
1316                                          const SPIRVType *ResType,
1317                                          MachineInstr &I, bool IsSigned) const {
1318   if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool))
1319     return selectSelect(ResVReg, ResType, I, IsSigned);
1320   unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
1321   return selectUnOp(ResVReg, ResType, I, Opcode);
1322 }
1323 
1324 bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
1325                                                Register ResVReg,
1326                                                MachineInstr &I,
1327                                                const SPIRVType *IntTy,
1328                                                const SPIRVType *BoolTy) const {
1329   // To truncate to a bool, we use OpBitwiseAnd with 1 and OpINotEqual with zero.
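       // For a scalar source the sequence is, roughly (IDs are placeholders):
       //   %bit = OpBitwiseAnd %int %src %one
       //   %res = OpINotEqual %bool %bit %zero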
1330   Register BitIntReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1331   bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
1332   unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
1333   Register Zero = buildZerosVal(IntTy, I);
1334   Register One = buildOnesVal(false, IntTy, I);
1335   MachineBasicBlock &BB = *I.getParent();
1336   BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
1337       .addDef(BitIntReg)
1338       .addUse(GR.getSPIRVTypeID(IntTy))
1339       .addUse(IntReg)
1340       .addUse(One)
1341       .constrainAllUses(TII, TRI, RBI);
1342   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
1343       .addDef(ResVReg)
1344       .addUse(GR.getSPIRVTypeID(BoolTy))
1345       .addUse(BitIntReg)
1346       .addUse(Zero)
1347       .constrainAllUses(TII, TRI, RBI);
1348 }
1349 
1350 bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
1351                                            const SPIRVType *ResType,
1352                                            MachineInstr &I) const {
1353   if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool)) {
1354     Register IntReg = I.getOperand(1).getReg();
1355     const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
1356     return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType);
1357   }
1358   bool IsSigned = GR.isScalarOrVectorSigned(ResType);
1359   unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
1360   return selectUnOp(ResVReg, ResType, I, Opcode);
1361 }
1362 
1363 bool SPIRVInstructionSelector::selectConst(Register ResVReg,
1364                                            const SPIRVType *ResType,
1365                                            const APInt &Imm,
1366                                            MachineInstr &I) const {
1367   unsigned TyOpcode = ResType->getOpcode();
1368   assert(TyOpcode != SPIRV::OpTypePointer || Imm.isZero());
1369   MachineBasicBlock &BB = *I.getParent();
1370   if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
1371       Imm.isZero())
1372     return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1373         .addDef(ResVReg)
1374         .addUse(GR.getSPIRVTypeID(ResType))
1375         .constrainAllUses(TII, TRI, RBI);
1376   if (TyOpcode == SPIRV::OpTypeInt) {
1377     assert(Imm.getBitWidth() <= 64 && "Unsupported integer width!");
1378     Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII);
1379     if (Reg == ResVReg)
1380       return true;
1381     return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
1382         .addDef(ResVReg)
1383         .addUse(Reg)
1384         .constrainAllUses(TII, TRI, RBI);
1385   }
1386   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
1387                  .addDef(ResVReg)
1388                  .addUse(GR.getSPIRVTypeID(ResType));
1389   // <= 32-bit integers should have been caught by the imported SelectionDAG patterns.
1390   assert(Imm.getBitWidth() > 32);
1391   addNumImm(Imm, MIB);
1392   return MIB.constrainAllUses(TII, TRI, RBI);
1393 }
1394 
1395 bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
1396                                              const SPIRVType *ResType,
1397                                              MachineInstr &I) const {
1398   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
1399       .addDef(ResVReg)
1400       .addUse(GR.getSPIRVTypeID(ResType))
1401       .constrainAllUses(TII, TRI, RBI);
1402 }
1403 
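     // isImm and foldImm below look through the ASSIGN_TYPE wrapper inserted by
     // the SPIR-V pre-legalizer; the shape they expect is, roughly:
     //   %wrapped = ASSIGN_TYPE %val, %type
     //   %val     = G_CONSTANT iN <imm>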
1404 static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
1405   assert(MO.isReg());
1406   const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
1407   if (TypeInst->getOpcode() != SPIRV::ASSIGN_TYPE)
1408     return false;
1409   assert(TypeInst->getOperand(1).isReg());
1410   MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
1411   return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
1412 }
1413 
1414 static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
1415   const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
1416   MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
1417   assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT);
1418   return ImmInst->getOperand(1).getCImm()->getZExtValue();
1419 }
1420 
1421 bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
1422                                                const SPIRVType *ResType,
1423                                                MachineInstr &I) const {
1424   MachineBasicBlock &BB = *I.getParent();
1425   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
1426                  .addDef(ResVReg)
1427                  .addUse(GR.getSPIRVTypeID(ResType))
1428                  // Object to insert.
1429                  .addUse(I.getOperand(3).getReg())
1430                  // Composite to insert into.
1431                  .addUse(I.getOperand(2).getReg());
1432   for (unsigned i = 4; i < I.getNumOperands(); i++)
1433     MIB.addImm(foldImm(I.getOperand(i), MRI));
1434   return MIB.constrainAllUses(TII, TRI, RBI);
1435 }
1436 
1437 bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
1438                                                 const SPIRVType *ResType,
1439                                                 MachineInstr &I) const {
1440   MachineBasicBlock &BB = *I.getParent();
1441   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
1442                  .addDef(ResVReg)
1443                  .addUse(GR.getSPIRVTypeID(ResType))
1444                  .addUse(I.getOperand(2).getReg());
1445   for (unsigned i = 3; i < I.getNumOperands(); i++)
1446     MIB.addImm(foldImm(I.getOperand(i), MRI));
1447   return MIB.constrainAllUses(TII, TRI, RBI);
1448 }
1449 
1450 bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
1451                                                const SPIRVType *ResType,
1452                                                MachineInstr &I) const {
1453   if (isImm(I.getOperand(4), MRI))
1454     return selectInsertVal(ResVReg, ResType, I);
1455   MachineBasicBlock &BB = *I.getParent();
1456   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
1457       .addDef(ResVReg)
1458       .addUse(GR.getSPIRVTypeID(ResType))
1459       .addUse(I.getOperand(2).getReg())
1460       .addUse(I.getOperand(3).getReg())
1461       .addUse(I.getOperand(4).getReg())
1462       .constrainAllUses(TII, TRI, RBI);
1463 }
1464 
1465 bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
1466                                                 const SPIRVType *ResType,
1467                                                 MachineInstr &I) const {
1468   if (isImm(I.getOperand(3), MRI))
1469     return selectExtractVal(ResVReg, ResType, I);
1470   MachineBasicBlock &BB = *I.getParent();
1471   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
1472       .addDef(ResVReg)
1473       .addUse(GR.getSPIRVTypeID(ResType))
1474       .addUse(I.getOperand(2).getReg())
1475       .addUse(I.getOperand(3).getReg())
1476       .constrainAllUses(TII, TRI, RBI);
1477 }
1478 
1479 bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
1480                                          const SPIRVType *ResType,
1481                                          MachineInstr &I) const {
1482   const bool IsGEPInBounds = I.getOperand(2).getImm();
1483 
1484   // OpAccessChain could be used for OpenCL, but the SPIRV-LLVM Translator relies
1485   // only on PtrAccessChain, so we'll try not to deviate. For Vulkan, however,
1486   // we have to use Op[InBounds]AccessChain.
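       // The difference, sketched with placeholder IDs: OpPtrAccessChain takes the
       // first GEP index as its extra Element operand, which OpAccessChain lacks:
       //   OpenCL: %r = OpPtrAccessChain %ptr_ty %base %elem %idx0 %idx1 ...
       //   Vulkan: %r = OpAccessChain    %ptr_ty %base %idx0 %idx1 ...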
1487   const unsigned Opcode = STI.isVulkanEnv()
1488                               ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
1489                                                : SPIRV::OpAccessChain)
1490                               : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain
1491                                                : SPIRV::OpPtrAccessChain);
1492 
1493   auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
1494                  .addDef(ResVReg)
1495                  .addUse(GR.getSPIRVTypeID(ResType))
1496                  // Object to get a pointer to.
1497                  .addUse(I.getOperand(3).getReg());
1498   // Adding indices.
1499   const unsigned StartingIndex =
1500       (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
1501           ? 5
1502           : 4;
1503   for (unsigned i = StartingIndex; i < I.getNumExplicitOperands(); ++i)
1504     Res.addUse(I.getOperand(i).getReg());
1505   return Res.constrainAllUses(TII, TRI, RBI);
1506 }
1507 
1508 bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
1509                                                const SPIRVType *ResType,
1510                                                MachineInstr &I) const {
1511   MachineBasicBlock &BB = *I.getParent();
1512   switch (cast<GIntrinsic>(I).getIntrinsicID()) {
1513   case Intrinsic::spv_load:
1514     return selectLoad(ResVReg, ResType, I);
1515   case Intrinsic::spv_store:
1516     return selectStore(I);
1517   case Intrinsic::spv_extractv:
1518     return selectExtractVal(ResVReg, ResType, I);
1519   case Intrinsic::spv_insertv:
1520     return selectInsertVal(ResVReg, ResType, I);
1521   case Intrinsic::spv_extractelt:
1522     return selectExtractElt(ResVReg, ResType, I);
1523   case Intrinsic::spv_insertelt:
1524     return selectInsertElt(ResVReg, ResType, I);
1525   case Intrinsic::spv_gep:
1526     return selectGEP(ResVReg, ResType, I);
1527   case Intrinsic::spv_unref_global:
1528   case Intrinsic::spv_init_global: {
1529     MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
1530     MachineInstr *Init = I.getNumExplicitOperands() > 2
1531                              ? MRI->getVRegDef(I.getOperand(2).getReg())
1532                              : nullptr;
1533     assert(MI);
1534     return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
1535   }
1536   case Intrinsic::spv_undef: {
1537     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
1538                    .addDef(ResVReg)
1539                    .addUse(GR.getSPIRVTypeID(ResType));
1540     return MIB.constrainAllUses(TII, TRI, RBI);
1541   }
1542   case Intrinsic::spv_const_composite: {
1543     // If no values are attached, the composite is a null constant.
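         // e.g. (placeholder IDs): a composite with constituents becomes
         //   %c = OpConstantComposite %ty %a %b
         // while an empty one becomes
         //   %c = OpConstantNull %ty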
1544     bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
1545     unsigned Opcode =
1546         IsNull ? SPIRV::OpConstantNull : SPIRV::OpConstantComposite;
1547     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
1548                    .addDef(ResVReg)
1549                    .addUse(GR.getSPIRVTypeID(ResType));
1550     // skip type MD node we already used when generated assign.type for this
1551     if (!IsNull) {
1552       for (unsigned i = I.getNumExplicitDefs() + 1;
1553            i < I.getNumExplicitOperands(); ++i) {
1554         MIB.addUse(I.getOperand(i).getReg());
1555       }
1556     }
1557     return MIB.constrainAllUses(TII, TRI, RBI);
1558   }
1559   case Intrinsic::spv_assign_name: {
1560     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
1561     MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
1562     for (unsigned i = I.getNumExplicitDefs() + 2;
1563          i < I.getNumExplicitOperands(); ++i) {
1564       MIB.addImm(I.getOperand(i).getImm());
1565     }
1566     return MIB.constrainAllUses(TII, TRI, RBI);
1567   }
1568   case Intrinsic::spv_switch: {
1569     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
1570     for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
1571       if (I.getOperand(i).isReg())
1572         MIB.addReg(I.getOperand(i).getReg());
1573       else if (I.getOperand(i).isCImm())
1574         addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
1575       else if (I.getOperand(i).isMBB())
1576         MIB.addMBB(I.getOperand(i).getMBB());
1577       else
1578         llvm_unreachable("Unexpected OpSwitch operand");
1579     }
1580     return MIB.constrainAllUses(TII, TRI, RBI);
1581   }
1582   case Intrinsic::spv_cmpxchg:
1583     return selectAtomicCmpXchg(ResVReg, ResType, I);
1584   case Intrinsic::spv_unreachable:
1585     BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable));
1586     break;
1587   case Intrinsic::spv_alloca:
1588     return selectFrameIndex(ResVReg, ResType, I);
1589   case Intrinsic::spv_alloca_array:
1590     return selectAllocaArray(ResVReg, ResType, I);
1591   case Intrinsic::spv_assume:
1592     if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
1593       BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
1594           .addUse(I.getOperand(1).getReg());
1595     break;
1596   case Intrinsic::spv_expect:
1597     if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
1598       BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
1599           .addDef(ResVReg)
1600           .addUse(GR.getSPIRVTypeID(ResType))
1601           .addUse(I.getOperand(2).getReg())
1602           .addUse(I.getOperand(3).getReg());
1603     break;
1604   default:
1605     llvm_unreachable("Intrinsic selection not implemented");
1606   }
1607   return true;
1608 }
1609 
1610 bool SPIRVInstructionSelector::selectAllocaArray(Register ResVReg,
1611                                                  const SPIRVType *ResType,
1612                                                  MachineInstr &I) const {
1613   // The allocation instruction had an allocation size parameter that is not 1,
1614   // so this lowers to a variable-length array allocation.
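       // e.g. (placeholder IDs): %p = OpVariableLengthArrayINTEL %fn_ptr_ty %len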
1615   MachineBasicBlock &BB = *I.getParent();
1616   return BuildMI(BB, I, I.getDebugLoc(),
1617                  TII.get(SPIRV::OpVariableLengthArrayINTEL))
1618       .addDef(ResVReg)
1619       .addUse(GR.getSPIRVTypeID(ResType))
1620       .addUse(I.getOperand(2).getReg())
1621       .constrainAllUses(TII, TRI, RBI);
1622 }
1623 
1624 bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
1625                                                 const SPIRVType *ResType,
1626                                                 MachineInstr &I) const {
1627   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
1628       .addDef(ResVReg)
1629       .addUse(GR.getSPIRVTypeID(ResType))
1630       .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
1631       .constrainAllUses(TII, TRI, RBI);
1632 }
1633 
1634 bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
1635   // InstructionSelector walks backwards through the instructions. We can use
1636   // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
1637   // first, so we can generate an OpBranchConditional here. If there is no
1638   // G_BRCOND, we just use OpBranch for a regular unconditional branch.
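       // Illustrative mapping (labels are placeholders):
       //   G_BRCOND %cond, %bb.true
       //   G_BR %bb.false
       // becomes
       //   OpBranchConditional %cond %true_label %false_label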
1639   const MachineInstr *PrevI = I.getPrevNode();
1640   MachineBasicBlock &MBB = *I.getParent();
1641   if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
1642     return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
1643         .addUse(PrevI->getOperand(0).getReg())
1644         .addMBB(PrevI->getOperand(1).getMBB())
1645         .addMBB(I.getOperand(0).getMBB())
1646         .constrainAllUses(TII, TRI, RBI);
1647   }
1648   return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
1649       .addMBB(I.getOperand(0).getMBB())
1650       .constrainAllUses(TII, TRI, RBI);
1651 }
1652 
1653 bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
1654   // InstructionSelector walks backwards through the instructions. For an
1655   // explicit conditional branch with no fallthrough, we use both a G_BR and a
1656   // G_BRCOND to create an OpBranchConditional. We should hit G_BR first, and
1657   // generate the OpBranchConditional in selectBranch above.
1658   //
1659   // If an OpBranchConditional has been generated, we simply return, as the work
1660   // is alread done. If there is no OpBranchConditional, LLVM must be relying on
1661   // implicit fallthrough to the next basic block, so we need to create an
1662   // OpBranchConditional with an explicit "false" argument pointing to the next
1663   // basic block that LLVM would fall through to.
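       // Illustrative fallthrough case (labels are placeholders):
       //   G_BRCOND %cond, %bb.true   ; falls through to %bb.next
       // becomes
       //   OpBranchConditional %cond %true_label %next_label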
1664   const MachineInstr *NextI = I.getNextNode();
1665   // Check if this has already been successfully selected.
1666   if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
1667     return true;
1668   // Must be relying on implicit block fallthrough, so generate an
1669   // OpBranchConditional with the "next" basic block as the "false" target.
1670   MachineBasicBlock &MBB = *I.getParent();
1671   unsigned NextMBBNum = MBB.getNextNode()->getNumber();
1672   MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
1673   return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
1674       .addUse(I.getOperand(0).getReg())
1675       .addMBB(I.getOperand(1).getMBB())
1676       .addMBB(NextMBB)
1677       .constrainAllUses(TII, TRI, RBI);
1678 }
1679 
1680 bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
1681                                          const SPIRVType *ResType,
1682                                          MachineInstr &I) const {
1683   auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
1684                  .addDef(ResVReg)
1685                  .addUse(GR.getSPIRVTypeID(ResType));
1686   const unsigned NumOps = I.getNumOperands();
1687   for (unsigned i = 1; i < NumOps; i += 2) {
1688     MIB.addUse(I.getOperand(i + 0).getReg());
1689     MIB.addMBB(I.getOperand(i + 1).getMBB());
1690   }
1691   return MIB.constrainAllUses(TII, TRI, RBI);
1692 }
1693 
1694 bool SPIRVInstructionSelector::selectGlobalValue(
1695     Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
1696   // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
1697   MachineIRBuilder MIRBuilder(I);
1698   const GlobalValue *GV = I.getOperand(1).getGlobal();
1699   Type *GVType = GV->getValueType();
1700   SPIRVType *PointerBaseType;
1701   if (GVType->isArrayTy()) {
1702     SPIRVType *ArrayElementType =
1703         GR.getOrCreateSPIRVType(GVType->getArrayElementType(), MIRBuilder,
1704                                 SPIRV::AccessQualifier::ReadWrite, false);
1705     PointerBaseType = GR.getOrCreateSPIRVArrayType(
1706         ArrayElementType, GVType->getArrayNumElements(), I, TII);
1707   } else {
1708     PointerBaseType = GR.getOrCreateSPIRVType(
1709         GVType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
1710   }
1711   SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
1712       PointerBaseType, I, TII,
1713       addressSpaceToStorageClass(GV->getAddressSpace(), STI));
1714 
1715   std::string GlobalIdent;
1716   if (!GV->hasName()) {
1717     unsigned &ID = UnnamedGlobalIDs[GV];
1718     if (ID == 0)
1719       ID = UnnamedGlobalIDs.size();
1720     GlobalIdent = "__unnamed_" + Twine(ID).str();
1721   } else {
1722     GlobalIdent = GV->getGlobalIdentifier();
1723   }
1724 
1725   // Behaviour of functions as operands depends on availability of the
1726   // corresponding extension (SPV_INTEL_function_pointers):
1727   // - If there is an extension to operate with functions as operands:
1728   // We create a proper constant operand and evaluate a correct type for a
1729   // function pointer.
1730   // - Without the required extension:
1731   // We have functions as operands in tests with blocks of instructions, e.g. in
1732   // transcoding/global_block.ll. These operands are not used and should be
1733   // substituted by zero constants. Their type is expected to be always
1734   // OpTypePointer Function %uchar.
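       // Illustrative forms (IDs are placeholders):
       //   with the extension:    %fp = OpConstantFunctionPointerINTEL %fn_ptr_ty %fn
       //   without the extension: %fp = OpConstantNull %fn_ptr_ty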
1735   if (isa<Function>(GV)) {
1736     const Constant *ConstVal = GV;
1737     MachineBasicBlock &BB = *I.getParent();
1738     Register NewReg = GR.find(ConstVal, GR.CurMF);
1739     if (!NewReg.isValid()) {
1740       Register NewReg = ResVReg;
1741       GR.add(ConstVal, GR.CurMF, NewReg);
1742       const Function *GVFun =
1743           STI.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)
1744               ? dyn_cast<Function>(GV)
1745               : nullptr;
1746       if (GVFun) {
1747         // References to a function via function pointers generate virtual
1748         // registers without a definition. We will resolve them later, during
1749         // the module analysis stage.
1750         MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1751         Register FuncVReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
1752         MRI->setRegClass(FuncVReg, &SPIRV::IDRegClass);
1753         MachineInstrBuilder MB =
1754             BuildMI(BB, I, I.getDebugLoc(),
1755                     TII.get(SPIRV::OpConstantFunctionPointerINTEL))
1756                 .addDef(NewReg)
1757                 .addUse(GR.getSPIRVTypeID(ResType))
1758                 .addUse(FuncVReg);
1759         // Map the function pointer operand to the Function it references.
1760         GR.recordFunctionPointer(&MB.getInstr()->getOperand(2), GVFun);
1761         return MB.constrainAllUses(TII, TRI, RBI);
1762       }
1763       return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1764           .addDef(NewReg)
1765           .addUse(GR.getSPIRVTypeID(ResType))
1766           .constrainAllUses(TII, TRI, RBI);
1767     }
1768     assert(NewReg != ResVReg);
1769     return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
1770         .addDef(ResVReg)
1771         .addUse(NewReg)
1772         .constrainAllUses(TII, TRI, RBI);
1773   }
1774   auto GlobalVar = cast<GlobalVariable>(GV);
1775   assert(GlobalVar->getName() != "llvm.global.annotations");
1776 
1777   bool HasInit = GlobalVar->hasInitializer() &&
1778                  !isa<UndefValue>(GlobalVar->getInitializer());
1779   // Skip the empty declaration for GVs with initializers until we get the decl
1780   // with the passed initializer.
1781   if (HasInit && !Init)
1782     return true;
1783 
1784   unsigned AddrSpace = GV->getAddressSpace();
1785   SPIRV::StorageClass::StorageClass Storage =
1786       addressSpaceToStorageClass(AddrSpace, STI);
1787   bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage &&
1788                   Storage != SPIRV::StorageClass::Function;
1789   SPIRV::LinkageType::LinkageType LnkType =
1790       (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
1791           ? SPIRV::LinkageType::Import
1792           : (GV->getLinkage() == GlobalValue::LinkOnceODRLinkage &&
1793                      STI.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr)
1794                  ? SPIRV::LinkageType::LinkOnceODR
1795                  : SPIRV::LinkageType::Export);
1796 
1797   Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
1798                                         Storage, Init, GlobalVar->isConstant(),
1799                                         HasLnkTy, LnkType, MIRBuilder, true);
1800   return Reg.isValid();
1801 }
1802 
1803 bool SPIRVInstructionSelector::selectLog10(Register ResVReg,
1804                                            const SPIRVType *ResType,
1805                                            MachineInstr &I) const {
1806   if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
1807     return selectExtInst(ResVReg, ResType, I, CL::log10);
1808   }
1809 
1810   // There is no log10 instruction in the GLSL Extended Instruction set, so it
1811   // is implemented as:
1812   // log10(x) = log2(x) * (1 / log2(10))
1813   //          = log2(x) * 0.30103
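       //
       // The emitted sequence is roughly (IDs are placeholders):
       //   %l = OpExtInst %ty %glsl_set Log2 %x
       //   %r = OpFMul %ty %l %c_0_30103   ; OpVectorTimesScalar for vectors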
1814 
1815   MachineIRBuilder MIRBuilder(I);
1816   MachineBasicBlock &BB = *I.getParent();
1817 
1818   // Build log2(x).
1819   Register VarReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1820   bool Result =
1821       BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
1822           .addDef(VarReg)
1823           .addUse(GR.getSPIRVTypeID(ResType))
1824           .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
1825           .addImm(GL::Log2)
1826           .add(I.getOperand(1))
1827           .constrainAllUses(TII, TRI, RBI);
1828 
1829   // Build 0.30103.
1830   assert(ResType->getOpcode() == SPIRV::OpTypeVector ||
1831          ResType->getOpcode() == SPIRV::OpTypeFloat);
1832   // TODO: Add matrix implementation once supported by the HLSL frontend.
1833   const SPIRVType *SpirvScalarType =
1834       ResType->getOpcode() == SPIRV::OpTypeVector
1835           ? GR.getSPIRVTypeForVReg(ResType->getOperand(1).getReg())
1836           : ResType;
1837   Register ScaleReg =
1838       GR.buildConstantFP(APFloat(0.30103f), MIRBuilder, SpirvScalarType);
1839 
1840   // Multiply log2(x) by 0.30103 to get log10(x) result.
1841   auto Opcode = ResType->getOpcode() == SPIRV::OpTypeVector
1842                     ? SPIRV::OpVectorTimesScalar
1843                     : SPIRV::OpFMulS;
1844   Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
1845                 .addDef(ResVReg)
1846                 .addUse(GR.getSPIRVTypeID(ResType))
1847                 .addUse(VarReg)
1848                 .addUse(ScaleReg)
1849                 .constrainAllUses(TII, TRI, RBI);
1850 
1851   return Result;
1852 }
1853 
1854 namespace llvm {
1855 InstructionSelector *
1856 createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
1857                                const SPIRVSubtarget &Subtarget,
1858                                const RegisterBankInfo &RBI) {
1859   return new SPIRVInstructionSelector(TM, Subtarget, RBI);
1860 }
1861 } // namespace llvm
1862