//===- SPIRVInstructionSelector.cpp ------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the targeting of the InstructionSelector class for
// SPIRV.
// TODO: This should be generated by TableGen.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/SPIRVBaseInfo.h"
#include "MCTargetDesc/SPIRVMCTargetDesc.h"
#include "SPIRV.h"
#include "SPIRVGlobalRegistry.h"
#include "SPIRVInstrInfo.h"
#include "SPIRVRegisterBankInfo.h"
#include "SPIRVRegisterInfo.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "spirv-isel"

using namespace llvm;
namespace CL = SPIRV::OpenCLExtInst;
namespace GL = SPIRV::GLSLExtInst;

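// A list of (extended-instruction-set, opcode) candidates. Selection walks the
// list in order and emits the first entry whose instruction set the subtarget
// supports (see selectExtInst below).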
using ExtInstList =
    std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class SPIRVInstructionSelector : public InstructionSelector {
  const SPIRVSubtarget &STI;
  const SPIRVInstrInfo &TII;
  const SPIRVRegisterInfo &TRI;
  const RegisterBankInfo &RBI;
  SPIRVGlobalRegistry &GR;
  MachineRegisterInfo *MRI;

  /// We need to keep track of the number we give to anonymous global values so
  /// that we can generate the same name every time one is needed.
  mutable DenseMap<const GlobalValue *, unsigned> UnnamedGlobalIDs;

public:
  SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                           const SPIRVSubtarget &ST,
                           const RegisterBankInfo &RBI);
  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override;
  // Common selection code. Instruction-specific selection occurs in spvSelect.
  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

private:
  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // All instruction-specific selection that didn't happen in "select()".
  // It is basically a large switch/case delegating to all the other select
  // methods.
  bool spvSelect(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectGlobalValue(Register ResVReg, MachineInstr &I,
                         const MachineInstr *Init = nullptr) const;

  bool selectUnOpWithSrc(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I, Register SrcReg,
                         unsigned Opcode) const;
  bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  unsigned Opcode) const;

  bool selectBitcast(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;

  bool selectLoad(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectStore(MachineInstr &I) const;

  bool selectStackSave(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectStackRestore(MachineInstr &I) const;

  bool selectMemOperation(Register ResVReg, MachineInstr &I) const;

  bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I, unsigned NewOpcode,
                       unsigned NegateOpcode = 0) const;

  bool selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectFence(MachineInstr &I) const;

  bool selectAddrSpaceCast(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectBitreverse(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectConstVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectCmp(Register ResVReg, const SPIRVType *ResType,
                 unsigned comparisonOpcode, MachineInstr &I) const;

  bool selectICmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectFCmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  void renderImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                   int OpIdx) const;
  void renderFImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                    int OpIdx) const;

  bool selectConst(Register ResVReg, const SPIRVType *ResType, const APInt &Imm,
                   MachineInstr &I) const;

  bool selectSelect(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                    bool IsSigned) const;
  bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  bool IsSigned, unsigned Opcode) const;
  bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                 bool IsSigned) const;

  bool selectTrunc(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectIntToBool(Register IntReg, Register ResVReg, MachineInstr &I,
                       const SPIRVType *intTy, const SPIRVType *boolTy) const;

  bool selectOpUndef(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;
  bool selectFreeze(Register ResVReg, const SPIRVType *ResType,
                    MachineInstr &I) const;
  bool selectIntrinsic(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractVal(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertVal(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractElt(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertElt(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectGEP(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectFrameIndex(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectAllocaArray(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectBranch(MachineInstr &I) const;
  bool selectBranchCond(MachineInstr &I) const;

  bool selectPhi(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst,
                     GL::GLSLExtInst GLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, const ExtInstList &ExtInsts) const;

  bool selectLog10(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectUnmergeValues(MachineInstr &I) const;

  Register buildI32Constant(uint32_t Val, MachineInstr &I,
                            const SPIRVType *ResType = nullptr) const;

  Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildOnesVal(bool AllOnes, const SPIRVType *ResType,
                        MachineInstr &I) const;
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                                                   const SPIRVSubtarget &ST,
                                                   const RegisterBankInfo &RBI)
    : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                       CodeGenCoverage *CoverageInfo,
                                       ProfileSummaryInfo *PSI,
                                       BlockFrequencyInfo *BFI) {
  MRI = &MF.getRegInfo();
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}

static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI);

// Defined in SPIRVLegalizerInfo.cpp.
extern bool isTypeFoldingSupported(unsigned Opcode);

bool SPIRVInstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  unsigned Opcode = I.getOpcode();
  // If it's not a GMIR instruction, we've selected it already.
  if (!isPreISelGenericOpcode(Opcode)) {
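    // ASSIGN_TYPE is a SPIR-V pseudo that ties a vreg to its SPIRVType before
    // legalization; by the time selection reaches it the type is tracked by
    // the global registry, so the pseudo is folded away by rewiring its source
    // to its destination (after giving type-folding patterns a chance).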
    if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed any more.
      auto *Def = MRI->getVRegDef(I.getOperand(1).getReg());
      if (isTypeFoldingSupported(Def->getOpcode())) {
        bool Res = selectImpl(I, *CoverageInfo);
        assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
        if (Res)
          return Res;
      }
      MRI->replaceRegWith(I.getOperand(1).getReg(), I.getOperand(0).getReg());
      I.removeFromParent();
      return true;
    } else if (I.getNumDefs() == 1) {
      // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(I.getOperand(0).getReg(), LLT::scalar(32));
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
    return false;
  }

  // Common code for getting return reg+type, and removing selected instr
  // from parent occurs here. Instr-specific selection happens in spvSelect().
  bool HasDefs = I.getNumDefs() > 0;
  Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
  assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  if (spvSelect(ResVReg, ResType, I)) {
    if (HasDefs) // Make all vregs 32 bits (for SPIR-V IDs).
      for (unsigned i = 0; i < I.getNumDefs(); ++i)
        MRI->setType(I.getOperand(i).getReg(), LLT::scalar(32));
    I.removeFromParent();
    return true;
  }
  return false;
}

bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const unsigned Opcode = I.getOpcode();
  if (isTypeFoldingSupported(Opcode) && Opcode != TargetOpcode::G_CONSTANT)
    return selectImpl(I, *CoverageInfo);
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);
  case TargetOpcode::G_FREEZE:
    return selectFreeze(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectConstVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMSET:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
  case TargetOpcode::G_SMIN:
    return selectExtInst(ResVReg, ResType, I, CL::s_min, GL::SMin);
  case TargetOpcode::G_UMIN:
    return selectExtInst(ResVReg, ResType, I, CL::u_min, GL::UMin);

  case TargetOpcode::G_SMAX:
    return selectExtInst(ResVReg, ResType, I, CL::s_max, GL::SMax);
  case TargetOpcode::G_UMAX:
    return selectExtInst(ResVReg, ResType, I, CL::u_max, GL::UMax);

  case TargetOpcode::G_FMA:
    return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma);

  case TargetOpcode::G_FPOW:
    return selectExtInst(ResVReg, ResType, I, CL::pow, GL::Pow);
  case TargetOpcode::G_FPOWI:
    return selectExtInst(ResVReg, ResType, I, CL::pown);

  case TargetOpcode::G_FEXP:
    return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
  case TargetOpcode::G_FEXP2:
    return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);

  case TargetOpcode::G_FLOG:
    return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
  case TargetOpcode::G_FLOG2:
    return selectExtInst(ResVReg, ResType, I, CL::log2, GL::Log2);
  case TargetOpcode::G_FLOG10:
    return selectLog10(ResVReg, ResType, I);

  case TargetOpcode::G_FABS:
    return selectExtInst(ResVReg, ResType, I, CL::fabs, GL::FAbs);
  case TargetOpcode::G_ABS:
    return selectExtInst(ResVReg, ResType, I, CL::s_abs, GL::SAbs);

  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmin, GL::FMin);
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMAXIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmax, GL::FMax);

  case TargetOpcode::G_FCOPYSIGN:
    return selectExtInst(ResVReg, ResType, I, CL::copysign);

  case TargetOpcode::G_FCEIL:
    return selectExtInst(ResVReg, ResType, I, CL::ceil, GL::Ceil);
  case TargetOpcode::G_FFLOOR:
    return selectExtInst(ResVReg, ResType, I, CL::floor, GL::Floor);

  case TargetOpcode::G_FCOS:
    return selectExtInst(ResVReg, ResType, I, CL::cos, GL::Cos);
  case TargetOpcode::G_FSIN:
    return selectExtInst(ResVReg, ResType, I, CL::sin, GL::Sin);

  case TargetOpcode::G_FSQRT:
    return selectExtInst(ResVReg, ResType, I, CL::sqrt, GL::Sqrt);

  case TargetOpcode::G_CTTZ:
  case TargetOpcode::G_CTTZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::ctz);
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTLZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::clz);

  case TargetOpcode::G_INTRINSIC_ROUND:
    return selectExtInst(ResVReg, ResType, I, CL::round, GL::Round);
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return selectExtInst(ResVReg, ResType, I, CL::trunc, GL::Trunc);
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FNEARBYINT:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);

  case TargetOpcode::G_SMULH:
    return selectExtInst(ResVReg, ResType, I, CL::s_mul_hi);
  case TargetOpcode::G_UMULH:
    return selectExtInst(ResVReg, ResType, I, CL::u_mul_hi);

  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectBitcast(ResVReg, ResType, I);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);
  case TargetOpcode::G_PTR_ADD: {
    // Currently, we get G_PTR_ADD only as a result of translating
    // global variables, initialized with constant expressions like GV + Const
    // (see test opencl/basic/progvar_prog_scope_init.ll).
    // TODO: extend the handler once we have other cases.
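    // For example, an initializer like `getelementptr (i8, ptr @GV, i64 8)`
    // is emitted as (a sketch; register names are illustrative):
    //   %res = OpSpecConstantOp %ptrTy InBoundsPtrAccessChain %gv %zero %eight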
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    Register GV = I.getOperand(1).getReg();
    MachineRegisterInfo::def_instr_iterator II = MRI->def_instr_begin(GV);
    assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
            (*II).getOpcode() == TargetOpcode::COPY ||
            (*II).getOpcode() == SPIRV::OpVariable) &&
           isImm(I.getOperand(2), MRI));
    Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addImm(static_cast<uint32_t>(
                       SPIRV::Opcode::InBoundsPtrAccessChain))
                   .addUse(GV)
                   .addUse(Idx)
                   .addUse(I.getOperand(2).getReg());
    return MIB.constrainAllUses(TII, TRI, RBI);
  }

  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_ATOMICRMW_FADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT);
  case TargetOpcode::G_ATOMICRMW_FSUB:
    // Translate G_ATOMICRMW_FSUB to OpAtomicFAddEXT with negative value operand
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT,
                           SPIRV::OpFNegate);
  case TargetOpcode::G_ATOMICRMW_FMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMinEXT);
  case TargetOpcode::G_ATOMICRMW_FMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMaxEXT);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  case TargetOpcode::G_STACKSAVE:
    return selectStackSave(ResVReg, ResType, I);
  case TargetOpcode::G_STACKRESTORE:
    return selectStackRestore(I);

  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I);

  default:
    return false;
  }
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst) const {
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst,
                                             GL::GLSLExtInst GLInst) const {
  ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
                          {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
  return selectExtInst(ResVReg, ResType, I, ExtInsts);
}

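// Candidates are tried in list order: the first instruction set the subtarget
// can use wins, so an OpenCL (kernel) environment picks the OpenCL.std entry
// and a GLSL (shader) environment picks the GLSL.std.450 one.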
bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             const ExtInstList &Insts) const {
  for (const auto &Ex : Insts) {
    SPIRV::InstructionSet::InstructionSet Set = Ex.first;
    uint32_t Opcode = Ex.second;
    if (STI.canUseExtInstSet(Set)) {
      MachineBasicBlock &BB = *I.getParent();
      auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
                     .addDef(ResVReg)
                     .addUse(GR.getSPIRVTypeID(ResType))
                     .addImm(static_cast<uint32_t>(Set))
                     .addImm(Opcode);
      const unsigned NumOps = I.getNumOperands();
      for (unsigned i = 1; i < NumOps; ++i)
        MIB.add(I.getOperand(i));
      return MIB.constrainAllUses(TII, TRI, RBI);
    }
  }
  return false;
}

bool SPIRVInstructionSelector::selectUnOpWithSrc(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I,
                                                 Register SrcReg,
                                                 unsigned Opcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I,
                                          unsigned Opcode) const {
  return selectUnOpWithSrc(ResVReg, ResType, I, I.getOperand(1).getReg(),
                           Opcode);
}

bool SPIRVInstructionSelector::selectBitcast(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  Register OpReg = I.getOperand(1).getReg();
  SPIRVType *OpType = OpReg.isValid() ? GR.getSPIRVTypeForVReg(OpReg) : nullptr;
  if (!GR.isBitcastCompatible(ResType, OpType))
    report_fatal_error("incompatible result and operand types in a bitcast");
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}

static SPIRV::Scope::Scope getScope(SyncScope::ID Ord) {
  switch (Ord) {
  case SyncScope::SingleThread:
    return SPIRV::Scope::Invocation;
  case SyncScope::System:
    return SPIRV::Scope::Device;
  default:
    llvm_unreachable("Unsupported synchronization Scope ID.");
  }
}

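// Append the optional SPIR-V memory-operand mask (and its trailing literals)
// to a memory-access instruction. The bits follow the SPIR-V Memory Operands
// encoding; e.g. a volatile, 4-byte-aligned access yields the mask
// Volatile|Aligned (0x1|0x2 = 0x3) followed by the alignment literal 4.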
static void addMemoryOperands(MachineMemOperand *MemOp,
                              MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    MIB.addImm(SpvMemOp);
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
      MIB.addImm(MemOp->getAlign().value());
  }
}

static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (Flags & MachineMemOperand::Flags::MOVolatile)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (Flags & MachineMemOperand::Flags::MONonTemporal)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
    MIB.addImm(SpvMemOp);
}

bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(Ptr);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
                 .addUse(Ptr)
                 .addUse(StoreVal);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackSave(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stacksave intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSaveMemoryINTEL))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stackrestore intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  if (!I.getOperand(0).isReg())
    return false;
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpRestoreMemoryINTEL))
      .addUse(I.getOperand(0).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Register SrcReg = I.getOperand(1).getReg();
  if (I.getOpcode() == TargetOpcode::G_MEMSET) {
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI);
    unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI);
    SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num, I, TII);
    Register Const = GR.getOrCreateConsIntArray(Val, I, ArrTy, TII);
    SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
        ArrTy, I, TII, SPIRV::StorageClass::UniformConstant);
    // TODO: check if we have such GV, add init, use buildGlobalVariable.
    Type *LLVMArrTy = ArrayType::get(
        IntegerType::get(GR.CurMF->getFunction().getContext(), 8), Num);
    GlobalVariable *GV =
        new GlobalVariable(LLVMArrTy, true, GlobalValue::InternalLinkage);
    Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    GR.add(GV, GR.CurMF, VarReg);

    buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {});
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
        .addDef(VarReg)
        .addUse(GR.getSPIRVTypeID(VarTy))
        .addImm(SPIRV::StorageClass::UniformConstant)
        .addUse(Const)
        .constrainAllUses(TII, TRI, RBI);
    SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
        ValTy, I, TII, SPIRV::StorageClass::UniformConstant);
    SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    selectUnOpWithSrc(SrcReg, SourceTy, I, VarReg, SPIRV::OpBitcast);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addUse(I.getOperand(0).getReg())
                 .addUse(SrcReg)
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
    addMemoryOperands(*I.memoperands_begin(), MIB);
  bool Result = MIB.constrainAllUses(TII, TRI, RBI);
  if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg())
    BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
        .addUse(MIB->getOperand(0).getReg());
  return Result;
}

bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode,
                                               unsigned NegateOpcode) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(1).getReg();
  // TODO: Changed as it's implemented in the translator. See test/atomicrmw.ll
  // auto ScSem =
  // getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr));
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);

  bool Result = false;
  Register ValueReg = I.getOperand(2).getReg();
  if (NegateOpcode != 0) {
    // Negation of the value operand was requested (e.g. G_ATOMICRMW_FSUB is
    // lowered as an FAdd of the negated value), so negate it first.
    Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    Result |= selectUnOpWithSrc(TmpReg, ResType, I, ValueReg, NegateOpcode);
    ValueReg = TmpReg;
  }

  Result |= BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(Ptr)
                .addUse(ScopeReg)
                .addUse(MemSemReg)
                .addUse(ValueReg)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}

bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
  unsigned ArgI = I.getNumOperands() - 1;
  Register SrcReg =
      I.getOperand(ArgI).isReg() ? I.getOperand(ArgI).getReg() : Register(0);
  SPIRVType *DefType =
      SrcReg.isValid() ? GR.getSPIRVTypeForVReg(SrcReg) : nullptr;
  if (!DefType || DefType->getOpcode() != SPIRV::OpTypeVector)
    report_fatal_error(
        "cannot select G_UNMERGE_VALUES with a non-vector argument");

  SPIRVType *ScalarType =
      GR.getSPIRVTypeForVReg(DefType->getOperand(1).getReg());
  MachineBasicBlock &BB = *I.getParent();
  bool Res = false;
  for (unsigned i = 0; i < I.getNumDefs(); ++i) {
    Register ResVReg = I.getOperand(i).getReg();
    SPIRVType *ResType = GR.getSPIRVTypeForVReg(ResVReg);
    if (!ResType) {
      // There were no "assign type" actions for this def; fix that now.
      ResType = ScalarType;
      MRI->setRegClass(ResVReg, &SPIRV::IDRegClass);
      MRI->setType(ResVReg, LLT::scalar(GR.getScalarOrVectorBitWidth(ResType)));
      GR.assignSPIRVTypeToVReg(ResType, ResVReg, *GR.CurMF);
    }
    auto MIB =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(SrcReg)
            .addImm(static_cast<int64_t>(i));
    Res |= MIB.constrainAllUses(TII, TRI, RBI);
  }
  return Res;
}

bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
  AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem, I);
  SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
  uint32_t Scope = static_cast<uint32_t>(getScope(Ord));
  Register ScopeReg = buildI32Constant(Scope, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  Register ScopeReg;
  Register MemSemEqReg;
  Register MemSemNeqReg;
  Register Ptr = I.getOperand(2).getReg();
  if (!isa<GIntrinsic>(I)) {
    assert(I.hasOneMemOperand());
    const MachineMemOperand *MemOp = *I.memoperands_begin();
    unsigned Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
    ScopeReg = buildI32Constant(Scope, I);

    unsigned ScSem = static_cast<uint32_t>(
        getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
    AtomicOrdering AO = MemOp->getSuccessOrdering();
    unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
    MemSemEqReg = buildI32Constant(MemSemEq, I);
    AtomicOrdering FO = MemOp->getFailureOrdering();
    unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
    MemSemNeqReg =
        MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
  } else {
    ScopeReg = I.getOperand(5).getReg();
    MemSemEqReg = I.getOperand(6).getReg();
    MemSemNeqReg = I.getOperand(7).getReg();
  }

  Register Cmp = I.getOperand(3).getReg();
  Register Val = I.getOperand(4).getReg();
  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  Register ACmpRes = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  const DebugLoc &DL = I.getDebugLoc();
  bool Result =
      BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
          .addDef(ACmpRes)
          .addUse(GR.getSPIRVTypeID(SpvValTy))
          .addUse(Ptr)
          .addUse(ScopeReg)
          .addUse(MemSemEqReg)
          .addUse(MemSemNeqReg)
          .addUse(Val)
          .addUse(Cmp)
          .constrainAllUses(TII, TRI, RBI);
  Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
                .addDef(CmpSuccReg)
                .addUse(GR.getSPIRVTypeID(BoolTy))
                .addUse(ACmpRes)
                .addUse(Cmp)
                .constrainAllUses(TII, TRI, RBI);
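  // LLVM's cmpxchg returns a { ty, i1 } pair, while OpAtomicCompareExchange
  // returns only the original value; rebuild the pair with two
  // OpCompositeInsert ops: the loaded value at index 0, and the success flag
  // (computed above as ACmpRes == Cmp) at index 1.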
  Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(TmpReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(ACmpRes)
                .addUse(GR.getOrCreateUndef(I, ResType, TII))
                .addImm(0)
                .constrainAllUses(TII, TRI, RBI);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(CmpSuccReg)
                .addUse(TmpReg)
                .addImm(1)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}

static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Workgroup:
  case SPIRV::StorageClass::CrossWorkgroup:
  case SPIRV::StorageClass::Function:
    return true;
  default:
    return false;
  }
}

static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::DeviceOnlyINTEL:
  case SPIRV::StorageClass::HostOnlyINTEL:
    return true;
  default:
    return false;
  }
}

// In SPIR-V address space casting can only happen to and from the Generic
// storage class. We can also only cast Workgroup, CrossWorkgroup, or Function
// pointers to and from Generic pointers. As such, we can convert e.g. from
// Workgroup to Function by going via a Generic pointer as an intermediary. All
// other combinations can only be done by a bitcast, and are probably not safe.
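// For example, a Workgroup-to-Function cast is emitted as (a sketch; register
// names are illustrative):
//   %tmp = OpPtrCastToGeneric %genericPtrTy %src
//   %res = OpGenericCastToPtr %fnPtrTy %tmp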
bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  // If the AddrSpaceCast has a single user and that user is an
  // OpConstantComposite, an OpVariable, or an spv_init_global intrinsic,
  // select OpSpecConstantOp instead.
  auto UIs = MRI->use_instructions(ResVReg);
  if (!UIs.empty() && ++UIs.begin() == UIs.end() &&
      (UIs.begin()->getOpcode() == SPIRV::OpConstantComposite ||
       UIs.begin()->getOpcode() == SPIRV::OpVariable ||
       isSpvIntrinsic(*UIs.begin(), Intrinsic::spv_init_global))) {
    Register NewReg = I.getOperand(1).getReg();
    MachineBasicBlock &BB = *I.getParent();
    SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII,
                                             SPIRV::StorageClass::Generic);
    bool Result =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric))
            .addUse(NewReg)
            .constrainAllUses(TII, TRI, RBI);
    return Result;
  }
  Register SrcPtr = I.getOperand(1).getReg();
  SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
  SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);
  SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResVReg);

  // Don't generate a cast between identical storage classes.
  if (SrcSC == DstSC)
    return true;

  // Casting from an eligible pointer to Generic.
  if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  // Casting from Generic to an eligible pointer.
  if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
  // Casting between 2 eligible pointers using Generic as an intermediary.
  if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
    Register Tmp = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
        SrcPtrTy, I, TII, SPIRV::StorageClass::Generic);
    MachineBasicBlock &BB = *I.getParent();
    const DebugLoc &DL = I.getDebugLoc();
    bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
                       .addDef(Tmp)
                       .addUse(GR.getSPIRVTypeID(GenericPtrTy))
                       .addUse(SrcPtr)
                       .constrainAllUses(TII, TRI, RBI);
    return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
                          .addDef(ResVReg)
                          .addUse(GR.getSPIRVTypeID(ResType))
                          .addUse(Tmp)
                          .constrainAllUses(TII, TRI, RBI);
  }

  // Check if instructions from the SPV_INTEL_usm_storage_classes extension may
  // be applied
  if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::CrossWorkgroup)
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpPtrCastToCrossWorkgroupINTEL);
  if (SrcSC == SPIRV::StorageClass::CrossWorkgroup && isUSMStorageClass(DstSC))
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpCrossWorkgroupCastToPtrINTEL);

  // TODO Should this case just be disallowed completely?
  // We're casting 2 other arbitrary address spaces, so have to bitcast.
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}

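// Map an LLVM FCmp predicate to the corresponding SPIR-V opcode. Ordered
// FCMP_O* predicates map to OpFOrd* (false if either operand is NaN), and
// unordered FCMP_U* predicates to OpFUnord* (true if either operand is NaN).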
static unsigned getFCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::FCMP_OEQ:
    return SPIRV::OpFOrdEqual;
  case CmpInst::FCMP_OGE:
    return SPIRV::OpFOrdGreaterThanEqual;
  case CmpInst::FCMP_OGT:
    return SPIRV::OpFOrdGreaterThan;
  case CmpInst::FCMP_OLE:
    return SPIRV::OpFOrdLessThanEqual;
  case CmpInst::FCMP_OLT:
    return SPIRV::OpFOrdLessThan;
  case CmpInst::FCMP_ONE:
    return SPIRV::OpFOrdNotEqual;
  case CmpInst::FCMP_ORD:
    return SPIRV::OpOrdered;
  case CmpInst::FCMP_UEQ:
    return SPIRV::OpFUnordEqual;
  case CmpInst::FCMP_UGE:
    return SPIRV::OpFUnordGreaterThanEqual;
  case CmpInst::FCMP_UGT:
    return SPIRV::OpFUnordGreaterThan;
  case CmpInst::FCMP_ULE:
    return SPIRV::OpFUnordLessThanEqual;
  case CmpInst::FCMP_ULT:
    return SPIRV::OpFUnordLessThan;
  case CmpInst::FCMP_UNE:
    return SPIRV::OpFUnordNotEqual;
  case CmpInst::FCMP_UNO:
    return SPIRV::OpUnordered;
  default:
    llvm_unreachable("Unknown predicate type for FCmp");
  }
}

static unsigned getICmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpIEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpINotEqual;
  case CmpInst::ICMP_SGE:
    return SPIRV::OpSGreaterThanEqual;
  case CmpInst::ICMP_SGT:
    return SPIRV::OpSGreaterThan;
  case CmpInst::ICMP_SLE:
    return SPIRV::OpSLessThanEqual;
  case CmpInst::ICMP_SLT:
    return SPIRV::OpSLessThan;
  case CmpInst::ICMP_UGE:
    return SPIRV::OpUGreaterThanEqual;
  case CmpInst::ICMP_UGT:
    return SPIRV::OpUGreaterThan;
  case CmpInst::ICMP_ULE:
    return SPIRV::OpULessThanEqual;
  case CmpInst::ICMP_ULT:
    return SPIRV::OpULessThan;
  default:
    llvm_unreachable("Unknown predicate type for ICmp");
  }
}

static unsigned getPtrCmpOpcode(unsigned Pred) {
  switch (static_cast<CmpInst::Predicate>(Pred)) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpPtrEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpPtrNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for pointer comparison");
  }
}

// Return the logical operation, or abort if none exists.
static unsigned getBoolCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpLogicalEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpLogicalNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for Bool comparison");
  }
}

bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectFreeze(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I) const {
  // There is no way to implement `freeze` correctly without support on the
  // SPIR-V standard side, but we can at least handle the simple (static) case
  // where the presence of an undef/poison value is obvious. The main benefit
  // of even this incomplete `freeze` support is that it keeps translation from
  // crashing due to missing support in the legalization and instruction
  // selection steps.
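  // Concretely: a freeze of a value known to be undef is selected as
  // OpConstantNull of the result type, while a freeze of any other value
  // degenerates to a plain COPY of the operand.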
  if (!I.getOperand(0).isReg() || !I.getOperand(1).isReg())
    return false;
  Register OpReg = I.getOperand(1).getReg();
  if (MachineInstr *Def = MRI->getVRegDef(OpReg)) {
    Register Reg;
    switch (Def->getOpcode()) {
    case SPIRV::ASSIGN_TYPE:
      if (MachineInstr *AssignToDef =
              MRI->getVRegDef(Def->getOperand(1).getReg())) {
        if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
          Reg = Def->getOperand(2).getReg();
      }
      break;
    case SPIRV::OpUndef:
      Reg = Def->getOperand(1).getReg();
      break;
    }
    unsigned DestOpCode;
    if (Reg.isValid()) {
      DestOpCode = SPIRV::OpConstantNull;
    } else {
      DestOpCode = TargetOpcode::COPY;
      Reg = OpReg;
    }
    return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(DestOpCode))
        .addDef(I.getOperand(0).getReg())
        .addUse(Reg)
        .constrainAllUses(TII, TRI, RBI);
  }
  return false;
}

bool SPIRVInstructionSelector::selectConstVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // TODO: only const case is supported for now.
  assert(std::all_of(
      I.operands_begin(), I.operands_end(), [this](const MachineOperand &MO) {
        if (MO.isDef())
          return true;
        if (!MO.isReg())
          return false;
        SPIRVType *ConstTy = this->MRI->getVRegDef(MO.getReg());
        assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
               ConstTy->getOperand(1).isReg());
        Register ConstReg = ConstTy->getOperand(1).getReg();
        const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
        assert(Const);
        return (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
                Const->getOpcode() == TargetOpcode::G_FCONSTANT);
      }));

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(SPIRV::OpConstantComposite))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
                                         const SPIRVType *ResType,
                                         unsigned CmpOpc,
                                         MachineInstr &I) const {
  Register Cmp0 = I.getOperand(2).getReg();
  Register Cmp1 = I.getOperand(3).getReg();
  assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
             GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
         "CMP operands should have the same type");
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Cmp0)
      .addUse(Cmp1)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  auto Pred = I.getOperand(1).getPredicate();
  unsigned CmpOpc;

  Register CmpOperand = I.getOperand(2).getReg();
  if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
    CmpOpc = getPtrCmpOpcode(Pred);
  else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
    CmpOpc = getBoolCmpOpcode(Pred);
  else
    CmpOpc = getICmpOpcode(Pred);
  return selectCmp(ResVReg, ResType, CmpOpc, I);
}

void SPIRVInstructionSelector::renderFImm32(MachineInstrBuilder &MIB,
                                            const MachineInstr &I,
                                            int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
         "Expected G_FCONSTANT");
  const ConstantFP *FPImm = I.getOperand(1).getFPImm();
  addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
}

void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
                                           const MachineInstr &I,
                                           int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
}

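// Constants are deduplicated: GR.find returns the vreg already recorded for
// this (constant, function) pair if one exists, so repeated scope and memory
// semantics literals all share a single OpConstantI/OpConstantNull definition.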
Register
SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
                                           const SPIRVType *ResType) const {
  Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
  const SPIRVType *SpvI32Ty =
      ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
  // Find a constant in the duplicates tracker (DT) or build a new one.
1241   auto ConstInt = ConstantInt::get(LLVMTy, Val);
1242   Register NewReg = GR.find(ConstInt, GR.CurMF);
1243   if (!NewReg.isValid()) {
1244     NewReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
1245     GR.add(ConstInt, GR.CurMF, NewReg);
1246     MachineInstr *MI;
1247     MachineBasicBlock &BB = *I.getParent();
1248     if (Val == 0) {
1249       MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1250                .addDef(NewReg)
1251                .addUse(GR.getSPIRVTypeID(SpvI32Ty));
1252     } else {
1253       MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
1254                .addDef(NewReg)
1255                .addUse(GR.getSPIRVTypeID(SpvI32Ty))
1256                .addImm(APInt(32, Val).getZExtValue());
1257     }
1258     constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
1259   }
1260   return NewReg;
1261 }
1262 
1263 bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
1264                                           const SPIRVType *ResType,
1265                                           MachineInstr &I) const {
1266   unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
1267   return selectCmp(ResVReg, ResType, CmpOp, I);
1268 }
1269 
1270 Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
1271                                                  MachineInstr &I) const {
1272   if (ResType->getOpcode() == SPIRV::OpTypeVector)
1273     return GR.getOrCreateConsIntVector(0, I, ResType, TII);
1274   return GR.getOrCreateConstInt(0, I, ResType, TII);
1275 }
1276 
1277 Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
1278                                                 const SPIRVType *ResType,
1279                                                 MachineInstr &I) const {
1280   unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
1281   APInt One =
1282       AllOnes ? APInt::getAllOnes(BitWidth) : APInt::getOneBitSet(BitWidth, 0);
1283   if (ResType->getOpcode() == SPIRV::OpTypeVector)
1284     return GR.getOrCreateConsIntVector(One.getZExtValue(), I, ResType, TII);
1285   return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII);
1286 }
1287 
1288 bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
1289                                             const SPIRVType *ResType,
1290                                             MachineInstr &I,
1291                                             bool IsSigned) const {
1292   // To extend a bool, we use OpSelect to pick between two constants.
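  // Illustrative scalar case (SPIR-V names assumed), for a signed extend:
  //   %res = OpSelect %int %cond %all_ones %zero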
1293   Register ZeroReg = buildZerosVal(ResType, I);
1294   Register OneReg = buildOnesVal(IsSigned, ResType, I);
1295   bool IsScalarBool =
1296       GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
1297   unsigned Opcode =
1298       IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
1299   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
1300       .addDef(ResVReg)
1301       .addUse(GR.getSPIRVTypeID(ResType))
1302       .addUse(I.getOperand(1).getReg())
1303       .addUse(OneReg)
1304       .addUse(ZeroReg)
1305       .constrainAllUses(TII, TRI, RBI);
1306 }
1307 
1308 bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
1309                                           const SPIRVType *ResType,
1310                                           MachineInstr &I, bool IsSigned,
1311                                           unsigned Opcode) const {
1312   Register SrcReg = I.getOperand(1).getReg();
1313   // A bool value can be converted to float directly, without OpConvert*ToF;
1314   // however, the translator generates OpSelect+OpConvert*ToF, so we do the same.
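  // Illustrative scalar sequence (SPIR-V names assumed):
  //   %tmp = OpSelect %int %b %one %zero
  //   %res = OpConvert[S|U]ToF %float %tmp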
1315   if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
1316     unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
1317     SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
1318     if (ResType->getOpcode() == SPIRV::OpTypeVector) {
1319       const unsigned NumElts = ResType->getOperand(2).getImm();
1320       TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
1321     }
1322     SrcReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1323     selectSelect(SrcReg, TmpType, I, false);
1324   }
1325   return selectUnOpWithSrc(ResVReg, ResType, I, SrcReg, Opcode);
1326 }
1327 
1328 bool SPIRVInstructionSelector::selectExt(Register ResVReg,
1329                                          const SPIRVType *ResType,
1330                                          MachineInstr &I, bool IsSigned) const {
1331   if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool))
1332     return selectSelect(ResVReg, ResType, I, IsSigned);
1333   unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
1334   return selectUnOp(ResVReg, ResType, I, Opcode);
1335 }
1336 
1337 bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
1338                                                Register ResVReg,
1339                                                MachineInstr &I,
1340                                                const SPIRVType *IntTy,
1341                                                const SPIRVType *BoolTy) const {
1342   // To truncate to a bool: AND with 1, then OpINotEqual against zero.
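  // Illustrative scalar sequence (SPIR-V names assumed):
  //   %bit = OpBitwiseAnd %int %x %one
  //   %res = OpINotEqual %bool %bit %zero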
1343   Register BitIntReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1344   bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
1345   unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
1346   Register Zero = buildZerosVal(IntTy, I);
1347   Register One = buildOnesVal(false, IntTy, I);
1348   MachineBasicBlock &BB = *I.getParent();
1349   BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
1350       .addDef(BitIntReg)
1351       .addUse(GR.getSPIRVTypeID(IntTy))
1352       .addUse(IntReg)
1353       .addUse(One)
1354       .constrainAllUses(TII, TRI, RBI);
1355   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
1356       .addDef(ResVReg)
1357       .addUse(GR.getSPIRVTypeID(BoolTy))
1358       .addUse(BitIntReg)
1359       .addUse(Zero)
1360       .constrainAllUses(TII, TRI, RBI);
1361 }
1362 
1363 bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
1364                                            const SPIRVType *ResType,
1365                                            MachineInstr &I) const {
1366   if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool)) {
1367     Register IntReg = I.getOperand(1).getReg();
1368     const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
1369     return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType);
1370   }
1371   bool IsSigned = GR.isScalarOrVectorSigned(ResType);
1372   unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
1373   return selectUnOp(ResVReg, ResType, I, Opcode);
1374 }
1375 
1376 bool SPIRVInstructionSelector::selectConst(Register ResVReg,
1377                                            const SPIRVType *ResType,
1378                                            const APInt &Imm,
1379                                            MachineInstr &I) const {
1380   unsigned TyOpcode = ResType->getOpcode();
1381   assert(TyOpcode != SPIRV::OpTypePointer || Imm.isZero());
1382   MachineBasicBlock &BB = *I.getParent();
1383   if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
1384       Imm.isZero())
1385     return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1386         .addDef(ResVReg)
1387         .addUse(GR.getSPIRVTypeID(ResType))
1388         .constrainAllUses(TII, TRI, RBI);
1389   if (TyOpcode == SPIRV::OpTypeInt) {
1390     assert(Imm.getBitWidth() <= 64 && "Unsupported integer width!");
1391     Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII);
1392     if (Reg == ResVReg)
1393       return true;
1394     return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
1395         .addDef(ResVReg)
1396         .addUse(Reg)
1397         .constrainAllUses(TII, TRI, RBI);
1398   }
1399   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
1400                  .addDef(ResVReg)
1401                  .addUse(GR.getSPIRVTypeID(ResType));
1402   // <=32-bit integers should have been matched by the tblgen-erated patterns.
1403   assert(Imm.getBitWidth() > 32);
1404   addNumImm(Imm, MIB);
1405   return MIB.constrainAllUses(TII, TRI, RBI);
1406 }
1407 
1408 bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
1409                                              const SPIRVType *ResType,
1410                                              MachineInstr &I) const {
1411   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
1412       .addDef(ResVReg)
1413       .addUse(GR.getSPIRVTypeID(ResType))
1414       .constrainAllUses(TII, TRI, RBI);
1415 }
1416 
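// Returns true if MO's vreg is defined by an ASSIGN_TYPE whose source operand
// is a G_CONSTANT, i.e. the operand can be folded to an immediate below.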
1417 static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
1418   assert(MO.isReg());
1419   const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
1420   if (TypeInst->getOpcode() != SPIRV::ASSIGN_TYPE)
1421     return false;
1422   assert(TypeInst->getOperand(1).isReg());
1423   MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
1424   return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
1425 }
1426 
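// Returns the zero-extended value of the G_CONSTANT behind MO's ASSIGN_TYPE;
// callers are expected to have checked isImm(MO, MRI) first.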
1427 static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
1428   const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
1429   MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
1430   assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT);
1431   return ImmInst->getOperand(1).getCImm()->getZExtValue();
1432 }
1433 
1434 bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
1435                                                const SPIRVType *ResType,
1436                                                MachineInstr &I) const {
1437   MachineBasicBlock &BB = *I.getParent();
1438   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
1439                  .addDef(ResVReg)
1440                  .addUse(GR.getSPIRVTypeID(ResType))
1441                  // object to insert
1442                  .addUse(I.getOperand(3).getReg())
1443                  // composite to insert into
1444                  .addUse(I.getOperand(2).getReg());
1445   for (unsigned i = 4; i < I.getNumOperands(); i++)
1446     MIB.addImm(foldImm(I.getOperand(i), MRI));
1447   return MIB.constrainAllUses(TII, TRI, RBI);
1448 }
1449 
1450 bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
1451                                                 const SPIRVType *ResType,
1452                                                 MachineInstr &I) const {
1453   MachineBasicBlock &BB = *I.getParent();
1454   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
1455                  .addDef(ResVReg)
1456                  .addUse(GR.getSPIRVTypeID(ResType))
1457                  .addUse(I.getOperand(2).getReg());
1458   for (unsigned i = 3; i < I.getNumOperands(); i++)
1459     MIB.addImm(foldImm(I.getOperand(i), MRI));
1460   return MIB.constrainAllUses(TII, TRI, RBI);
1461 }
1462 
1463 bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
1464                                                const SPIRVType *ResType,
1465                                                MachineInstr &I) const {
1466   if (isImm(I.getOperand(4), MRI))
1467     return selectInsertVal(ResVReg, ResType, I);
1468   MachineBasicBlock &BB = *I.getParent();
1469   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
1470       .addDef(ResVReg)
1471       .addUse(GR.getSPIRVTypeID(ResType))
1472       .addUse(I.getOperand(2).getReg())
1473       .addUse(I.getOperand(3).getReg())
1474       .addUse(I.getOperand(4).getReg())
1475       .constrainAllUses(TII, TRI, RBI);
1476 }
1477 
1478 bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
1479                                                 const SPIRVType *ResType,
1480                                                 MachineInstr &I) const {
1481   if (isImm(I.getOperand(3), MRI))
1482     return selectExtractVal(ResVReg, ResType, I);
1483   MachineBasicBlock &BB = *I.getParent();
1484   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
1485       .addDef(ResVReg)
1486       .addUse(GR.getSPIRVTypeID(ResType))
1487       .addUse(I.getOperand(2).getReg())
1488       .addUse(I.getOperand(3).getReg())
1489       .constrainAllUses(TII, TRI, RBI);
1490 }
1491 
1492 bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
1493                                          const SPIRVType *ResType,
1494                                          MachineInstr &I) const {
1495   const bool IsGEPInBounds = I.getOperand(2).getImm();
1496 
1497   // OpAccessChain could be used for OpenCL, but the SPIRV-LLVM Translator only
1498   // relies on PtrAccessChain, so we'll try not to deviate. For Vulkan, however,
1499   // we have to use Op[InBounds]AccessChain.
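  // Note: Op[InBounds]AccessChain has no leading "Element" index, so the first
  // GEP index operand is skipped for it below (StartingIndex 5 instead of 4).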
1500   const unsigned Opcode = STI.isVulkanEnv()
1501                               ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
1502                                                : SPIRV::OpAccessChain)
1503                               : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain
1504                                                : SPIRV::OpPtrAccessChain);
1505 
1506   auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
1507                  .addDef(ResVReg)
1508                  .addUse(GR.getSPIRVTypeID(ResType))
1509                  // Object to get a pointer to.
1510                  .addUse(I.getOperand(3).getReg());
1511   // Add the index operands.
1512   const unsigned StartingIndex =
1513       (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
1514           ? 5
1515           : 4;
1516   for (unsigned i = StartingIndex; i < I.getNumExplicitOperands(); ++i)
1517     Res.addUse(I.getOperand(i).getReg());
1518   return Res.constrainAllUses(TII, TRI, RBI);
1519 }
1520 
1521 bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
1522                                                const SPIRVType *ResType,
1523                                                MachineInstr &I) const {
1524   MachineBasicBlock &BB = *I.getParent();
1525   switch (cast<GIntrinsic>(I).getIntrinsicID()) {
1526   case Intrinsic::spv_load:
1527     return selectLoad(ResVReg, ResType, I);
1528   case Intrinsic::spv_store:
1529     return selectStore(I);
1530   case Intrinsic::spv_extractv:
1531     return selectExtractVal(ResVReg, ResType, I);
1532   case Intrinsic::spv_insertv:
1533     return selectInsertVal(ResVReg, ResType, I);
1534   case Intrinsic::spv_extractelt:
1535     return selectExtractElt(ResVReg, ResType, I);
1536   case Intrinsic::spv_insertelt:
1537     return selectInsertElt(ResVReg, ResType, I);
1538   case Intrinsic::spv_gep:
1539     return selectGEP(ResVReg, ResType, I);
1540   case Intrinsic::spv_unref_global:
1541   case Intrinsic::spv_init_global: {
1542     MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
1543     MachineInstr *Init = I.getNumExplicitOperands() > 2
1544                              ? MRI->getVRegDef(I.getOperand(2).getReg())
1545                              : nullptr;
1546     assert(MI);
1547     return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
1548   }
1549   case Intrinsic::spv_undef: {
1550     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
1551                    .addDef(ResVReg)
1552                    .addUse(GR.getSPIRVTypeID(ResType));
1553     return MIB.constrainAllUses(TII, TRI, RBI);
1554   }
1555   case Intrinsic::spv_const_composite: {
1556     // If no values are attached, the composite is a null constant.
1557     bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
1558     unsigned Opcode =
1559         IsNull ? SPIRV::OpConstantNull : SPIRV::OpConstantComposite;
1560     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
1561                    .addDef(ResVReg)
1562                    .addUse(GR.getSPIRVTypeID(ResType));
1563     // Skip the type MD node that we already used when generating assign.type.
1564     if (!IsNull) {
1565       for (unsigned i = I.getNumExplicitDefs() + 1;
1566            i < I.getNumExplicitOperands(); ++i) {
1567         MIB.addUse(I.getOperand(i).getReg());
1568       }
1569     }
1570     return MIB.constrainAllUses(TII, TRI, RBI);
1571   }
1572   case Intrinsic::spv_assign_name: {
1573     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
1574     MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
1575     for (unsigned i = I.getNumExplicitDefs() + 2;
1576          i < I.getNumExplicitOperands(); ++i) {
1577       MIB.addImm(I.getOperand(i).getImm());
1578     }
1579     return MIB.constrainAllUses(TII, TRI, RBI);
1580   }
1581   case Intrinsic::spv_switch: {
1582     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
1583     for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
1584       if (I.getOperand(i).isReg())
1585         MIB.addReg(I.getOperand(i).getReg());
1586       else if (I.getOperand(i).isCImm())
1587         addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
1588       else if (I.getOperand(i).isMBB())
1589         MIB.addMBB(I.getOperand(i).getMBB());
1590       else
1591         llvm_unreachable("Unexpected OpSwitch operand");
1592     }
1593     return MIB.constrainAllUses(TII, TRI, RBI);
1594   }
1595   case Intrinsic::spv_cmpxchg:
1596     return selectAtomicCmpXchg(ResVReg, ResType, I);
1597   case Intrinsic::spv_unreachable:
1598     BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable));
1599     break;
1600   case Intrinsic::spv_alloca:
1601     return selectFrameIndex(ResVReg, ResType, I);
1602   case Intrinsic::spv_alloca_array:
1603     return selectAllocaArray(ResVReg, ResType, I);
1604   case Intrinsic::spv_assume:
1605     if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
1606       BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
1607           .addUse(I.getOperand(1).getReg());
1608     break;
1609   case Intrinsic::spv_expect:
1610     if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
1611       BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
1612           .addDef(ResVReg)
1613           .addUse(GR.getSPIRVTypeID(ResType))
1614           .addUse(I.getOperand(2).getReg())
1615           .addUse(I.getOperand(3).getReg());
1616     break;
1617   default:
1618     llvm_unreachable("Intrinsic selection not implemented");
1619   }
1620   return true;
1621 }
1622 
1623 bool SPIRVInstructionSelector::selectAllocaArray(Register ResVReg,
1624                                                  const SPIRVType *ResType,
1625                                                  MachineInstr &I) const {
1626   // The alloca instruction had an array-size operand that is not the constant
1627   // 1, so allocate it as a variable-length array (OpVariableLengthArrayINTEL).
1628   MachineBasicBlock &BB = *I.getParent();
1629   return BuildMI(BB, I, I.getDebugLoc(),
1630                  TII.get(SPIRV::OpVariableLengthArrayINTEL))
1631       .addDef(ResVReg)
1632       .addUse(GR.getSPIRVTypeID(ResType))
1633       .addUse(I.getOperand(2).getReg())
1634       .constrainAllUses(TII, TRI, RBI);
1635 }
1636 
1637 bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
1638                                                 const SPIRVType *ResType,
1639                                                 MachineInstr &I) const {
1640   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
1641       .addDef(ResVReg)
1642       .addUse(GR.getSPIRVTypeID(ResType))
1643       .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
1644       .constrainAllUses(TII, TRI, RBI);
1645 }
1646 
1647 bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
1648   // InstructionSelector walks backwards through the instructions. We can use
1649   // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
1650   // first, so we can generate an OpBranchConditional here. If there is no
1651   // G_BRCOND, we just use OpBranch for a regular unconditional branch.
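  // Illustrative output (SPIR-V assembly, labels assumed):
  //   OpBranchConditional %cond %true_bb %false_bb  ; preceded by a G_BRCOND
  //   OpBranch %target_bb                           ; plain unconditional case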
1652   const MachineInstr *PrevI = I.getPrevNode();
1653   MachineBasicBlock &MBB = *I.getParent();
1654   if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
1655     return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
1656         .addUse(PrevI->getOperand(0).getReg())
1657         .addMBB(PrevI->getOperand(1).getMBB())
1658         .addMBB(I.getOperand(0).getMBB())
1659         .constrainAllUses(TII, TRI, RBI);
1660   }
1661   return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
1662       .addMBB(I.getOperand(0).getMBB())
1663       .constrainAllUses(TII, TRI, RBI);
1664 }
1665 
1666 bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
1667   // InstructionSelector walks backwards through the instructions. For an
1668   // explicit conditional branch with no fallthrough, we use both a G_BR and a
1669   // G_BRCOND to create an OpBranchConditional. We should hit G_BR first, and
1670   // generate the OpBranchConditional in selectBranch above.
1671   //
1672   // If an OpBranchConditional has been generated, we simply return, as the work
1673   // is already done. If there is no OpBranchConditional, LLVM must be relying on
1674   // implicit fallthrough to the next basic block, so we need to create an
1675   // OpBranchConditional with an explicit "false" argument pointing to the next
1676   // basic block that LLVM would fall through to.
1677   const MachineInstr *NextI = I.getNextNode();
1678   // Check if this has already been successfully selected.
1679   if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
1680     return true;
1681   // Must be relying on implicit block fallthrough, so generate an
1682   // OpBranchConditional with the "next" basic block as the "false" target.
1683   MachineBasicBlock &MBB = *I.getParent();
1684   unsigned NextMBBNum = MBB.getNextNode()->getNumber();
1685   MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
1686   return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
1687       .addUse(I.getOperand(0).getReg())
1688       .addMBB(I.getOperand(1).getMBB())
1689       .addMBB(NextMBB)
1690       .constrainAllUses(TII, TRI, RBI);
1691 }
1692 
1693 bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
1694                                          const SPIRVType *ResType,
1695                                          MachineInstr &I) const {
1696   auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
1697                  .addDef(ResVReg)
1698                  .addUse(GR.getSPIRVTypeID(ResType));
1699   const unsigned NumOps = I.getNumOperands();
1700   for (unsigned i = 1; i < NumOps; i += 2) {
1701     MIB.addUse(I.getOperand(i + 0).getReg());
1702     MIB.addMBB(I.getOperand(i + 1).getMBB());
1703   }
1704   return MIB.constrainAllUses(TII, TRI, RBI);
1705 }
1706 
1707 bool SPIRVInstructionSelector::selectGlobalValue(
1708     Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
1709   // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
1710   MachineIRBuilder MIRBuilder(I);
1711   const GlobalValue *GV = I.getOperand(1).getGlobal();
1712   Type *GVType = GV->getValueType();
1713   SPIRVType *PointerBaseType;
1714   if (GVType->isArrayTy()) {
1715     SPIRVType *ArrayElementType =
1716         GR.getOrCreateSPIRVType(GVType->getArrayElementType(), MIRBuilder,
1717                                 SPIRV::AccessQualifier::ReadWrite, false);
1718     PointerBaseType = GR.getOrCreateSPIRVArrayType(
1719         ArrayElementType, GVType->getArrayNumElements(), I, TII);
1720   } else {
1721     PointerBaseType = GR.getOrCreateSPIRVType(
1722         GVType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
1723   }
1724   SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
1725       PointerBaseType, I, TII,
1726       addressSpaceToStorageClass(GV->getAddressSpace(), STI));
1727 
1728   std::string GlobalIdent;
1729   if (!GV->hasName()) {
1730     unsigned &ID = UnnamedGlobalIDs[GV];
1731     if (ID == 0)
1732       ID = UnnamedGlobalIDs.size();
1733     GlobalIdent = "__unnamed_" + Twine(ID).str();
1734   } else {
1735     GlobalIdent = GV->getGlobalIdentifier();
1736   }
1737 
1738   // Behaviour of functions as operands depends on availability of the
1739   // corresponding extension (SPV_INTEL_function_pointers):
1740   // - If there is an extension to operate with functions as operands:
1741   // We create a proper constant operand and evaluate a correct type for a
1742   // function pointer.
1743   // - Without the required extension:
1744   // We have functions as operands in tests with blocks of instructions, e.g. in
1745   // transcoding/global_block.ll. These operands are not used and should be
1746   // substituted by zero constants. Their type is always expected to be
1747   // OpTypePointer Function %uchar.
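  // With the extension, the emitted constant looks roughly like the following
  // (illustrative, ids assumed):
  //   %fp = OpConstantFunctionPointerINTEL %fn_ptr_ty %fn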
1748   if (isa<Function>(GV)) {
1749     const Constant *ConstVal = GV;
1750     MachineBasicBlock &BB = *I.getParent();
1751     Register NewReg = GR.find(ConstVal, GR.CurMF);
1752     if (!NewReg.isValid()) {
1753       NewReg = ResVReg; // Reuse the result vreg; both paths below return.
1754       GR.add(ConstVal, GR.CurMF, NewReg);
1755       const Function *GVFun =
1756           STI.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)
1757               ? dyn_cast<Function>(GV)
1758               : nullptr;
1759       if (GVFun) {
1760         // References to a function via function pointers generate virtual
1761         // registers without a definition. We will resolve it later, during
1762         // module analysis stage.
1763         MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1764         Register FuncVReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
1765         MRI->setRegClass(FuncVReg, &SPIRV::IDRegClass);
1766         MachineInstrBuilder MB =
1767             BuildMI(BB, I, I.getDebugLoc(),
1768                     TII.get(SPIRV::OpConstantFunctionPointerINTEL))
1769                 .addDef(NewReg)
1770                 .addUse(GR.getSPIRVTypeID(ResType))
1771                 .addUse(FuncVReg);
1772         // Map the function pointer operand to the Function it references.
1773         GR.recordFunctionPointer(&MB.getInstr()->getOperand(2), GVFun);
1774         return MB.constrainAllUses(TII, TRI, RBI);
1775       }
1776       return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1777           .addDef(NewReg)
1778           .addUse(GR.getSPIRVTypeID(ResType))
1779           .constrainAllUses(TII, TRI, RBI);
1780     }
1781     assert(NewReg != ResVReg);
1782     return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
1783         .addDef(ResVReg)
1784         .addUse(NewReg)
1785         .constrainAllUses(TII, TRI, RBI);
1786   }
1787   auto GlobalVar = cast<GlobalVariable>(GV);
1788   assert(GlobalVar->getName() != "llvm.global.annotations");
1789 
1790   bool HasInit = GlobalVar->hasInitializer() &&
1791                  !isa<UndefValue>(GlobalVar->getInitializer());
1792   // Skip the empty declaration for GVs with initializers until we get the decl
1793   // with the initializer passed in.
1794   if (HasInit && !Init)
1795     return true;
1796 
1797   unsigned AddrSpace = GV->getAddressSpace();
1798   SPIRV::StorageClass::StorageClass Storage =
1799       addressSpaceToStorageClass(AddrSpace, STI);
1800   bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage &&
1801                   Storage != SPIRV::StorageClass::Function;
1802   SPIRV::LinkageType::LinkageType LnkType =
1803       (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
1804           ? SPIRV::LinkageType::Import
1805           : (GV->getLinkage() == GlobalValue::LinkOnceODRLinkage &&
1806                      STI.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr)
1807                  ? SPIRV::LinkageType::LinkOnceODR
1808                  : SPIRV::LinkageType::Export);
1809 
1810   Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
1811                                         Storage, Init, GlobalVar->isConstant(),
1812                                         HasLnkTy, LnkType, MIRBuilder, true);
1813   return Reg.isValid();
1814 }
1815 
1816 bool SPIRVInstructionSelector::selectLog10(Register ResVReg,
1817                                            const SPIRVType *ResType,
1818                                            MachineInstr &I) const {
1819   if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
1820     return selectExtInst(ResVReg, ResType, I, CL::log10);
1821   }
1822 
1823   // There is no log10 instruction in the GLSL Extended Instruction set, so it
1824   // is implemented as:
1825   // log10(x) = log2(x) * (1 / log2(10))
1826   //          = log2(x) * 0.30103
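  // (Derivation: log2(10) = 3.321928..., so 1 / log2(10) = 0.3010299..., which
  // the float constant 0.30103f used below approximates.)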
1827 
1828   MachineIRBuilder MIRBuilder(I);
1829   MachineBasicBlock &BB = *I.getParent();
1830 
1831   // Build log2(x).
1832   Register VarReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1833   bool Result =
1834       BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
1835           .addDef(VarReg)
1836           .addUse(GR.getSPIRVTypeID(ResType))
1837           .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
1838           .addImm(GL::Log2)
1839           .add(I.getOperand(1))
1840           .constrainAllUses(TII, TRI, RBI);
1841 
1842   // Build 0.30103.
1843   assert(ResType->getOpcode() == SPIRV::OpTypeVector ||
1844          ResType->getOpcode() == SPIRV::OpTypeFloat);
1845   // TODO: Add matrix implementation once supported by the HLSL frontend.
1846   const SPIRVType *SpirvScalarType =
1847       ResType->getOpcode() == SPIRV::OpTypeVector
1848           ? GR.getSPIRVTypeForVReg(ResType->getOperand(1).getReg())
1849           : ResType;
1850   Register ScaleReg =
1851       GR.buildConstantFP(APFloat(0.30103f), MIRBuilder, SpirvScalarType);
1852 
1853   // Multiply log2(x) by 0.30103 to get log10(x) result.
1854   auto Opcode = ResType->getOpcode() == SPIRV::OpTypeVector
1855                     ? SPIRV::OpVectorTimesScalar
1856                     : SPIRV::OpFMulS;
1857   Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
1858                 .addDef(ResVReg)
1859                 .addUse(GR.getSPIRVTypeID(ResType))
1860                 .addUse(VarReg)
1861                 .addUse(ScaleReg)
1862                 .constrainAllUses(TII, TRI, RBI);
1863 
1864   return Result;
1865 }
1866 
1867 namespace llvm {
1868 InstructionSelector *
1869 createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
1870                                const SPIRVSubtarget &Subtarget,
1871                                const RegisterBankInfo &RBI) {
1872   return new SPIRVInstructionSelector(TM, Subtarget, RBI);
1873 }
1874 } // namespace llvm
1875