//===- SPIRVInstructionSelector.cpp ------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the targeting of the InstructionSelector class for
// SPIRV.
// TODO: This should be generated by TableGen.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/SPIRVBaseInfo.h"
#include "MCTargetDesc/SPIRVMCTargetDesc.h"
#include "SPIRV.h"
#include "SPIRVGlobalRegistry.h"
#include "SPIRVInstrInfo.h"
#include "SPIRVRegisterBankInfo.h"
#include "SPIRVRegisterInfo.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "spirv-isel"

using namespace llvm;
namespace CL = SPIRV::OpenCLExtInst;
namespace GL = SPIRV::GLSLExtInst;

using ExtInstList =
    std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class SPIRVInstructionSelector : public InstructionSelector {
  const SPIRVSubtarget &STI;
  const SPIRVInstrInfo &TII;
  const SPIRVRegisterInfo &TRI;
  const RegisterBankInfo &RBI;
  SPIRVGlobalRegistry &GR;
  MachineRegisterInfo *MRI;

  /// We need to keep track of the numbers we give to anonymous global values
  /// so that we can generate the same name every time one is needed.
  mutable DenseMap<const GlobalValue *, unsigned> UnnamedGlobalIDs;

public:
  SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                           const SPIRVSubtarget &ST,
                           const RegisterBankInfo &RBI);
  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override;
  // Common selection code. Instruction-specific selection occurs in spvSelect.
  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

private:
  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // All instruction-specific selection that didn't happen in select().
  // It is basically a large switch/case delegating to all the other select
  // methods.
  bool spvSelect(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectGlobalValue(Register ResVReg, MachineInstr &I,
                         const MachineInstr *Init = nullptr) const;

  bool selectUnOpWithSrc(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I, Register SrcReg,
                         unsigned Opcode) const;
  bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  unsigned Opcode) const;

  bool selectBitcast(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;

  bool selectLoad(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectStore(MachineInstr &I) const;

  bool selectStackSave(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectStackRestore(MachineInstr &I) const;

  bool selectMemOperation(Register ResVReg, MachineInstr &I) const;

  bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I, unsigned NewOpcode,
                       unsigned NegateOpcode = 0) const;

  bool selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectFence(MachineInstr &I) const;

  bool selectAddrSpaceCast(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectBitreverse(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectConstVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;
  bool selectSplatVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectCmp(Register ResVReg, const SPIRVType *ResType,
                 unsigned comparisonOpcode, MachineInstr &I) const;

  bool selectICmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectFCmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  void renderImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                   int OpIdx) const;
  void renderFImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                    int OpIdx) const;

  bool selectConst(Register ResVReg, const SPIRVType *ResType, const APInt &Imm,
                   MachineInstr &I) const;

  bool selectSelect(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                    bool IsSigned) const;
  bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  bool IsSigned, unsigned Opcode) const;
  bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                 bool IsSigned) const;

  bool selectTrunc(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectIntToBool(Register IntReg, Register ResVReg, MachineInstr &I,
                       const SPIRVType *intTy, const SPIRVType *boolTy) const;

  bool selectOpUndef(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;
  bool selectFreeze(Register ResVReg, const SPIRVType *ResType,
                    MachineInstr &I) const;
  bool selectIntrinsic(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractVal(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertVal(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractElt(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertElt(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectGEP(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectFrameIndex(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectAllocaArray(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectBranch(MachineInstr &I) const;
  bool selectBranchCond(MachineInstr &I) const;

  bool selectPhi(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst,
                     GL::GLSLExtInst GLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, const ExtInstList &ExtInsts) const;

  bool selectLog10(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectSpvThreadId(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectUnmergeValues(MachineInstr &I) const;

  Register buildI32Constant(uint32_t Val, MachineInstr &I,
                            const SPIRVType *ResType = nullptr) const;

  Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildOnesVal(bool AllOnes, const SPIRVType *ResType,
                        MachineInstr &I) const;
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                                                   const SPIRVSubtarget &ST,
                                                   const RegisterBankInfo &RBI)
    : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                       CodeGenCoverage *CoverageInfo,
                                       ProfileSummaryInfo *PSI,
                                       BlockFrequencyInfo *BFI) {
  MRI = &MF.getRegInfo();
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}

static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI);

// Defined in SPIRVLegalizerInfo.cpp.
extern bool isTypeFoldingSupported(unsigned Opcode);

bool SPIRVInstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  unsigned Opcode = I.getOpcode();
  // If it's not a GMIR instruction, we've selected it already.
  if (!isPreISelGenericOpcode(Opcode)) {
    if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed any more.
      auto *Def = MRI->getVRegDef(I.getOperand(1).getReg());
      if (isTypeFoldingSupported(Def->getOpcode())) {
        bool Res = selectImpl(I, *CoverageInfo);
        assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
        if (Res)
          return Res;
      }
      MRI->replaceRegWith(I.getOperand(1).getReg(), I.getOperand(0).getReg());
      I.removeFromParent();
      return true;
    } else if (I.getNumDefs() == 1) {
      // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(I.getOperand(0).getReg(), LLT::scalar(32));
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
    return false;
  }

  // Common code for getting return reg+type, and removing selected instr
  // from parent occurs here. Instr-specific selection happens in spvSelect().
  bool HasDefs = I.getNumDefs() > 0;
  Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
  assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  if (spvSelect(ResVReg, ResType, I)) {
    if (HasDefs) // Make all vregs 32 bits (for SPIR-V IDs).
      for (unsigned i = 0; i < I.getNumDefs(); ++i)
        MRI->setType(I.getOperand(i).getReg(), LLT::scalar(32));
    I.removeFromParent();
    return true;
  }
  return false;
}

bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const unsigned Opcode = I.getOpcode();
  if (isTypeFoldingSupported(Opcode) && Opcode != TargetOpcode::G_CONSTANT)
    return selectImpl(I, *CoverageInfo);
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);
  case TargetOpcode::G_FREEZE:
    return selectFreeze(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectConstVector(ResVReg, ResType, I);
  case TargetOpcode::G_SPLAT_VECTOR:
    return selectSplatVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMSET:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
  case TargetOpcode::G_SMIN:
    return selectExtInst(ResVReg, ResType, I, CL::s_min, GL::SMin);
  case TargetOpcode::G_UMIN:
    return selectExtInst(ResVReg, ResType, I, CL::u_min, GL::UMin);

  case TargetOpcode::G_SMAX:
    return selectExtInst(ResVReg, ResType, I, CL::s_max, GL::SMax);
  case TargetOpcode::G_UMAX:
    return selectExtInst(ResVReg, ResType, I, CL::u_max, GL::UMax);

  case TargetOpcode::G_FMA:
    return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma);

  case TargetOpcode::G_FPOW:
    return selectExtInst(ResVReg, ResType, I, CL::pow, GL::Pow);
  case TargetOpcode::G_FPOWI:
    return selectExtInst(ResVReg, ResType, I, CL::pown);

  case TargetOpcode::G_FEXP:
    return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
  case TargetOpcode::G_FEXP2:
    return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);

  case TargetOpcode::G_FLOG:
    return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
  case TargetOpcode::G_FLOG2:
    return selectExtInst(ResVReg, ResType, I, CL::log2, GL::Log2);
  case TargetOpcode::G_FLOG10:
    return selectLog10(ResVReg, ResType, I);

  case TargetOpcode::G_FABS:
    return selectExtInst(ResVReg, ResType, I, CL::fabs, GL::FAbs);
  case TargetOpcode::G_ABS:
    return selectExtInst(ResVReg, ResType, I, CL::s_abs, GL::SAbs);

  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmin, GL::FMin);
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMAXIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmax, GL::FMax);

  case TargetOpcode::G_FCOPYSIGN:
    return selectExtInst(ResVReg, ResType, I, CL::copysign);

  case TargetOpcode::G_FCEIL:
    return selectExtInst(ResVReg, ResType, I, CL::ceil, GL::Ceil);
  case TargetOpcode::G_FFLOOR:
    return selectExtInst(ResVReg, ResType, I, CL::floor, GL::Floor);

  case TargetOpcode::G_FCOS:
    return selectExtInst(ResVReg, ResType, I, CL::cos, GL::Cos);
  case TargetOpcode::G_FSIN:
    return selectExtInst(ResVReg, ResType, I, CL::sin, GL::Sin);

  case TargetOpcode::G_FSQRT:
    return selectExtInst(ResVReg, ResType, I, CL::sqrt, GL::Sqrt);

  case TargetOpcode::G_CTTZ:
  case TargetOpcode::G_CTTZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::ctz);
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTLZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::clz);

  case TargetOpcode::G_INTRINSIC_ROUND:
    return selectExtInst(ResVReg, ResType, I, CL::round, GL::Round);
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return selectExtInst(ResVReg, ResType, I, CL::trunc, GL::Trunc);
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FNEARBYINT:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);

  case TargetOpcode::G_SMULH:
    return selectExtInst(ResVReg, ResType, I, CL::s_mul_hi);
  case TargetOpcode::G_UMULH:
    return selectExtInst(ResVReg, ResType, I, CL::u_mul_hi);

  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectBitcast(ResVReg, ResType, I);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);
  case TargetOpcode::G_PTR_ADD: {
    // Currently, we get G_PTR_ADD only as a result of translating global
    // variables initialized with constant expressions like GV + Const
    // (see test opencl/basic/progvar_prog_scope_init.ll).
    // TODO: extend the handler once we have other cases.
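    // Sketch of the emitted constant expression (register names below are
    // illustrative, not from this file):
    //   %res = OpSpecConstantOp %resTy InBoundsPtrAccessChain %gv %zero %off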
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    Register GV = I.getOperand(1).getReg();
    MachineRegisterInfo::def_instr_iterator II = MRI->def_instr_begin(GV);
    assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
            (*II).getOpcode() == TargetOpcode::COPY ||
            (*II).getOpcode() == SPIRV::OpVariable) &&
           isImm(I.getOperand(2), MRI));
    Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addImm(static_cast<uint32_t>(
                       SPIRV::Opcode::InBoundsPtrAccessChain))
                   .addUse(GV)
                   .addUse(Idx)
                   .addUse(I.getOperand(2).getReg());
    return MIB.constrainAllUses(TII, TRI, RBI);
  }

  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_ATOMICRMW_FADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT);
  case TargetOpcode::G_ATOMICRMW_FSUB:
    // Translate G_ATOMICRMW_FSUB to OpAtomicFAddEXT with a negated value
    // operand.
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT,
                           SPIRV::OpFNegate);
  case TargetOpcode::G_ATOMICRMW_FMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMinEXT);
  case TargetOpcode::G_ATOMICRMW_FMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMaxEXT);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  case TargetOpcode::G_STACKSAVE:
    return selectStackSave(ResVReg, ResType, I);
  case TargetOpcode::G_STACKRESTORE:
    return selectStackRestore(I);

  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I);

  default:
    return false;
  }
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst) const {
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst,
                                             GL::GLSLExtInst GLInst) const {
  ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
                          {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
  return selectExtInst(ResVReg, ResType, I, ExtInsts);
}

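// Walks the candidate list in order and emits OpExtInst with the first
// extended instruction set the subtarget can use. As an illustrative sketch
// (register names are not from this file), selecting G_FSQRT on an OpenCL
// target produces:
//   %res = OpExtInst %floatTy %opencl_std sqrt %x
// while a GLSL target would pick the GLSL_std_450 set and its Sqrt opcode.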
bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             const ExtInstList &Insts) const {
  for (const auto &Ex : Insts) {
    SPIRV::InstructionSet::InstructionSet Set = Ex.first;
    uint32_t Opcode = Ex.second;
    if (STI.canUseExtInstSet(Set)) {
      MachineBasicBlock &BB = *I.getParent();
      auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
                     .addDef(ResVReg)
                     .addUse(GR.getSPIRVTypeID(ResType))
                     .addImm(static_cast<uint32_t>(Set))
                     .addImm(Opcode);
      const unsigned NumOps = I.getNumOperands();
      for (unsigned i = 1; i < NumOps; ++i)
        MIB.add(I.getOperand(i));
      return MIB.constrainAllUses(TII, TRI, RBI);
    }
  }
  return false;
}

bool SPIRVInstructionSelector::selectUnOpWithSrc(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I,
                                                 Register SrcReg,
                                                 unsigned Opcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I,
                                          unsigned Opcode) const {
  return selectUnOpWithSrc(ResVReg, ResType, I, I.getOperand(1).getReg(),
                           Opcode);
}

bool SPIRVInstructionSelector::selectBitcast(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  Register OpReg = I.getOperand(1).getReg();
  SPIRVType *OpType = OpReg.isValid() ? GR.getSPIRVTypeForVReg(OpReg) : nullptr;
  if (!GR.isBitcastCompatible(ResType, OpType))
    report_fatal_error("incompatible result and operand types in a bitcast");
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}

static SPIRV::Scope::Scope getScope(SyncScope::ID Ord) {
  switch (Ord) {
  case SyncScope::SingleThread:
    return SPIRV::Scope::Invocation;
  case SyncScope::System:
    return SPIRV::Scope::Device;
  default:
    llvm_unreachable("Unsupported synchronization Scope ID.");
  }
}

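// Maps MachineMemOperand flags onto a SPIR-V Memory Operands literal. As a
// sketch (bitmask values per the SPIR-V spec: Volatile = 0x1, Aligned = 0x2,
// Nontemporal = 0x4), a volatile 4-byte-aligned load gets the operands
// "Volatile|Aligned 4" appended to its OpLoad.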
static void addMemoryOperands(MachineMemOperand *MemOp,
                              MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    MIB.addImm(SpvMemOp);
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
      MIB.addImm(MemOp->getAlign().value());
  }
}

static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (Flags & MachineMemOperand::Flags::MOVolatile)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (Flags & MachineMemOperand::Flags::MONonTemporal)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
    MIB.addImm(SpvMemOp);
}

bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(Ptr);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
                 .addUse(Ptr)
                 .addUse(StoreVal);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackSave(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stacksave intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSaveMemoryINTEL))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stackrestore intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  if (!I.getOperand(0).isReg())
    return false;
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpRestoreMemoryINTEL))
      .addUse(I.getOperand(0).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

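// Lowers G_MEMCPY/G_MEMMOVE/G_MEMSET to OpCopyMemorySized. For G_MEMSET with
// constant value and length, the fill pattern is materialized first: an i8
// array constant is wrapped in a UniformConstant OpVariable, bitcast to an i8
// pointer, and used as the copy source. A rough shape of the emitted result
// (illustrative names):
//   %src = OpBitcast %i8Ptr %fillVar
//          OpCopyMemorySized %dst %src %len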
bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Register SrcReg = I.getOperand(1).getReg();
  if (I.getOpcode() == TargetOpcode::G_MEMSET) {
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI);
    unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI);
    SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num, I, TII);
    Register Const = GR.getOrCreateConsIntArray(Val, I, ArrTy, TII);
    SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
        ArrTy, I, TII, SPIRV::StorageClass::UniformConstant);
    // TODO: check if we have such GV, add init, use buildGlobalVariable.
    Type *LLVMArrTy = ArrayType::get(
        IntegerType::get(GR.CurMF->getFunction().getContext(), 8), Num);
    GlobalVariable *GV =
        new GlobalVariable(LLVMArrTy, true, GlobalValue::InternalLinkage);
    Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    GR.add(GV, GR.CurMF, VarReg);

    buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {});
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
        .addDef(VarReg)
        .addUse(GR.getSPIRVTypeID(VarTy))
        .addImm(SPIRV::StorageClass::UniformConstant)
        .addUse(Const)
        .constrainAllUses(TII, TRI, RBI);
    SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
        ValTy, I, TII, SPIRV::StorageClass::UniformConstant);
    SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    selectUnOpWithSrc(SrcReg, SourceTy, I, VarReg, SPIRV::OpBitcast);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addUse(I.getOperand(0).getReg())
                 .addUse(SrcReg)
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
    addMemoryOperands(*I.memoperands_begin(), MIB);
  bool Result = MIB.constrainAllUses(TII, TRI, RBI);
  if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg())
    BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
        .addUse(MIB->getOperand(0).getReg());
  return Result;
}

bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode,
                                               unsigned NegateOpcode) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(1).getReg();
  // TODO: Changed as it's implemented in the translator. See test/atomicrmw.ll
  // auto ScSem =
  // getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr));
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);

  bool Result = false;
  Register ValueReg = I.getOperand(2).getReg();
  if (NegateOpcode != 0) {
    // Translation with a negated value operand was requested.
    Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    Result |= selectUnOpWithSrc(TmpReg, ResType, I, ValueReg, NegateOpcode);
    ValueReg = TmpReg;
  }

  Result |= BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(Ptr)
                .addUse(ScopeReg)
                .addUse(MemSemReg)
                .addUse(ValueReg)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}

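// Lowers G_UNMERGE_VALUES of a vector into one OpCompositeExtract per result.
// As a sketch, unmerging a 2-element vector %v yields (illustrative names):
//   %r0 = OpCompositeExtract %scalarTy %v 0
//   %r1 = OpCompositeExtract %scalarTy %v 1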
bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
  unsigned ArgI = I.getNumOperands() - 1;
  Register SrcReg =
      I.getOperand(ArgI).isReg() ? I.getOperand(ArgI).getReg() : Register(0);
  SPIRVType *DefType =
      SrcReg.isValid() ? GR.getSPIRVTypeForVReg(SrcReg) : nullptr;
  if (!DefType || DefType->getOpcode() != SPIRV::OpTypeVector)
    report_fatal_error(
        "cannot select G_UNMERGE_VALUES with a non-vector argument");

  SPIRVType *ScalarType =
      GR.getSPIRVTypeForVReg(DefType->getOperand(1).getReg());
  MachineBasicBlock &BB = *I.getParent();
  bool Res = false;
  for (unsigned i = 0; i < I.getNumDefs(); ++i) {
    Register ResVReg = I.getOperand(i).getReg();
    SPIRVType *ResType = GR.getSPIRVTypeForVReg(ResVReg);
    if (!ResType) {
      // There were no "assign type" actions, so let's fix this now.
      ResType = ScalarType;
      MRI->setRegClass(ResVReg, &SPIRV::IDRegClass);
      MRI->setType(ResVReg, LLT::scalar(GR.getScalarOrVectorBitWidth(ResType)));
      GR.assignSPIRVTypeToVReg(ResType, ResVReg, *GR.CurMF);
    }
    auto MIB =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(SrcReg)
            .addImm(static_cast<int64_t>(i));
    Res |= MIB.constrainAllUses(TII, TRI, RBI);
  }
  return Res;
}

bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
  AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem, I);
  SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
  uint32_t Scope = static_cast<uint32_t>(getScope(Ord));
  Register ScopeReg = buildI32Constant(Scope, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .constrainAllUses(TII, TRI, RBI);
}

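// Lowers G_ATOMIC_CMPXCHG to OpAtomicCompareExchange plus the glue needed to
// rebuild the {value, success} pair LLVM expects. A sketch of the sequence
// (illustrative names):
//   %old  = OpAtomicCompareExchange %valTy %ptr %scope %eqSem %neqSem %val %cmp
//   %succ = OpIEqual %boolTy %old %cmp
//   %tmp  = OpCompositeInsert %resTy %old %undef 0
//   %res  = OpCompositeInsert %resTy %succ %tmp 1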
bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  Register ScopeReg;
  Register MemSemEqReg;
  Register MemSemNeqReg;
  Register Ptr = I.getOperand(2).getReg();
  if (!isa<GIntrinsic>(I)) {
    assert(I.hasOneMemOperand());
    const MachineMemOperand *MemOp = *I.memoperands_begin();
    unsigned Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
    ScopeReg = buildI32Constant(Scope, I);

    unsigned ScSem = static_cast<uint32_t>(
        getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
    AtomicOrdering AO = MemOp->getSuccessOrdering();
    unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
    MemSemEqReg = buildI32Constant(MemSemEq, I);
    AtomicOrdering FO = MemOp->getFailureOrdering();
    unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
    MemSemNeqReg =
        MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
  } else {
    ScopeReg = I.getOperand(5).getReg();
    MemSemEqReg = I.getOperand(6).getReg();
    MemSemNeqReg = I.getOperand(7).getReg();
  }

  Register Cmp = I.getOperand(3).getReg();
  Register Val = I.getOperand(4).getReg();
  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  Register ACmpRes = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  const DebugLoc &DL = I.getDebugLoc();
  bool Result =
      BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
          .addDef(ACmpRes)
          .addUse(GR.getSPIRVTypeID(SpvValTy))
          .addUse(Ptr)
          .addUse(ScopeReg)
          .addUse(MemSemEqReg)
          .addUse(MemSemNeqReg)
          .addUse(Val)
          .addUse(Cmp)
          .constrainAllUses(TII, TRI, RBI);
  Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
                .addDef(CmpSuccReg)
                .addUse(GR.getSPIRVTypeID(BoolTy))
                .addUse(ACmpRes)
                .addUse(Cmp)
                .constrainAllUses(TII, TRI, RBI);
  Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(TmpReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(ACmpRes)
                .addUse(GR.getOrCreateUndef(I, ResType, TII))
                .addImm(0)
                .constrainAllUses(TII, TRI, RBI);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(CmpSuccReg)
                .addUse(TmpReg)
                .addImm(1)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}

static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Workgroup:
  case SPIRV::StorageClass::CrossWorkgroup:
  case SPIRV::StorageClass::Function:
    return true;
  default:
    return false;
  }
}

static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::DeviceOnlyINTEL:
  case SPIRV::StorageClass::HostOnlyINTEL:
    return true;
  default:
    return false;
  }
}

// In SPIR-V address space casting can only happen to and from the Generic
// storage class. We can also only cast Workgroup, CrossWorkgroup, or Function
// pointers to and from Generic pointers. As such, we can convert e.g. from
// Workgroup to Function by going via a Generic pointer as an intermediary. All
// other combinations can only be done by a bitcast, and are probably not safe.
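// For instance, a Workgroup-to-Function cast is emitted as (sketch):
//   %gen = OpPtrCastToGeneric %genPtrTy %src
//   %res = OpGenericCastToPtr %fnPtrTy %gen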
bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  // If the AddrSpaceCast has a single user, and that user is an
  // OpConstantComposite, an OpVariable, or an spv_init_global intrinsic, we
  // should select OpSpecConstantOp instead.
  auto UIs = MRI->use_instructions(ResVReg);
  if (!UIs.empty() && ++UIs.begin() == UIs.end() &&
      (UIs.begin()->getOpcode() == SPIRV::OpConstantComposite ||
       UIs.begin()->getOpcode() == SPIRV::OpVariable ||
       isSpvIntrinsic(*UIs.begin(), Intrinsic::spv_init_global))) {
    Register NewReg = I.getOperand(1).getReg();
    MachineBasicBlock &BB = *I.getParent();
    SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII,
                                             SPIRV::StorageClass::Generic);
    bool Result =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric))
            .addUse(NewReg)
            .constrainAllUses(TII, TRI, RBI);
    return Result;
  }
  Register SrcPtr = I.getOperand(1).getReg();
  SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
  SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);
  SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResVReg);

  // Don't generate a cast between identical storage classes.
  if (SrcSC == DstSC)
    return true;

  // Casting from an eligible pointer to Generic.
  if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  // Casting from Generic to an eligible pointer.
  if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
  // Casting between 2 eligible pointers using Generic as an intermediary.
  if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
    Register Tmp = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
        SrcPtrTy, I, TII, SPIRV::StorageClass::Generic);
    MachineBasicBlock &BB = *I.getParent();
    const DebugLoc &DL = I.getDebugLoc();
    bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
                       .addDef(Tmp)
                       .addUse(GR.getSPIRVTypeID(GenericPtrTy))
                       .addUse(SrcPtr)
                       .constrainAllUses(TII, TRI, RBI);
    return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
                          .addDef(ResVReg)
                          .addUse(GR.getSPIRVTypeID(ResType))
                          .addUse(Tmp)
                          .constrainAllUses(TII, TRI, RBI);
  }

  // Check if instructions from the SPV_INTEL_usm_storage_classes extension may
  // be applied.
  if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::CrossWorkgroup)
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpPtrCastToCrossWorkgroupINTEL);
  if (SrcSC == SPIRV::StorageClass::CrossWorkgroup && isUSMStorageClass(DstSC))
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpCrossWorkgroupCastToPtrINTEL);

  // TODO: Should this case just be disallowed completely?
  // We're casting 2 other arbitrary address spaces, so have to bitcast.
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}

static unsigned getFCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::FCMP_OEQ:
    return SPIRV::OpFOrdEqual;
  case CmpInst::FCMP_OGE:
    return SPIRV::OpFOrdGreaterThanEqual;
  case CmpInst::FCMP_OGT:
    return SPIRV::OpFOrdGreaterThan;
  case CmpInst::FCMP_OLE:
    return SPIRV::OpFOrdLessThanEqual;
  case CmpInst::FCMP_OLT:
    return SPIRV::OpFOrdLessThan;
  case CmpInst::FCMP_ONE:
    return SPIRV::OpFOrdNotEqual;
  case CmpInst::FCMP_ORD:
    return SPIRV::OpOrdered;
  case CmpInst::FCMP_UEQ:
    return SPIRV::OpFUnordEqual;
  case CmpInst::FCMP_UGE:
    return SPIRV::OpFUnordGreaterThanEqual;
  case CmpInst::FCMP_UGT:
    return SPIRV::OpFUnordGreaterThan;
  case CmpInst::FCMP_ULE:
    return SPIRV::OpFUnordLessThanEqual;
  case CmpInst::FCMP_ULT:
    return SPIRV::OpFUnordLessThan;
  case CmpInst::FCMP_UNE:
    return SPIRV::OpFUnordNotEqual;
  case CmpInst::FCMP_UNO:
    return SPIRV::OpUnordered;
  default:
    llvm_unreachable("Unknown predicate type for FCmp");
  }
}

static unsigned getICmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpIEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpINotEqual;
  case CmpInst::ICMP_SGE:
    return SPIRV::OpSGreaterThanEqual;
  case CmpInst::ICMP_SGT:
    return SPIRV::OpSGreaterThan;
  case CmpInst::ICMP_SLE:
    return SPIRV::OpSLessThanEqual;
  case CmpInst::ICMP_SLT:
    return SPIRV::OpSLessThan;
  case CmpInst::ICMP_UGE:
    return SPIRV::OpUGreaterThanEqual;
  case CmpInst::ICMP_UGT:
    return SPIRV::OpUGreaterThan;
  case CmpInst::ICMP_ULE:
    return SPIRV::OpULessThanEqual;
  case CmpInst::ICMP_ULT:
    return SPIRV::OpULessThan;
  default:
    llvm_unreachable("Unknown predicate type for ICmp");
  }
}

static unsigned getPtrCmpOpcode(unsigned Pred) {
  switch (static_cast<CmpInst::Predicate>(Pred)) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpPtrEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpPtrNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for pointer comparison");
  }
}

// Return the logical operation, or abort if none exists.
static unsigned getBoolCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpLogicalEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpLogicalNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for Bool comparison");
  }
}

bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectFreeze(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I) const {
  // There is no way to implement `freeze` correctly without support on the
  // SPIR-V standard side, but we may at least address a simple (static) case
  // when the presence of an undef/poison value is obvious. The main benefit of
  // even this incomplete `freeze` support is preventing translation from
  // crashing due to lack of support in the legalization and instruction
  // selection steps.
  if (!I.getOperand(0).isReg() || !I.getOperand(1).isReg())
    return false;
  Register OpReg = I.getOperand(1).getReg();
  if (MachineInstr *Def = MRI->getVRegDef(OpReg)) {
    Register Reg;
    switch (Def->getOpcode()) {
    case SPIRV::ASSIGN_TYPE:
      if (MachineInstr *AssignToDef =
              MRI->getVRegDef(Def->getOperand(1).getReg())) {
        if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
          Reg = Def->getOperand(2).getReg();
      }
      break;
    case SPIRV::OpUndef:
      Reg = Def->getOperand(1).getReg();
      break;
    }
    unsigned DestOpCode;
    if (Reg.isValid()) {
      DestOpCode = SPIRV::OpConstantNull;
    } else {
      DestOpCode = TargetOpcode::COPY;
      Reg = OpReg;
    }
    return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(DestOpCode))
        .addDef(I.getOperand(0).getReg())
        .addUse(Reg)
        .constrainAllUses(TII, TRI, RBI);
  }
  return false;
}

bool SPIRVInstructionSelector::selectConstVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // TODO: only const case is supported for now.
  assert(std::all_of(
      I.operands_begin(), I.operands_end(), [this](const MachineOperand &MO) {
        if (MO.isDef())
          return true;
        if (!MO.isReg())
          return false;
        SPIRVType *ConstTy = this->MRI->getVRegDef(MO.getReg());
        assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
               ConstTy->getOperand(1).isReg());
        Register ConstReg = ConstTy->getOperand(1).getReg();
        const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
        assert(Const);
        return (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
                Const->getOpcode() == TargetOpcode::G_FCONSTANT);
      }));

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(SPIRV::OpConstantComposite))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectSplatVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  if (ResType->getOpcode() != SPIRV::OpTypeVector)
    report_fatal_error("Cannot select G_SPLAT_VECTOR with a non-vector result");
  unsigned N = GR.getScalarOrVectorComponentCount(ResType);
  unsigned OpIdx = I.getNumExplicitDefs();
  if (!I.getOperand(OpIdx).isReg())
    report_fatal_error("Unexpected argument in G_SPLAT_VECTOR");

  // Check whether we may construct a constant vector.
  Register OpReg = I.getOperand(OpIdx).getReg();
  bool IsConst = false;
  if (SPIRVType *OpDef = MRI->getVRegDef(OpReg)) {
    if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
        OpDef->getOperand(1).isReg()) {
      if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
        OpDef = RefDef;
    }
    IsConst = OpDef->getOpcode() == TargetOpcode::G_CONSTANT ||
              OpDef->getOpcode() == TargetOpcode::G_FCONSTANT;
  }

  if (!IsConst && N < 2)
    report_fatal_error(
        "There must be at least two constituent operands in a vector");

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(IsConst ? SPIRV::OpConstantComposite
                                     : SPIRV::OpCompositeConstruct))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = 0; i < N; ++i)
    MIB.addUse(OpReg);
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
                                         const SPIRVType *ResType,
                                         unsigned CmpOpc,
                                         MachineInstr &I) const {
  Register Cmp0 = I.getOperand(2).getReg();
  Register Cmp1 = I.getOperand(3).getReg();
  assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
             GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
         "CMP operands should have the same type");
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Cmp0)
      .addUse(Cmp1)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  auto Pred = I.getOperand(1).getPredicate();
  unsigned CmpOpc;

  Register CmpOperand = I.getOperand(2).getReg();
  if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
    CmpOpc = getPtrCmpOpcode(Pred);
  else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
    CmpOpc = getBoolCmpOpcode(Pred);
  else
    CmpOpc = getICmpOpcode(Pred);
  return selectCmp(ResVReg, ResType, CmpOpc, I);
}

void SPIRVInstructionSelector::renderFImm32(MachineInstrBuilder &MIB,
                                            const MachineInstr &I,
                                            int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
         "Expected G_FCONSTANT");
  const ConstantFP *FPImm = I.getOperand(1).getFPImm();
  addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
}

void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
                                           const MachineInstr &I,
                                           int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
}

1279 Register
1280 SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
1281                                            const SPIRVType *ResType) const {
1282   Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
1283   const SPIRVType *SpvI32Ty =
1284       ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
1285   // Find a constant in DT or build a new one.
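       // ("DT" is the global registry's duplicates tracker, which caches
       // previously materialized constants so their registers can be reused.)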
1286   auto ConstInt = ConstantInt::get(LLVMTy, Val);
1287   Register NewReg = GR.find(ConstInt, GR.CurMF);
1288   if (!NewReg.isValid()) {
1289     NewReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
1290     GR.add(ConstInt, GR.CurMF, NewReg);
1291     MachineInstr *MI;
1292     MachineBasicBlock &BB = *I.getParent();
1293     if (Val == 0) {
1294       MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1295                .addDef(NewReg)
1296                .addUse(GR.getSPIRVTypeID(SpvI32Ty));
1297     } else {
1298       MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
1299                .addDef(NewReg)
1300                .addUse(GR.getSPIRVTypeID(SpvI32Ty))
1301                .addImm(APInt(32, Val).getZExtValue());
1302     }
1303     constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
1304   }
1305   return NewReg;
1306 }
1307 
1308 bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
1309                                           const SPIRVType *ResType,
1310                                           MachineInstr &I) const {
1311   unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
1312   return selectCmp(ResVReg, ResType, CmpOp, I);
1313 }
1314 
1315 Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
1316                                                  MachineInstr &I) const {
1317   if (ResType->getOpcode() == SPIRV::OpTypeVector)
1318     return GR.getOrCreateConsIntVector(0, I, ResType, TII);
1319   return GR.getOrCreateConstInt(0, I, ResType, TII);
1320 }
1321 
1322 Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
1323                                                 const SPIRVType *ResType,
1324                                                 MachineInstr &I) const {
1325   unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
1326   APInt One =
1327       AllOnes ? APInt::getAllOnes(BitWidth) : APInt::getOneBitSet(BitWidth, 0);
1328   if (ResType->getOpcode() == SPIRV::OpTypeVector)
1329     return GR.getOrCreateConsIntVector(One.getZExtValue(), I, ResType, TII);
1330   return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII);
1331 }
1332 
1333 bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
1334                                             const SPIRVType *ResType,
1335                                             MachineInstr &I,
1336                                             bool IsSigned) const {
1337   // To extend a bool, we need to use OpSelect between constants.
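       // Schematically (IDs are illustrative, not literal):
       //   %res = OpSelect %cond %one %zero
       // where %one is all-ones for a signed extension and 1 otherwise.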
1338   Register ZeroReg = buildZerosVal(ResType, I);
1339   Register OneReg = buildOnesVal(IsSigned, ResType, I);
1340   bool IsScalarBool =
1341       GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
1342   unsigned Opcode =
1343       IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
1344   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
1345       .addDef(ResVReg)
1346       .addUse(GR.getSPIRVTypeID(ResType))
1347       .addUse(I.getOperand(1).getReg())
1348       .addUse(OneReg)
1349       .addUse(ZeroReg)
1350       .constrainAllUses(TII, TRI, RBI);
1351 }
1352 
1353 bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
1354                                           const SPIRVType *ResType,
1355                                           MachineInstr &I, bool IsSigned,
1356                                           unsigned Opcode) const {
1357   Register SrcReg = I.getOperand(1).getReg();
1358   // We can convert a bool value directly to a float type without OpConvert*ToF;
1359   // however, the translator generates OpSelect+OpConvert*ToF, so we do the same.
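       // Schematically (illustrative IDs), for a bool source this emits:
       //   %tmp = OpSelect %cond %one %zero   ; int of the result's bit width
       //   %res = OpConvert[S|U]ToF %tmp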
1360   if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
1361     unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
1362     SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
1363     if (ResType->getOpcode() == SPIRV::OpTypeVector) {
1364       const unsigned NumElts = ResType->getOperand(2).getImm();
1365       TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
1366     }
1367     SrcReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1368     selectSelect(SrcReg, TmpType, I, false);
1369   }
1370   return selectUnOpWithSrc(ResVReg, ResType, I, SrcReg, Opcode);
1371 }
1372 
1373 bool SPIRVInstructionSelector::selectExt(Register ResVReg,
1374                                          const SPIRVType *ResType,
1375                                          MachineInstr &I, bool IsSigned) const {
1376   if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool))
1377     return selectSelect(ResVReg, ResType, I, IsSigned);
1378   unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
1379   return selectUnOp(ResVReg, ResType, I, Opcode);
1380 }
1381 
1382 bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
1383                                                Register ResVReg,
1384                                                MachineInstr &I,
1385                                                const SPIRVType *IntTy,
1386                                                const SPIRVType *BoolTy) const {
1387   // To truncate to a bool, we use OpBitwiseAnd with 1 and OpINotEqual against zero.
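       // Schematically (illustrative IDs):
       //   %bit = OpBitwiseAnd[S|V] %int %one
       //   %res = OpINotEqual %bit %zero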
1388   Register BitIntReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1389   bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
1390   unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
1391   Register Zero = buildZerosVal(IntTy, I);
1392   Register One = buildOnesVal(false, IntTy, I);
1393   MachineBasicBlock &BB = *I.getParent();
1394   BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
1395       .addDef(BitIntReg)
1396       .addUse(GR.getSPIRVTypeID(IntTy))
1397       .addUse(IntReg)
1398       .addUse(One)
1399       .constrainAllUses(TII, TRI, RBI);
1400   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
1401       .addDef(ResVReg)
1402       .addUse(GR.getSPIRVTypeID(BoolTy))
1403       .addUse(BitIntReg)
1404       .addUse(Zero)
1405       .constrainAllUses(TII, TRI, RBI);
1406 }
1407 
1408 bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
1409                                            const SPIRVType *ResType,
1410                                            MachineInstr &I) const {
1411   if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool)) {
1412     Register IntReg = I.getOperand(1).getReg();
1413     const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
1414     return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType);
1415   }
1416   bool IsSigned = GR.isScalarOrVectorSigned(ResType);
1417   unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
1418   return selectUnOp(ResVReg, ResType, I, Opcode);
1419 }
1420 
1421 bool SPIRVInstructionSelector::selectConst(Register ResVReg,
1422                                            const SPIRVType *ResType,
1423                                            const APInt &Imm,
1424                                            MachineInstr &I) const {
1425   unsigned TyOpcode = ResType->getOpcode();
1426   assert(TyOpcode != SPIRV::OpTypePointer || Imm.isZero());
1427   MachineBasicBlock &BB = *I.getParent();
1428   if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
1429       Imm.isZero())
1430     return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1431         .addDef(ResVReg)
1432         .addUse(GR.getSPIRVTypeID(ResType))
1433         .constrainAllUses(TII, TRI, RBI);
1434   if (TyOpcode == SPIRV::OpTypeInt) {
1435     assert(Imm.getBitWidth() <= 64 && "Unsupported integer width!");
1436     Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII);
1437     if (Reg == ResVReg)
1438       return true;
1439     return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
1440         .addDef(ResVReg)
1441         .addUse(Reg)
1442         .constrainAllUses(TII, TRI, RBI);
1443   }
1444   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
1445                  .addDef(ResVReg)
1446                  .addUse(GR.getSPIRVTypeID(ResType));
1447   // <=32-bit integers should be caught by the sdag pattern.
1448   assert(Imm.getBitWidth() > 32);
1449   addNumImm(Imm, MIB);
1450   return MIB.constrainAllUses(TII, TRI, RBI);
1451 }
1452 
1453 bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
1454                                              const SPIRVType *ResType,
1455                                              MachineInstr &I) const {
1456   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
1457       .addDef(ResVReg)
1458       .addUse(GR.getSPIRVTypeID(ResType))
1459       .constrainAllUses(TII, TRI, RBI);
1460 }
1461 
1462 static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
1463   assert(MO.isReg());
1464   const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
1465   if (TypeInst->getOpcode() != SPIRV::ASSIGN_TYPE)
1466     return false;
1467   assert(TypeInst->getOperand(1).isReg());
1468   MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
1469   return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
1470 }
1471 
1472 static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
1473   const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
1474   MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
1475   assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT);
1476   return ImmInst->getOperand(1).getCImm()->getZExtValue();
1477 }
1478 
1479 bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
1480                                                const SPIRVType *ResType,
1481                                                MachineInstr &I) const {
1482   MachineBasicBlock &BB = *I.getParent();
1483   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
1484                  .addDef(ResVReg)
1485                  .addUse(GR.getSPIRVTypeID(ResType))
1486                  // Object to insert.
1487                  .addUse(I.getOperand(3).getReg())
1488                  // Composite to insert into.
1489                  .addUse(I.getOperand(2).getReg());
1490   for (unsigned i = 4; i < I.getNumOperands(); i++)
1491     MIB.addImm(foldImm(I.getOperand(i), MRI));
1492   return MIB.constrainAllUses(TII, TRI, RBI);
1493 }
1494 
1495 bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
1496                                                 const SPIRVType *ResType,
1497                                                 MachineInstr &I) const {
1498   MachineBasicBlock &BB = *I.getParent();
1499   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
1500                  .addDef(ResVReg)
1501                  .addUse(GR.getSPIRVTypeID(ResType))
1502                  .addUse(I.getOperand(2).getReg());
1503   for (unsigned i = 3; i < I.getNumOperands(); i++)
1504     MIB.addImm(foldImm(I.getOperand(i), MRI));
1505   return MIB.constrainAllUses(TII, TRI, RBI);
1506 }
1507 
1508 bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
1509                                                const SPIRVType *ResType,
1510                                                MachineInstr &I) const {
1511   if (isImm(I.getOperand(4), MRI))
1512     return selectInsertVal(ResVReg, ResType, I);
1513   MachineBasicBlock &BB = *I.getParent();
1514   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
1515       .addDef(ResVReg)
1516       .addUse(GR.getSPIRVTypeID(ResType))
1517       .addUse(I.getOperand(2).getReg())
1518       .addUse(I.getOperand(3).getReg())
1519       .addUse(I.getOperand(4).getReg())
1520       .constrainAllUses(TII, TRI, RBI);
1521 }
1522 
1523 bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
1524                                                 const SPIRVType *ResType,
1525                                                 MachineInstr &I) const {
1526   if (isImm(I.getOperand(3), MRI))
1527     return selectExtractVal(ResVReg, ResType, I);
1528   MachineBasicBlock &BB = *I.getParent();
1529   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
1530       .addDef(ResVReg)
1531       .addUse(GR.getSPIRVTypeID(ResType))
1532       .addUse(I.getOperand(2).getReg())
1533       .addUse(I.getOperand(3).getReg())
1534       .constrainAllUses(TII, TRI, RBI);
1535 }
1536 
1537 bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
1538                                          const SPIRVType *ResType,
1539                                          MachineInstr &I) const {
1540   const bool IsGEPInBounds = I.getOperand(2).getImm();
1541 
1542   // OpAccessChain could be used for OpenCL, but the SPIRV-LLVM Translator only
1543   // relies on PtrAccessChain, so we'll try not to deviate. For Vulkan, however,
1544   // we have to use Op[InBounds]AccessChain.
1545   const unsigned Opcode = STI.isVulkanEnv()
1546                               ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
1547                                                : SPIRV::OpAccessChain)
1548                               : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain
1549                                                : SPIRV::OpPtrAccessChain);
1550 
1551   auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
1552                  .addDef(ResVReg)
1553                  .addUse(GR.getSPIRVTypeID(ResType))
1554                  // Object to get a pointer to.
1555                  .addUse(I.getOperand(3).getReg());
1556   // Adding indices.
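       // Op[InBounds]PtrAccessChain consumes operand 4 as its extra leading
       // "Element" index, while the plain Op[InBounds]AccessChain forms have no
       // such operand, so their indices start at operand 5.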
1557   const unsigned StartingIndex =
1558       (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
1559           ? 5
1560           : 4;
1561   for (unsigned i = StartingIndex; i < I.getNumExplicitOperands(); ++i)
1562     Res.addUse(I.getOperand(i).getReg());
1563   return Res.constrainAllUses(TII, TRI, RBI);
1564 }
1565 
1566 bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
1567                                                const SPIRVType *ResType,
1568                                                MachineInstr &I) const {
1569   MachineBasicBlock &BB = *I.getParent();
1570   Intrinsic::ID IID = cast<GIntrinsic>(I).getIntrinsicID();
1571   switch (IID) {
1572   case Intrinsic::spv_load:
1573     return selectLoad(ResVReg, ResType, I);
1574   case Intrinsic::spv_store:
1575     return selectStore(I);
1576   case Intrinsic::spv_extractv:
1577     return selectExtractVal(ResVReg, ResType, I);
1578   case Intrinsic::spv_insertv:
1579     return selectInsertVal(ResVReg, ResType, I);
1580   case Intrinsic::spv_extractelt:
1581     return selectExtractElt(ResVReg, ResType, I);
1582   case Intrinsic::spv_insertelt:
1583     return selectInsertElt(ResVReg, ResType, I);
1584   case Intrinsic::spv_gep:
1585     return selectGEP(ResVReg, ResType, I);
1586   case Intrinsic::spv_unref_global:
1587   case Intrinsic::spv_init_global: {
1588     MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
1589     MachineInstr *Init = I.getNumExplicitOperands() > 2
1590                              ? MRI->getVRegDef(I.getOperand(2).getReg())
1591                              : nullptr;
1592     assert(MI);
1593     return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
1594   }
1595   case Intrinsic::spv_undef: {
1596     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
1597                    .addDef(ResVReg)
1598                    .addUse(GR.getSPIRVTypeID(ResType));
1599     return MIB.constrainAllUses(TII, TRI, RBI);
1600   }
1601   case Intrinsic::spv_const_composite: {
1602     // If no values are attached, the composite is null constant.
1603     // If no values are attached, the composite is a null constant.
1604     unsigned Opcode =
1605         IsNull ? SPIRV::OpConstantNull : SPIRV::OpConstantComposite;
1606     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
1607                    .addDef(ResVReg)
1608                    .addUse(GR.getSPIRVTypeID(ResType));
1609     // Skip the type MD node; we already used it when generating assign.type.
1610     if (!IsNull) {
1611       for (unsigned i = I.getNumExplicitDefs() + 1;
1612            i < I.getNumExplicitOperands(); ++i) {
1613         MIB.addUse(I.getOperand(i).getReg());
1614       }
1615     }
1616     return MIB.constrainAllUses(TII, TRI, RBI);
1617   }
1618   case Intrinsic::spv_assign_name: {
1619     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
1620     MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
1621     for (unsigned i = I.getNumExplicitDefs() + 2;
1622          i < I.getNumExplicitOperands(); ++i) {
1623       MIB.addImm(I.getOperand(i).getImm());
1624     }
1625     return MIB.constrainAllUses(TII, TRI, RBI);
1626   }
1627   case Intrinsic::spv_switch: {
1628     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
1629     for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
1630       if (I.getOperand(i).isReg())
1631         MIB.addReg(I.getOperand(i).getReg());
1632       else if (I.getOperand(i).isCImm())
1633         addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
1634       else if (I.getOperand(i).isMBB())
1635         MIB.addMBB(I.getOperand(i).getMBB());
1636       else
1637         llvm_unreachable("Unexpected OpSwitch operand");
1638     }
1639     return MIB.constrainAllUses(TII, TRI, RBI);
1640   }
1641   case Intrinsic::spv_cmpxchg:
1642     return selectAtomicCmpXchg(ResVReg, ResType, I);
1643   case Intrinsic::spv_unreachable:
1644     BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable));
1645     break;
1646   case Intrinsic::spv_alloca:
1647     return selectFrameIndex(ResVReg, ResType, I);
1648   case Intrinsic::spv_alloca_array:
1649     return selectAllocaArray(ResVReg, ResType, I);
1650   case Intrinsic::spv_assume:
1651     if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
1652       BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
1653           .addUse(I.getOperand(1).getReg());
1654     break;
1655   case Intrinsic::spv_expect:
1656     if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
1657       BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
1658           .addDef(ResVReg)
1659           .addUse(GR.getSPIRVTypeID(ResType))
1660           .addUse(I.getOperand(2).getReg())
1661           .addUse(I.getOperand(3).getReg());
1662     break;
1663   case Intrinsic::spv_thread_id:
1664     return selectSpvThreadId(ResVReg, ResType, I);
1665   case Intrinsic::spv_lifetime_start:
1666   case Intrinsic::spv_lifetime_end: {
1667     unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
1668                                                        : SPIRV::OpLifetimeStop;
1669     int64_t Size = I.getOperand(I.getNumExplicitDefs() + 1).getImm();
1670     Register PtrReg = I.getOperand(I.getNumExplicitDefs() + 2).getReg();
1671     unsigned PointeeOpType = GR.getPointeeTypeOp(PtrReg);
1672     bool IsNonvoidPtr = PointeeOpType != 0 && PointeeOpType != SPIRV::OpTypeVoid;
1673     if (Size == -1 || IsNonvoidPtr)
1674       Size = 0;
1675     BuildMI(BB, I, I.getDebugLoc(), TII.get(Op)).addUse(PtrReg).addImm(Size);
1676   } break;
1677   default: {
1678     std::string DiagMsg;
1679     raw_string_ostream OS(DiagMsg);
1680     I.print(OS);
1681     DiagMsg = "Intrinsic selection not implemented: " + DiagMsg;
1682     report_fatal_error(DiagMsg.c_str(), false);
1683   }
1684   }
1685   return true;
1686 }
1687 
1688 bool SPIRVInstructionSelector::selectAllocaArray(Register ResVReg,
1689                                                  const SPIRVType *ResType,
1690                                                  MachineInstr &I) const {
1691   // There was an allocation size parameter to the allocation instruction
1692   // that is not 1.
1693   MachineBasicBlock &BB = *I.getParent();
1694   return BuildMI(BB, I, I.getDebugLoc(),
1695                  TII.get(SPIRV::OpVariableLengthArrayINTEL))
1696       .addDef(ResVReg)
1697       .addUse(GR.getSPIRVTypeID(ResType))
1698       .addUse(I.getOperand(2).getReg())
1699       .constrainAllUses(TII, TRI, RBI);
1700 }
1701 
1702 bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
1703                                                 const SPIRVType *ResType,
1704                                                 MachineInstr &I) const {
1705   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
1706       .addDef(ResVReg)
1707       .addUse(GR.getSPIRVTypeID(ResType))
1708       .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
1709       .constrainAllUses(TII, TRI, RBI);
1710 }
1711 
1712 bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
1713   // InstructionSelector walks backwards through the instructions. We can use
1714   // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
1715   // first, so we can generate an OpBranchConditional here. If there is no
1716   // G_BRCOND, we just use OpBranch for a regular unconditional branch.
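       // Schematically (illustrative MIR and IDs):
       //   G_BRCOND %cond, %bb.true ; G_BR %bb.false
       //     ==> OpBranchConditional %cond %true %false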
1717   const MachineInstr *PrevI = I.getPrevNode();
1718   MachineBasicBlock &MBB = *I.getParent();
1719   if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
1720     return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
1721         .addUse(PrevI->getOperand(0).getReg())
1722         .addMBB(PrevI->getOperand(1).getMBB())
1723         .addMBB(I.getOperand(0).getMBB())
1724         .constrainAllUses(TII, TRI, RBI);
1725   }
1726   return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
1727       .addMBB(I.getOperand(0).getMBB())
1728       .constrainAllUses(TII, TRI, RBI);
1729 }
1730 
1731 bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
1732   // InstructionSelector walks backwards through the instructions. For an
1733   // explicit conditional branch with no fallthrough, we use both a G_BR and a
1734   // G_BRCOND to create an OpBranchConditional. We should hit G_BR first, and
1735   // generate the OpBranchConditional in selectBranch above.
1736   //
1737   // If an OpBranchConditional has been generated, we simply return, as the work
1738   // is already done. If there is no OpBranchConditional, LLVM must be relying on
1739   // implicit fallthrough to the next basic block, so we need to create an
1740   // OpBranchConditional with an explicit "false" argument pointing to the next
1741   // basic block that LLVM would fall through to.
1742   const MachineInstr *NextI = I.getNextNode();
1743   // Check if this has already been successfully selected.
1744   if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
1745     return true;
1746   // Must be relying on implicit block fallthrough, so generate an
1747   // OpBranchConditional with the "next" basic block as the "false" target.
1748   MachineBasicBlock &MBB = *I.getParent();
1749   unsigned NextMBBNum = MBB.getNextNode()->getNumber();
1750   MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
1751   return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
1752       .addUse(I.getOperand(0).getReg())
1753       .addMBB(I.getOperand(1).getMBB())
1754       .addMBB(NextMBB)
1755       .constrainAllUses(TII, TRI, RBI);
1756 }
1757 
1758 bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
1759                                          const SPIRVType *ResType,
1760                                          MachineInstr &I) const {
1761   auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
1762                  .addDef(ResVReg)
1763                  .addUse(GR.getSPIRVTypeID(ResType));
1764   const unsigned NumOps = I.getNumOperands();
1765   for (unsigned i = 1; i < NumOps; i += 2) {
1766     MIB.addUse(I.getOperand(i + 0).getReg());
1767     MIB.addMBB(I.getOperand(i + 1).getMBB());
1768   }
1769   return MIB.constrainAllUses(TII, TRI, RBI);
1770 }
1771 
1772 bool SPIRVInstructionSelector::selectGlobalValue(
1773     Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
1774   // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
1775   MachineIRBuilder MIRBuilder(I);
1776   const GlobalValue *GV = I.getOperand(1).getGlobal();
1777   Type *GVType = GV->getValueType();
1778   SPIRVType *PointerBaseType;
1779   if (GVType->isArrayTy()) {
1780     SPIRVType *ArrayElementType =
1781         GR.getOrCreateSPIRVType(GVType->getArrayElementType(), MIRBuilder,
1782                                 SPIRV::AccessQualifier::ReadWrite, false);
1783     PointerBaseType = GR.getOrCreateSPIRVArrayType(
1784         ArrayElementType, GVType->getArrayNumElements(), I, TII);
1785   } else {
1786     PointerBaseType = GR.getOrCreateSPIRVType(
1787         GVType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
1788   }
1789   SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
1790       PointerBaseType, I, TII,
1791       addressSpaceToStorageClass(GV->getAddressSpace(), STI));
1792 
1793   std::string GlobalIdent;
1794   if (!GV->hasName()) {
1795     unsigned &ID = UnnamedGlobalIDs[GV];
1796     if (ID == 0)
1797       ID = UnnamedGlobalIDs.size();
1798     GlobalIdent = "__unnamed_" + Twine(ID).str();
1799   } else {
1800     GlobalIdent = GV->getGlobalIdentifier();
1801   }
1802 
1803   // The behaviour of functions as operands depends on the availability of the
1804   // corresponding extension (SPV_INTEL_function_pointers):
1805   // - If there is an extension to operate with functions as operands:
1806   // We create a proper constant operand and evaluate a correct type for a
1807   // function pointer.
1808   // - Without the required extension:
1809   // We have functions as operands in tests with blocks of instructions, e.g. in
1810   // transcoding/global_block.ll. These operands are not used and should be
1811   // substituted by zero constants. Their type is expected to be always
1812   // OpTypePointer Function %uchar.
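       // Schematically (illustrative IDs), with the extension enabled this emits
       //   %res = OpConstantFunctionPointerINTEL %fnptr_ty %fn
       // (%fn is a placeholder resolved during module analysis); without it, the
       // reference degrades to OpConstantNull of the pointer type.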
1813   if (isa<Function>(GV)) {
1814     const Constant *ConstVal = GV;
1815     MachineBasicBlock &BB = *I.getParent();
1816     Register NewReg = GR.find(ConstVal, GR.CurMF);
1817     if (!NewReg.isValid()) {
1818       Register NewReg = ResVReg;
1819       GR.add(ConstVal, GR.CurMF, NewReg);
1820       const Function *GVFun =
1821           STI.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)
1822               ? dyn_cast<Function>(GV)
1823               : nullptr;
1824       if (GVFun) {
1825         // References to a function via function pointers generate virtual
1826         // registers without a definition. We will resolve them later, during the
1827         // module analysis stage.
1828         MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1829         Register FuncVReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
1830         MRI->setRegClass(FuncVReg, &SPIRV::IDRegClass);
1831         MachineInstrBuilder MB =
1832             BuildMI(BB, I, I.getDebugLoc(),
1833                     TII.get(SPIRV::OpConstantFunctionPointerINTEL))
1834                 .addDef(NewReg)
1835                 .addUse(GR.getSPIRVTypeID(ResType))
1836                 .addUse(FuncVReg);
1837         // mapping the function pointer to the used Function
1838         // Map the function pointer to the Function it references.
1839         return MB.constrainAllUses(TII, TRI, RBI);
1840       }
1841       return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1842           .addDef(NewReg)
1843           .addUse(GR.getSPIRVTypeID(ResType))
1844           .constrainAllUses(TII, TRI, RBI);
1845     }
1846     assert(NewReg != ResVReg);
1847     return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
1848         .addDef(ResVReg)
1849         .addUse(NewReg)
1850         .constrainAllUses(TII, TRI, RBI);
1851   }
1852   auto GlobalVar = cast<GlobalVariable>(GV);
1853   assert(GlobalVar->getName() != "llvm.global.annotations");
1854 
1855   bool HasInit = GlobalVar->hasInitializer() &&
1856                  !isa<UndefValue>(GlobalVar->getInitializer());
1857   // Skip the empty declaration for GVs with initializers until we get the decl
1858   // with the passed initializer.
1859   if (HasInit && !Init)
1860     return true;
1861 
1862   unsigned AddrSpace = GV->getAddressSpace();
1863   SPIRV::StorageClass::StorageClass Storage =
1864       addressSpaceToStorageClass(AddrSpace, STI);
1865   bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage &&
1866                   Storage != SPIRV::StorageClass::Function;
1867   SPIRV::LinkageType::LinkageType LnkType =
1868       (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
1869           ? SPIRV::LinkageType::Import
1870           : (GV->getLinkage() == GlobalValue::LinkOnceODRLinkage &&
1871                      STI.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr)
1872                  ? SPIRV::LinkageType::LinkOnceODR
1873                  : SPIRV::LinkageType::Export);
1874 
1875   Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
1876                                         Storage, Init, GlobalVar->isConstant(),
1877                                         HasLnkTy, LnkType, MIRBuilder, true);
1878   return Reg.isValid();
1879 }
1880 
1881 bool SPIRVInstructionSelector::selectLog10(Register ResVReg,
1882                                            const SPIRVType *ResType,
1883                                            MachineInstr &I) const {
1884   if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
1885     return selectExtInst(ResVReg, ResType, I, CL::log10);
1886   }
1887 
1888   // There is no log10 instruction in the GLSL Extended Instruction set, so it
1889   // is implemented as:
1890   // log10(x) = log2(x) * (1 / log2(10))
1891   //          = log2(x) * 0.30103
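       // Schematically (illustrative IDs):
       //   %log2 = OpExtInst %float %glsl_std_450 Log2 %x
       //   %res  = OpFMul %log2 %c_0_30103   ; OpVectorTimesScalar for vectors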
1892 
1893   MachineIRBuilder MIRBuilder(I);
1894   MachineBasicBlock &BB = *I.getParent();
1895 
1896   // Build log2(x).
1897   Register VarReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1898   bool Result =
1899       BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
1900           .addDef(VarReg)
1901           .addUse(GR.getSPIRVTypeID(ResType))
1902           .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
1903           .addImm(GL::Log2)
1904           .add(I.getOperand(1))
1905           .constrainAllUses(TII, TRI, RBI);
1906 
1907   // Build 0.30103.
1908   assert(ResType->getOpcode() == SPIRV::OpTypeVector ||
1909          ResType->getOpcode() == SPIRV::OpTypeFloat);
1910   // TODO: Add matrix implementation once supported by the HLSL frontend.
1911   const SPIRVType *SpirvScalarType =
1912       ResType->getOpcode() == SPIRV::OpTypeVector
1913           ? GR.getSPIRVTypeForVReg(ResType->getOperand(1).getReg())
1914           : ResType;
1915   Register ScaleReg =
1916       GR.buildConstantFP(APFloat(0.30103f), MIRBuilder, SpirvScalarType);
1917 
1918   // Multiply log2(x) by 0.30103 to get the log10(x) result.
1919   auto Opcode = ResType->getOpcode() == SPIRV::OpTypeVector
1920                     ? SPIRV::OpVectorTimesScalar
1921                     : SPIRV::OpFMulS;
1922   Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
1923                 .addDef(ResVReg)
1924                 .addUse(GR.getSPIRVTypeID(ResType))
1925                 .addUse(VarReg)
1926                 .addUse(ScaleReg)
1927                 .constrainAllUses(TII, TRI, RBI);
1928 
1929   return Result;
1930 }
1931 
1932 bool SPIRVInstructionSelector::selectSpvThreadId(Register ResVReg,
1933                                                  const SPIRVType *ResType,
1934                                                  MachineInstr &I) const {
1935   // DX intrinsic: @llvm.dx.thread.id(i32)
1936   // ID  Name      Description
1937   // 93  ThreadId  reads the thread ID
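       // The intrinsic is lowered by loading the GlobalInvocationId builtin
       // variable (a v3uint in the Input storage class) and extracting the
       // component selected by the constant operand, roughly:
       //   %vec = OpLoad %v3uint %GlobalInvocationId
       //   %res = OpCompositeExtract %uint %vec <idx>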
1938 
1939   MachineIRBuilder MIRBuilder(I);
1940   const SPIRVType *U32Type = GR.getOrCreateSPIRVIntegerType(32, MIRBuilder);
1941   const SPIRVType *Vec3Ty =
1942       GR.getOrCreateSPIRVVectorType(U32Type, 3, MIRBuilder);
1943   const SPIRVType *PtrType = GR.getOrCreateSPIRVPointerType(
1944       Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);
1945 
1946   // Create a new register for the GlobalInvocationID builtin variable.
1947   Register NewRegister =
1948       MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::IDRegClass);
1949   MIRBuilder.getMRI()->setType(NewRegister, LLT::pointer(0, 32));
1950   GR.assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());
1951 
1952   // Build GlobalInvocationID global variable with the necessary decorations.
1953   Register Variable = GR.buildGlobalVariable(
1954       NewRegister, PtrType,
1955       getLinkStringForBuiltIn(SPIRV::BuiltIn::GlobalInvocationId), nullptr,
1956       SPIRV::StorageClass::Input, nullptr, true, true,
1957       SPIRV::LinkageType::Import, MIRBuilder, false);
1958 
1959   // Create new register for loading value.
1960   // Create a new register for the loaded value.
1961   Register LoadedRegister = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1962   MIRBuilder.getMRI()->setType(LoadedRegister, LLT::pointer(0, 32));
1963   GR.assignSPIRVTypeToVReg(Vec3Ty, LoadedRegister, MIRBuilder.getMF());
1964 
1965   // Load v3uint value from the global variable.
1966   BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
1967       .addDef(LoadedRegister)
1968       .addUse(GR.getSPIRVTypeID(Vec3Ty))
1969       .addUse(Variable);
1970 
1971   // Get the Thread ID index. The operand is expected to be a constant
1972   // immediate value, wrapped in a type assignment.
1973   assert(I.getOperand(2).isReg());
1974   Register ThreadIdReg = I.getOperand(2).getReg();
1975   SPIRVType *ConstTy = this->MRI->getVRegDef(ThreadIdReg);
1976   assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
1977          ConstTy->getOperand(1).isReg());
1978   Register ConstReg = ConstTy->getOperand(1).getReg();
1979   const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
1980   assert(Const && Const->getOpcode() == TargetOpcode::G_CONSTANT);
1981   const llvm::APInt &Val = Const->getOperand(1).getCImm()->getValue();
1982   const uint32_t ThreadId = Val.getZExtValue();
1983 
1984   // Extract the thread ID from the loaded vector value.
1985   MachineBasicBlock &BB = *I.getParent();
1986   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
1987                  .addDef(ResVReg)
1988                  .addUse(GR.getSPIRVTypeID(ResType))
1989                  .addUse(LoadedRegister)
1990                  .addImm(ThreadId);
1991   return MIB.constrainAllUses(TII, TRI, RBI);
1992 }
1993 
1994 namespace llvm {
1995 InstructionSelector *
1996 createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
1997                                const SPIRVSubtarget &Subtarget,
1998                                const RegisterBankInfo &RBI) {
1999   return new SPIRVInstructionSelector(TM, Subtarget, RBI);
2000 }
2001 } // namespace llvm
2002