//===- SPIRVInstructionSelector.cpp ------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the targeting of the InstructionSelector class for
// SPIRV.
// TODO: This should be generated by TableGen.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/SPIRVBaseInfo.h"
#include "MCTargetDesc/SPIRVMCTargetDesc.h"
#include "SPIRV.h"
#include "SPIRVGlobalRegistry.h"
#include "SPIRVInstrInfo.h"
#include "SPIRVRegisterBankInfo.h"
#include "SPIRVRegisterInfo.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/Support/Debug.h"

namespace llvm {

class SPIRVMachineModuleInfo : public MachineModuleInfoImpl {
public:
  SyncScope::ID Work_ItemSSID;
  SyncScope::ID WorkGroupSSID;
  SyncScope::ID DeviceSSID;
  SyncScope::ID AllSVMDevicesSSID;
  SyncScope::ID SubGroupSSID;

  SPIRVMachineModuleInfo(const MachineModuleInfo &MMI) {
    LLVMContext &CTX = MMI.getModule()->getContext();
    Work_ItemSSID = CTX.getOrInsertSyncScopeID("work_item");
    WorkGroupSSID = CTX.getOrInsertSyncScopeID("workgroup");
    DeviceSSID = CTX.getOrInsertSyncScopeID("device");
    AllSVMDevicesSSID = CTX.getOrInsertSyncScopeID("all_svm_devices");
    SubGroupSSID = CTX.getOrInsertSyncScopeID("sub_group");
  }
};

} // end namespace llvm

#define DEBUG_TYPE "spirv-isel"

using namespace llvm;
namespace CL = SPIRV::OpenCLExtInst;
namespace GL = SPIRV::GLSLExtInst;

using ExtInstList =
    std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class SPIRVInstructionSelector : public InstructionSelector {
  const SPIRVSubtarget &STI;
  const SPIRVInstrInfo &TII;
  const SPIRVRegisterInfo &TRI;
  const RegisterBankInfo &RBI;
  SPIRVGlobalRegistry &GR;
  MachineRegisterInfo *MRI;
  SPIRVMachineModuleInfo *MMI = nullptr;
  /// We need to keep track of the number we give to anonymous global values to
  /// generate the same name every time it is needed.
  mutable DenseMap<const GlobalValue *, unsigned> UnnamedGlobalIDs;

public:
  SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                           const SPIRVSubtarget &ST,
                           const RegisterBankInfo &RBI);
  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override;
  // Common selection code. Instruction-specific selection occurs in spvSelect.
  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

private:
  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // All instruction-specific selection that didn't happen in "select()".
  // Basically a large switch/case delegating to all the other select methods.
  bool spvSelect(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectGlobalValue(Register ResVReg, MachineInstr &I,
                         const MachineInstr *Init = nullptr) const;

  bool selectUnOpWithSrc(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I, Register SrcReg,
                         unsigned Opcode) const;
  bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  unsigned Opcode) const;

  bool selectBitcast(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;

  bool selectLoad(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectStore(MachineInstr &I) const;

  bool selectStackSave(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectStackRestore(MachineInstr &I) const;

  bool selectMemOperation(Register ResVReg, MachineInstr &I) const;

  bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I, unsigned NewOpcode,
                       unsigned NegateOpcode = 0) const;

  bool selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectFence(MachineInstr &I) const;

  bool selectAddrSpaceCast(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectAnyOrAll(Register ResVReg, const SPIRVType *ResType,
                      MachineInstr &I, unsigned OpType) const;

  bool selectAll(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectAny(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectBitreverse(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectConstVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;
  bool selectSplatVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectCmp(Register ResVReg, const SPIRVType *ResType,
                 unsigned comparisonOpcode, MachineInstr &I) const;

  bool selectICmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectFCmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  bool selectFmix(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  void renderImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                   int OpIdx) const;
  void renderFImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                    int OpIdx) const;

  bool selectConst(Register ResVReg, const SPIRVType *ResType, const APInt &Imm,
                   MachineInstr &I) const;

  bool selectSelect(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                    bool IsSigned) const;
  bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  bool IsSigned, unsigned Opcode) const;
  bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                 bool IsSigned) const;

  bool selectTrunc(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectIntToBool(Register IntReg, Register ResVReg, MachineInstr &I,
                       const SPIRVType *intTy, const SPIRVType *boolTy) const;

  bool selectOpUndef(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;
  bool selectFreeze(Register ResVReg, const SPIRVType *ResType,
                    MachineInstr &I) const;
  bool selectIntrinsic(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractVal(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertVal(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractElt(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertElt(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectGEP(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectFrameIndex(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectAllocaArray(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectBranch(MachineInstr &I) const;
  bool selectBranchCond(MachineInstr &I) const;

  bool selectPhi(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst,
                     GL::GLSLExtInst GLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, const ExtInstList &ExtInsts) const;

  bool selectLog10(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectSpvThreadId(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectUnmergeValues(MachineInstr &I) const;

  Register buildI32Constant(uint32_t Val, MachineInstr &I,
                            const SPIRVType *ResType = nullptr) const;

  Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildZerosValF(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildOnesVal(bool AllOnes, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool wrapIntoSpecConstantOp(MachineInstr &I,
                              SmallVector<Register> &CompositeArgs) const;
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                                                   const SPIRVSubtarget &ST,
                                                   const RegisterBankInfo &RBI)
    : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                       CodeGenCoverage *CoverageInfo,
                                       ProfileSummaryInfo *PSI,
                                       BlockFrequencyInfo *BFI) {
  MMI = &MF.getMMI().getObjFileInfo<SPIRVMachineModuleInfo>();
  MRI = &MF.getRegInfo();
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}

static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI);

// Defined in SPIRVLegalizerInfo.cpp.
extern bool isTypeFoldingSupported(unsigned Opcode);

bool SPIRVInstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  unsigned Opcode = I.getOpcode();
  // If it's not a GMIR instruction, we've selected it already.
  if (!isPreISelGenericOpcode(Opcode)) {
    if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed any more.
      Register DstReg = I.getOperand(0).getReg();
      Register SrcReg = I.getOperand(1).getReg();
      auto *Def = MRI->getVRegDef(SrcReg);
      if (isTypeFoldingSupported(Def->getOpcode())) {
        if (MRI->getType(DstReg).isPointer())
          MRI->setType(DstReg, LLT::scalar(32));
        bool Res = selectImpl(I, *CoverageInfo);
        assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
        if (Res)
          return Res;
      }
      MRI->replaceRegWith(SrcReg, DstReg);
      I.removeFromParent();
      return true;
    } else if (I.getNumDefs() == 1) {
      // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(I.getOperand(0).getReg(), LLT::scalar(32));
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
    return false;
  }

  // Common code for getting the return reg+type and removing the selected
  // instruction from its parent occurs here. Instr-specific selection happens
  // in spvSelect().
  bool HasDefs = I.getNumDefs() > 0;
  Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
  assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  if (spvSelect(ResVReg, ResType, I)) {
    if (HasDefs) // Make all vregs 32 bits (for SPIR-V IDs).
      for (unsigned i = 0; i < I.getNumDefs(); ++i)
        MRI->setType(I.getOperand(i).getReg(), LLT::scalar(32));
    I.removeFromParent();
    return true;
  }
  return false;
}

bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const unsigned Opcode = I.getOpcode();
  if (isTypeFoldingSupported(Opcode) && Opcode != TargetOpcode::G_CONSTANT)
    return selectImpl(I, *CoverageInfo);
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);
  case TargetOpcode::G_FREEZE:
    return selectFreeze(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectConstVector(ResVReg, ResType, I);
  case TargetOpcode::G_SPLAT_VECTOR:
    return selectSplatVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMSET:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
  case TargetOpcode::G_SMIN:
    return selectExtInst(ResVReg, ResType, I, CL::s_min, GL::SMin);
  case TargetOpcode::G_UMIN:
    return selectExtInst(ResVReg, ResType, I, CL::u_min, GL::UMin);

  case TargetOpcode::G_SMAX:
    return selectExtInst(ResVReg, ResType, I, CL::s_max, GL::SMax);
  case TargetOpcode::G_UMAX:
    return selectExtInst(ResVReg, ResType, I, CL::u_max, GL::UMax);

  case TargetOpcode::G_FMA:
    return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma);

  case TargetOpcode::G_FPOW:
    return selectExtInst(ResVReg, ResType, I, CL::pow, GL::Pow);
  case TargetOpcode::G_FPOWI:
    return selectExtInst(ResVReg, ResType, I, CL::pown);

  case TargetOpcode::G_FEXP:
    return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
  case TargetOpcode::G_FEXP2:
    return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);

  case TargetOpcode::G_FLOG:
    return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
  case TargetOpcode::G_FLOG2:
    return selectExtInst(ResVReg, ResType, I, CL::log2, GL::Log2);
  case TargetOpcode::G_FLOG10:
    return selectLog10(ResVReg, ResType, I);

  case TargetOpcode::G_FABS:
    return selectExtInst(ResVReg, ResType, I, CL::fabs, GL::FAbs);
  case TargetOpcode::G_ABS:
    return selectExtInst(ResVReg, ResType, I, CL::s_abs, GL::SAbs);

  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmin, GL::NMin);
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMAXIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmax, GL::NMax);

  case TargetOpcode::G_FCOPYSIGN:
    return selectExtInst(ResVReg, ResType, I, CL::copysign);

  case TargetOpcode::G_FCEIL:
    return selectExtInst(ResVReg, ResType, I, CL::ceil, GL::Ceil);
  case TargetOpcode::G_FFLOOR:
    return selectExtInst(ResVReg, ResType, I, CL::floor, GL::Floor);

  case TargetOpcode::G_FCOS:
    return selectExtInst(ResVReg, ResType, I, CL::cos, GL::Cos);
  case TargetOpcode::G_FSIN:
    return selectExtInst(ResVReg, ResType, I, CL::sin, GL::Sin);
  case TargetOpcode::G_FTAN:
    return selectExtInst(ResVReg, ResType, I, CL::tan, GL::Tan);

  case TargetOpcode::G_FSQRT:
    return selectExtInst(ResVReg, ResType, I, CL::sqrt, GL::Sqrt);

  case TargetOpcode::G_CTTZ:
  case TargetOpcode::G_CTTZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::ctz);
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTLZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::clz);

  case TargetOpcode::G_INTRINSIC_ROUND:
    return selectExtInst(ResVReg, ResType, I, CL::round, GL::Round);
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return selectExtInst(ResVReg, ResType, I, CL::trunc, GL::Trunc);
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FNEARBYINT:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);

  case TargetOpcode::G_SMULH:
    return selectExtInst(ResVReg, ResType, I, CL::s_mul_hi);
  case TargetOpcode::G_UMULH:
    return selectExtInst(ResVReg, ResType, I, CL::u_mul_hi);

  case TargetOpcode::G_SADDSAT:
    return selectExtInst(ResVReg, ResType, I, CL::s_add_sat);
  case TargetOpcode::G_UADDSAT:
    return selectExtInst(ResVReg, ResType, I, CL::u_add_sat);
  case TargetOpcode::G_SSUBSAT:
    return selectExtInst(ResVReg, ResType, I, CL::s_sub_sat);
  case TargetOpcode::G_USUBSAT:
    return selectExtInst(ResVReg, ResType, I, CL::u_sub_sat);

  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectBitcast(ResVReg, ResType, I);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);
  case TargetOpcode::G_PTR_ADD: {
    // Currently, we get G_PTR_ADD only as a result of translating global
    // variables initialized with constant expressions like GV + Const
    // (see test opencl/basic/progvar_prog_scope_init.ll).
    // TODO: extend the handler once we have other cases.
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    Register GV = I.getOperand(1).getReg();
    MachineRegisterInfo::def_instr_iterator II = MRI->def_instr_begin(GV);
    (void)II;
    assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
            (*II).getOpcode() == TargetOpcode::COPY ||
            (*II).getOpcode() == SPIRV::OpVariable) &&
           isImm(I.getOperand(2), MRI));
    Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addImm(static_cast<uint32_t>(
                       SPIRV::Opcode::InBoundsPtrAccessChain))
                   .addUse(GV)
                   .addUse(Idx)
                   .addUse(I.getOperand(2).getReg());
    return MIB.constrainAllUses(TII, TRI, RBI);
  }

  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_ATOMICRMW_FADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT);
  case TargetOpcode::G_ATOMICRMW_FSUB:
    // Translate G_ATOMICRMW_FSUB to OpAtomicFAddEXT with a negated value
    // operand.
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT,
                           SPIRV::OpFNegate);
  case TargetOpcode::G_ATOMICRMW_FMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMinEXT);
  case TargetOpcode::G_ATOMICRMW_FMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMaxEXT);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  case TargetOpcode::G_STACKSAVE:
    return selectStackSave(ResVReg, ResType, I);
  case TargetOpcode::G_STACKRESTORE:
    return selectStackRestore(I);

  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I);

  default:
    return false;
  }
}

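// The selectExtInst overloads below pick the first listed extended-instruction
// set that the subtarget can use (OpenCL.std and/or GLSL.std.450) and emit a
// single OpExtInst with the matching opcode.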
bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst) const {
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst,
                                             GL::GLSLExtInst GLInst) const {
  ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
                          {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
  return selectExtInst(ResVReg, ResType, I, ExtInsts);
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             const ExtInstList &Insts) const {
  for (const auto &Ex : Insts) {
    SPIRV::InstructionSet::InstructionSet Set = Ex.first;
    uint32_t Opcode = Ex.second;
    if (STI.canUseExtInstSet(Set)) {
      MachineBasicBlock &BB = *I.getParent();
      auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
                     .addDef(ResVReg)
                     .addUse(GR.getSPIRVTypeID(ResType))
                     .addImm(static_cast<uint32_t>(Set))
                     .addImm(Opcode);
      const unsigned NumOps = I.getNumOperands();
      for (unsigned i = 1; i < NumOps; ++i)
        MIB.add(I.getOperand(i));
      return MIB.constrainAllUses(TII, TRI, RBI);
    }
  }
  return false;
}

bool SPIRVInstructionSelector::selectUnOpWithSrc(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I,
                                                 Register SrcReg,
                                                 unsigned Opcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I,
                                          unsigned Opcode) const {
  if (STI.isOpenCLEnv() && I.getOperand(1).isReg()) {
    Register SrcReg = I.getOperand(1).getReg();
    bool IsGV = false;
    for (MachineRegisterInfo::def_instr_iterator DefIt =
             MRI->def_instr_begin(SrcReg);
         DefIt != MRI->def_instr_end(); DefIt = std::next(DefIt)) {
      if ((*DefIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
        IsGV = true;
        break;
      }
    }
    if (IsGV) {
      uint32_t SpecOpcode = 0;
      switch (Opcode) {
      case SPIRV::OpConvertPtrToU:
        SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertPtrToU);
        break;
      case SPIRV::OpConvertUToPtr:
        SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertUToPtr);
        break;
      }
      if (SpecOpcode)
        return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                       TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(SpecOpcode)
            .addUse(SrcReg)
            .constrainAllUses(TII, TRI, RBI);
    }
  }
  return selectUnOpWithSrc(ResVReg, ResType, I, I.getOperand(1).getReg(),
                           Opcode);
}

bool SPIRVInstructionSelector::selectBitcast(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  Register OpReg = I.getOperand(1).getReg();
  SPIRVType *OpType = OpReg.isValid() ? GR.getSPIRVTypeForVReg(OpReg) : nullptr;
  if (!GR.isBitcastCompatible(ResType, OpType))
    report_fatal_error("incompatible result and operand types in a bitcast");
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}

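// Map a sync-scope ID onto the nearest SPIR-V scope. For example, an LLVM
// `fence syncscope("workgroup") acquire` is lowered with Workgroup scope here.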
static SPIRV::Scope::Scope getScope(SyncScope::ID Ord,
                                    SPIRVMachineModuleInfo *MMI) {
  if (Ord == SyncScope::SingleThread || Ord == MMI->Work_ItemSSID)
    return SPIRV::Scope::Invocation;
  else if (Ord == SyncScope::System || Ord == MMI->DeviceSSID)
    return SPIRV::Scope::Device;
  else if (Ord == MMI->WorkGroupSSID)
    return SPIRV::Scope::Workgroup;
  else if (Ord == MMI->AllSVMDevicesSSID)
    return SPIRV::Scope::CrossDevice;
  else if (Ord == MMI->SubGroupSSID)
    return SPIRV::Scope::Subgroup;
  else
    // The OpenCL approach is: "The functions that do not have memory_scope
    // argument have the same semantics as the corresponding functions with
    // the memory_scope argument set to memory_scope_device." See
    // https://registry.khronos.org/OpenCL/specs/3.0-unified/html/OpenCL_C.html#atomic-functions
    // In our case, if the scope is unknown, we assume the SPIR-V code is to
    // be consumed in an OpenCL environment and take the same approach,
    // setting the scope to memory_scope_device.
    return SPIRV::Scope::Device;
}

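// Derive a SPIR-V MemoryOperand bitmask (plus a trailing alignment literal
// when the Aligned bit is set) from a MachineMemOperand and append it to MIB.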
static void addMemoryOperands(MachineMemOperand *MemOp,
                              MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    MIB.addImm(SpvMemOp);
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
      MIB.addImm(MemOp->getAlign().value());
  }
}

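// Overload used when no MachineMemOperand is attached and the memory-operand
// flags arrive as an immediate instead (see the intrinsic paths in
// selectLoad/selectStore below).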
static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (Flags & MachineMemOperand::Flags::MOVolatile)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (Flags & MachineMemOperand::Flags::MONonTemporal)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
    MIB.addImm(SpvMemOp);
}

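// Lower G_LOAD (or its intrinsic form, where an extra intrinsic-ID operand
// shifts the pointer operand by OpOffset) to OpLoad.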
bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(Ptr);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
                 .addUse(Ptr)
                 .addUse(StoreVal);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackSave(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stacksave intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSaveMemoryINTEL))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stackrestore intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  if (!I.getOperand(0).isReg())
    return false;
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpRestoreMemoryINTEL))
      .addUse(I.getOperand(0).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

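// Lower G_MEMCPY/G_MEMMOVE/G_MEMSET to OpCopyMemorySized. For G_MEMSET the
// fill pattern is first materialized as a constant array in UniformConstant
// storage, and the copy reads from a bitcast pointer to that array.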
bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Register SrcReg = I.getOperand(1).getReg();
  if (I.getOpcode() == TargetOpcode::G_MEMSET) {
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI);
    unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI);
    SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num, I, TII);
    Register Const = GR.getOrCreateConsIntArray(Val, I, ArrTy, TII);
    SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
        ArrTy, I, TII, SPIRV::StorageClass::UniformConstant);
    // TODO: check if we have such GV, add init, use buildGlobalVariable.
    Function &CurFunction = GR.CurMF->getFunction();
    Type *LLVMArrTy =
        ArrayType::get(IntegerType::get(CurFunction.getContext(), 8), Num);
    // Module takes ownership of the global var.
    GlobalVariable *GV = new GlobalVariable(*CurFunction.getParent(), LLVMArrTy,
                                            true, GlobalValue::InternalLinkage,
                                            Constant::getNullValue(LLVMArrTy));
    Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    GR.add(GV, GR.CurMF, VarReg);

    buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {});
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
        .addDef(VarReg)
        .addUse(GR.getSPIRVTypeID(VarTy))
        .addImm(SPIRV::StorageClass::UniformConstant)
        .addUse(Const)
        .constrainAllUses(TII, TRI, RBI);
    SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
        ValTy, I, TII, SPIRV::StorageClass::UniformConstant);
    SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    selectUnOpWithSrc(SrcReg, SourceTy, I, VarReg, SPIRV::OpBitcast);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addUse(I.getOperand(0).getReg())
                 .addUse(SrcReg)
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
    addMemoryOperands(*I.memoperands_begin(), MIB);
  bool Result = MIB.constrainAllUses(TII, TRI, RBI);
  if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg())
    BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
        .addUse(MIB->getOperand(0).getReg());
  return Result;
}

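// Lower a G_ATOMICRMW_* instruction to the given SPIR-V atomic opcode,
// optionally negating the value operand first (used to express FSub via
// OpAtomicFAddEXT).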
bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode,
                                               unsigned NegateOpcode) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope =
      static_cast<uint32_t>(getScope(MemOp->getSyncScopeID(), MMI));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(1).getReg();
  // TODO: Changed to match how this is implemented in the translator; see
  // test/atomicrmw.ll.
  // auto ScSem =
  // getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr));
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);

  bool Result = false;
  Register ValueReg = I.getOperand(2).getReg();
  if (NegateOpcode != 0) {
    // A translation with a negated value operand was requested.
    Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    Result |= selectUnOpWithSrc(TmpReg, ResType, I, ValueReg, NegateOpcode);
    ValueReg = TmpReg;
  }

  Result |= BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(Ptr)
                .addUse(ScopeReg)
                .addUse(MemSemReg)
                .addUse(ValueReg)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}

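// Lower G_UNMERGE_VALUES of a vector into one OpCompositeExtract per
// destination, assigning the scalar element type to any def that was not
// given a SPIR-V type earlier.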
bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
  unsigned ArgI = I.getNumOperands() - 1;
  Register SrcReg =
      I.getOperand(ArgI).isReg() ? I.getOperand(ArgI).getReg() : Register(0);
  SPIRVType *DefType =
      SrcReg.isValid() ? GR.getSPIRVTypeForVReg(SrcReg) : nullptr;
  if (!DefType || DefType->getOpcode() != SPIRV::OpTypeVector)
    report_fatal_error(
        "cannot select G_UNMERGE_VALUES with a non-vector argument");

  SPIRVType *ScalarType =
      GR.getSPIRVTypeForVReg(DefType->getOperand(1).getReg());
  MachineBasicBlock &BB = *I.getParent();
  bool Res = false;
  for (unsigned i = 0; i < I.getNumDefs(); ++i) {
    Register ResVReg = I.getOperand(i).getReg();
    SPIRVType *ResType = GR.getSPIRVTypeForVReg(ResVReg);
    if (!ResType) {
      // There were no "assign type" actions, so let's fix this now.
      ResType = ScalarType;
      MRI->setRegClass(ResVReg, &SPIRV::IDRegClass);
      MRI->setType(ResVReg, LLT::scalar(GR.getScalarOrVectorBitWidth(ResType)));
      GR.assignSPIRVTypeToVReg(ResType, ResVReg, *GR.CurMF);
    }
    auto MIB =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(SrcReg)
            .addImm(static_cast<int64_t>(i));
    Res |= MIB.constrainAllUses(TII, TRI, RBI);
  }
  return Res;
}

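// Lower G_FENCE to OpMemoryBarrier, turning the ordering and sync-scope
// immediates into memory-semantics and scope constant operands.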
bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
  AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem, I);
  SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
  uint32_t Scope = static_cast<uint32_t>(getScope(Ord, MMI));
  Register ScopeReg = buildI32Constant(Scope, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .constrainAllUses(TII, TRI, RBI);
}

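// G_ATOMIC_CMPXCHG produces a {value, success} pair, whereas SPIR-V's
// OpAtomicCompareExchange returns only the original value, so the pair is
// rebuilt explicitly. Roughly (register names illustrative only):
//   %orig = OpAtomicCompareExchange %ValTy %ptr %scope %eq %neq %val %cmp
//   %ok   = OpIEqual %bool %orig %cmp
//   %tmp  = OpCompositeInsert %ResTy %orig %undef 0
//   %res  = OpCompositeInsert %ResTy %ok %tmp 1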
bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  Register ScopeReg;
  Register MemSemEqReg;
  Register MemSemNeqReg;
  Register Ptr = I.getOperand(2).getReg();
  if (!isa<GIntrinsic>(I)) {
    assert(I.hasOneMemOperand());
    const MachineMemOperand *MemOp = *I.memoperands_begin();
    unsigned Scope =
        static_cast<uint32_t>(getScope(MemOp->getSyncScopeID(), MMI));
    ScopeReg = buildI32Constant(Scope, I);

    unsigned ScSem = static_cast<uint32_t>(
        getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
    AtomicOrdering AO = MemOp->getSuccessOrdering();
    unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
    MemSemEqReg = buildI32Constant(MemSemEq, I);
    AtomicOrdering FO = MemOp->getFailureOrdering();
    unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
    MemSemNeqReg =
        MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
  } else {
    ScopeReg = I.getOperand(5).getReg();
    MemSemEqReg = I.getOperand(6).getReg();
    MemSemNeqReg = I.getOperand(7).getReg();
  }

  Register Cmp = I.getOperand(3).getReg();
  Register Val = I.getOperand(4).getReg();
  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  Register ACmpRes = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  const DebugLoc &DL = I.getDebugLoc();
  bool Result =
      BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
          .addDef(ACmpRes)
          .addUse(GR.getSPIRVTypeID(SpvValTy))
          .addUse(Ptr)
          .addUse(ScopeReg)
          .addUse(MemSemEqReg)
          .addUse(MemSemNeqReg)
          .addUse(Val)
          .addUse(Cmp)
          .constrainAllUses(TII, TRI, RBI);
  Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
                .addDef(CmpSuccReg)
                .addUse(GR.getSPIRVTypeID(BoolTy))
                .addUse(ACmpRes)
                .addUse(Cmp)
                .constrainAllUses(TII, TRI, RBI);
  Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(TmpReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(ACmpRes)
                .addUse(GR.getOrCreateUndef(I, ResType, TII))
                .addImm(0)
                .constrainAllUses(TII, TRI, RBI);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(CmpSuccReg)
                .addUse(TmpReg)
                .addImm(1)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}

static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Workgroup:
  case SPIRV::StorageClass::CrossWorkgroup:
  case SPIRV::StorageClass::Function:
    return true;
  default:
    return false;
  }
}

static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::DeviceOnlyINTEL:
  case SPIRV::StorageClass::HostOnlyINTEL:
    return true;
  default:
    return false;
  }
}

// In SPIR-V, address space casting can only happen to and from the Generic
// storage class. We can also only cast Workgroup, CrossWorkgroup, or Function
// pointers to and from Generic pointers. As such, we can convert e.g. from
// Workgroup to Function by going via a Generic pointer as an intermediary. All
// other combinations can only be done by a bitcast, and are probably not safe.
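// For example, a Workgroup-to-Function cast is emitted as (illustrative):
//   %tmp = OpPtrCastToGeneric %GenericPtrTy %src
//   %res = OpGenericCastToPtr %ResTy %tmp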
bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  // If the AddrSpaceCast has a single user that is an OpConstantComposite,
  // an OpVariable, or an spv_init_global intrinsic, select OpSpecConstantOp
  // instead.
  auto UIs = MRI->use_instructions(ResVReg);
  if (!UIs.empty() && ++UIs.begin() == UIs.end() &&
      (UIs.begin()->getOpcode() == SPIRV::OpConstantComposite ||
       UIs.begin()->getOpcode() == SPIRV::OpVariable ||
       isSpvIntrinsic(*UIs.begin(), Intrinsic::spv_init_global))) {
    Register NewReg = I.getOperand(1).getReg();
    MachineBasicBlock &BB = *I.getParent();
    SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII,
                                             SPIRV::StorageClass::Generic);
    bool Result =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric))
            .addUse(NewReg)
            .constrainAllUses(TII, TRI, RBI);
    return Result;
  }
  Register SrcPtr = I.getOperand(1).getReg();
  SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
  SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);
  SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResVReg);

  // Don't generate a cast between identical storage classes.
  if (SrcSC == DstSC)
    return true;

  // Casting from an eligible pointer to Generic.
  if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  // Casting from Generic to an eligible pointer.
  if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
  // Casting between 2 eligible pointers using Generic as an intermediary.
  if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
    Register Tmp = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
        SrcPtrTy, I, TII, SPIRV::StorageClass::Generic);
    MachineBasicBlock &BB = *I.getParent();
    const DebugLoc &DL = I.getDebugLoc();
    bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
                       .addDef(Tmp)
                       .addUse(GR.getSPIRVTypeID(GenericPtrTy))
                       .addUse(SrcPtr)
                       .constrainAllUses(TII, TRI, RBI);
    return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
                          .addDef(ResVReg)
                          .addUse(GR.getSPIRVTypeID(ResType))
                          .addUse(Tmp)
                          .constrainAllUses(TII, TRI, RBI);
  }

  // Check if instructions from the SPV_INTEL_usm_storage_classes extension
  // may be applied.
  if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::CrossWorkgroup)
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpPtrCastToCrossWorkgroupINTEL);
  if (SrcSC == SPIRV::StorageClass::CrossWorkgroup && isUSMStorageClass(DstSC))
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpCrossWorkgroupCastToPtrINTEL);

  // TODO: Should this case just be disallowed completely?
  // We're casting 2 other arbitrary address spaces, so have to bitcast.
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}

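// Map an LLVM floating-point predicate onto the corresponding SPIR-V
// (un)ordered comparison opcode.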
static unsigned getFCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::FCMP_OEQ:
    return SPIRV::OpFOrdEqual;
  case CmpInst::FCMP_OGE:
    return SPIRV::OpFOrdGreaterThanEqual;
  case CmpInst::FCMP_OGT:
    return SPIRV::OpFOrdGreaterThan;
  case CmpInst::FCMP_OLE:
    return SPIRV::OpFOrdLessThanEqual;
  case CmpInst::FCMP_OLT:
    return SPIRV::OpFOrdLessThan;
  case CmpInst::FCMP_ONE:
    return SPIRV::OpFOrdNotEqual;
  case CmpInst::FCMP_ORD:
    return SPIRV::OpOrdered;
  case CmpInst::FCMP_UEQ:
    return SPIRV::OpFUnordEqual;
  case CmpInst::FCMP_UGE:
    return SPIRV::OpFUnordGreaterThanEqual;
  case CmpInst::FCMP_UGT:
    return SPIRV::OpFUnordGreaterThan;
  case CmpInst::FCMP_ULE:
    return SPIRV::OpFUnordLessThanEqual;
  case CmpInst::FCMP_ULT:
    return SPIRV::OpFUnordLessThan;
  case CmpInst::FCMP_UNE:
    return SPIRV::OpFUnordNotEqual;
  case CmpInst::FCMP_UNO:
    return SPIRV::OpUnordered;
  default:
    llvm_unreachable("Unknown predicate type for FCmp");
  }
}

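// Map an LLVM integer predicate onto the corresponding SPIR-V comparison
// opcode.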
static unsigned getICmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpIEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpINotEqual;
  case CmpInst::ICMP_SGE:
    return SPIRV::OpSGreaterThanEqual;
  case CmpInst::ICMP_SGT:
    return SPIRV::OpSGreaterThan;
  case CmpInst::ICMP_SLE:
    return SPIRV::OpSLessThanEqual;
  case CmpInst::ICMP_SLT:
    return SPIRV::OpSLessThan;
  case CmpInst::ICMP_UGE:
    return SPIRV::OpUGreaterThanEqual;
  case CmpInst::ICMP_UGT:
    return SPIRV::OpUGreaterThan;
  case CmpInst::ICMP_ULE:
    return SPIRV::OpULessThanEqual;
  case CmpInst::ICMP_ULT:
    return SPIRV::OpULessThan;
  default:
    llvm_unreachable("Unknown predicate type for ICmp");
  }
}

static unsigned getPtrCmpOpcode(unsigned Pred) {
  switch (static_cast<CmpInst::Predicate>(Pred)) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpPtrEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpPtrNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for pointer comparison");
  }
}

// Return the logical operation, or abort if none exists.
static unsigned getBoolCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpLogicalEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpLogicalNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for Bool comparison");
  }
}

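// Common lowering for any()/all() style reductions: scalar booleans pass
// through as a COPY; anything else is compared not-equal against zero, and
// vector results are then reduced with OpAny/OpAll.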
bool SPIRVInstructionSelector::selectAnyOrAll(Register ResVReg,
                                              const SPIRVType *ResType,
                                              MachineInstr &I,
                                              unsigned OpAnyOrAll) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();
  Register InputRegister = I.getOperand(2).getReg();
  SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);

  if (!InputType)
    report_fatal_error("Input Type could not be determined.");

  bool IsBoolTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeBool);
  bool IsVectorTy = InputType->getOpcode() == SPIRV::OpTypeVector;
  if (IsBoolTy && !IsVectorTy) {
    assert(ResVReg == I.getOperand(0).getReg());
    return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                   TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(InputRegister)
        .constrainAllUses(TII, TRI, RBI);
  }

  bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
  unsigned SpirvNotEqualId =
      IsFloatTy ? SPIRV::OpFOrdNotEqual : SPIRV::OpINotEqual;
  SPIRVType *SpvBoolScalarTy = GR.getOrCreateSPIRVBoolType(I, TII);
  SPIRVType *SpvBoolTy = SpvBoolScalarTy;
  Register NotEqualReg = ResVReg;

  if (IsVectorTy) {
    NotEqualReg = IsBoolTy ? InputRegister
                           : MRI->createVirtualRegister(&SPIRV::IDRegClass);
    const unsigned NumElts = InputType->getOperand(2).getImm();
    SpvBoolTy = GR.getOrCreateSPIRVVectorType(SpvBoolTy, NumElts, I, TII);
  }

  if (!IsBoolTy) {
    Register ConstZeroReg =
        IsFloatTy ? buildZerosValF(InputType, I) : buildZerosVal(InputType, I);

    BuildMI(BB, I, I.getDebugLoc(), TII.get(SpirvNotEqualId))
        .addDef(NotEqualReg)
        .addUse(GR.getSPIRVTypeID(SpvBoolTy))
        .addUse(InputRegister)
        .addUse(ConstZeroReg)
        .constrainAllUses(TII, TRI, RBI);
  }

  if (!IsVectorTy)
    return true;

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(OpAnyOrAll))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(SpvBoolScalarTy))
      .addUse(NotEqualReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectAll(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAll);
}

bool SPIRVInstructionSelector::selectAny(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAny);
}

1290 bool SPIRVInstructionSelector::selectFmix(Register ResVReg,
1291                                           const SPIRVType *ResType,
1292                                           MachineInstr &I) const {
1293 
1294   assert(I.getNumOperands() == 5);
1295   assert(I.getOperand(2).isReg());
1296   assert(I.getOperand(3).isReg());
1297   assert(I.getOperand(4).isReg());
1298   MachineBasicBlock &BB = *I.getParent();
1299 
1300   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
1301       .addDef(ResVReg)
1302       .addUse(GR.getSPIRVTypeID(ResType))
1303       .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
1304       .addImm(GL::FMix)
1305       .addUse(I.getOperand(2).getReg())
1306       .addUse(I.getOperand(3).getReg())
1307       .addUse(I.getOperand(4).getReg())
1308       .constrainAllUses(TII, TRI, RBI);
1309 }
1310 
1311 bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
1312                                                 const SPIRVType *ResType,
1313                                                 MachineInstr &I) const {
1314   MachineBasicBlock &BB = *I.getParent();
1315   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
1316       .addDef(ResVReg)
1317       .addUse(GR.getSPIRVTypeID(ResType))
1318       .addUse(I.getOperand(1).getReg())
1319       .constrainAllUses(TII, TRI, RBI);
1320 }
1321 
1322 bool SPIRVInstructionSelector::selectFreeze(Register ResVReg,
1323                                             const SPIRVType *ResType,
1324                                             MachineInstr &I) const {
1325   // There is no way to implement `freeze` correctly without support from the
1326   // SPIR-V standard, but we can at least handle the simple (static) case when
1327   // undef/poison presence is obvious. The main benefit of even incomplete
1328   // `freeze` support is that it keeps the translation from crashing due to
1329   // missing support in the legalization and instruction selection steps.
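       // A minimal illustration of the two cases handled here (assuming %u is
       // defined by OpUndef or G_IMPLICIT_DEF, and %x by anything else):
       //   %y = freeze %u   ==>  %y = OpConstantNull <type>
       //   %y = freeze %x   ==>  %y = COPY %x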
1330   if (!I.getOperand(0).isReg() || !I.getOperand(1).isReg())
1331     return false;
1332   Register OpReg = I.getOperand(1).getReg();
1333   if (MachineInstr *Def = MRI->getVRegDef(OpReg)) {
1334     Register Reg;
1335     switch (Def->getOpcode()) {
1336     case SPIRV::ASSIGN_TYPE:
1337       if (MachineInstr *AssignToDef =
1338               MRI->getVRegDef(Def->getOperand(1).getReg())) {
1339         if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1340           Reg = Def->getOperand(2).getReg();
1341       }
1342       break;
1343     case SPIRV::OpUndef:
1344       Reg = Def->getOperand(1).getReg();
1345       break;
1346     }
1347     unsigned DestOpCode;
1348     if (Reg.isValid()) {
1349       DestOpCode = SPIRV::OpConstantNull;
1350     } else {
1351       DestOpCode = TargetOpcode::COPY;
1352       Reg = OpReg;
1353     }
1354     return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(DestOpCode))
1355         .addDef(I.getOperand(0).getReg())
1356         .addUse(Reg)
1357         .constrainAllUses(TII, TRI, RBI);
1358   }
1359   return false;
1360 }
1361 
1362 bool SPIRVInstructionSelector::selectConstVector(Register ResVReg,
1363                                                  const SPIRVType *ResType,
1364                                                  MachineInstr &I) const {
1365   // TODO: only const case is supported for now.
1366   assert(std::all_of(
1367       I.operands_begin(), I.operands_end(), [this](const MachineOperand &MO) {
1368         if (MO.isDef())
1369           return true;
1370         if (!MO.isReg())
1371           return false;
1372         SPIRVType *ConstTy = this->MRI->getVRegDef(MO.getReg());
1373         assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
1374                ConstTy->getOperand(1).isReg());
1375         Register ConstReg = ConstTy->getOperand(1).getReg();
1376         const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
1377         assert(Const);
1378         return (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
1379                 Const->getOpcode() == TargetOpcode::G_FCONSTANT);
1380       }));
1381 
1382   auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
1383                      TII.get(SPIRV::OpConstantComposite))
1384                  .addDef(ResVReg)
1385                  .addUse(GR.getSPIRVTypeID(ResType));
1386   for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
1387     MIB.addUse(I.getOperand(i).getReg());
1388   return MIB.constrainAllUses(TII, TRI, RBI);
1389 }
1390 
1391 static unsigned getArrayComponentCount(MachineRegisterInfo *MRI,
1392                                        const SPIRVType *ResType) {
1393   Register OpReg = ResType->getOperand(2).getReg();
1394   SPIRVType *OpDef = MRI->getVRegDef(OpReg);
1395   if (!OpDef)
1396     return 0;
1397   if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
1398       OpDef->getOperand(1).isReg()) {
1399     if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
1400       OpDef = RefDef;
1401   }
1402   unsigned N = OpDef->getOpcode() == TargetOpcode::G_CONSTANT
1403                    ? OpDef->getOperand(1).getCImm()->getValue().getZExtValue()
1404                    : 0;
1405   return N;
1406 }
1407 
1408 // Return true if the defining instruction (looking through ASSIGN_TYPE) is a constant.
1409 static bool isConstReg(MachineRegisterInfo *MRI, SPIRVType *OpDef) {
1410   if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
1411       OpDef->getOperand(1).isReg()) {
1412     if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
1413       OpDef = RefDef;
1414   }
1415   return OpDef->getOpcode() == TargetOpcode::G_CONSTANT ||
1416          OpDef->getOpcode() == TargetOpcode::G_FCONSTANT;
1417 }
1418 
1419 // Return true if the virtual register represents a constant
1420 static bool isConstReg(MachineRegisterInfo *MRI, Register OpReg) {
1421   if (SPIRVType *OpDef = MRI->getVRegDef(OpReg))
1422     return isConstReg(MRI, OpDef);
1423   return false;
1424 }
1425 
1426 bool SPIRVInstructionSelector::selectSplatVector(Register ResVReg,
1427                                                  const SPIRVType *ResType,
1428                                                  MachineInstr &I) const {
1429   unsigned N = 0;
1430   if (ResType->getOpcode() == SPIRV::OpTypeVector)
1431     N = GR.getScalarOrVectorComponentCount(ResType);
1432   else if (ResType->getOpcode() == SPIRV::OpTypeArray)
1433     N = getArrayComponentCount(MRI, ResType);
1434   else
1435     report_fatal_error("Cannot select G_SPLAT_VECTOR with a non-vector result");
1436 
1437   unsigned OpIdx = I.getNumExplicitDefs();
1438   if (!I.getOperand(OpIdx).isReg())
1439     report_fatal_error("Unexpected argument in G_SPLAT_VECTOR");
1440 
1441   // Check whether we may construct a constant vector.
1442   Register OpReg = I.getOperand(OpIdx).getReg();
1443   bool IsConst = isConstReg(MRI, OpReg);
1444 
1445   if (!IsConst && N < 2)
1446     report_fatal_error(
1447         "There must be at least two constituent operands in a vector");
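       // The splat is emitted as an N-fold repetition of the same operand, e.g.
       // for N == 4 (OpConstantComposite instead when the operand is constant):
       //   %res = OpCompositeConstruct %v4ty %x %x %x %x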
1448 
1449   auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
1450                      TII.get(IsConst ? SPIRV::OpConstantComposite
1451                                      : SPIRV::OpCompositeConstruct))
1452                  .addDef(ResVReg)
1453                  .addUse(GR.getSPIRVTypeID(ResType));
1454   for (unsigned i = 0; i < N; ++i)
1455     MIB.addUse(OpReg);
1456   return MIB.constrainAllUses(TII, TRI, RBI);
1457 }
1458 
1459 bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
1460                                          const SPIRVType *ResType,
1461                                          unsigned CmpOpc,
1462                                          MachineInstr &I) const {
1463   Register Cmp0 = I.getOperand(2).getReg();
1464   Register Cmp1 = I.getOperand(3).getReg();
1465   assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
1466              GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
1467          "CMP operands should have the same type");
1468   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
1469       .addDef(ResVReg)
1470       .addUse(GR.getSPIRVTypeID(ResType))
1471       .addUse(Cmp0)
1472       .addUse(Cmp1)
1473       .constrainAllUses(TII, TRI, RBI);
1474 }
1475 
1476 bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
1477                                           const SPIRVType *ResType,
1478                                           MachineInstr &I) const {
1479   auto Pred = I.getOperand(1).getPredicate();
1480   unsigned CmpOpc;
1481 
1482   Register CmpOperand = I.getOperand(2).getReg();
1483   if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
1484     CmpOpc = getPtrCmpOpcode(Pred);
1485   else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
1486     CmpOpc = getBoolCmpOpcode(Pred);
1487   else
1488     CmpOpc = getICmpOpcode(Pred);
1489   return selectCmp(ResVReg, ResType, CmpOpc, I);
1490 }
1491 
1492 void SPIRVInstructionSelector::renderFImm32(MachineInstrBuilder &MIB,
1493                                             const MachineInstr &I,
1494                                             int OpIdx) const {
1495   assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
1496          "Expected G_FCONSTANT");
1497   const ConstantFP *FPImm = I.getOperand(1).getFPImm();
1498   addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
1499 }
1500 
1501 void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
1502                                            const MachineInstr &I,
1503                                            int OpIdx) const {
1504   assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1505          "Expected G_CONSTANT");
1506   addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
1507 }
1508 
1509 Register
1510 SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
1511                                            const SPIRVType *ResType) const {
1512   Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
1513   const SPIRVType *SpvI32Ty =
1514       ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
1515   // Find the constant in the duplicates tracker (DT) or build a new one.
1516   auto ConstInt = ConstantInt::get(LLVMTy, Val);
1517   Register NewReg = GR.find(ConstInt, GR.CurMF);
1518   if (!NewReg.isValid()) {
1519     NewReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
1520     GR.add(ConstInt, GR.CurMF, NewReg);
1521     MachineInstr *MI;
1522     MachineBasicBlock &BB = *I.getParent();
1523     if (Val == 0) {
1524       MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1525                .addDef(NewReg)
1526                .addUse(GR.getSPIRVTypeID(SpvI32Ty));
1527     } else {
1528       MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
1529                .addDef(NewReg)
1530                .addUse(GR.getSPIRVTypeID(SpvI32Ty))
1531                .addImm(APInt(32, Val).getZExtValue());
1532     }
1533     constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
1534   }
1535   return NewReg;
1536 }
1537 
1538 bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
1539                                           const SPIRVType *ResType,
1540                                           MachineInstr &I) const {
1541   unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
1542   return selectCmp(ResVReg, ResType, CmpOp, I);
1543 }
1544 
1545 Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
1546                                                  MachineInstr &I) const {
1547   // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
1548   bool ZeroAsNull = STI.isOpenCLEnv();
1549   if (ResType->getOpcode() == SPIRV::OpTypeVector)
1550     return GR.getOrCreateConstVector(0UL, I, ResType, TII, ZeroAsNull);
1551   return GR.getOrCreateConstInt(0, I, ResType, TII, ZeroAsNull);
1552 }
1553 
1554 static APFloat getZeroFP(const Type *LLVMFloatTy) {
1555   if (!LLVMFloatTy)
1556     return APFloat::getZero(APFloat::IEEEsingle());
1557   switch (LLVMFloatTy->getScalarType()->getTypeID()) {
1558   case Type::HalfTyID:
1559     return APFloat::getZero(APFloat::IEEEhalf());
1560   default:
1561   case Type::FloatTyID:
1562     return APFloat::getZero(APFloat::IEEEsingle());
1563   case Type::DoubleTyID:
1564     return APFloat::getZero(APFloat::IEEEdouble());
1565   }
1566 }
1567 
1568 Register SPIRVInstructionSelector::buildZerosValF(const SPIRVType *ResType,
1569                                                   MachineInstr &I) const {
1570   // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
1571   bool ZeroAsNull = STI.isOpenCLEnv();
1572   APFloat VZero = getZeroFP(GR.getTypeForSPIRVType(ResType));
1573   if (ResType->getOpcode() == SPIRV::OpTypeVector)
1574     return GR.getOrCreateConstVector(VZero, I, ResType, TII, ZeroAsNull);
1575   return GR.getOrCreateConstFP(VZero, I, ResType, TII, ZeroAsNull);
1576 }
1577 
1578 Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
1579                                                 const SPIRVType *ResType,
1580                                                 MachineInstr &I) const {
1581   unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
1582   APInt One =
1583       AllOnes ? APInt::getAllOnes(BitWidth) : APInt::getOneBitSet(BitWidth, 0);
1584   if (ResType->getOpcode() == SPIRV::OpTypeVector)
1585     return GR.getOrCreateConstVector(One.getZExtValue(), I, ResType, TII);
1586   return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII);
1587 }
1588 
1589 bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
1590                                             const SPIRVType *ResType,
1591                                             MachineInstr &I,
1592                                             bool IsSigned) const {
1593   // To extend a bool, we need to use OpSelect between constants.
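       // E.g. for a scalar i1 %c extended to i32 this emits, informally:
       //   %res = OpSelect %i32 %c %true_val %zero
       // where sext uses all-ones and zext uses 1 as the true value.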
1594   Register ZeroReg = buildZerosVal(ResType, I);
1595   Register OneReg = buildOnesVal(IsSigned, ResType, I);
1596   bool IsScalarBool =
1597       GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
1598   unsigned Opcode =
1599       IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
1600   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
1601       .addDef(ResVReg)
1602       .addUse(GR.getSPIRVTypeID(ResType))
1603       .addUse(I.getOperand(1).getReg())
1604       .addUse(OneReg)
1605       .addUse(ZeroReg)
1606       .constrainAllUses(TII, TRI, RBI);
1607 }
1608 
1609 bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
1610                                           const SPIRVType *ResType,
1611                                           MachineInstr &I, bool IsSigned,
1612                                           unsigned Opcode) const {
1613   Register SrcReg = I.getOperand(1).getReg();
1614   // We can convert a bool value directly to a float type without OpConvert*ToF,
1615   // but the translator generates OpSelect+OpConvert*ToF, so we do the same.
1616   if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
1617     unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
1618     SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
1619     if (ResType->getOpcode() == SPIRV::OpTypeVector) {
1620       const unsigned NumElts = ResType->getOperand(2).getImm();
1621       TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
1622     }
1623     SrcReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1624     selectSelect(SrcReg, TmpType, I, false);
1625   }
1626   return selectUnOpWithSrc(ResVReg, ResType, I, SrcReg, Opcode);
1627 }
1628 
1629 bool SPIRVInstructionSelector::selectExt(Register ResVReg,
1630                                          const SPIRVType *ResType,
1631                                          MachineInstr &I, bool IsSigned) const {
1632   Register SrcReg = I.getOperand(1).getReg();
1633   if (GR.isScalarOrVectorOfType(SrcReg, SPIRV::OpTypeBool))
1634     return selectSelect(ResVReg, ResType, I, IsSigned);
1635 
1636   SPIRVType *SrcType = GR.getSPIRVTypeForVReg(SrcReg);
1637   if (SrcType == ResType)
1638     return BuildMI(*I.getParent(), I, I.getDebugLoc(),
1639                    TII.get(TargetOpcode::COPY))
1640         .addDef(ResVReg)
1641         .addUse(SrcReg)
1642         .constrainAllUses(TII, TRI, RBI);
1643 
1644   unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
1645   return selectUnOp(ResVReg, ResType, I, Opcode);
1646 }
1647 
1648 bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
1649                                                Register ResVReg,
1650                                                MachineInstr &I,
1651                                                const SPIRVType *IntTy,
1652                                                const SPIRVType *BoolTy) const {
1653   // To truncate to a bool, we mask with 1 (OpBitwiseAnd) and compare the result to zero (OpINotEqual).
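       // Informally, for a scalar integer %x this emits:
       //   %bit = OpBitwiseAndS %ity %x %one
       //   %res = OpINotEqual %bty %bit %zero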
1654   Register BitIntReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1655   bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
1656   unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
1657   Register Zero = buildZerosVal(IntTy, I);
1658   Register One = buildOnesVal(false, IntTy, I);
1659   MachineBasicBlock &BB = *I.getParent();
1660   BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
1661       .addDef(BitIntReg)
1662       .addUse(GR.getSPIRVTypeID(IntTy))
1663       .addUse(IntReg)
1664       .addUse(One)
1665       .constrainAllUses(TII, TRI, RBI);
1666   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
1667       .addDef(ResVReg)
1668       .addUse(GR.getSPIRVTypeID(BoolTy))
1669       .addUse(BitIntReg)
1670       .addUse(Zero)
1671       .constrainAllUses(TII, TRI, RBI);
1672 }
1673 
1674 bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
1675                                            const SPIRVType *ResType,
1676                                            MachineInstr &I) const {
1677   Register IntReg = I.getOperand(1).getReg();
1678   const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
1679   if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool))
1680     return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType);
1681   if (ArgType == ResType)
1682     return BuildMI(*I.getParent(), I, I.getDebugLoc(),
1683                    TII.get(TargetOpcode::COPY))
1684         .addDef(ResVReg)
1685         .addUse(IntReg)
1686         .constrainAllUses(TII, TRI, RBI);
1687   bool IsSigned = GR.isScalarOrVectorSigned(ResType);
1688   unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
1689   return selectUnOp(ResVReg, ResType, I, Opcode);
1690 }
1691 
1692 bool SPIRVInstructionSelector::selectConst(Register ResVReg,
1693                                            const SPIRVType *ResType,
1694                                            const APInt &Imm,
1695                                            MachineInstr &I) const {
1696   unsigned TyOpcode = ResType->getOpcode();
1697   assert(TyOpcode != SPIRV::OpTypePointer || Imm.isZero());
1698   MachineBasicBlock &BB = *I.getParent();
1699   if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
1700       Imm.isZero())
1701     return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1702         .addDef(ResVReg)
1703         .addUse(GR.getSPIRVTypeID(ResType))
1704         .constrainAllUses(TII, TRI, RBI);
1705   if (TyOpcode == SPIRV::OpTypeInt) {
1706     assert(Imm.getBitWidth() <= 64 && "Unsupported integer width!");
1707     Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII);
1708     if (Reg == ResVReg)
1709       return true;
1710     return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
1711         .addDef(ResVReg)
1712         .addUse(Reg)
1713         .constrainAllUses(TII, TRI, RBI);
1714   }
1715   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
1716                  .addDef(ResVReg)
1717                  .addUse(GR.getSPIRVTypeID(ResType));
1718   // <=32-bit integers should be caught by the sdag pattern.
1719   assert(Imm.getBitWidth() > 32);
1720   addNumImm(Imm, MIB);
1721   return MIB.constrainAllUses(TII, TRI, RBI);
1722 }
1723 
1724 bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
1725                                              const SPIRVType *ResType,
1726                                              MachineInstr &I) const {
1727   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
1728       .addDef(ResVReg)
1729       .addUse(GR.getSPIRVTypeID(ResType))
1730       .constrainAllUses(TII, TRI, RBI);
1731 }
1732 
1733 static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
1734   assert(MO.isReg());
1735   const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
1736   if (TypeInst->getOpcode() != SPIRV::ASSIGN_TYPE)
1737     return false;
1738   assert(TypeInst->getOperand(1).isReg());
1739   MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
1740   return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
1741 }
1742 
1743 static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
1744   const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
1745   MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
1746   assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT);
1747   return ImmInst->getOperand(1).getCImm()->getZExtValue();
1748 }
1749 
1750 bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
1751                                                const SPIRVType *ResType,
1752                                                MachineInstr &I) const {
1753   MachineBasicBlock &BB = *I.getParent();
1754   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
1755                  .addDef(ResVReg)
1756                  .addUse(GR.getSPIRVTypeID(ResType))
1757                  // object to insert
1758                  .addUse(I.getOperand(3).getReg())
1759                  // composite to insert into
1760                  .addUse(I.getOperand(2).getReg());
1761   for (unsigned i = 4; i < I.getNumOperands(); i++)
1762     MIB.addImm(foldImm(I.getOperand(i), MRI));
1763   return MIB.constrainAllUses(TII, TRI, RBI);
1764 }
1765 
1766 bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
1767                                                 const SPIRVType *ResType,
1768                                                 MachineInstr &I) const {
1769   MachineBasicBlock &BB = *I.getParent();
1770   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
1771                  .addDef(ResVReg)
1772                  .addUse(GR.getSPIRVTypeID(ResType))
1773                  .addUse(I.getOperand(2).getReg());
1774   for (unsigned i = 3; i < I.getNumOperands(); i++)
1775     MIB.addImm(foldImm(I.getOperand(i), MRI));
1776   return MIB.constrainAllUses(TII, TRI, RBI);
1777 }
1778 
1779 bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
1780                                                const SPIRVType *ResType,
1781                                                MachineInstr &I) const {
1782   if (isImm(I.getOperand(4), MRI))
1783     return selectInsertVal(ResVReg, ResType, I);
1784   MachineBasicBlock &BB = *I.getParent();
1785   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
1786       .addDef(ResVReg)
1787       .addUse(GR.getSPIRVTypeID(ResType))
1788       .addUse(I.getOperand(2).getReg())
1789       .addUse(I.getOperand(3).getReg())
1790       .addUse(I.getOperand(4).getReg())
1791       .constrainAllUses(TII, TRI, RBI);
1792 }
1793 
1794 bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
1795                                                 const SPIRVType *ResType,
1796                                                 MachineInstr &I) const {
1797   if (isImm(I.getOperand(3), MRI))
1798     return selectExtractVal(ResVReg, ResType, I);
1799   MachineBasicBlock &BB = *I.getParent();
1800   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
1801       .addDef(ResVReg)
1802       .addUse(GR.getSPIRVTypeID(ResType))
1803       .addUse(I.getOperand(2).getReg())
1804       .addUse(I.getOperand(3).getReg())
1805       .constrainAllUses(TII, TRI, RBI);
1806 }
1807 
1808 bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
1809                                          const SPIRVType *ResType,
1810                                          MachineInstr &I) const {
1811   const bool IsGEPInBounds = I.getOperand(2).getImm();
1812 
1813   // OpAccessChain could be used for OpenCL, but the SPIRV-LLVM Translator only
1814   // relies on PtrAccessChain, so we'll try not to deviate. For Vulkan however,
1815   // we have to use Op[InBounds]AccessChain.
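       // Operand-layout note (informal): OpPtrAccessChain consumes the GEP's
       // first index as its extra Element operand, while Op[InBounds]AccessChain
       // has no Element operand and skips that index, which is why the starting
       // index operand below differs between the two forms.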
1816   const unsigned Opcode = STI.isVulkanEnv()
1817                               ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
1818                                                : SPIRV::OpAccessChain)
1819                               : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain
1820                                                : SPIRV::OpPtrAccessChain);
1821 
1822   auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
1823                  .addDef(ResVReg)
1824                  .addUse(GR.getSPIRVTypeID(ResType))
1825                  // Object to get a pointer to.
1826                  .addUse(I.getOperand(3).getReg());
1827   // Adding indices.
1828   const unsigned StartingIndex =
1829       (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
1830           ? 5
1831           : 4;
1832   for (unsigned i = StartingIndex; i < I.getNumExplicitOperands(); ++i)
1833     Res.addUse(I.getOperand(i).getReg());
1834   return Res.constrainAllUses(TII, TRI, RBI);
1835 }
1836 
1837 // Maybe wrap a value into OpSpecConstantOp
1838 bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
1839     MachineInstr &I, SmallVector<Register> &CompositeArgs) const {
1840   bool Result = true;
1841   unsigned Lim = I.getNumExplicitOperands();
1842   for (unsigned i = I.getNumExplicitDefs() + 1; i < Lim; ++i) {
1843     Register OpReg = I.getOperand(i).getReg();
1844     SPIRVType *OpDefine = MRI->getVRegDef(OpReg);
1845     SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
1846     if (!OpDefine || !OpType || isConstReg(MRI, OpDefine) ||
1847         OpDefine->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST) {
1848       // The case of G_ADDRSPACE_CAST inside spv_const_composite() is processed
1849       // by selectAddrSpaceCast()
1850       CompositeArgs.push_back(OpReg);
1851       continue;
1852     }
1853     MachineFunction *MF = I.getMF();
1854     Register WrapReg = GR.find(OpDefine, MF);
1855     if (WrapReg.isValid()) {
1856       CompositeArgs.push_back(WrapReg);
1857       continue;
1858     }
1859     // Create a new register for the wrapper
1860     WrapReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1861     GR.add(OpDefine, MF, WrapReg);
1862     CompositeArgs.push_back(WrapReg);
1863     // Decorate the wrapper register and generate a new instruction
1864     MRI->setType(WrapReg, LLT::pointer(0, 32));
1865     GR.assignSPIRVTypeToVReg(OpType, WrapReg, *MF);
1866     MachineBasicBlock &BB = *I.getParent();
1867     Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
1868                  .addDef(WrapReg)
1869                  .addUse(GR.getSPIRVTypeID(OpType))
1870                  .addImm(static_cast<uint32_t>(SPIRV::Opcode::Bitcast))
1871                  .addUse(OpReg)
1872                  .constrainAllUses(TII, TRI, RBI);
1873     if (!Result)
1874       break;
1875   }
1876   return Result;
1877 }
1878 
1879 bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
1880                                                const SPIRVType *ResType,
1881                                                MachineInstr &I) const {
1882   MachineBasicBlock &BB = *I.getParent();
1883   Intrinsic::ID IID = cast<GIntrinsic>(I).getIntrinsicID();
1884   switch (IID) {
1885   case Intrinsic::spv_load:
1886     return selectLoad(ResVReg, ResType, I);
1887   case Intrinsic::spv_store:
1888     return selectStore(I);
1889   case Intrinsic::spv_extractv:
1890     return selectExtractVal(ResVReg, ResType, I);
1891   case Intrinsic::spv_insertv:
1892     return selectInsertVal(ResVReg, ResType, I);
1893   case Intrinsic::spv_extractelt:
1894     return selectExtractElt(ResVReg, ResType, I);
1895   case Intrinsic::spv_insertelt:
1896     return selectInsertElt(ResVReg, ResType, I);
1897   case Intrinsic::spv_gep:
1898     return selectGEP(ResVReg, ResType, I);
1899   case Intrinsic::spv_unref_global:
1900   case Intrinsic::spv_init_global: {
1901     MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
1902     MachineInstr *Init = I.getNumExplicitOperands() > 2
1903                              ? MRI->getVRegDef(I.getOperand(2).getReg())
1904                              : nullptr;
1905     assert(MI);
1906     return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
1907   }
1908   case Intrinsic::spv_undef: {
1909     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
1910                    .addDef(ResVReg)
1911                    .addUse(GR.getSPIRVTypeID(ResType));
1912     return MIB.constrainAllUses(TII, TRI, RBI);
1913   }
1914   case Intrinsic::spv_const_composite: {
1915     // If no values are attached, the composite is a null constant.
1916     bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
1917     // Select a proper instruction.
1918     unsigned Opcode = SPIRV::OpConstantNull;
1919     SmallVector<Register> CompositeArgs;
1920     if (!IsNull) {
1921       Opcode = SPIRV::OpConstantComposite;
1922       if (!wrapIntoSpecConstantOp(I, CompositeArgs))
1923         return false;
1924     }
1925     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
1926                    .addDef(ResVReg)
1927                    .addUse(GR.getSPIRVTypeID(ResType));
1928     // Skip the type MD node that we already used when generating assign.type.
1929     if (!IsNull) {
1930       for (Register OpReg : CompositeArgs)
1931         MIB.addUse(OpReg);
1932     }
1933     return MIB.constrainAllUses(TII, TRI, RBI);
1934   }
1935   case Intrinsic::spv_assign_name: {
1936     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
1937     MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
1938     for (unsigned i = I.getNumExplicitDefs() + 2;
1939          i < I.getNumExplicitOperands(); ++i) {
1940       MIB.addImm(I.getOperand(i).getImm());
1941     }
1942     return MIB.constrainAllUses(TII, TRI, RBI);
1943   }
1944   case Intrinsic::spv_switch: {
1945     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
1946     for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
1947       if (I.getOperand(i).isReg())
1948         MIB.addReg(I.getOperand(i).getReg());
1949       else if (I.getOperand(i).isCImm())
1950         addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
1951       else if (I.getOperand(i).isMBB())
1952         MIB.addMBB(I.getOperand(i).getMBB());
1953       else
1954         llvm_unreachable("Unexpected OpSwitch operand");
1955     }
1956     return MIB.constrainAllUses(TII, TRI, RBI);
1957   }
1958   case Intrinsic::spv_cmpxchg:
1959     return selectAtomicCmpXchg(ResVReg, ResType, I);
1960   case Intrinsic::spv_unreachable:
1961     BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable));
1962     break;
1963   case Intrinsic::spv_alloca:
1964     return selectFrameIndex(ResVReg, ResType, I);
1965   case Intrinsic::spv_alloca_array:
1966     return selectAllocaArray(ResVReg, ResType, I);
1967   case Intrinsic::spv_assume:
1968     if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
1969       BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
1970           .addUse(I.getOperand(1).getReg());
1971     break;
1972   case Intrinsic::spv_expect:
1973     if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
1974       BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
1975           .addDef(ResVReg)
1976           .addUse(GR.getSPIRVTypeID(ResType))
1977           .addUse(I.getOperand(2).getReg())
1978           .addUse(I.getOperand(3).getReg());
1979     break;
1980   case Intrinsic::spv_thread_id:
1981     return selectSpvThreadId(ResVReg, ResType, I);
1982   case Intrinsic::spv_all:
1983     return selectAll(ResVReg, ResType, I);
1984   case Intrinsic::spv_any:
1985     return selectAny(ResVReg, ResType, I);
1986   case Intrinsic::spv_lerp:
1987     return selectFmix(ResVReg, ResType, I);
1988   case Intrinsic::spv_lifetime_start:
1989   case Intrinsic::spv_lifetime_end: {
1990     unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
1991                                                        : SPIRV::OpLifetimeStop;
1992     int64_t Size = I.getOperand(I.getNumExplicitDefs() + 1).getImm();
1993     Register PtrReg = I.getOperand(I.getNumExplicitDefs() + 2).getReg();
1994     unsigned PointeeOpType = GR.getPointeeTypeOp(PtrReg);
1995     bool IsNonvoidPtr = PointeeOpType != 0 && PointeeOpType != SPIRV::OpTypeVoid;
1996     if (Size == -1 || IsNonvoidPtr)
1997       Size = 0;
1998     BuildMI(BB, I, I.getDebugLoc(), TII.get(Op)).addUse(PtrReg).addImm(Size);
1999   } break;
2000   default: {
2001     std::string DiagMsg;
2002     raw_string_ostream OS(DiagMsg);
2003     I.print(OS);
2004     DiagMsg = "Intrinsic selection not implemented: " + DiagMsg;
2005     report_fatal_error(DiagMsg.c_str(), false);
2006   }
2007   }
2008   return true;
2009 }
2010 
2011 bool SPIRVInstructionSelector::selectAllocaArray(Register ResVReg,
2012                                                  const SPIRVType *ResType,
2013                                                  MachineInstr &I) const {
2014   // The alloca instruction had an allocation size parameter other than 1, so
2015   // it is lowered to a variable-length array.
2016   MachineBasicBlock &BB = *I.getParent();
2017   return BuildMI(BB, I, I.getDebugLoc(),
2018                  TII.get(SPIRV::OpVariableLengthArrayINTEL))
2019       .addDef(ResVReg)
2020       .addUse(GR.getSPIRVTypeID(ResType))
2021       .addUse(I.getOperand(2).getReg())
2022       .constrainAllUses(TII, TRI, RBI);
2023 }
2024 
2025 bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
2026                                                 const SPIRVType *ResType,
2027                                                 MachineInstr &I) const {
2028   // Change order of instructions if needed: all OpVariable instructions in a
2029   // function must be the first instructions in the first block
2030   MachineFunction *MF = I.getParent()->getParent();
2031   MachineBasicBlock *MBB = &MF->front();
2032   auto It = MBB->SkipPHIsAndLabels(MBB->begin()), E = MBB->end();
2033   bool IsHeader = false;
2034   unsigned Opcode;
2035   for (; It != E && It != I; ++It) {
2036     Opcode = It->getOpcode();
2037     if (Opcode == SPIRV::OpFunction || Opcode == SPIRV::OpFunctionParameter) {
2038       IsHeader = true;
2039     } else if (IsHeader &&
2040                !(Opcode == SPIRV::ASSIGN_TYPE || Opcode == SPIRV::OpLabel)) {
2041       ++It;
2042       break;
2043     }
2044   }
2045   return BuildMI(*MBB, It, It->getDebugLoc(), TII.get(SPIRV::OpVariable))
2046       .addDef(ResVReg)
2047       .addUse(GR.getSPIRVTypeID(ResType))
2048       .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
2049       .constrainAllUses(TII, TRI, RBI);
2050 }
2051 
2052 bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
2053   // InstructionSelector walks backwards through the instructions. We can use
2054   // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
2055   // first, so we can generate an OpBranchConditional here. If there is no
2056   // G_BRCOND, we just use OpBranch for a regular unconditional branch.
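       // E.g. the following MIR pair folds into a single SPIR-V instruction:
       //   G_BRCOND %cond, %bb.then
       //   G_BR %bb.else
       //   ==> OpBranchConditional %cond %bb.then %bb.else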
2057   const MachineInstr *PrevI = I.getPrevNode();
2058   MachineBasicBlock &MBB = *I.getParent();
2059   if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
2060     return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
2061         .addUse(PrevI->getOperand(0).getReg())
2062         .addMBB(PrevI->getOperand(1).getMBB())
2063         .addMBB(I.getOperand(0).getMBB())
2064         .constrainAllUses(TII, TRI, RBI);
2065   }
2066   return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
2067       .addMBB(I.getOperand(0).getMBB())
2068       .constrainAllUses(TII, TRI, RBI);
2069 }
2070 
2071 bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
2072   // InstructionSelector walks backwards through the instructions. For an
2073   // explicit conditional branch with no fallthrough, we use both a G_BR and a
2074   // G_BRCOND to create an OpBranchConditional. We should hit G_BR first, and
2075   // generate the OpBranchConditional in selectBranch above.
2076   //
2077   // If an OpBranchConditional has been generated, we simply return, as the work
2078   // is already done. If there is no OpBranchConditional, LLVM must be relying on
2079   // implicit fallthrough to the next basic block, so we need to create an
2080   // OpBranchConditional with an explicit "false" argument pointing to the next
2081   // basic block that LLVM would fall through to.
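       // E.g. with an implicit fallthrough to %bb.next:
       //   G_BRCOND %cond, %bb.then   ; no trailing G_BR
       //   ==> OpBranchConditional %cond %bb.then %bb.next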
2082   const MachineInstr *NextI = I.getNextNode();
2083   // Check if this has already been successfully selected.
2084   if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
2085     return true;
2086   // Must be relying on implicit block fallthrough, so generate an
2087   // OpBranchConditional with the "next" basic block as the "false" target.
2088   MachineBasicBlock &MBB = *I.getParent();
2089   unsigned NextMBBNum = MBB.getNextNode()->getNumber();
2090   MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
2091   return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
2092       .addUse(I.getOperand(0).getReg())
2093       .addMBB(I.getOperand(1).getMBB())
2094       .addMBB(NextMBB)
2095       .constrainAllUses(TII, TRI, RBI);
2096 }
2097 
2098 bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
2099                                          const SPIRVType *ResType,
2100                                          MachineInstr &I) const {
2101   auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
2102                  .addDef(ResVReg)
2103                  .addUse(GR.getSPIRVTypeID(ResType));
2104   const unsigned NumOps = I.getNumOperands();
2105   for (unsigned i = 1; i < NumOps; i += 2) {
2106     MIB.addUse(I.getOperand(i + 0).getReg());
2107     MIB.addMBB(I.getOperand(i + 1).getMBB());
2108   }
2109   return MIB.constrainAllUses(TII, TRI, RBI);
2110 }
2111 
2112 bool SPIRVInstructionSelector::selectGlobalValue(
2113     Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
2114   // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
2115   MachineIRBuilder MIRBuilder(I);
2116   const GlobalValue *GV = I.getOperand(1).getGlobal();
2117   Type *GVType = GR.getDeducedGlobalValueType(GV);
2118   SPIRVType *PointerBaseType;
2119   if (GVType->isArrayTy()) {
2120     SPIRVType *ArrayElementType =
2121         GR.getOrCreateSPIRVType(GVType->getArrayElementType(), MIRBuilder,
2122                                 SPIRV::AccessQualifier::ReadWrite, false);
2123     PointerBaseType = GR.getOrCreateSPIRVArrayType(
2124         ArrayElementType, GVType->getArrayNumElements(), I, TII);
2125   } else {
2126     PointerBaseType = GR.getOrCreateSPIRVType(
2127         GVType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
2128   }
2129   SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
2130       PointerBaseType, I, TII,
2131       addressSpaceToStorageClass(GV->getAddressSpace(), STI));
2132 
2133   std::string GlobalIdent;
2134   if (!GV->hasName()) {
2135     unsigned &ID = UnnamedGlobalIDs[GV];
2136     if (ID == 0)
2137       ID = UnnamedGlobalIDs.size();
2138     GlobalIdent = "__unnamed_" + Twine(ID).str();
2139   } else {
2140     GlobalIdent = GV->getGlobalIdentifier();
2141   }
2142 
2143   // Behaviour of functions as operands depends on availability of the
2144   // corresponding extension (SPV_INTEL_function_pointers):
2145   // - If the extension to operate with functions as operands is available:
2146   // We create a proper constant operand and evaluate a correct type for a
2147   // function pointer.
2148   // - Without the required extension:
2149   // We have functions as operands in tests with blocks of instructions, e.g. in
2150   // transcoding/global_block.ll. These operands are not used and should be
2151   // substituted by zero constants. Their type is expected to be always
2152   // OpTypePointer Function %uchar.
2153   if (isa<Function>(GV)) {
2154     const Constant *ConstVal = GV;
2155     MachineBasicBlock &BB = *I.getParent();
2156     Register NewReg = GR.find(ConstVal, GR.CurMF);
2157     if (!NewReg.isValid()) {
2158       Register NewReg = ResVReg;
2159       GR.add(ConstVal, GR.CurMF, NewReg);
2160       const Function *GVFun =
2161           STI.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)
2162               ? dyn_cast<Function>(GV)
2163               : nullptr;
2164       if (GVFun) {
2165         // References to a function via function pointers generate virtual
2166         // registers without a definition. We will resolve it later, during
2167         // the module analysis stage.
2168         MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2169         Register FuncVReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
2170         MRI->setRegClass(FuncVReg, &SPIRV::IDRegClass);
2171         MachineInstrBuilder MB =
2172             BuildMI(BB, I, I.getDebugLoc(),
2173                     TII.get(SPIRV::OpConstantFunctionPointerINTEL))
2174                 .addDef(NewReg)
2175                 .addUse(GR.getSPIRVTypeID(ResType))
2176                 .addUse(FuncVReg);
2177         // Map the function pointer to the Function it references.
2178         GR.recordFunctionPointer(&MB.getInstr()->getOperand(2), GVFun);
2179         return MB.constrainAllUses(TII, TRI, RBI);
2180       }
2181       return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
2182           .addDef(NewReg)
2183           .addUse(GR.getSPIRVTypeID(ResType))
2184           .constrainAllUses(TII, TRI, RBI);
2185     }
2186     assert(NewReg != ResVReg);
2187     return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
2188         .addDef(ResVReg)
2189         .addUse(NewReg)
2190         .constrainAllUses(TII, TRI, RBI);
2191   }
2192   auto GlobalVar = cast<GlobalVariable>(GV);
2193   assert(GlobalVar->getName() != "llvm.global.annotations");
2194 
2195   bool HasInit = GlobalVar->hasInitializer() &&
2196                  !isa<UndefValue>(GlobalVar->getInitializer());
2197   // Skip the empty declaration for GVs with initializers until we get the decl
2198   // with the passed initializer.
2199   if (HasInit && !Init)
2200     return true;
2201 
2202   unsigned AddrSpace = GV->getAddressSpace();
2203   SPIRV::StorageClass::StorageClass Storage =
2204       addressSpaceToStorageClass(AddrSpace, STI);
2205   bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage &&
2206                   Storage != SPIRV::StorageClass::Function;
2207   SPIRV::LinkageType::LinkageType LnkType =
2208       (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
2209           ? SPIRV::LinkageType::Import
2210           : (GV->getLinkage() == GlobalValue::LinkOnceODRLinkage &&
2211                      STI.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr)
2212                  ? SPIRV::LinkageType::LinkOnceODR
2213                  : SPIRV::LinkageType::Export);
2214 
2215   Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
2216                                         Storage, Init, GlobalVar->isConstant(),
2217                                         HasLnkTy, LnkType, MIRBuilder, true);
2218   return Reg.isValid();
2219 }
2220 
2221 bool SPIRVInstructionSelector::selectLog10(Register ResVReg,
2222                                            const SPIRVType *ResType,
2223                                            MachineInstr &I) const {
2224   if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
2225     return selectExtInst(ResVReg, ResType, I, CL::log10);
2226   }
2227 
2228   // There is no log10 instruction in the GLSL Extended Instruction set, so it
2229   // is implemented as:
2230   // log10(x) = log2(x) * (1 / log2(10))
2231   //          = log2(x) * 0.30103
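       // For a scalar float this emits, roughly (names illustrative):
       //   %l = OpExtInst %float %glsl_ext Log2 %x
       //   %r = OpFMulS %float %l %c_0.30103
       // For vector results the multiply becomes OpVectorTimesScalar instead.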
2232 
2233   MachineIRBuilder MIRBuilder(I);
2234   MachineBasicBlock &BB = *I.getParent();
2235 
2236   // Build log2(x).
2237   Register VarReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
2238   bool Result =
2239       BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
2240           .addDef(VarReg)
2241           .addUse(GR.getSPIRVTypeID(ResType))
2242           .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
2243           .addImm(GL::Log2)
2244           .add(I.getOperand(1))
2245           .constrainAllUses(TII, TRI, RBI);
2246 
2247   // Build 0.30103.
2248   assert(ResType->getOpcode() == SPIRV::OpTypeVector ||
2249          ResType->getOpcode() == SPIRV::OpTypeFloat);
2250   // TODO: Add matrix implementation once supported by the HLSL frontend.
2251   const SPIRVType *SpirvScalarType =
2252       ResType->getOpcode() == SPIRV::OpTypeVector
2253           ? GR.getSPIRVTypeForVReg(ResType->getOperand(1).getReg())
2254           : ResType;
2255   Register ScaleReg =
2256       GR.buildConstantFP(APFloat(0.30103f), MIRBuilder, SpirvScalarType);
2257 
2258   // Multiply log2(x) by 0.30103 to get log10(x) result.
2259   auto Opcode = ResType->getOpcode() == SPIRV::OpTypeVector
2260                     ? SPIRV::OpVectorTimesScalar
2261                     : SPIRV::OpFMulS;
2262   Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
2263                 .addDef(ResVReg)
2264                 .addUse(GR.getSPIRVTypeID(ResType))
2265                 .addUse(VarReg)
2266                 .addUse(ScaleReg)
2267                 .constrainAllUses(TII, TRI, RBI);
2268 
2269   return Result;
2270 }
2271 
2272 bool SPIRVInstructionSelector::selectSpvThreadId(Register ResVReg,
2273                                                  const SPIRVType *ResType,
2274                                                  MachineInstr &I) const {
2275   // DX intrinsic: @llvm.dx.thread.id(i32)
2276   // ID  Name      Description
2277   // 93  ThreadId  reads the thread ID
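       // The lowering below is, informally:
       //   %gid = OpVariable %_ptr_Input_v3uint Input ; BuiltIn GlobalInvocationId
       //   %vec = OpLoad %v3uint %gid
       //   %res = OpCompositeExtract %uint %vec <dim>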
2278 
2279   MachineIRBuilder MIRBuilder(I);
2280   const SPIRVType *U32Type = GR.getOrCreateSPIRVIntegerType(32, MIRBuilder);
2281   const SPIRVType *Vec3Ty =
2282       GR.getOrCreateSPIRVVectorType(U32Type, 3, MIRBuilder);
2283   const SPIRVType *PtrType = GR.getOrCreateSPIRVPointerType(
2284       Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);
2285 
2286   // Create a new register for the GlobalInvocationID builtin variable.
2287   Register NewRegister =
2288       MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::IDRegClass);
2289   MIRBuilder.getMRI()->setType(NewRegister, LLT::pointer(0, 32));
2290   GR.assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());
2291 
2292   // Build GlobalInvocationID global variable with the necessary decorations.
2293   Register Variable = GR.buildGlobalVariable(
2294       NewRegister, PtrType,
2295       getLinkStringForBuiltIn(SPIRV::BuiltIn::GlobalInvocationId), nullptr,
2296       SPIRV::StorageClass::Input, nullptr, true, true,
2297       SPIRV::LinkageType::Import, MIRBuilder, false);
2298 
2299   // Create a new register for the loaded value.
2300   MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2301   Register LoadedRegister = MRI->createVirtualRegister(&SPIRV::IDRegClass);
2302   MIRBuilder.getMRI()->setType(LoadedRegister, LLT::pointer(0, 32));
2303   GR.assignSPIRVTypeToVReg(Vec3Ty, LoadedRegister, MIRBuilder.getMF());
2304 
2305   // Load v3uint value from the global variable.
2306   BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
2307       .addDef(LoadedRegister)
2308       .addUse(GR.getSPIRVTypeID(Vec3Ty))
2309       .addUse(Variable);
2310 
2311   // Get the thread ID index. The operand is expected to be a constant
2312   // immediate value wrapped in a type assignment.
2313   assert(I.getOperand(2).isReg());
2314   Register ThreadIdReg = I.getOperand(2).getReg();
2315   SPIRVType *ConstTy = this->MRI->getVRegDef(ThreadIdReg);
2316   assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
2317          ConstTy->getOperand(1).isReg());
2318   Register ConstReg = ConstTy->getOperand(1).getReg();
2319   const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
2320   assert(Const && Const->getOpcode() == TargetOpcode::G_CONSTANT);
2321   const llvm::APInt &Val = Const->getOperand(1).getCImm()->getValue();
2322   const uint32_t ThreadId = Val.getZExtValue();
2323 
2324   // Extract the thread ID from the loaded vector value.
2325   MachineBasicBlock &BB = *I.getParent();
2326   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
2327                  .addDef(ResVReg)
2328                  .addUse(GR.getSPIRVTypeID(ResType))
2329                  .addUse(LoadedRegister)
2330                  .addImm(ThreadId);
2331   return MIB.constrainAllUses(TII, TRI, RBI);
2332 }
2333 
2334 namespace llvm {
2335 InstructionSelector *
2336 createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
2337                                const SPIRVSubtarget &Subtarget,
2338                                const RegisterBankInfo &RBI) {
2339   return new SPIRVInstructionSelector(TM, Subtarget, RBI);
2340 }
2341 } // namespace llvm
2342