//===- SPIRVInstructionSelector.cpp ------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the targeting of the InstructionSelector class for
// SPIRV.
// TODO: This should be generated by TableGen.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/SPIRVBaseInfo.h"
#include "MCTargetDesc/SPIRVMCTargetDesc.h"
#include "SPIRV.h"
#include "SPIRVGlobalRegistry.h"
#include "SPIRVInstrInfo.h"
#include "SPIRVRegisterBankInfo.h"
#include "SPIRVRegisterInfo.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/Support/Debug.h"

namespace llvm {

class SPIRVMachineModuleInfo : public MachineModuleInfoImpl {
public:
  SyncScope::ID Work_ItemSSID;
  SyncScope::ID WorkGroupSSID;
  SyncScope::ID DeviceSSID;
  SyncScope::ID AllSVMDevicesSSID;
  SyncScope::ID SubGroupSSID;

  SPIRVMachineModuleInfo(const MachineModuleInfo &MMI) {
    LLVMContext &CTX = MMI.getModule()->getContext();
    Work_ItemSSID = CTX.getOrInsertSyncScopeID("work_item");
    WorkGroupSSID = CTX.getOrInsertSyncScopeID("workgroup");
    DeviceSSID = CTX.getOrInsertSyncScopeID("device");
    AllSVMDevicesSSID = CTX.getOrInsertSyncScopeID("all_svm_devices");
    SubGroupSSID = CTX.getOrInsertSyncScopeID("sub_group");
  }
};

} // end namespace llvm

#define DEBUG_TYPE "spirv-isel"

using namespace llvm;
namespace CL = SPIRV::OpenCLExtInst;
namespace GL = SPIRV::GLSLExtInst;

using ExtInstList =
    std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class SPIRVInstructionSelector : public InstructionSelector {
  const SPIRVSubtarget &STI;
  const SPIRVInstrInfo &TII;
  const SPIRVRegisterInfo &TRI;
  const RegisterBankInfo &RBI;
  SPIRVGlobalRegistry &GR;
  MachineRegisterInfo *MRI;
  SPIRVMachineModuleInfo *MMI = nullptr;

  /// We need to keep track of the number we give to anonymous global values to
  /// generate the same name every time this is needed.
  mutable DenseMap<const GlobalValue *, unsigned> UnnamedGlobalIDs;

public:
  SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                           const SPIRVSubtarget &ST,
                           const RegisterBankInfo &RBI);
  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override;
  // Common selection code. Instruction-specific selection occurs in spvSelect.
  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

private:
  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // All instruction-specific selection that didn't happen in "select()".
  // Is basically a large Switch/Case delegating to all other select methods.
  bool spvSelect(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectGlobalValue(Register ResVReg, MachineInstr &I,
                         const MachineInstr *Init = nullptr) const;

  bool selectUnOpWithSrc(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I, Register SrcReg,
                         unsigned Opcode) const;
  bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  unsigned Opcode) const;

  bool selectBitcast(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;

  bool selectLoad(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectStore(MachineInstr &I) const;

  bool selectStackSave(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectStackRestore(MachineInstr &I) const;

  bool selectMemOperation(Register ResVReg, MachineInstr &I) const;

  bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I, unsigned NewOpcode,
                       unsigned NegateOpcode = 0) const;

  bool selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectFence(MachineInstr &I) const;

  bool selectAddrSpaceCast(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectAnyOrAll(Register ResVReg, const SPIRVType *ResType,
                      MachineInstr &I, unsigned OpType) const;

  bool selectAll(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectAny(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectBitreverse(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectConstVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;
  bool selectSplatVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectCmp(Register ResVReg, const SPIRVType *ResType,
                 unsigned comparisonOpcode, MachineInstr &I) const;

  bool selectICmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectFCmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  bool selectFmix(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  void renderImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                   int OpIdx) const;
  void renderFImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                    int OpIdx) const;

  bool selectConst(Register ResVReg, const SPIRVType *ResType, const APInt &Imm,
                   MachineInstr &I) const;

  bool selectSelect(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                    bool IsSigned) const;
  bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  bool IsSigned, unsigned Opcode) const;
  bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                 bool IsSigned) const;

  bool selectTrunc(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectIntToBool(Register IntReg, Register ResVReg, MachineInstr &I,
                       const SPIRVType *intTy, const SPIRVType *boolTy) const;

  bool selectOpUndef(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;
  bool selectFreeze(Register ResVReg, const SPIRVType *ResType,
                    MachineInstr &I) const;
  bool selectIntrinsic(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractVal(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertVal(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractElt(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertElt(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectGEP(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectFrameIndex(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectAllocaArray(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectBranch(MachineInstr &I) const;
  bool selectBranchCond(MachineInstr &I) const;

  bool selectPhi(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst,
                     GL::GLSLExtInst GLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, const ExtInstList &ExtInsts) const;

  bool selectLog10(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectSpvThreadId(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectUnmergeValues(MachineInstr &I) const;

  Register buildI32Constant(uint32_t Val, MachineInstr &I,
                            const SPIRVType *ResType = nullptr) const;

  Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildZerosValF(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildOnesVal(bool AllOnes, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool wrapIntoSpecConstantOp(MachineInstr &I,
                              SmallVector<Register> &CompositeArgs) const;
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                                                   const SPIRVSubtarget &ST,
                                                   const RegisterBankInfo &RBI)
    : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                       CodeGenCoverage *CoverageInfo,
                                       ProfileSummaryInfo *PSI,
                                       BlockFrequencyInfo *BFI) {
  MMI = &MF.getMMI().getObjFileInfo<SPIRVMachineModuleInfo>();
  MRI = &MF.getRegInfo();
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}

static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI);

// Defined in SPIRVLegalizerInfo.cpp.
extern bool isTypeFoldingSupported(unsigned Opcode);

bool SPIRVInstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  unsigned Opcode = I.getOpcode();
  // If it's not a GMIR instruction, we've selected it already.
  if (!isPreISelGenericOpcode(Opcode)) {
    if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed any more.
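      // Illustrative GMIR shape handled here (assumed, simplified):
      //   %ty = OpTypeInt 32, 0
      //   %v:id(s32) = G_ADD %a, %b
      //   %d:id(s32) = ASSIGN_TYPE %v, %ty
      // When the source opcode supports type folding, selectImpl() gets a
      // chance to select the pattern; otherwise %d is simply replaced by %v.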
      Register DstReg = I.getOperand(0).getReg();
      Register SrcReg = I.getOperand(1).getReg();
      auto *Def = MRI->getVRegDef(SrcReg);
      if (isTypeFoldingSupported(Def->getOpcode())) {
        if (MRI->getType(DstReg).isPointer())
          MRI->setType(DstReg, LLT::scalar(32));
        bool Res = selectImpl(I, *CoverageInfo);
        assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
        if (Res)
          return Res;
      }
      MRI->replaceRegWith(SrcReg, DstReg);
      I.removeFromParent();
      return true;
    } else if (I.getNumDefs() == 1) {
      // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(I.getOperand(0).getReg(), LLT::scalar(32));
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
    return false;
  }

  // Common code for getting return reg+type, and removing selected instr
  // from parent occurs here. Instr-specific selection happens in spvSelect().
  bool HasDefs = I.getNumDefs() > 0;
  Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
  assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  if (spvSelect(ResVReg, ResType, I)) {
    if (HasDefs) // Make all vregs 32 bits (for SPIR-V IDs).
      for (unsigned i = 0; i < I.getNumDefs(); ++i)
        MRI->setType(I.getOperand(i).getReg(), LLT::scalar(32));
    I.removeFromParent();
    return true;
  }
  return false;
}

bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const unsigned Opcode = I.getOpcode();
  if (isTypeFoldingSupported(Opcode) && Opcode != TargetOpcode::G_CONSTANT)
    return selectImpl(I, *CoverageInfo);
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);
  case TargetOpcode::G_FREEZE:
    return selectFreeze(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectConstVector(ResVReg, ResType, I);
  case TargetOpcode::G_SPLAT_VECTOR:
    return selectSplatVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMSET:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
  case TargetOpcode::G_SMIN:
    return selectExtInst(ResVReg, ResType, I, CL::s_min, GL::SMin);
  case TargetOpcode::G_UMIN:
    return selectExtInst(ResVReg, ResType, I, CL::u_min, GL::UMin);

  case TargetOpcode::G_SMAX:
    return selectExtInst(ResVReg, ResType, I, CL::s_max, GL::SMax);
  case TargetOpcode::G_UMAX:
    return selectExtInst(ResVReg, ResType, I, CL::u_max, GL::UMax);

  case TargetOpcode::G_FMA:
    return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma);

  case TargetOpcode::G_FPOW:
    return selectExtInst(ResVReg, ResType, I, CL::pow, GL::Pow);
  case TargetOpcode::G_FPOWI:
    return selectExtInst(ResVReg, ResType, I, CL::pown);

  case TargetOpcode::G_FEXP:
    return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
  case TargetOpcode::G_FEXP2:
    return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);

  case TargetOpcode::G_FLOG:
    return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
  case TargetOpcode::G_FLOG2:
    return selectExtInst(ResVReg, ResType, I, CL::log2, GL::Log2);
  case TargetOpcode::G_FLOG10:
    return selectLog10(ResVReg, ResType, I);

  case TargetOpcode::G_FABS:
    return selectExtInst(ResVReg, ResType, I, CL::fabs, GL::FAbs);
  case TargetOpcode::G_ABS:
    return selectExtInst(ResVReg, ResType, I, CL::s_abs, GL::SAbs);

  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmin, GL::NMin);
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMAXIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmax, GL::NMax);

  case TargetOpcode::G_FCOPYSIGN:
    return selectExtInst(ResVReg, ResType, I, CL::copysign);

  case TargetOpcode::G_FCEIL:
    return selectExtInst(ResVReg, ResType, I, CL::ceil, GL::Ceil);
  case TargetOpcode::G_FFLOOR:
    return selectExtInst(ResVReg, ResType, I, CL::floor, GL::Floor);

  case TargetOpcode::G_FCOS:
    return selectExtInst(ResVReg, ResType, I, CL::cos, GL::Cos);
  case TargetOpcode::G_FSIN:
    return selectExtInst(ResVReg, ResType, I, CL::sin, GL::Sin);

  case TargetOpcode::G_FSQRT:
    return selectExtInst(ResVReg, ResType, I, CL::sqrt, GL::Sqrt);

  case TargetOpcode::G_CTTZ:
  case TargetOpcode::G_CTTZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::ctz);
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTLZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::clz);

  case TargetOpcode::G_INTRINSIC_ROUND:
    return selectExtInst(ResVReg, ResType, I, CL::round, GL::Round);
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return selectExtInst(ResVReg, ResType, I, CL::trunc, GL::Trunc);
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FNEARBYINT:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);

  case TargetOpcode::G_SMULH:
    return selectExtInst(ResVReg, ResType, I, CL::s_mul_hi);
  case TargetOpcode::G_UMULH:
    return selectExtInst(ResVReg, ResType, I, CL::u_mul_hi);

  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectBitcast(ResVReg, ResType, I);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);
  case TargetOpcode::G_PTR_ADD: {
    // Currently, we get G_PTR_ADD only as a result of translating
    // global variables, initialized with constant expressions like GV + Const
    // (see test opencl/basic/progvar_prog_scope_init.ll).
    // TODO: extend the handler once we have other cases.
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    Register GV = I.getOperand(1).getReg();
    MachineRegisterInfo::def_instr_iterator II = MRI->def_instr_begin(GV);
    (void)II;
    assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
            (*II).getOpcode() == TargetOpcode::COPY ||
            (*II).getOpcode() == SPIRV::OpVariable) &&
           isImm(I.getOperand(2), MRI));
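    // The code below is assumed to emit, for an initializer like `GV + 16`
    // (illustrative SPIR-V disassembly):
    //   %r = OpSpecConstantOp %ptrTy InBoundsPtrAccessChain %GV %zero %c16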
    Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addImm(static_cast<uint32_t>(
                       SPIRV::Opcode::InBoundsPtrAccessChain))
                   .addUse(GV)
                   .addUse(Idx)
                   .addUse(I.getOperand(2).getReg());
    return MIB.constrainAllUses(TII, TRI, RBI);
  }

  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_ATOMICRMW_FADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT);
  case TargetOpcode::G_ATOMICRMW_FSUB:
    // Translate G_ATOMICRMW_FSUB to OpAtomicFAddEXT with negative value operand
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT,
                           SPIRV::OpFNegate);
  case TargetOpcode::G_ATOMICRMW_FMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMinEXT);
  case TargetOpcode::G_ATOMICRMW_FMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMaxEXT);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  case TargetOpcode::G_STACKSAVE:
    return selectStackSave(ResVReg, ResType, I);
  case TargetOpcode::G_STACKRESTORE:
    return selectStackRestore(I);

  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I);

  default:
    return false;
  }
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst) const {
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst,
                                             GL::GLSLExtInst GLInst) const {
  ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
                          {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
  return selectExtInst(ResVReg, ResType, I, ExtInsts);
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             const ExtInstList &Insts) const {

  for (const auto &Ex : Insts) {
    SPIRV::InstructionSet::InstructionSet Set = Ex.first;
    uint32_t Opcode = Ex.second;
    if (STI.canUseExtInstSet(Set)) {
      MachineBasicBlock &BB = *I.getParent();
      auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
                     .addDef(ResVReg)
                     .addUse(GR.getSPIRVTypeID(ResType))
                     .addImm(static_cast<uint32_t>(Set))
                     .addImm(Opcode);
      const unsigned NumOps = I.getNumOperands();
      for (unsigned i = 1; i < NumOps; ++i)
        MIB.add(I.getOperand(i));
      return MIB.constrainAllUses(TII, TRI, RBI);
    }
  }
  return false;
}
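
// For instance, G_FMA in an OpenCL environment is expected to come out of the
// helpers above as (illustrative disassembly):
//   %res = OpExtInst %f32 %opencl_std fma %a %b %c
// where %opencl_std is the id of the imported "OpenCL.std" instruction set.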

bool SPIRVInstructionSelector::selectUnOpWithSrc(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I,
                                                 Register SrcReg,
                                                 unsigned Opcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I,
                                          unsigned Opcode) const {
  return selectUnOpWithSrc(ResVReg, ResType, I, I.getOperand(1).getReg(),
                           Opcode);
}

bool SPIRVInstructionSelector::selectBitcast(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  Register OpReg = I.getOperand(1).getReg();
  SPIRVType *OpType = OpReg.isValid() ? GR.getSPIRVTypeForVReg(OpReg) : nullptr;
  if (!GR.isBitcastCompatible(ResType, OpType))
    report_fatal_error("incompatible result and operand types in a bitcast");
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}

static SPIRV::Scope::Scope getScope(SyncScope::ID Ord,
                                    SPIRVMachineModuleInfo *MMI) {
  if (Ord == SyncScope::SingleThread || Ord == MMI->Work_ItemSSID)
    return SPIRV::Scope::Invocation;
  else if (Ord == SyncScope::System || Ord == MMI->DeviceSSID)
    return SPIRV::Scope::Device;
  else if (Ord == MMI->WorkGroupSSID)
    return SPIRV::Scope::Workgroup;
  else if (Ord == MMI->AllSVMDevicesSSID)
    return SPIRV::Scope::CrossDevice;
  else if (Ord == MMI->SubGroupSSID)
    return SPIRV::Scope::Subgroup;
  else
    // The OpenCL approach is: "The functions that do not have a memory_scope
    // argument have the same semantics as the corresponding functions with the
    // memory_scope argument set to memory_scope_device." See:
    // https://registry.khronos.org/OpenCL/specs/3.0-unified/html/OpenCL_C.html#atomic-functions
    // In our case, if the scope is unknown, we assume that the SPIR-V code is
    // to be consumed in an OpenCL environment, and follow the same approach by
    // defaulting to memory_scope_device.
    return SPIRV::Scope::Device;
}
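
// Example for getScope() above: an atomic marked `syncscope("workgroup")`
// maps to SPIRV::Scope::Workgroup, while an atomic with the default (system)
// scope maps to SPIRV::Scope::Device.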

static void addMemoryOperands(MachineMemOperand *MemOp,
                              MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    MIB.addImm(SpvMemOp);
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
      MIB.addImm(MemOp->getAlign().value());
  }
}

static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (Flags & MachineMemOperand::Flags::MOVolatile)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (Flags & MachineMemOperand::Flags::MONonTemporal)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
    MIB.addImm(SpvMemOp);
}
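
// E.g. for the overloads above, a volatile load with 4-byte alignment is
// expected to be emitted as (illustrative disassembly):
//   OpLoad %ty %ptr Volatile|Aligned 4
// i.e. a single memory-operand mask, followed by the alignment literal
// whenever the Aligned bit is set.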

bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(Ptr);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
                 .addUse(Ptr)
                 .addUse(StoreVal);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackSave(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stacksave intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSaveMemoryINTEL))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stackrestore intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  if (!I.getOperand(0).isReg())
    return false;
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpRestoreMemoryINTEL))
      .addUse(I.getOperand(0).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Register SrcReg = I.getOperand(1).getReg();
  if (I.getOpcode() == TargetOpcode::G_MEMSET) {
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI);
    unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI);
    SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num, I, TII);
    Register Const = GR.getOrCreateConsIntArray(Val, I, ArrTy, TII);
    SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
        ArrTy, I, TII, SPIRV::StorageClass::UniformConstant);
    // TODO: check if we have such GV, add init, use buildGlobalVariable.
    Function &CurFunction = GR.CurMF->getFunction();
    Type *LLVMArrTy =
        ArrayType::get(IntegerType::get(CurFunction.getContext(), 8), Num);
    // Module takes ownership of the global var.
    GlobalVariable *GV = new GlobalVariable(*CurFunction.getParent(), LLVMArrTy,
                                            true, GlobalValue::InternalLinkage,
                                            Constant::getNullValue(LLVMArrTy));
    Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    GR.add(GV, GR.CurMF, VarReg);

    buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {});
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
        .addDef(VarReg)
        .addUse(GR.getSPIRVTypeID(VarTy))
        .addImm(SPIRV::StorageClass::UniformConstant)
        .addUse(Const)
        .constrainAllUses(TII, TRI, RBI);
    SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
        ValTy, I, TII, SPIRV::StorageClass::UniformConstant);
    SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    selectUnOpWithSrc(SrcReg, SourceTy, I, VarReg, SPIRV::OpBitcast);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addUse(I.getOperand(0).getReg())
                 .addUse(SrcReg)
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
    addMemoryOperands(*I.memoperands_begin(), MIB);
  bool Result = MIB.constrainAllUses(TII, TRI, RBI);
  if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg())
    BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
        .addUse(MIB->getOperand(0).getReg());
  return Result;
}
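
// Sketch of the G_MEMSET path above for `memset(p, 42, 16)` (assumed,
// illustrative): the constant fill pattern becomes an internal global
//   %arr = OpTypeArray %uchar %sixteen
//   %cst = OpConstantComposite %arr ... ; sixteen copies of 42
//   %gv  = OpVariable %ptr_UC_arr UniformConstant %cst
// which is then bitcast to an i8 pointer and copied with OpCopyMemorySized.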

bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode,
                                               unsigned NegateOpcode) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope =
      static_cast<uint32_t>(getScope(MemOp->getSyncScopeID(), MMI));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(1).getReg();
  // TODO: Changed as it's implemented in the translator. See test/atomicrmw.ll
  // auto ScSem =
  // getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr));
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);

  bool Result = false;
  Register ValueReg = I.getOperand(2).getReg();
  if (NegateOpcode != 0) {
    // Translation with negative value operand is requested
    Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    Result |= selectUnOpWithSrc(TmpReg, ResType, I, ValueReg, NegateOpcode);
    ValueReg = TmpReg;
  }

  Result |= BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(Ptr)
                .addUse(ScopeReg)
                .addUse(MemSemReg)
                .addUse(ValueReg)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}
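
// Assumed lowering of `atomicrmw add ptr %p, i32 %v seq_cst` through the
// helper above (illustrative; Device = 1, SequentiallyConsistent = 0x10):
//   %scope = OpConstant %i32 1
//   %sem   = OpConstant %i32 16
//   %res   = OpAtomicIAdd %i32 %p %scope %sem %v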

bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
  unsigned ArgI = I.getNumOperands() - 1;
  Register SrcReg =
      I.getOperand(ArgI).isReg() ? I.getOperand(ArgI).getReg() : Register(0);
  SPIRVType *DefType =
      SrcReg.isValid() ? GR.getSPIRVTypeForVReg(SrcReg) : nullptr;
  if (!DefType || DefType->getOpcode() != SPIRV::OpTypeVector)
    report_fatal_error(
        "cannot select G_UNMERGE_VALUES with a non-vector argument");

  SPIRVType *ScalarType =
      GR.getSPIRVTypeForVReg(DefType->getOperand(1).getReg());
  MachineBasicBlock &BB = *I.getParent();
  bool Res = false;
  for (unsigned i = 0; i < I.getNumDefs(); ++i) {
    Register ResVReg = I.getOperand(i).getReg();
    SPIRVType *ResType = GR.getSPIRVTypeForVReg(ResVReg);
    if (!ResType) {
      // There were no "assign type" actions; let's fix this now.
      ResType = ScalarType;
      MRI->setRegClass(ResVReg, &SPIRV::IDRegClass);
      MRI->setType(ResVReg, LLT::scalar(GR.getScalarOrVectorBitWidth(ResType)));
      GR.assignSPIRVTypeToVReg(ResType, ResVReg, *GR.CurMF);
    }
    auto MIB =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(SrcReg)
            .addImm(static_cast<int64_t>(i));
    Res |= MIB.constrainAllUses(TII, TRI, RBI);
  }
  return Res;
}

bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
  AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem, I);
  SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
  uint32_t Scope = static_cast<uint32_t>(getScope(Ord, MMI));
  Register ScopeReg = buildI32Constant(Scope, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .constrainAllUses(TII, TRI, RBI);
}
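
// E.g. `fence acquire` is assumed to select to:
//   OpMemoryBarrier %scope %semantics
// with both operands materialized as i32 constants by buildI32Constant()
// (Device scope and Acquire memory semantics here).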

bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  Register ScopeReg;
  Register MemSemEqReg;
  Register MemSemNeqReg;
  Register Ptr = I.getOperand(2).getReg();
  if (!isa<GIntrinsic>(I)) {
    assert(I.hasOneMemOperand());
    const MachineMemOperand *MemOp = *I.memoperands_begin();
    unsigned Scope =
        static_cast<uint32_t>(getScope(MemOp->getSyncScopeID(), MMI));
    ScopeReg = buildI32Constant(Scope, I);

    unsigned ScSem = static_cast<uint32_t>(
        getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
    AtomicOrdering AO = MemOp->getSuccessOrdering();
    unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
    MemSemEqReg = buildI32Constant(MemSemEq, I);
    AtomicOrdering FO = MemOp->getFailureOrdering();
    unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
    MemSemNeqReg =
        MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
  } else {
    ScopeReg = I.getOperand(5).getReg();
    MemSemEqReg = I.getOperand(6).getReg();
    MemSemNeqReg = I.getOperand(7).getReg();
  }

  Register Cmp = I.getOperand(3).getReg();
  Register Val = I.getOperand(4).getReg();
  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  Register ACmpRes = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  const DebugLoc &DL = I.getDebugLoc();
  bool Result =
      BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
          .addDef(ACmpRes)
          .addUse(GR.getSPIRVTypeID(SpvValTy))
          .addUse(Ptr)
          .addUse(ScopeReg)
          .addUse(MemSemEqReg)
          .addUse(MemSemNeqReg)
          .addUse(Val)
          .addUse(Cmp)
          .constrainAllUses(TII, TRI, RBI);
  Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
                .addDef(CmpSuccReg)
                .addUse(GR.getSPIRVTypeID(BoolTy))
                .addUse(ACmpRes)
                .addUse(Cmp)
                .constrainAllUses(TII, TRI, RBI);
  Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(TmpReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(ACmpRes)
                .addUse(GR.getOrCreateUndef(I, ResType, TII))
                .addImm(0)
                .constrainAllUses(TII, TRI, RBI);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(CmpSuccReg)
                .addUse(TmpReg)
                .addImm(1)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}
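
// G_ATOMIC_CMPXCHG produces a {value, success} pair, so the sequence above
// rebuilds it as a composite (illustrative):
//   %old = OpAtomicCompareExchange %ty %ptr %scope %semEq %semNeq %val %cmp
//   %ok  = OpIEqual %bool %old %cmp
//   %t   = OpCompositeInsert %resTy %old %undef 0
//   %res = OpCompositeInsert %resTy %ok %t 1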

static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Workgroup:
  case SPIRV::StorageClass::CrossWorkgroup:
  case SPIRV::StorageClass::Function:
    return true;
  default:
    return false;
  }
}

static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::DeviceOnlyINTEL:
  case SPIRV::StorageClass::HostOnlyINTEL:
    return true;
  default:
    return false;
  }
}

// In SPIR-V address space casting can only happen to and from the Generic
// storage class. We can also only cast Workgroup, CrossWorkgroup, or Function
// pointers to and from Generic pointers. As such, we can convert e.g. from
// Workgroup to Function by going via a Generic pointer as an intermediary. All
// other combinations can only be done by a bitcast, and are probably not safe.
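// For instance, a Workgroup -> Function cast is expected to expand to:
//   %g = OpPtrCastToGeneric %genericPtrTy %src
//   %r = OpGenericCastToPtr %dstPtrTy %g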
bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  // If the AddrSpaceCast user is single and in OpConstantComposite or
  // OpVariable, we should select OpSpecConstantOp.
  auto UIs = MRI->use_instructions(ResVReg);
  if (!UIs.empty() && ++UIs.begin() == UIs.end() &&
      (UIs.begin()->getOpcode() == SPIRV::OpConstantComposite ||
       UIs.begin()->getOpcode() == SPIRV::OpVariable ||
       isSpvIntrinsic(*UIs.begin(), Intrinsic::spv_init_global))) {
    Register NewReg = I.getOperand(1).getReg();
    MachineBasicBlock &BB = *I.getParent();
    SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII,
                                             SPIRV::StorageClass::Generic);
    bool Result =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric))
            .addUse(NewReg)
            .constrainAllUses(TII, TRI, RBI);
    return Result;
  }
  Register SrcPtr = I.getOperand(1).getReg();
  SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
  SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);
  SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResVReg);

  // don't generate a cast between identical storage classes
  if (SrcSC == DstSC)
    return true;

  // Casting from an eligible pointer to Generic.
  if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  // Casting from Generic to an eligible pointer.
  if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
  // Casting between 2 eligible pointers using Generic as an intermediary.
  if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
    Register Tmp = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
        SrcPtrTy, I, TII, SPIRV::StorageClass::Generic);
    MachineBasicBlock &BB = *I.getParent();
    const DebugLoc &DL = I.getDebugLoc();
    bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
                       .addDef(Tmp)
                       .addUse(GR.getSPIRVTypeID(GenericPtrTy))
                       .addUse(SrcPtr)
                       .constrainAllUses(TII, TRI, RBI);
    return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
                          .addDef(ResVReg)
                          .addUse(GR.getSPIRVTypeID(ResType))
                          .addUse(Tmp)
                          .constrainAllUses(TII, TRI, RBI);
  }

  // Check if instructions from the SPV_INTEL_usm_storage_classes extension may
  // be applied
  if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::CrossWorkgroup)
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpPtrCastToCrossWorkgroupINTEL);
  if (SrcSC == SPIRV::StorageClass::CrossWorkgroup && isUSMStorageClass(DstSC))
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpCrossWorkgroupCastToPtrINTEL);

  // TODO Should this case just be disallowed completely?
  // We're casting 2 other arbitrary address spaces, so have to bitcast.
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}

static unsigned getFCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::FCMP_OEQ:
    return SPIRV::OpFOrdEqual;
  case CmpInst::FCMP_OGE:
    return SPIRV::OpFOrdGreaterThanEqual;
  case CmpInst::FCMP_OGT:
    return SPIRV::OpFOrdGreaterThan;
  case CmpInst::FCMP_OLE:
    return SPIRV::OpFOrdLessThanEqual;
  case CmpInst::FCMP_OLT:
    return SPIRV::OpFOrdLessThan;
  case CmpInst::FCMP_ONE:
    return SPIRV::OpFOrdNotEqual;
  case CmpInst::FCMP_ORD:
    return SPIRV::OpOrdered;
  case CmpInst::FCMP_UEQ:
    return SPIRV::OpFUnordEqual;
  case CmpInst::FCMP_UGE:
    return SPIRV::OpFUnordGreaterThanEqual;
  case CmpInst::FCMP_UGT:
    return SPIRV::OpFUnordGreaterThan;
  case CmpInst::FCMP_ULE:
    return SPIRV::OpFUnordLessThanEqual;
  case CmpInst::FCMP_ULT:
    return SPIRV::OpFUnordLessThan;
  case CmpInst::FCMP_UNE:
    return SPIRV::OpFUnordNotEqual;
  case CmpInst::FCMP_UNO:
    return SPIRV::OpUnordered;
  default:
    llvm_unreachable("Unknown predicate type for FCmp");
  }
}

static unsigned getICmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpIEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpINotEqual;
  case CmpInst::ICMP_SGE:
    return SPIRV::OpSGreaterThanEqual;
  case CmpInst::ICMP_SGT:
    return SPIRV::OpSGreaterThan;
  case CmpInst::ICMP_SLE:
    return SPIRV::OpSLessThanEqual;
  case CmpInst::ICMP_SLT:
    return SPIRV::OpSLessThan;
  case CmpInst::ICMP_UGE:
    return SPIRV::OpUGreaterThanEqual;
  case CmpInst::ICMP_UGT:
    return SPIRV::OpUGreaterThan;
  case CmpInst::ICMP_ULE:
    return SPIRV::OpULessThanEqual;
  case CmpInst::ICMP_ULT:
    return SPIRV::OpULessThan;
  default:
    llvm_unreachable("Unknown predicate type for ICmp");
  }
}

static unsigned getPtrCmpOpcode(unsigned Pred) {
  switch (static_cast<CmpInst::Predicate>(Pred)) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpPtrEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpPtrNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for pointer comparison");
  }
}

// Return the logical operation, or abort if none exists.
static unsigned getBoolCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpLogicalEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpLogicalNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for Bool comparison");
  }
}

bool SPIRVInstructionSelector::selectAnyOrAll(Register ResVReg,
                                              const SPIRVType *ResType,
                                              MachineInstr &I,
                                              unsigned OpAnyOrAll) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();
  Register InputRegister = I.getOperand(2).getReg();
  SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);

  if (!InputType)
    report_fatal_error("Input Type could not be determined.");

  bool IsBoolTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeBool);
  bool IsVectorTy = InputType->getOpcode() == SPIRV::OpTypeVector;
  if (IsBoolTy && !IsVectorTy) {
    assert(ResVReg == I.getOperand(0).getReg());
    return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                   TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(InputRegister)
        .constrainAllUses(TII, TRI, RBI);
  }

  bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
  unsigned SpirvNotEqualId =
      IsFloatTy ? SPIRV::OpFOrdNotEqual : SPIRV::OpINotEqual;
  SPIRVType *SpvBoolScalarTy = GR.getOrCreateSPIRVBoolType(I, TII);
  SPIRVType *SpvBoolTy = SpvBoolScalarTy;
  Register NotEqualReg = ResVReg;

  if (IsVectorTy) {
    NotEqualReg = IsBoolTy ? InputRegister
                           : MRI->createVirtualRegister(&SPIRV::IDRegClass);
    const unsigned NumElts = InputType->getOperand(2).getImm();
    SpvBoolTy = GR.getOrCreateSPIRVVectorType(SpvBoolTy, NumElts, I, TII);
  }

  if (!IsBoolTy) {
    Register ConstZeroReg =
        IsFloatTy ? buildZerosValF(InputType, I) : buildZerosVal(InputType, I);

    BuildMI(BB, I, I.getDebugLoc(), TII.get(SpirvNotEqualId))
        .addDef(NotEqualReg)
        .addUse(GR.getSPIRVTypeID(SpvBoolTy))
        .addUse(InputRegister)
        .addUse(ConstZeroReg)
        .constrainAllUses(TII, TRI, RBI);
  }

  if (!IsVectorTy)
    return true;

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(OpAnyOrAll))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(SpvBoolScalarTy))
      .addUse(NotEqualReg)
      .constrainAllUses(TII, TRI, RBI);
}
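
// E.g. `any(<4 x i32> %v)` is assumed to lower through the helper above as:
//   %ne  = OpINotEqual %v4bool %v %zeroVec
//   %res = OpAny %bool %ne
// while a scalar boolean input degenerates to a plain COPY.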

bool SPIRVInstructionSelector::selectAll(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAll);
}

bool SPIRVInstructionSelector::selectAny(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAny);
}

bool SPIRVInstructionSelector::selectFmix(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {

  assert(I.getNumOperands() == 5);
  assert(I.getOperand(2).isReg());
  assert(I.getOperand(3).isReg());
  assert(I.getOperand(4).isReg());
  MachineBasicBlock &BB = *I.getParent();

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(GL::FMix)
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .addUse(I.getOperand(4).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectFreeze(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I) const {
1283   // There is no way to implement `freeze` correctly without support on the
1284   // SPIR-V standard side, but we may at least handle the simple (static) case
1285   // when the presence of an undef/poison value is obvious. The main benefit
1286   // of even incomplete `freeze` support is that it keeps translation from
1287   // crashing due to missing support in legalization and instruction selection.
1288   if (!I.getOperand(0).isReg() || !I.getOperand(1).isReg())
1289     return false;
1290   Register OpReg = I.getOperand(1).getReg();
1291   if (MachineInstr *Def = MRI->getVRegDef(OpReg)) {
1292     Register Reg;
1293     switch (Def->getOpcode()) {
1294     case SPIRV::ASSIGN_TYPE:
1295       if (MachineInstr *AssignToDef =
1296               MRI->getVRegDef(Def->getOperand(1).getReg())) {
1297         if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1298           Reg = Def->getOperand(2).getReg();
1299       }
1300       break;
1301     case SPIRV::OpUndef:
1302       Reg = Def->getOperand(1).getReg();
1303       break;
1304     }
1305     unsigned DestOpCode;
1306     if (Reg.isValid()) {
1307       DestOpCode = SPIRV::OpConstantNull;
1308     } else {
1309       DestOpCode = TargetOpcode::COPY;
1310       Reg = OpReg;
1311     }
1312     return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(DestOpCode))
1313         .addDef(I.getOperand(0).getReg())
1314         .addUse(Reg)
1315         .constrainAllUses(TII, TRI, RBI);
1316   }
1317   return false;
1318 }
1319 
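// Lower a G_BUILD_VECTOR whose operands are all constants to
// OpConstantComposite; e.g. a constant <2 x i32> <1, 2> becomes roughly
// (illustrative sketch):
//   %v = OpConstantComposite %v2uint %uint_1 %uint_2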
1320 bool SPIRVInstructionSelector::selectConstVector(Register ResVReg,
1321                                                  const SPIRVType *ResType,
1322                                                  MachineInstr &I) const {
1323   // TODO: only const case is supported for now.
1324   assert(std::all_of(
1325       I.operands_begin(), I.operands_end(), [this](const MachineOperand &MO) {
1326         if (MO.isDef())
1327           return true;
1328         if (!MO.isReg())
1329           return false;
1330         SPIRVType *ConstTy = this->MRI->getVRegDef(MO.getReg());
1331         assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
1332                ConstTy->getOperand(1).isReg());
1333         Register ConstReg = ConstTy->getOperand(1).getReg();
1334         const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
1335         assert(Const);
1336         return (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
1337                 Const->getOpcode() == TargetOpcode::G_FCONSTANT);
1338       }));
1339 
1340   auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
1341                      TII.get(SPIRV::OpConstantComposite))
1342                  .addDef(ResVReg)
1343                  .addUse(GR.getSPIRVTypeID(ResType));
1344   for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
1345     MIB.addUse(I.getOperand(i).getReg());
1346   return MIB.constrainAllUses(TII, TRI, RBI);
1347 }
1348 
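// Extract the constant element count from an OpTypeArray's length operand,
// looking through the ASSIGN_TYPE wrapper to the underlying G_CONSTANT;
// returns 0 if no such constant is found.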
1349 static unsigned getArrayComponentCount(MachineRegisterInfo *MRI,
1350                                        const SPIRVType *ResType) {
1351   Register OpReg = ResType->getOperand(2).getReg();
1352   SPIRVType *OpDef = MRI->getVRegDef(OpReg);
1353   if (!OpDef)
1354     return 0;
1355   if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
1356       OpDef->getOperand(1).isReg()) {
1357     if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
1358       OpDef = RefDef;
1359   }
1360   unsigned N = OpDef->getOpcode() == TargetOpcode::G_CONSTANT
1361                    ? OpDef->getOperand(1).getCImm()->getValue().getZExtValue()
1362                    : 0;
1363   return N;
1364 }
1365 
1366 // Return true if the defining instruction (looking through ASSIGN_TYPE) is a constant
1367 static bool isConstReg(MachineRegisterInfo *MRI, SPIRVType *OpDef) {
1368   if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
1369       OpDef->getOperand(1).isReg()) {
1370     if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
1371       OpDef = RefDef;
1372   }
1373   return OpDef->getOpcode() == TargetOpcode::G_CONSTANT ||
1374          OpDef->getOpcode() == TargetOpcode::G_FCONSTANT;
1375 }
1376 
1377 // Return true if the virtual register represents a constant
1378 static bool isConstReg(MachineRegisterInfo *MRI, Register OpReg) {
1379   if (SPIRVType *OpDef = MRI->getVRegDef(OpReg))
1380     return isConstReg(MRI, OpDef);
1381   return false;
1382 }
1383 
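// Lower G_SPLAT_VECTOR by repeating the single scalar operand once per
// element: OpConstantComposite for a constant operand, OpCompositeConstruct
// otherwise. A constant 4-way splat of %c is roughly (illustrative sketch):
//   %v = OpConstantComposite %v4uint %c %c %c %c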
1384 bool SPIRVInstructionSelector::selectSplatVector(Register ResVReg,
1385                                                  const SPIRVType *ResType,
1386                                                  MachineInstr &I) const {
1387   unsigned N = 0;
1388   if (ResType->getOpcode() == SPIRV::OpTypeVector)
1389     N = GR.getScalarOrVectorComponentCount(ResType);
1390   else if (ResType->getOpcode() == SPIRV::OpTypeArray)
1391     N = getArrayComponentCount(MRI, ResType);
1392   else
1393     report_fatal_error("Cannot select G_SPLAT_VECTOR with a non-vector result");
1394 
1395   unsigned OpIdx = I.getNumExplicitDefs();
1396   if (!I.getOperand(OpIdx).isReg())
1397     report_fatal_error("Unexpected argument in G_SPLAT_VECTOR");
1398 
1399   // Check whether we may construct a constant vector.
1400   Register OpReg = I.getOperand(OpIdx).getReg();
1401   bool IsConst = isConstReg(MRI, OpReg);
1402 
1403   if (!IsConst && N < 2)
1404     report_fatal_error(
1405         "There must be at least two constituent operands in a vector");
1406 
1407   auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
1408                      TII.get(IsConst ? SPIRV::OpConstantComposite
1409                                      : SPIRV::OpCompositeConstruct))
1410                  .addDef(ResVReg)
1411                  .addUse(GR.getSPIRVTypeID(ResType));
1412   for (unsigned i = 0; i < N; ++i)
1413     MIB.addUse(OpReg);
1414   return MIB.constrainAllUses(TII, TRI, RBI);
1415 }
1416 
1417 bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
1418                                          const SPIRVType *ResType,
1419                                          unsigned CmpOpc,
1420                                          MachineInstr &I) const {
1421   Register Cmp0 = I.getOperand(2).getReg();
1422   Register Cmp1 = I.getOperand(3).getReg();
1423   assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
1424              GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
1425          "CMP operands should have the same type");
1426   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
1427       .addDef(ResVReg)
1428       .addUse(GR.getSPIRVTypeID(ResType))
1429       .addUse(Cmp0)
1430       .addUse(Cmp1)
1431       .constrainAllUses(TII, TRI, RBI);
1432 }
1433 
1434 bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
1435                                           const SPIRVType *ResType,
1436                                           MachineInstr &I) const {
1437   auto Pred = I.getOperand(1).getPredicate();
1438   unsigned CmpOpc;
1439 
1440   Register CmpOperand = I.getOperand(2).getReg();
1441   if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
1442     CmpOpc = getPtrCmpOpcode(Pred);
1443   else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
1444     CmpOpc = getBoolCmpOpcode(Pred);
1445   else
1446     CmpOpc = getICmpOpcode(Pred);
1447   return selectCmp(ResVReg, ResType, CmpOpc, I);
1448 }
1449 
1450 void SPIRVInstructionSelector::renderFImm32(MachineInstrBuilder &MIB,
1451                                             const MachineInstr &I,
1452                                             int OpIdx) const {
1453   assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
1454          "Expected G_FCONSTANT");
1455   const ConstantFP *FPImm = I.getOperand(1).getFPImm();
1456   addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
1457 }
1458 
1459 void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
1460                                            const MachineInstr &I,
1461                                            int OpIdx) const {
1462   assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1463          "Expected G_CONSTANT");
1464   addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
1465 }
1466 
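// Materialize a 32-bit integer constant, reusing the register already
// recorded in the global registry for the same ConstantInt if there is one;
// zero is emitted as OpConstantNull, any other value as OpConstantI.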
1467 Register
1468 SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
1469                                            const SPIRVType *ResType) const {
1470   Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
1471   const SPIRVType *SpvI32Ty =
1472       ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
1473   // Find a constant in DT or build a new one.
1474   auto ConstInt = ConstantInt::get(LLVMTy, Val);
1475   Register NewReg = GR.find(ConstInt, GR.CurMF);
1476   if (!NewReg.isValid()) {
1477     NewReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
1478     GR.add(ConstInt, GR.CurMF, NewReg);
1479     MachineInstr *MI;
1480     MachineBasicBlock &BB = *I.getParent();
1481     if (Val == 0) {
1482       MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1483                .addDef(NewReg)
1484                .addUse(GR.getSPIRVTypeID(SpvI32Ty));
1485     } else {
1486       MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
1487                .addDef(NewReg)
1488                .addUse(GR.getSPIRVTypeID(SpvI32Ty))
1489                .addImm(APInt(32, Val).getZExtValue());
1490     }
1491     constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
1492   }
1493   return NewReg;
1494 }
1495 
1496 bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
1497                                           const SPIRVType *ResType,
1498                                           MachineInstr &I) const {
1499   unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
1500   return selectCmp(ResVReg, ResType, CmpOp, I);
1501 }
1502 
1503 Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
1504                                                  MachineInstr &I) const {
1505   // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
1506   bool ZeroAsNull = STI.isOpenCLEnv();
1507   if (ResType->getOpcode() == SPIRV::OpTypeVector)
1508     return GR.getOrCreateConstVector(0UL, I, ResType, TII, ZeroAsNull);
1509   return GR.getOrCreateConstInt(0, I, ResType, TII, ZeroAsNull);
1510 }
1511 
1512 static APFloat getZeroFP(const Type *LLVMFloatTy) {
1513   if (!LLVMFloatTy)
1514     return APFloat::getZero(APFloat::IEEEsingle());
1515   switch (LLVMFloatTy->getScalarType()->getTypeID()) {
1516   case Type::HalfTyID:
1517     return APFloat::getZero(APFloat::IEEEhalf());
1518   default:
1519   case Type::FloatTyID:
1520     return APFloat::getZero(APFloat::IEEEsingle());
1521   case Type::DoubleTyID:
1522     return APFloat::getZero(APFloat::IEEEdouble());
1523   }
1524 }
1525 
1526 Register SPIRVInstructionSelector::buildZerosValF(const SPIRVType *ResType,
1527                                                   MachineInstr &I) const {
1528   // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
1529   bool ZeroAsNull = STI.isOpenCLEnv();
1530   APFloat VZero = getZeroFP(GR.getTypeForSPIRVType(ResType));
1531   if (ResType->getOpcode() == SPIRV::OpTypeVector)
1532     return GR.getOrCreateConstVector(VZero, I, ResType, TII, ZeroAsNull);
1533   return GR.getOrCreateConstFP(VZero, I, ResType, TII, ZeroAsNull);
1534 }
1535 
1536 Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
1537                                                 const SPIRVType *ResType,
1538                                                 MachineInstr &I) const {
1539   unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
1540   APInt One =
1541       AllOnes ? APInt::getAllOnes(BitWidth) : APInt::getOneBitSet(BitWidth, 0);
1542   if (ResType->getOpcode() == SPIRV::OpTypeVector)
1543     return GR.getOrCreateConstVector(One.getZExtValue(), I, ResType, TII);
1544   return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII);
1545 }
1546 
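// Extend a boolean by selecting between constants: the "true" value is
// all-ones for a signed extension and 1 otherwise; roughly (illustrative
// sketch):
//   %res = OpSelect %int %cond %ones %zero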
1547 bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
1548                                             const SPIRVType *ResType,
1549                                             MachineInstr &I,
1550                                             bool IsSigned) const {
1551   // To extend a bool, we need to use OpSelect between constants.
1552   Register ZeroReg = buildZerosVal(ResType, I);
1553   Register OneReg = buildOnesVal(IsSigned, ResType, I);
1554   bool IsScalarBool =
1555       GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
1556   unsigned Opcode =
1557       IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
1558   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
1559       .addDef(ResVReg)
1560       .addUse(GR.getSPIRVTypeID(ResType))
1561       .addUse(I.getOperand(1).getReg())
1562       .addUse(OneReg)
1563       .addUse(ZeroReg)
1564       .constrainAllUses(TII, TRI, RBI);
1565 }
1566 
1567 bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
1568                                           const SPIRVType *ResType,
1569                                           MachineInstr &I, bool IsSigned,
1570                                           unsigned Opcode) const {
1571   Register SrcReg = I.getOperand(1).getReg();
1572   // We can convert a bool value directly to a float type without OpConvert*ToF;
1573   // however, the translator generates OpSelect+OpConvert*ToF, so we do the same.
1574   if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
1575     unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
1576     SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
1577     if (ResType->getOpcode() == SPIRV::OpTypeVector) {
1578       const unsigned NumElts = ResType->getOperand(2).getImm();
1579       TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
1580     }
1581     SrcReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1582     selectSelect(SrcReg, TmpType, I, false);
1583   }
1584   return selectUnOpWithSrc(ResVReg, ResType, I, SrcReg, Opcode);
1585 }
1586 
1587 bool SPIRVInstructionSelector::selectExt(Register ResVReg,
1588                                          const SPIRVType *ResType,
1589                                          MachineInstr &I, bool IsSigned) const {
1590   Register SrcReg = I.getOperand(1).getReg();
1591   if (GR.isScalarOrVectorOfType(SrcReg, SPIRV::OpTypeBool))
1592     return selectSelect(ResVReg, ResType, I, IsSigned);
1593 
1594   SPIRVType *SrcType = GR.getSPIRVTypeForVReg(SrcReg);
1595   if (SrcType == ResType)
1596     return BuildMI(*I.getParent(), I, I.getDebugLoc(),
1597                    TII.get(TargetOpcode::COPY))
1598         .addDef(ResVReg)
1599         .addUse(SrcReg)
1600         .constrainAllUses(TII, TRI, RBI);
1601 
1602   unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
1603   return selectUnOp(ResVReg, ResType, I, Opcode);
1604 }
1605 
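// Truncate an integer to a bool by masking out the low bit and comparing it
// to zero; for an i32 source this is roughly (illustrative sketch):
//   %bit = OpBitwiseAnd %uint %x %uint_1
//   %res = OpINotEqual %bool %bit %uint_0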
1606 bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
1607                                                Register ResVReg,
1608                                                MachineInstr &I,
1609                                                const SPIRVType *IntTy,
1610                                                const SPIRVType *BoolTy) const {
1611   // To truncate to a bool, we bitwise-AND with 1 and compare to zero via OpINotEqual.
1612   Register BitIntReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1613   bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
1614   unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
1615   Register Zero = buildZerosVal(IntTy, I);
1616   Register One = buildOnesVal(false, IntTy, I);
1617   MachineBasicBlock &BB = *I.getParent();
1618   BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
1619       .addDef(BitIntReg)
1620       .addUse(GR.getSPIRVTypeID(IntTy))
1621       .addUse(IntReg)
1622       .addUse(One)
1623       .constrainAllUses(TII, TRI, RBI);
1624   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
1625       .addDef(ResVReg)
1626       .addUse(GR.getSPIRVTypeID(BoolTy))
1627       .addUse(BitIntReg)
1628       .addUse(Zero)
1629       .constrainAllUses(TII, TRI, RBI);
1630 }
1631 
1632 bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
1633                                            const SPIRVType *ResType,
1634                                            MachineInstr &I) const {
1635   Register IntReg = I.getOperand(1).getReg();
1636   const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
1637   if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool))
1638     return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType);
1639   if (ArgType == ResType)
1640     return BuildMI(*I.getParent(), I, I.getDebugLoc(),
1641                    TII.get(TargetOpcode::COPY))
1642         .addDef(ResVReg)
1643         .addUse(IntReg)
1644         .constrainAllUses(TII, TRI, RBI);
1645   bool IsSigned = GR.isScalarOrVectorSigned(ResType);
1646   unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
1647   return selectUnOp(ResVReg, ResType, I, Opcode);
1648 }
1649 
1650 bool SPIRVInstructionSelector::selectConst(Register ResVReg,
1651                                            const SPIRVType *ResType,
1652                                            const APInt &Imm,
1653                                            MachineInstr &I) const {
1654   unsigned TyOpcode = ResType->getOpcode();
1655   assert(TyOpcode != SPIRV::OpTypePointer || Imm.isZero());
1656   MachineBasicBlock &BB = *I.getParent();
1657   if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
1658       Imm.isZero())
1659     return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1660         .addDef(ResVReg)
1661         .addUse(GR.getSPIRVTypeID(ResType))
1662         .constrainAllUses(TII, TRI, RBI);
1663   if (TyOpcode == SPIRV::OpTypeInt) {
1664     assert(Imm.getBitWidth() <= 64 && "Unsupported integer width!");
1665     Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII);
1666     if (Reg == ResVReg)
1667       return true;
1668     return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
1669         .addDef(ResVReg)
1670         .addUse(Reg)
1671         .constrainAllUses(TII, TRI, RBI);
1672   }
1673   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
1674                  .addDef(ResVReg)
1675                  .addUse(GR.getSPIRVTypeID(ResType));
1676   // <=32-bit integers should be caught by the sdag pattern.
1677   assert(Imm.getBitWidth() > 32);
1678   addNumImm(Imm, MIB);
1679   return MIB.constrainAllUses(TII, TRI, RBI);
1680 }
1681 
1682 bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
1683                                              const SPIRVType *ResType,
1684                                              MachineInstr &I) const {
1685   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
1686       .addDef(ResVReg)
1687       .addUse(GR.getSPIRVTypeID(ResType))
1688       .constrainAllUses(TII, TRI, RBI);
1689 }
1690 
1691 static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
1692   assert(MO.isReg());
1693   const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
1694   if (TypeInst->getOpcode() != SPIRV::ASSIGN_TYPE)
1695     return false;
1696   assert(TypeInst->getOperand(1).isReg());
1697   MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
1698   return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
1699 }
1700 
1701 static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
1702   const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
1703   MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
1704   assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT);
1705   return ImmInst->getOperand(1).getCImm()->getZExtValue();
1706 }
1707 
1708 bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
1709                                                const SPIRVType *ResType,
1710                                                MachineInstr &I) const {
1711   MachineBasicBlock &BB = *I.getParent();
1712   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
1713                  .addDef(ResVReg)
1714                  .addUse(GR.getSPIRVTypeID(ResType))
1715                  // object to insert
1716                  .addUse(I.getOperand(3).getReg())
1717                  // composite to insert into
1718                  .addUse(I.getOperand(2).getReg());
1719   for (unsigned i = 4; i < I.getNumOperands(); i++)
1720     MIB.addImm(foldImm(I.getOperand(i), MRI));
1721   return MIB.constrainAllUses(TII, TRI, RBI);
1722 }
1723 
1724 bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
1725                                                 const SPIRVType *ResType,
1726                                                 MachineInstr &I) const {
1727   MachineBasicBlock &BB = *I.getParent();
1728   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
1729                  .addDef(ResVReg)
1730                  .addUse(GR.getSPIRVTypeID(ResType))
1731                  .addUse(I.getOperand(2).getReg());
1732   for (unsigned i = 3; i < I.getNumOperands(); i++)
1733     MIB.addImm(foldImm(I.getOperand(i), MRI));
1734   return MIB.constrainAllUses(TII, TRI, RBI);
1735 }
1736 
1737 bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
1738                                                const SPIRVType *ResType,
1739                                                MachineInstr &I) const {
1740   if (isImm(I.getOperand(4), MRI))
1741     return selectInsertVal(ResVReg, ResType, I);
1742   MachineBasicBlock &BB = *I.getParent();
1743   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
1744       .addDef(ResVReg)
1745       .addUse(GR.getSPIRVTypeID(ResType))
1746       .addUse(I.getOperand(2).getReg())
1747       .addUse(I.getOperand(3).getReg())
1748       .addUse(I.getOperand(4).getReg())
1749       .constrainAllUses(TII, TRI, RBI);
1750 }
1751 
1752 bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
1753                                                 const SPIRVType *ResType,
1754                                                 MachineInstr &I) const {
1755   if (isImm(I.getOperand(3), MRI))
1756     return selectExtractVal(ResVReg, ResType, I);
1757   MachineBasicBlock &BB = *I.getParent();
1758   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
1759       .addDef(ResVReg)
1760       .addUse(GR.getSPIRVTypeID(ResType))
1761       .addUse(I.getOperand(2).getReg())
1762       .addUse(I.getOperand(3).getReg())
1763       .constrainAllUses(TII, TRI, RBI);
1764 }
1765 
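// Lower llvm.spv.gep to an access-chain instruction. On OpenCL the first GEP
// index becomes the Element operand of Op[InBounds]PtrAccessChain; on Vulkan
// it is skipped, since Op[InBounds]AccessChain takes no Element operand. An
// inbounds GEP on OpenCL is roughly (illustrative sketch):
//   %ptr = OpInBoundsPtrAccessChain %ptr_ty %base %element %idx0 ...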
1766 bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
1767                                          const SPIRVType *ResType,
1768                                          MachineInstr &I) const {
1769   const bool IsGEPInBounds = I.getOperand(2).getImm();
1770 
1771   // OpAccessChain could be used for OpenCL, but the SPIRV-LLVM Translator only
1772   // relies on PtrAccessChain, so we'll try not to deviate. For Vulkan however,
1773   // we have to use Op[InBounds]AccessChain.
1774   const unsigned Opcode = STI.isVulkanEnv()
1775                               ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
1776                                                : SPIRV::OpAccessChain)
1777                               : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain
1778                                                : SPIRV::OpPtrAccessChain);
1779 
1780   auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
1781                  .addDef(ResVReg)
1782                  .addUse(GR.getSPIRVTypeID(ResType))
1783                  // Object to get a pointer to.
1784                  .addUse(I.getOperand(3).getReg());
1785   // Adding indices.
1786   const unsigned StartingIndex =
1787       (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
1788           ? 5
1789           : 4;
1790   for (unsigned i = StartingIndex; i < I.getNumExplicitOperands(); ++i)
1791     Res.addUse(I.getOperand(i).getReg());
1792   return Res.constrainAllUses(TII, TRI, RBI);
1793 }
1794 
1795 // Maybe wrap a value into OpSpecConstantOp
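// for operands of spv_const_composite that are not already constants. An
// existing wrapper recorded in the global registry is reused; otherwise a new
// one is emitted, roughly (illustrative sketch):
//   %wrap = OpSpecConstantOp %op_type Bitcast %op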
1796 bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
1797     MachineInstr &I, SmallVector<Register> &CompositeArgs) const {
1798   bool Result = true;
1799   unsigned Lim = I.getNumExplicitOperands();
1800   for (unsigned i = I.getNumExplicitDefs() + 1; i < Lim; ++i) {
1801     Register OpReg = I.getOperand(i).getReg();
1802     SPIRVType *OpDefine = MRI->getVRegDef(OpReg);
1803     SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
1804     if (!OpDefine || !OpType || isConstReg(MRI, OpDefine) ||
1805         OpDefine->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST) {
1806       // The case of G_ADDRSPACE_CAST inside spv_const_composite() is processed
1807       // by selectAddrSpaceCast()
1808       CompositeArgs.push_back(OpReg);
1809       continue;
1810     }
1811     MachineFunction *MF = I.getMF();
1812     Register WrapReg = GR.find(OpDefine, MF);
1813     if (WrapReg.isValid()) {
1814       CompositeArgs.push_back(WrapReg);
1815       continue;
1816     }
1817     // Create a new register for the wrapper
1818     WrapReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1819     GR.add(OpDefine, MF, WrapReg);
1820     CompositeArgs.push_back(WrapReg);
1821     // Decorate the wrapper register and generate a new instruction
1822     MRI->setType(WrapReg, LLT::pointer(0, 32));
1823     GR.assignSPIRVTypeToVReg(OpType, WrapReg, *MF);
1824     MachineBasicBlock &BB = *I.getParent();
1825     Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
1826                  .addDef(WrapReg)
1827                  .addUse(GR.getSPIRVTypeID(OpType))
1828                  .addImm(static_cast<uint32_t>(SPIRV::Opcode::Bitcast))
1829                  .addUse(OpReg)
1830                  .constrainAllUses(TII, TRI, RBI);
1831     if (!Result)
1832       break;
1833   }
1834   return Result;
1835 }
1836 
1837 bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
1838                                                const SPIRVType *ResType,
1839                                                MachineInstr &I) const {
1840   MachineBasicBlock &BB = *I.getParent();
1841   Intrinsic::ID IID = cast<GIntrinsic>(I).getIntrinsicID();
1842   switch (IID) {
1843   case Intrinsic::spv_load:
1844     return selectLoad(ResVReg, ResType, I);
1845   case Intrinsic::spv_store:
1846     return selectStore(I);
1847   case Intrinsic::spv_extractv:
1848     return selectExtractVal(ResVReg, ResType, I);
1849   case Intrinsic::spv_insertv:
1850     return selectInsertVal(ResVReg, ResType, I);
1851   case Intrinsic::spv_extractelt:
1852     return selectExtractElt(ResVReg, ResType, I);
1853   case Intrinsic::spv_insertelt:
1854     return selectInsertElt(ResVReg, ResType, I);
1855   case Intrinsic::spv_gep:
1856     return selectGEP(ResVReg, ResType, I);
1857   case Intrinsic::spv_unref_global:
1858   case Intrinsic::spv_init_global: {
1859     MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
1860     MachineInstr *Init = I.getNumExplicitOperands() > 2
1861                              ? MRI->getVRegDef(I.getOperand(2).getReg())
1862                              : nullptr;
1863     assert(MI);
1864     return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
1865   }
1866   case Intrinsic::spv_undef: {
1867     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
1868                    .addDef(ResVReg)
1869                    .addUse(GR.getSPIRVTypeID(ResType));
1870     return MIB.constrainAllUses(TII, TRI, RBI);
1871   }
1872   case Intrinsic::spv_const_composite: {
1873     // If no values are attached, the composite is null constant.
1874     bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
1875     // Select a proper instruction.
1876     unsigned Opcode = SPIRV::OpConstantNull;
1877     SmallVector<Register> CompositeArgs;
1878     if (!IsNull) {
1879       Opcode = SPIRV::OpConstantComposite;
1880       if (!wrapIntoSpecConstantOp(I, CompositeArgs))
1881         return false;
1882     }
1883     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
1884                    .addDef(ResVReg)
1885                    .addUse(GR.getSPIRVTypeID(ResType));
1886     // Skip the type MD node that we already used when generating assign.type.
1887     if (!IsNull) {
1888       for (Register OpReg : CompositeArgs)
1889         MIB.addUse(OpReg);
1890     }
1891     return MIB.constrainAllUses(TII, TRI, RBI);
1892   }
1893   case Intrinsic::spv_assign_name: {
1894     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
1895     MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
1896     for (unsigned i = I.getNumExplicitDefs() + 2;
1897          i < I.getNumExplicitOperands(); ++i) {
1898       MIB.addImm(I.getOperand(i).getImm());
1899     }
1900     return MIB.constrainAllUses(TII, TRI, RBI);
1901   }
1902   case Intrinsic::spv_switch: {
1903     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
1904     for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
1905       if (I.getOperand(i).isReg())
1906         MIB.addReg(I.getOperand(i).getReg());
1907       else if (I.getOperand(i).isCImm())
1908         addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
1909       else if (I.getOperand(i).isMBB())
1910         MIB.addMBB(I.getOperand(i).getMBB());
1911       else
1912         llvm_unreachable("Unexpected OpSwitch operand");
1913     }
1914     return MIB.constrainAllUses(TII, TRI, RBI);
1915   }
1916   case Intrinsic::spv_cmpxchg:
1917     return selectAtomicCmpXchg(ResVReg, ResType, I);
1918   case Intrinsic::spv_unreachable:
1919     BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable));
1920     break;
1921   case Intrinsic::spv_alloca:
1922     return selectFrameIndex(ResVReg, ResType, I);
1923   case Intrinsic::spv_alloca_array:
1924     return selectAllocaArray(ResVReg, ResType, I);
1925   case Intrinsic::spv_assume:
1926     if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
1927       BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
1928           .addUse(I.getOperand(1).getReg());
1929     break;
1930   case Intrinsic::spv_expect:
1931     if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
1932       BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
1933           .addDef(ResVReg)
1934           .addUse(GR.getSPIRVTypeID(ResType))
1935           .addUse(I.getOperand(2).getReg())
1936           .addUse(I.getOperand(3).getReg());
1937     break;
1938   case Intrinsic::spv_thread_id:
1939     return selectSpvThreadId(ResVReg, ResType, I);
1940   case Intrinsic::spv_all:
1941     return selectAll(ResVReg, ResType, I);
1942   case Intrinsic::spv_any:
1943     return selectAny(ResVReg, ResType, I);
1944   case Intrinsic::spv_lerp:
1945     return selectFmix(ResVReg, ResType, I);
1946   case Intrinsic::spv_lifetime_start:
1947   case Intrinsic::spv_lifetime_end: {
1948     unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
1949                                                        : SPIRV::OpLifetimeStop;
1950     int64_t Size = I.getOperand(I.getNumExplicitDefs() + 1).getImm();
1951     Register PtrReg = I.getOperand(I.getNumExplicitDefs() + 2).getReg();
1952     unsigned PointeeOpType = GR.getPointeeTypeOp(PtrReg);
1953     bool IsNonvoidPtr = PointeeOpType != 0 && PointeeOpType != SPIRV::OpTypeVoid;
1954     if (Size == -1 || IsNonvoidPtr)
1955       Size = 0;
1956     BuildMI(BB, I, I.getDebugLoc(), TII.get(Op)).addUse(PtrReg).addImm(Size);
1957   } break;
1958   default: {
1959     std::string DiagMsg;
1960     raw_string_ostream OS(DiagMsg);
1961     I.print(OS);
1962     DiagMsg = "Intrinsic selection not implemented: " + DiagMsg;
1963     report_fatal_error(DiagMsg.c_str(), false);
1964   }
1965   }
1966   return true;
1967 }
1968 
1969 bool SPIRVInstructionSelector::selectAllocaArray(Register ResVReg,
1970                                                  const SPIRVType *ResType,
1971                                                  MachineInstr &I) const {
1972   // An allocation size parameter other than 1 was passed to the allocation
1973   // instruction, so lower it to a variable-length array.
1974   MachineBasicBlock &BB = *I.getParent();
1975   return BuildMI(BB, I, I.getDebugLoc(),
1976                  TII.get(SPIRV::OpVariableLengthArrayINTEL))
1977       .addDef(ResVReg)
1978       .addUse(GR.getSPIRVTypeID(ResType))
1979       .addUse(I.getOperand(2).getReg())
1980       .constrainAllUses(TII, TRI, RBI);
1981 }
1982 
1983 bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
1984                                                 const SPIRVType *ResType,
1985                                                 MachineInstr &I) const {
1986   // Change order of instructions if needed: all OpVariable instructions in a
1987   // function must be the first instructions in the first block
1988   MachineFunction *MF = I.getParent()->getParent();
1989   MachineBasicBlock *MBB = &MF->front();
1990   auto It = MBB->SkipPHIsAndLabels(MBB->begin()), E = MBB->end();
1991   bool IsHeader = false;
1992   unsigned Opcode;
1993   for (; It != E && It != I; ++It) {
1994     Opcode = It->getOpcode();
1995     if (Opcode == SPIRV::OpFunction || Opcode == SPIRV::OpFunctionParameter) {
1996       IsHeader = true;
1997     } else if (IsHeader &&
1998                !(Opcode == SPIRV::ASSIGN_TYPE || Opcode == SPIRV::OpLabel)) {
1999       ++It;
2000       break;
2001     }
2002   }
2003   return BuildMI(*MBB, It, It->getDebugLoc(), TII.get(SPIRV::OpVariable))
2004       .addDef(ResVReg)
2005       .addUse(GR.getSPIRVTypeID(ResType))
2006       .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
2007       .constrainAllUses(TII, TRI, RBI);
2008 }
2009 
2010 bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
2011   // InstructionSelector walks backwards through the instructions. We can use
2012   // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
2013   // first, so we can generate an OpBranchConditional here. If there is no
2014   // G_BRCOND, we just use OpBranch for a regular unconditional branch.
2015   const MachineInstr *PrevI = I.getPrevNode();
2016   MachineBasicBlock &MBB = *I.getParent();
2017   if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
2018     return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
2019         .addUse(PrevI->getOperand(0).getReg())
2020         .addMBB(PrevI->getOperand(1).getMBB())
2021         .addMBB(I.getOperand(0).getMBB())
2022         .constrainAllUses(TII, TRI, RBI);
2023   }
2024   return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
2025       .addMBB(I.getOperand(0).getMBB())
2026       .constrainAllUses(TII, TRI, RBI);
2027 }
2028 
2029 bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
2030   // InstructionSelector walks backwards through the instructions. For an
2031   // explicit conditional branch with no fallthrough, we use both a G_BR and a
2032   // G_BRCOND to create an OpBranchConditional. We should hit G_BR first, and
2033   // generate the OpBranchConditional in selectBranch above.
2034   //
2035   // If an OpBranchConditional has been generated, we simply return, as the work
2036   // is already done. If there is no OpBranchConditional, LLVM must be relying on
2037   // implicit fallthrough to the next basic block, so we need to create an
2038   // OpBranchConditional with an explicit "false" argument pointing to the next
2039   // basic block that LLVM would fall through to.
2040   const MachineInstr *NextI = I.getNextNode();
2041   // Check if this has already been successfully selected.
2042   if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
2043     return true;
2044   // Must be relying on implicit block fallthrough, so generate an
2045   // OpBranchConditional with the "next" basic block as the "false" target.
2046   MachineBasicBlock &MBB = *I.getParent();
2047   unsigned NextMBBNum = MBB.getNextNode()->getNumber();
2048   MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
2049   return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
2050       .addUse(I.getOperand(0).getReg())
2051       .addMBB(I.getOperand(1).getMBB())
2052       .addMBB(NextMBB)
2053       .constrainAllUses(TII, TRI, RBI);
2054 }
2055 
2056 bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
2057                                          const SPIRVType *ResType,
2058                                          MachineInstr &I) const {
2059   auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
2060                  .addDef(ResVReg)
2061                  .addUse(GR.getSPIRVTypeID(ResType));
2062   const unsigned NumOps = I.getNumOperands();
2063   for (unsigned i = 1; i < NumOps; i += 2) {
2064     MIB.addUse(I.getOperand(i + 0).getReg());
2065     MIB.addMBB(I.getOperand(i + 1).getMBB());
2066   }
2067   return MIB.constrainAllUses(TII, TRI, RBI);
2068 }
2069 
2070 bool SPIRVInstructionSelector::selectGlobalValue(
2071     Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
2072   // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
2073   MachineIRBuilder MIRBuilder(I);
2074   const GlobalValue *GV = I.getOperand(1).getGlobal();
2075   Type *GVType = GR.getDeducedGlobalValueType(GV);
2076   SPIRVType *PointerBaseType;
2077   if (GVType->isArrayTy()) {
2078     SPIRVType *ArrayElementType =
2079         GR.getOrCreateSPIRVType(GVType->getArrayElementType(), MIRBuilder,
2080                                 SPIRV::AccessQualifier::ReadWrite, false);
2081     PointerBaseType = GR.getOrCreateSPIRVArrayType(
2082         ArrayElementType, GVType->getArrayNumElements(), I, TII);
2083   } else {
2084     PointerBaseType = GR.getOrCreateSPIRVType(
2085         GVType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
2086   }
2087   SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
2088       PointerBaseType, I, TII,
2089       addressSpaceToStorageClass(GV->getAddressSpace(), STI));
2090 
2091   std::string GlobalIdent;
2092   if (!GV->hasName()) {
2093     unsigned &ID = UnnamedGlobalIDs[GV];
2094     if (ID == 0)
2095       ID = UnnamedGlobalIDs.size();
2096     GlobalIdent = "__unnamed_" + Twine(ID).str();
2097   } else {
2098     GlobalIdent = GV->getGlobalIdentifier();
2099   }
2100 
2101   // Behaviour of functions as operands depends on availability of the
2102   // corresponding extension (SPV_INTEL_function_pointers):
2103   // - If there is an extension to operate with functions as operands:
2104   // We create a proper constant operand and evaluate a correct type for a
2105   // function pointer.
2106   // - Without the required extension:
2107   // We have functions as operands in tests with blocks of instructions, e.g. in
2108   // transcoding/global_block.ll. These operands are not used and should be
2109   // substituted by zero constants. Their type is expected to be always
2110   // OpTypePointer Function %uchar.
2111   if (isa<Function>(GV)) {
2112     const Constant *ConstVal = GV;
2113     MachineBasicBlock &BB = *I.getParent();
2114     Register NewReg = GR.find(ConstVal, GR.CurMF);
2115     if (!NewReg.isValid()) {
2116       Register NewReg = ResVReg;
2117       GR.add(ConstVal, GR.CurMF, NewReg);
2118       const Function *GVFun =
2119           STI.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)
2120               ? dyn_cast<Function>(GV)
2121               : nullptr;
2122       if (GVFun) {
2123         // References to a function via function pointers generate virtual
2124         // registers without a definition. We will resolve them later, during
2125         // the module analysis stage.
2126         MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2127         Register FuncVReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
2128         MRI->setRegClass(FuncVReg, &SPIRV::IDRegClass);
2129         MachineInstrBuilder MB =
2130             BuildMI(BB, I, I.getDebugLoc(),
2131                     TII.get(SPIRV::OpConstantFunctionPointerINTEL))
2132                 .addDef(NewReg)
2133                 .addUse(GR.getSPIRVTypeID(ResType))
2134                 .addUse(FuncVReg);
2135         // Map the function pointer operand to the Function it references.
2136         GR.recordFunctionPointer(&MB.getInstr()->getOperand(2), GVFun);
2137         return MB.constrainAllUses(TII, TRI, RBI);
2138       }
2139       return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
2140           .addDef(NewReg)
2141           .addUse(GR.getSPIRVTypeID(ResType))
2142           .constrainAllUses(TII, TRI, RBI);
2143     }
2144     assert(NewReg != ResVReg);
2145     return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
2146         .addDef(ResVReg)
2147         .addUse(NewReg)
2148         .constrainAllUses(TII, TRI, RBI);
2149   }
2150   auto GlobalVar = cast<GlobalVariable>(GV);
2151   assert(GlobalVar->getName() != "llvm.global.annotations");
2152 
2153   bool HasInit = GlobalVar->hasInitializer() &&
2154                  !isa<UndefValue>(GlobalVar->getInitializer());
2155   // Skip the empty declaration for GVs with initializers until we get the decl
2156   // with the passed initializer.
2157   if (HasInit && !Init)
2158     return true;
2159 
2160   unsigned AddrSpace = GV->getAddressSpace();
2161   SPIRV::StorageClass::StorageClass Storage =
2162       addressSpaceToStorageClass(AddrSpace, STI);
2163   bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage &&
2164                   Storage != SPIRV::StorageClass::Function;
2165   SPIRV::LinkageType::LinkageType LnkType =
2166       (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
2167           ? SPIRV::LinkageType::Import
2168           : (GV->getLinkage() == GlobalValue::LinkOnceODRLinkage &&
2169                      STI.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr)
2170                  ? SPIRV::LinkageType::LinkOnceODR
2171                  : SPIRV::LinkageType::Export);
2172 
2173   Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
2174                                         Storage, Init, GlobalVar->isConstant(),
2175                                         HasLnkTy, LnkType, MIRBuilder, true);
2176   return Reg.isValid();
2177 }
2178 
2179 bool SPIRVInstructionSelector::selectLog10(Register ResVReg,
2180                                            const SPIRVType *ResType,
2181                                            MachineInstr &I) const {
2182   if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
2183     return selectExtInst(ResVReg, ResType, I, CL::log10);
2184   }
2185 
2186   // There is no log10 instruction in the GLSL Extended Instruction set, so it
2187   // is implemented as:
2188   // log10(x) = log2(x) * (1 / log2(10))
2189   //          = log2(x) * 0.30103
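  // (Note: 1 / log2(10) = log10(2) ~ 0.30103.)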
2190 
2191   MachineIRBuilder MIRBuilder(I);
2192   MachineBasicBlock &BB = *I.getParent();
2193 
2194   // Build log2(x).
2195   Register VarReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
2196   bool Result =
2197       BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
2198           .addDef(VarReg)
2199           .addUse(GR.getSPIRVTypeID(ResType))
2200           .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
2201           .addImm(GL::Log2)
2202           .add(I.getOperand(1))
2203           .constrainAllUses(TII, TRI, RBI);
2204 
2205   // Build 0.30103.
2206   assert(ResType->getOpcode() == SPIRV::OpTypeVector ||
2207          ResType->getOpcode() == SPIRV::OpTypeFloat);
2208   // TODO: Add matrix implementation once supported by the HLSL frontend.
2209   const SPIRVType *SpirvScalarType =
2210       ResType->getOpcode() == SPIRV::OpTypeVector
2211           ? GR.getSPIRVTypeForVReg(ResType->getOperand(1).getReg())
2212           : ResType;
2213   Register ScaleReg =
2214       GR.buildConstantFP(APFloat(0.30103f), MIRBuilder, SpirvScalarType);
2215 
2216   // Multiply log2(x) by 0.30103 to get log10(x) result.
2217   auto Opcode = ResType->getOpcode() == SPIRV::OpTypeVector
2218                     ? SPIRV::OpVectorTimesScalar
2219                     : SPIRV::OpFMulS;
2220   Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
2221                 .addDef(ResVReg)
2222                 .addUse(GR.getSPIRVTypeID(ResType))
2223                 .addUse(VarReg)
2224                 .addUse(ScaleReg)
2225                 .constrainAllUses(TII, TRI, RBI);
2226 
2227   return Result;
2228 }
2229 
2230 bool SPIRVInstructionSelector::selectSpvThreadId(Register ResVReg,
2231                                                  const SPIRVType *ResType,
2232                                                  MachineInstr &I) const {
2233   // DX intrinsic: @llvm.dx.thread.id(i32)
2234   // ID  Name      Description
2235   // 93  ThreadId  reads the thread ID
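  //
  // The intrinsic is lowered to a load from the GlobalInvocationId builtin
  // variable followed by a component extract; roughly (illustrative sketch):
  //   %vec = OpLoad %v3uint %GlobalInvocationId
  //   %tid = OpCompositeExtract %uint %vec <index>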
2236 
2237   MachineIRBuilder MIRBuilder(I);
2238   const SPIRVType *U32Type = GR.getOrCreateSPIRVIntegerType(32, MIRBuilder);
2239   const SPIRVType *Vec3Ty =
2240       GR.getOrCreateSPIRVVectorType(U32Type, 3, MIRBuilder);
2241   const SPIRVType *PtrType = GR.getOrCreateSPIRVPointerType(
2242       Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);
2243 
2244   // Create new register for GlobalInvocationID builtin variable.
2245   Register NewRegister =
2246       MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::IDRegClass);
2247   MIRBuilder.getMRI()->setType(NewRegister, LLT::pointer(0, 32));
2248   GR.assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());
2249 
2250   // Build GlobalInvocationID global variable with the necessary decorations.
2251   Register Variable = GR.buildGlobalVariable(
2252       NewRegister, PtrType,
2253       getLinkStringForBuiltIn(SPIRV::BuiltIn::GlobalInvocationId), nullptr,
2254       SPIRV::StorageClass::Input, nullptr, true, true,
2255       SPIRV::LinkageType::Import, MIRBuilder, false);
2256 
2257   // Create new register for loading value.
2258   MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2259   Register LoadedRegister = MRI->createVirtualRegister(&SPIRV::IDRegClass);
2260   MIRBuilder.getMRI()->setType(LoadedRegister, LLT::pointer(0, 32));
2261   GR.assignSPIRVTypeToVReg(Vec3Ty, LoadedRegister, MIRBuilder.getMF());
2262 
2263   // Load v3uint value from the global variable.
2264   BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
2265       .addDef(LoadedRegister)
2266       .addUse(GR.getSPIRVTypeID(Vec3Ty))
2267       .addUse(Variable);
2268 
2269   // Get the thread ID index. The operand is expected to be a constant
2270   // immediate value wrapped in a type assignment.
2271   assert(I.getOperand(2).isReg());
2272   Register ThreadIdReg = I.getOperand(2).getReg();
2273   SPIRVType *ConstTy = this->MRI->getVRegDef(ThreadIdReg);
2274   assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
2275          ConstTy->getOperand(1).isReg());
2276   Register ConstReg = ConstTy->getOperand(1).getReg();
2277   const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
2278   assert(Const && Const->getOpcode() == TargetOpcode::G_CONSTANT);
2279   const llvm::APInt &Val = Const->getOperand(1).getCImm()->getValue();
2280   const uint32_t ThreadId = Val.getZExtValue();
2281 
2282   // Extract the thread ID from the loaded vector value.
2283   MachineBasicBlock &BB = *I.getParent();
2284   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
2285                  .addDef(ResVReg)
2286                  .addUse(GR.getSPIRVTypeID(ResType))
2287                  .addUse(LoadedRegister)
2288                  .addImm(ThreadId);
2289   return MIB.constrainAllUses(TII, TRI, RBI);
2290 }
2291 
2292 namespace llvm {
2293 InstructionSelector *
2294 createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
2295                                const SPIRVSubtarget &Subtarget,
2296                                const RegisterBankInfo &RBI) {
2297   return new SPIRVInstructionSelector(TM, Subtarget, RBI);
2298 }
2299 } // namespace llvm
2300