//===- SPIRVInstructionSelector.cpp ------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the targeting of the InstructionSelector class for
// SPIR-V.
// TODO: This should be generated by TableGen.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/SPIRVBaseInfo.h"
#include "MCTargetDesc/SPIRVMCTargetDesc.h"
#include "SPIRV.h"
#include "SPIRVGlobalRegistry.h"
#include "SPIRVInstrInfo.h"
#include "SPIRVRegisterBankInfo.h"
#include "SPIRVRegisterInfo.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/Support/Debug.h"

namespace llvm {

class SPIRVMachineModuleInfo : public MachineModuleInfoImpl {
public:
  SyncScope::ID Work_ItemSSID;
  SyncScope::ID WorkGroupSSID;
  SyncScope::ID DeviceSSID;
  SyncScope::ID AllSVMDevicesSSID;
  SyncScope::ID SubGroupSSID;

  SPIRVMachineModuleInfo(const MachineModuleInfo &MMI) {
    LLVMContext &CTX = MMI.getModule()->getContext();
    Work_ItemSSID = CTX.getOrInsertSyncScopeID("work_item");
    WorkGroupSSID = CTX.getOrInsertSyncScopeID("workgroup");
    DeviceSSID = CTX.getOrInsertSyncScopeID("device");
    AllSVMDevicesSSID = CTX.getOrInsertSyncScopeID("all_svm_devices");
    SubGroupSSID = CTX.getOrInsertSyncScopeID("sub_group");
  }
};
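
// For illustration (hypothetical LLVM IR): an atomic or a fence such as
//   fence syncscope("workgroup") acquire
// carries one of the sync-scope IDs cached above; getScope() below maps each
// ID to the corresponding SPIR-V scope (here, SPIRV::Scope::Workgroup).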

} // end namespace llvm

#define DEBUG_TYPE "spirv-isel"

using namespace llvm;
namespace CL = SPIRV::OpenCLExtInst;
namespace GL = SPIRV::GLSLExtInst;

using ExtInstList =
    std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class SPIRVInstructionSelector : public InstructionSelector {
  const SPIRVSubtarget &STI;
  const SPIRVInstrInfo &TII;
  const SPIRVRegisterInfo &TRI;
  const RegisterBankInfo &RBI;
  SPIRVGlobalRegistry &GR;
  MachineRegisterInfo *MRI;
  SPIRVMachineModuleInfo *MMI = nullptr;

  /// We need to keep track of the number we give to anonymous global values so
  /// that we can generate the same name every time it is needed.
  mutable DenseMap<const GlobalValue *, unsigned> UnnamedGlobalIDs;

public:
  SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                           const SPIRVSubtarget &ST,
                           const RegisterBankInfo &RBI);
  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override;
  // Common selection code. Instruction-specific selection occurs in spvSelect.
  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

private:
  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // All instruction-specific selection that didn't happen in "select()".
  // It is basically a large switch/case delegating to all the other select
  // methods.
  bool spvSelect(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectGlobalValue(Register ResVReg, MachineInstr &I,
                         const MachineInstr *Init = nullptr) const;

  bool selectUnOpWithSrc(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I, Register SrcReg,
                         unsigned Opcode) const;
  bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  unsigned Opcode) const;

  bool selectBitcast(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;

  bool selectLoad(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectStore(MachineInstr &I) const;

  bool selectStackSave(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectStackRestore(MachineInstr &I) const;

  bool selectMemOperation(Register ResVReg, MachineInstr &I) const;

  bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I, unsigned NewOpcode,
                       unsigned NegateOpcode = 0) const;

  bool selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectFence(MachineInstr &I) const;

  bool selectAddrSpaceCast(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectBitreverse(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectConstVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;
  bool selectSplatVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectCmp(Register ResVReg, const SPIRVType *ResType,
                 unsigned comparisonOpcode, MachineInstr &I) const;

  bool selectICmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectFCmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  void renderImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                   int OpIdx) const;
  void renderFImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                    int OpIdx) const;

  bool selectConst(Register ResVReg, const SPIRVType *ResType, const APInt &Imm,
                   MachineInstr &I) const;

  bool selectSelect(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                    bool IsSigned) const;
  bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  bool IsSigned, unsigned Opcode) const;
  bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                 bool IsSigned) const;

  bool selectTrunc(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectIntToBool(Register IntReg, Register ResVReg, MachineInstr &I,
                       const SPIRVType *intTy, const SPIRVType *boolTy) const;

  bool selectOpUndef(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;
  bool selectFreeze(Register ResVReg, const SPIRVType *ResType,
                    MachineInstr &I) const;
  bool selectIntrinsic(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractVal(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertVal(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractElt(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertElt(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectGEP(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectFrameIndex(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectAllocaArray(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectBranch(MachineInstr &I) const;
  bool selectBranchCond(MachineInstr &I) const;

  bool selectPhi(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst,
                     GL::GLSLExtInst GLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, const ExtInstList &ExtInsts) const;

  bool selectLog10(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectSpvThreadId(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectUnmergeValues(MachineInstr &I) const;

  Register buildI32Constant(uint32_t Val, MachineInstr &I,
                            const SPIRVType *ResType = nullptr) const;

  Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildOnesVal(bool AllOnes, const SPIRVType *ResType,
                        MachineInstr &I) const;
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                                                   const SPIRVSubtarget &ST,
                                                   const RegisterBankInfo &RBI)
    : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                       CodeGenCoverage *CoverageInfo,
                                       ProfileSummaryInfo *PSI,
                                       BlockFrequencyInfo *BFI) {
  MMI = &MF.getMMI().getObjFileInfo<SPIRVMachineModuleInfo>();
  MRI = &MF.getRegInfo();
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}

static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI);

// Defined in SPIRVLegalizerInfo.cpp.
extern bool isTypeFoldingSupported(unsigned Opcode);

bool SPIRVInstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  unsigned Opcode = I.getOpcode();
  // If it's not a GMIR instruction, we've selected it already.
  if (!isPreISelGenericOpcode(Opcode)) {
    if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed any more.
      auto *Def = MRI->getVRegDef(I.getOperand(1).getReg());
      if (isTypeFoldingSupported(Def->getOpcode())) {
        bool Res = selectImpl(I, *CoverageInfo);
        assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
        if (Res)
          return Res;
      }
      MRI->replaceRegWith(I.getOperand(1).getReg(), I.getOperand(0).getReg());
      I.removeFromParent();
      return true;
    } else if (I.getNumDefs() == 1) {
      // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(I.getOperand(0).getReg(), LLT::scalar(32));
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
    return false;
  }

  // Common code for getting return reg+type, and removing selected instr
  // from parent occurs here. Instr-specific selection happens in spvSelect().
  bool HasDefs = I.getNumDefs() > 0;
  Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
  assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  if (spvSelect(ResVReg, ResType, I)) {
    if (HasDefs) // Make all vregs 32 bits (for SPIR-V IDs).
      for (unsigned i = 0; i < I.getNumDefs(); ++i)
        MRI->setType(I.getOperand(i).getReg(), LLT::scalar(32));
    I.removeFromParent();
    return true;
  }
  return false;
}

bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const unsigned Opcode = I.getOpcode();
  if (isTypeFoldingSupported(Opcode) && Opcode != TargetOpcode::G_CONSTANT)
    return selectImpl(I, *CoverageInfo);
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);
  case TargetOpcode::G_FREEZE:
    return selectFreeze(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectConstVector(ResVReg, ResType, I);
  case TargetOpcode::G_SPLAT_VECTOR:
    return selectSplatVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMSET:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
  case TargetOpcode::G_SMIN:
    return selectExtInst(ResVReg, ResType, I, CL::s_min, GL::SMin);
  case TargetOpcode::G_UMIN:
    return selectExtInst(ResVReg, ResType, I, CL::u_min, GL::UMin);

  case TargetOpcode::G_SMAX:
    return selectExtInst(ResVReg, ResType, I, CL::s_max, GL::SMax);
  case TargetOpcode::G_UMAX:
    return selectExtInst(ResVReg, ResType, I, CL::u_max, GL::UMax);

  case TargetOpcode::G_FMA:
    return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma);

  case TargetOpcode::G_FPOW:
    return selectExtInst(ResVReg, ResType, I, CL::pow, GL::Pow);
  case TargetOpcode::G_FPOWI:
    return selectExtInst(ResVReg, ResType, I, CL::pown);

  case TargetOpcode::G_FEXP:
    return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
  case TargetOpcode::G_FEXP2:
    return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);

  case TargetOpcode::G_FLOG:
    return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
  case TargetOpcode::G_FLOG2:
    return selectExtInst(ResVReg, ResType, I, CL::log2, GL::Log2);
  case TargetOpcode::G_FLOG10:
    return selectLog10(ResVReg, ResType, I);

  case TargetOpcode::G_FABS:
    return selectExtInst(ResVReg, ResType, I, CL::fabs, GL::FAbs);
  case TargetOpcode::G_ABS:
    return selectExtInst(ResVReg, ResType, I, CL::s_abs, GL::SAbs);

  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmin, GL::FMin);
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMAXIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmax, GL::FMax);

  case TargetOpcode::G_FCOPYSIGN:
    return selectExtInst(ResVReg, ResType, I, CL::copysign);

  case TargetOpcode::G_FCEIL:
    return selectExtInst(ResVReg, ResType, I, CL::ceil, GL::Ceil);
  case TargetOpcode::G_FFLOOR:
    return selectExtInst(ResVReg, ResType, I, CL::floor, GL::Floor);

  case TargetOpcode::G_FCOS:
    return selectExtInst(ResVReg, ResType, I, CL::cos, GL::Cos);
  case TargetOpcode::G_FSIN:
    return selectExtInst(ResVReg, ResType, I, CL::sin, GL::Sin);

  case TargetOpcode::G_FSQRT:
    return selectExtInst(ResVReg, ResType, I, CL::sqrt, GL::Sqrt);

  case TargetOpcode::G_CTTZ:
  case TargetOpcode::G_CTTZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::ctz);
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTLZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::clz);

  case TargetOpcode::G_INTRINSIC_ROUND:
    return selectExtInst(ResVReg, ResType, I, CL::round, GL::Round);
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return selectExtInst(ResVReg, ResType, I, CL::trunc, GL::Trunc);
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FNEARBYINT:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);

  case TargetOpcode::G_SMULH:
    return selectExtInst(ResVReg, ResType, I, CL::s_mul_hi);
  case TargetOpcode::G_UMULH:
    return selectExtInst(ResVReg, ResType, I, CL::u_mul_hi);

  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectBitcast(ResVReg, ResType, I);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);
  case TargetOpcode::G_PTR_ADD: {
    // Currently, we get G_PTR_ADD only as a result of translating global
    // variables initialized with constant expressions like GV + Const
    // (see test opencl/basic/progvar_prog_scope_init.ll).
    // TODO: extend the handler once we have other cases.
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    Register GV = I.getOperand(1).getReg();
    MachineRegisterInfo::def_instr_iterator II = MRI->def_instr_begin(GV);
    assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
            (*II).getOpcode() == TargetOpcode::COPY ||
            (*II).getOpcode() == SPIRV::OpVariable) &&
           isImm(I.getOperand(2), MRI));
    Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addImm(static_cast<uint32_t>(
                       SPIRV::Opcode::InBoundsPtrAccessChain))
                   .addUse(GV)
                   .addUse(Idx)
                   .addUse(I.getOperand(2).getReg());
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
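
  // For illustration (hypothetical LLVM IR): a program-scope initializer such
  // as
  //   @p = addrspace(1) global ptr addrspace(1)
  //            getelementptr (i8, ptr addrspace(1) @g, i64 4)
  // reaches selection as a G_GLOBAL_VALUE feeding this G_PTR_ADD and is
  // emitted as the single OpSpecConstantOp built above, using the
  // InBoundsPtrAccessChain opcode.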

  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_ATOMICRMW_FADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT);
  case TargetOpcode::G_ATOMICRMW_FSUB:
    // Translate G_ATOMICRMW_FSUB to OpAtomicFAddEXT with a negated value
    // operand.
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT,
                           SPIRV::OpFNegate);
  case TargetOpcode::G_ATOMICRMW_FMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMinEXT);
  case TargetOpcode::G_ATOMICRMW_FMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMaxEXT);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  case TargetOpcode::G_STACKSAVE:
    return selectStackSave(ResVReg, ResType, I);
  case TargetOpcode::G_STACKRESTORE:
    return selectStackRestore(I);

  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I);

  default:
    return false;
  }
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst) const {
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst,
                                             GL::GLSLExtInst GLInst) const {
  ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
                          {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
  return selectExtInst(ResVReg, ResType, I, ExtInsts);
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             const ExtInstList &Insts) const {
  for (const auto &Ex : Insts) {
    SPIRV::InstructionSet::InstructionSet Set = Ex.first;
    uint32_t Opcode = Ex.second;
    if (STI.canUseExtInstSet(Set)) {
      MachineBasicBlock &BB = *I.getParent();
      auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
                     .addDef(ResVReg)
                     .addUse(GR.getSPIRVTypeID(ResType))
                     .addImm(static_cast<uint32_t>(Set))
                     .addImm(Opcode);
      const unsigned NumOps = I.getNumOperands();
      for (unsigned i = 1; i < NumOps; ++i)
        MIB.add(I.getOperand(i));
      return MIB.constrainAllUses(TII, TRI, RBI);
    }
  }
  return false;
}

bool SPIRVInstructionSelector::selectUnOpWithSrc(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I,
                                                 Register SrcReg,
                                                 unsigned Opcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I,
                                          unsigned Opcode) const {
  return selectUnOpWithSrc(ResVReg, ResType, I, I.getOperand(1).getReg(),
                           Opcode);
}

bool SPIRVInstructionSelector::selectBitcast(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  Register OpReg = I.getOperand(1).getReg();
  SPIRVType *OpType = OpReg.isValid() ? GR.getSPIRVTypeForVReg(OpReg) : nullptr;
  if (!GR.isBitcastCompatible(ResType, OpType))
    report_fatal_error("incompatible result and operand types in a bitcast");
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}

static SPIRV::Scope::Scope getScope(SyncScope::ID Ord,
                                    SPIRVMachineModuleInfo *MMI) {
  if (Ord == SyncScope::SingleThread || Ord == MMI->Work_ItemSSID)
    return SPIRV::Scope::Invocation;
  else if (Ord == SyncScope::System || Ord == MMI->DeviceSSID)
    return SPIRV::Scope::Device;
  else if (Ord == MMI->WorkGroupSSID)
    return SPIRV::Scope::Workgroup;
  else if (Ord == MMI->AllSVMDevicesSSID)
    return SPIRV::Scope::CrossDevice;
  else if (Ord == MMI->SubGroupSSID)
    return SPIRV::Scope::Subgroup;
  else
    // The OpenCL approach is: "The functions that do not have memory_scope
    // argument have the same semantics as the corresponding functions with
    // the memory_scope argument set to memory_scope_device." See:
    // https://registry.khronos.org/OpenCL/specs/3.0-unified/html/OpenCL_C.html#atomic-functions
    // In our case, if the scope is unknown, assuming that the SPIR-V code is
    // to be consumed in an OpenCL environment, we take the same approach and
    // set the scope to memory_scope_device.
    return SPIRV::Scope::Device;
}
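
// For example, an atomic or fence marked `syncscope("workgroup")` in LLVM IR
// maps to SPIRV::Scope::Workgroup above, while a plain `seq_cst` operation
// (SyncScope::System) maps to SPIRV::Scope::Device.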

static void addMemoryOperands(MachineMemOperand *MemOp,
                              MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    MIB.addImm(SpvMemOp);
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
      MIB.addImm(MemOp->getAlign().value());
  }
}

static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (Flags & MachineMemOperand::Flags::MOVolatile)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (Flags & MachineMemOperand::Flags::MONonTemporal)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
    MIB.addImm(SpvMemOp);
}
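
// For example, a volatile load with 4-byte alignment first sets the mask
// Volatile|Aligned and then appends the alignment value, yielding the SPIR-V
// memory-operand sequence `Volatile|Aligned 4` on the resulting OpLoad.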

bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(Ptr);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
                 .addUse(Ptr)
                 .addUse(StoreVal);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackSave(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stacksave intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSaveMemoryINTEL))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stackrestore intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  if (!I.getOperand(0).isReg())
    return false;
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpRestoreMemoryINTEL))
      .addUse(I.getOperand(0).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Register SrcReg = I.getOperand(1).getReg();
  if (I.getOpcode() == TargetOpcode::G_MEMSET) {
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI);
    unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI);
    SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num, I, TII);
    Register Const = GR.getOrCreateConsIntArray(Val, I, ArrTy, TII);
    SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
        ArrTy, I, TII, SPIRV::StorageClass::UniformConstant);
    // TODO: check if we have such GV, add init, use buildGlobalVariable.
    Type *LLVMArrTy = ArrayType::get(
        IntegerType::get(GR.CurMF->getFunction().getContext(), 8), Num);
    GlobalVariable *GV =
        new GlobalVariable(LLVMArrTy, true, GlobalValue::InternalLinkage);
    Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    GR.add(GV, GR.CurMF, VarReg);

    buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {});
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
        .addDef(VarReg)
        .addUse(GR.getSPIRVTypeID(VarTy))
        .addImm(SPIRV::StorageClass::UniformConstant)
        .addUse(Const)
        .constrainAllUses(TII, TRI, RBI);
    SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
        ValTy, I, TII, SPIRV::StorageClass::UniformConstant);
    SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    selectUnOpWithSrc(SrcReg, SourceTy, I, VarReg, SPIRV::OpBitcast);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addUse(I.getOperand(0).getReg())
                 .addUse(SrcReg)
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
    addMemoryOperands(*I.memoperands_begin(), MIB);
  bool Result = MIB.constrainAllUses(TII, TRI, RBI);
  if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg())
    BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
        .addUse(MIB->getOperand(0).getReg());
  return Result;
}

bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode,
                                               unsigned NegateOpcode) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope =
      static_cast<uint32_t>(getScope(MemOp->getSyncScopeID(), MMI));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(1).getReg();
  // TODO: Changed as it's implemented in the translator. See test/atomicrmw.ll
  // auto ScSem =
  // getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr));
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);

  bool Result = false;
  Register ValueReg = I.getOperand(2).getReg();
  if (NegateOpcode != 0) {
    // Translation with a negated value operand was requested.
    Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    Result |= selectUnOpWithSrc(TmpReg, ResType, I, ValueReg, NegateOpcode);
    ValueReg = TmpReg;
  }

  Result |= BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(Ptr)
                .addUse(ScopeReg)
                .addUse(MemSemReg)
                .addUse(ValueReg)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}
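
// For illustration (hypothetical IDs): G_ATOMICRMW_FSUB is routed here with
// NegateOpcode == SPIRV::OpFNegate, so `atomicrmw fsub` becomes
//   %neg = OpFNegate %float %val
//   %old = OpAtomicFAddEXT %float %ptr %scope %semantics %neg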

bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
  unsigned ArgI = I.getNumOperands() - 1;
  Register SrcReg =
      I.getOperand(ArgI).isReg() ? I.getOperand(ArgI).getReg() : Register(0);
  SPIRVType *DefType =
      SrcReg.isValid() ? GR.getSPIRVTypeForVReg(SrcReg) : nullptr;
  if (!DefType || DefType->getOpcode() != SPIRV::OpTypeVector)
    report_fatal_error(
        "cannot select G_UNMERGE_VALUES with a non-vector argument");

  SPIRVType *ScalarType =
      GR.getSPIRVTypeForVReg(DefType->getOperand(1).getReg());
  MachineBasicBlock &BB = *I.getParent();
  bool Res = false;
  for (unsigned i = 0; i < I.getNumDefs(); ++i) {
    Register ResVReg = I.getOperand(i).getReg();
    SPIRVType *ResType = GR.getSPIRVTypeForVReg(ResVReg);
    if (!ResType) {
      // There were no "assign type" actions, so let's fix this now.
      ResType = ScalarType;
      MRI->setRegClass(ResVReg, &SPIRV::IDRegClass);
      MRI->setType(ResVReg, LLT::scalar(GR.getScalarOrVectorBitWidth(ResType)));
      GR.assignSPIRVTypeToVReg(ResType, ResVReg, *GR.CurMF);
    }
    auto MIB =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(SrcReg)
            .addImm(static_cast<int64_t>(i));
    Res |= MIB.constrainAllUses(TII, TRI, RBI);
  }
  return Res;
}
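
// For illustration (hypothetical IDs): unmerging a two-element vector yields
// one OpCompositeExtract per scalar result:
//   %x = OpCompositeExtract %float %vec 0
//   %y = OpCompositeExtract %float %vec 1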

bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
  AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem, I);
  SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
  uint32_t Scope = static_cast<uint32_t>(getScope(Ord, MMI));
  Register ScopeReg = buildI32Constant(Scope, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .constrainAllUses(TII, TRI, RBI);
}
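
// For illustration: `fence syncscope("workgroup") acquire` selects to an
// OpMemoryBarrier whose two operands are OpConstants holding
// Scope::Workgroup and the Acquire memory-semantics mask, both built with
// buildI32Constant.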

bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  Register ScopeReg;
  Register MemSemEqReg;
  Register MemSemNeqReg;
  Register Ptr = I.getOperand(2).getReg();
  if (!isa<GIntrinsic>(I)) {
    assert(I.hasOneMemOperand());
    const MachineMemOperand *MemOp = *I.memoperands_begin();
    unsigned Scope =
        static_cast<uint32_t>(getScope(MemOp->getSyncScopeID(), MMI));
    ScopeReg = buildI32Constant(Scope, I);

    unsigned ScSem = static_cast<uint32_t>(
        getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
    AtomicOrdering AO = MemOp->getSuccessOrdering();
    unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
    MemSemEqReg = buildI32Constant(MemSemEq, I);
    AtomicOrdering FO = MemOp->getFailureOrdering();
    unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
    MemSemNeqReg =
        MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
  } else {
    ScopeReg = I.getOperand(5).getReg();
    MemSemEqReg = I.getOperand(6).getReg();
    MemSemNeqReg = I.getOperand(7).getReg();
  }

  Register Cmp = I.getOperand(3).getReg();
  Register Val = I.getOperand(4).getReg();
  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  Register ACmpRes = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  const DebugLoc &DL = I.getDebugLoc();
  bool Result =
      BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
          .addDef(ACmpRes)
          .addUse(GR.getSPIRVTypeID(SpvValTy))
          .addUse(Ptr)
          .addUse(ScopeReg)
          .addUse(MemSemEqReg)
          .addUse(MemSemNeqReg)
          .addUse(Val)
          .addUse(Cmp)
          .constrainAllUses(TII, TRI, RBI);
  Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
                .addDef(CmpSuccReg)
                .addUse(GR.getSPIRVTypeID(BoolTy))
                .addUse(ACmpRes)
                .addUse(Cmp)
                .constrainAllUses(TII, TRI, RBI);
  Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(TmpReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(ACmpRes)
                .addUse(GR.getOrCreateUndef(I, ResType, TII))
                .addImm(0)
                .constrainAllUses(TII, TRI, RBI);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(CmpSuccReg)
                .addUse(TmpReg)
                .addImm(1)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}
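
// The result of G_ATOMIC_CMPXCHG is the pair {old value, success flag}, so
// selectAtomicCmpXchg rebuilds it from undef with two OpCompositeInsert ops
// (illustrative, hypothetical IDs):
//   %pair0 = OpCompositeInsert %resTy %old %undef 0
//   %pair  = OpCompositeInsert %resTy %success %pair0 1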

static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Workgroup:
  case SPIRV::StorageClass::CrossWorkgroup:
  case SPIRV::StorageClass::Function:
    return true;
  default:
    return false;
  }
}

static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::DeviceOnlyINTEL:
  case SPIRV::StorageClass::HostOnlyINTEL:
    return true;
  default:
    return false;
  }
}

// In SPIR-V address space casting can only happen to and from the Generic
// storage class. We can also only cast Workgroup, CrossWorkgroup, or Function
// pointers to and from Generic pointers. As such, we can convert e.g. from
// Workgroup to Function by going via a Generic pointer as an intermediary. All
// other combinations can only be done by a bitcast, and are probably not safe.
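// For example (illustrative SPIR-V, hypothetical IDs), Workgroup -> Function
// is emitted as the two-step sequence
//   %tmp = OpPtrCastToGeneric %genericPtrTy %src
//   %dst = OpGenericCastToPtr %funcPtrTy %tmp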
bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  // If the AddrSpaceCast has a single user and that user is an
  // OpConstantComposite, an OpVariable, or an spv_init_global intrinsic, we
  // should select OpSpecConstantOp.
  auto UIs = MRI->use_instructions(ResVReg);
  if (!UIs.empty() && ++UIs.begin() == UIs.end() &&
      (UIs.begin()->getOpcode() == SPIRV::OpConstantComposite ||
       UIs.begin()->getOpcode() == SPIRV::OpVariable ||
       isSpvIntrinsic(*UIs.begin(), Intrinsic::spv_init_global))) {
    Register NewReg = I.getOperand(1).getReg();
    MachineBasicBlock &BB = *I.getParent();
    SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII,
                                             SPIRV::StorageClass::Generic);
    bool Result =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric))
            .addUse(NewReg)
            .constrainAllUses(TII, TRI, RBI);
    return Result;
  }
  Register SrcPtr = I.getOperand(1).getReg();
  SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
  SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);
  SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResVReg);

  // Don't generate a cast between identical storage classes.
  if (SrcSC == DstSC)
    return true;

  // Casting from an eligible pointer to Generic.
  if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  // Casting from Generic to an eligible pointer.
  if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
  // Casting between 2 eligible pointers using Generic as an intermediary.
  if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
    Register Tmp = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
        SrcPtrTy, I, TII, SPIRV::StorageClass::Generic);
    MachineBasicBlock &BB = *I.getParent();
    const DebugLoc &DL = I.getDebugLoc();
    bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
                       .addDef(Tmp)
                       .addUse(GR.getSPIRVTypeID(GenericPtrTy))
                       .addUse(SrcPtr)
                       .constrainAllUses(TII, TRI, RBI);
    return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
                          .addDef(ResVReg)
                          .addUse(GR.getSPIRVTypeID(ResType))
                          .addUse(Tmp)
                          .constrainAllUses(TII, TRI, RBI);
  }

  // Check whether instructions from the SPV_INTEL_usm_storage_classes
  // extension may be applied.
  if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::CrossWorkgroup)
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpPtrCastToCrossWorkgroupINTEL);
  if (SrcSC == SPIRV::StorageClass::CrossWorkgroup && isUSMStorageClass(DstSC))
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpCrossWorkgroupCastToPtrINTEL);

  // TODO: Should this case just be disallowed completely?
  // We're casting between two other arbitrary address spaces, so we have to
  // bitcast.
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}

static unsigned getFCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::FCMP_OEQ:
    return SPIRV::OpFOrdEqual;
  case CmpInst::FCMP_OGE:
    return SPIRV::OpFOrdGreaterThanEqual;
  case CmpInst::FCMP_OGT:
    return SPIRV::OpFOrdGreaterThan;
  case CmpInst::FCMP_OLE:
    return SPIRV::OpFOrdLessThanEqual;
  case CmpInst::FCMP_OLT:
    return SPIRV::OpFOrdLessThan;
  case CmpInst::FCMP_ONE:
    return SPIRV::OpFOrdNotEqual;
  case CmpInst::FCMP_ORD:
    return SPIRV::OpOrdered;
  case CmpInst::FCMP_UEQ:
    return SPIRV::OpFUnordEqual;
  case CmpInst::FCMP_UGE:
    return SPIRV::OpFUnordGreaterThanEqual;
  case CmpInst::FCMP_UGT:
    return SPIRV::OpFUnordGreaterThan;
  case CmpInst::FCMP_ULE:
    return SPIRV::OpFUnordLessThanEqual;
  case CmpInst::FCMP_ULT:
    return SPIRV::OpFUnordLessThan;
  case CmpInst::FCMP_UNE:
    return SPIRV::OpFUnordNotEqual;
  case CmpInst::FCMP_UNO:
    return SPIRV::OpUnordered;
  default:
    llvm_unreachable("Unknown predicate type for FCmp");
  }
}

static unsigned getICmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpIEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpINotEqual;
  case CmpInst::ICMP_SGE:
    return SPIRV::OpSGreaterThanEqual;
  case CmpInst::ICMP_SGT:
    return SPIRV::OpSGreaterThan;
  case CmpInst::ICMP_SLE:
    return SPIRV::OpSLessThanEqual;
  case CmpInst::ICMP_SLT:
    return SPIRV::OpSLessThan;
  case CmpInst::ICMP_UGE:
    return SPIRV::OpUGreaterThanEqual;
  case CmpInst::ICMP_UGT:
    return SPIRV::OpUGreaterThan;
  case CmpInst::ICMP_ULE:
    return SPIRV::OpULessThanEqual;
  case CmpInst::ICMP_ULT:
    return SPIRV::OpULessThan;
  default:
    llvm_unreachable("Unknown predicate type for ICmp");
  }
}

static unsigned getPtrCmpOpcode(unsigned Pred) {
  switch (static_cast<CmpInst::Predicate>(Pred)) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpPtrEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpPtrNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for pointer comparison");
  }
}

// Return the logical operation, or abort if none exists.
static unsigned getBoolCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpLogicalEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpLogicalNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for Bool comparison");
  }
}

bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectFreeze(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I) const {
  // There is no way to implement `freeze` correctly without support on the
  // SPIR-V standard side, but we may at least address a simple (static) case
  // when the presence of an undef/poison value is obvious. The main benefit of
  // even incomplete `freeze` support is that it prevents translation from
  // crashing due to a lack of support in the legalization and instruction
  // selection steps.
  if (!I.getOperand(0).isReg() || !I.getOperand(1).isReg())
    return false;
  Register OpReg = I.getOperand(1).getReg();
  if (MachineInstr *Def = MRI->getVRegDef(OpReg)) {
    Register Reg;
    switch (Def->getOpcode()) {
    case SPIRV::ASSIGN_TYPE:
      if (MachineInstr *AssignToDef =
              MRI->getVRegDef(Def->getOperand(1).getReg())) {
        if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
          Reg = Def->getOperand(2).getReg();
      }
      break;
    case SPIRV::OpUndef:
      Reg = Def->getOperand(1).getReg();
      break;
    }
    unsigned DestOpCode;
    if (Reg.isValid()) {
      DestOpCode = SPIRV::OpConstantNull;
    } else {
      DestOpCode = TargetOpcode::COPY;
      Reg = OpReg;
    }
    return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(DestOpCode))
        .addDef(I.getOperand(0).getReg())
        .addUse(Reg)
        .constrainAllUses(TII, TRI, RBI);
  }
  return false;
}
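
// For illustration: `%f = freeze i32 undef`, where the operand is an OpUndef
// (or a G_IMPLICIT_DEF behind an ASSIGN_TYPE), selects to OpConstantNull,
// pinning the result to a defined zero; freezing an already-defined register
// degenerates to a plain COPY.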

bool SPIRVInstructionSelector::selectConstVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // TODO: only the const case is supported for now.
  assert(std::all_of(
      I.operands_begin(), I.operands_end(), [this](const MachineOperand &MO) {
        if (MO.isDef())
          return true;
        if (!MO.isReg())
          return false;
        SPIRVType *ConstTy = this->MRI->getVRegDef(MO.getReg());
        assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
               ConstTy->getOperand(1).isReg());
        Register ConstReg = ConstTy->getOperand(1).getReg();
        const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
        assert(Const);
        return (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
                Const->getOpcode() == TargetOpcode::G_FCONSTANT);
      }));

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(SPIRV::OpConstantComposite))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  return MIB.constrainAllUses(TII, TRI, RBI);
}

static unsigned getArrayComponentCount(MachineRegisterInfo *MRI,
                                       const SPIRVType *ResType) {
  Register OpReg = ResType->getOperand(2).getReg();
  SPIRVType *OpDef = MRI->getVRegDef(OpReg);
  if (!OpDef)
    return 0;
  if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
      OpDef->getOperand(1).isReg()) {
    if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
      OpDef = RefDef;
  }
  unsigned N = OpDef->getOpcode() == TargetOpcode::G_CONSTANT
                   ? OpDef->getOperand(1).getCImm()->getValue().getZExtValue()
                   : 0;
  return N;
}

bool SPIRVInstructionSelector::selectSplatVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  unsigned N = 0;
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    N = GR.getScalarOrVectorComponentCount(ResType);
  else if (ResType->getOpcode() == SPIRV::OpTypeArray)
    N = getArrayComponentCount(MRI, ResType);
  else
    report_fatal_error("Cannot select G_SPLAT_VECTOR with a non-vector result");

  unsigned OpIdx = I.getNumExplicitDefs();
  if (!I.getOperand(OpIdx).isReg())
    report_fatal_error("Unexpected argument in G_SPLAT_VECTOR");

1263   // Check whether we can construct a constant vector.
1264   Register OpReg = I.getOperand(OpIdx).getReg();
1265   bool IsConst = false;
1266   if (SPIRVType *OpDef = MRI->getVRegDef(OpReg)) {
1267     if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
1268         OpDef->getOperand(1).isReg()) {
1269       if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
1270         OpDef = RefDef;
1271     }
1272     IsConst = OpDef->getOpcode() == TargetOpcode::G_CONSTANT ||
1273               OpDef->getOpcode() == TargetOpcode::G_FCONSTANT;
1274   }
1275 
1276   if (!IsConst && N < 2)
1277     report_fatal_error(
1278         "There must be at least two constituent operands in a vector");
1279 
1280   auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
1281                      TII.get(IsConst ? SPIRV::OpConstantComposite
1282                                      : SPIRV::OpCompositeConstruct))
1283                  .addDef(ResVReg)
1284                  .addUse(GR.getSPIRVTypeID(ResType));
1285   for (unsigned i = 0; i < N; ++i)
1286     MIB.addUse(OpReg);
1287   return MIB.constrainAllUses(TII, TRI, RBI);
1288 }
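// A sketch of the splat lowering above, assuming a 4-element result type:
//   %v = G_SPLAT_VECTOR %x
// selects to
//   %v = OpConstantComposite %v4i32 %x %x %x %x    ; %x is G_(F)CONSTANT
// or
//   %v = OpCompositeConstruct %v4i32 %x %x %x %x   ; otherwise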
1289 
1290 bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
1291                                          const SPIRVType *ResType,
1292                                          unsigned CmpOpc,
1293                                          MachineInstr &I) const {
1294   Register Cmp0 = I.getOperand(2).getReg();
1295   Register Cmp1 = I.getOperand(3).getReg();
1296   assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
1297              GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
1298          "CMP operands should have the same type");
1299   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
1300       .addDef(ResVReg)
1301       .addUse(GR.getSPIRVTypeID(ResType))
1302       .addUse(Cmp0)
1303       .addUse(Cmp1)
1304       .constrainAllUses(TII, TRI, RBI);
1305 }
1306 
1307 bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
1308                                           const SPIRVType *ResType,
1309                                           MachineInstr &I) const {
1310   auto Pred = I.getOperand(1).getPredicate();
1311   unsigned CmpOpc;
1312 
1313   Register CmpOperand = I.getOperand(2).getReg();
1314   if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
1315     CmpOpc = getPtrCmpOpcode(Pred);
1316   else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
1317     CmpOpc = getBoolCmpOpcode(Pred);
1318   else
1319     CmpOpc = getICmpOpcode(Pred);
1320   return selectCmp(ResVReg, ResType, CmpOpc, I);
1321 }
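// For illustration (a sketch based on the helpers above): the opcode picked
// for a given predicate depends on the operand type, e.g. for `eq`:
//   integers -> OpIEqual, pointers -> OpPtrEqual, booleans -> OpLogicalEqual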
1322 
1323 void SPIRVInstructionSelector::renderFImm32(MachineInstrBuilder &MIB,
1324                                             const MachineInstr &I,
1325                                             int OpIdx) const {
1326   assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
1327          "Expected G_FCONSTANT");
1328   const ConstantFP *FPImm = I.getOperand(1).getFPImm();
1329   addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
1330 }
1331 
1332 void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
1333                                            const MachineInstr &I,
1334                                            int OpIdx) const {
1335   assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1336          "Expected G_CONSTANT");
1337   addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
1338 }
1339 
1340 Register
1341 SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
1342                                            const SPIRVType *ResType) const {
1343   Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
1344   const SPIRVType *SpvI32Ty =
1345       ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
1346   // Find the constant in the duplicates tracker (DT) or build a new one.
1347   auto ConstInt = ConstantInt::get(LLVMTy, Val);
1348   Register NewReg = GR.find(ConstInt, GR.CurMF);
1349   if (!NewReg.isValid()) {
1350     NewReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
1351     GR.add(ConstInt, GR.CurMF, NewReg);
1352     MachineInstr *MI;
1353     MachineBasicBlock &BB = *I.getParent();
1354     if (Val == 0) {
1355       MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1356                .addDef(NewReg)
1357                .addUse(GR.getSPIRVTypeID(SpvI32Ty));
1358     } else {
1359       MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
1360                .addDef(NewReg)
1361                .addUse(GR.getSPIRVTypeID(SpvI32Ty))
1362                .addImm(APInt(32, Val).getZExtValue());
1363     }
1364     constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
1365   }
1366   return NewReg;
1367 }
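// A sketch of what buildI32Constant emits when no cached constant exists
// (assuming Val == 42 and Val == 0 respectively):
//   %c = OpConstantI %uint 42
//   %z = OpConstantNull %uint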
1368 
1369 bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
1370                                           const SPIRVType *ResType,
1371                                           MachineInstr &I) const {
1372   unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
1373   return selectCmp(ResVReg, ResType, CmpOp, I);
1374 }
1375 
1376 Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
1377                                                  MachineInstr &I) const {
1378   if (ResType->getOpcode() == SPIRV::OpTypeVector)
1379     return GR.getOrCreateConsIntVector(0, I, ResType, TII);
1380   return GR.getOrCreateConstInt(0, I, ResType, TII);
1381 }
1382 
1383 Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
1384                                                 const SPIRVType *ResType,
1385                                                 MachineInstr &I) const {
1386   unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
1387   APInt One =
1388       AllOnes ? APInt::getAllOnes(BitWidth) : APInt::getOneBitSet(BitWidth, 0);
1389   if (ResType->getOpcode() == SPIRV::OpTypeVector)
1390     return GR.getOrCreateConsIntVector(One.getZExtValue(), I, ResType, TII);
1391   return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII);
1392 }
1393 
1394 bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
1395                                             const SPIRVType *ResType,
1396                                             MachineInstr &I,
1397                                             bool IsSigned) const {
1398   // To extend a bool, we need to use OpSelect between constants.
1399   Register ZeroReg = buildZerosVal(ResType, I);
1400   Register OneReg = buildOnesVal(IsSigned, ResType, I);
1401   bool IsScalarBool =
1402       GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
1403   unsigned Opcode =
1404       IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
1405   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
1406       .addDef(ResVReg)
1407       .addUse(GR.getSPIRVTypeID(ResType))
1408       .addUse(I.getOperand(1).getReg())
1409       .addUse(OneReg)
1410       .addUse(ZeroReg)
1411       .constrainAllUses(TII, TRI, RBI);
1412 }
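// A sketch of the bool-extension pattern above: `zext i1 %b to i32` becomes
//   %r = OpSelectSISCond %uint %b %one %zero
// while `sext` (IsSigned) selects an all-ones true value instead:
//   %r = OpSelectSISCond %uint %b %minus_one %zero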
1413 
1414 bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
1415                                           const SPIRVType *ResType,
1416                                           MachineInstr &I, bool IsSigned,
1417                                           unsigned Opcode) const {
1418   Register SrcReg = I.getOperand(1).getReg();
1419   // We can convert a bool value directly to float without OpConvert*ToF;
1420   // however, the translator generates OpSelect+OpConvert*ToF, so we do the same.
1421   if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
1422     unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
1423     SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
1424     if (ResType->getOpcode() == SPIRV::OpTypeVector) {
1425       const unsigned NumElts = ResType->getOperand(2).getImm();
1426       TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
1427     }
1428     SrcReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1429     selectSelect(SrcReg, TmpType, I, false);
1430   }
1431   return selectUnOpWithSrc(ResVReg, ResType, I, SrcReg, Opcode);
1432 }
1433 
1434 bool SPIRVInstructionSelector::selectExt(Register ResVReg,
1435                                          const SPIRVType *ResType,
1436                                          MachineInstr &I, bool IsSigned) const {
1437   if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool))
1438     return selectSelect(ResVReg, ResType, I, IsSigned);
1439   unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
1440   return selectUnOp(ResVReg, ResType, I, Opcode);
1441 }
1442 
1443 bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
1444                                                Register ResVReg,
1445                                                MachineInstr &I,
1446                                                const SPIRVType *IntTy,
1447                                                const SPIRVType *BoolTy) const {
1448   // To truncate to a bool, we use OpBitwiseAnd with 1 and OpINotEqual with zero.
1449   Register BitIntReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1450   bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
1451   unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
1452   Register Zero = buildZerosVal(IntTy, I);
1453   Register One = buildOnesVal(false, IntTy, I);
1454   MachineBasicBlock &BB = *I.getParent();
1455   BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
1456       .addDef(BitIntReg)
1457       .addUse(GR.getSPIRVTypeID(IntTy))
1458       .addUse(IntReg)
1459       .addUse(One)
1460       .constrainAllUses(TII, TRI, RBI);
1461   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
1462       .addDef(ResVReg)
1463       .addUse(GR.getSPIRVTypeID(BoolTy))
1464       .addUse(BitIntReg)
1465       .addUse(Zero)
1466       .constrainAllUses(TII, TRI, RBI);
1467 }
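// A sketch of the int-to-bool truncation above, for a scalar %x of type %uint:
//   %t = OpBitwiseAndS %uint %x %one     ; keep only the lowest bit
//   %b = OpINotEqual %bool %t %zero      ; true iff that bit is set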
1468 
1469 bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
1470                                            const SPIRVType *ResType,
1471                                            MachineInstr &I) const {
1472   if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool)) {
1473     Register IntReg = I.getOperand(1).getReg();
1474     const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
1475     return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType);
1476   }
1477   bool IsSigned = GR.isScalarOrVectorSigned(ResType);
1478   unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
1479   return selectUnOp(ResVReg, ResType, I, Opcode);
1480 }
1481 
1482 bool SPIRVInstructionSelector::selectConst(Register ResVReg,
1483                                            const SPIRVType *ResType,
1484                                            const APInt &Imm,
1485                                            MachineInstr &I) const {
1486   unsigned TyOpcode = ResType->getOpcode();
1487   assert(TyOpcode != SPIRV::OpTypePointer || Imm.isZero());
1488   MachineBasicBlock &BB = *I.getParent();
1489   if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
1490       Imm.isZero())
1491     return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1492         .addDef(ResVReg)
1493         .addUse(GR.getSPIRVTypeID(ResType))
1494         .constrainAllUses(TII, TRI, RBI);
1495   if (TyOpcode == SPIRV::OpTypeInt) {
1496     assert(Imm.getBitWidth() <= 64 && "Unsupported integer width!");
1497     Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII);
1498     if (Reg == ResVReg)
1499       return true;
1500     return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
1501         .addDef(ResVReg)
1502         .addUse(Reg)
1503         .constrainAllUses(TII, TRI, RBI);
1504   }
1505   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
1506                  .addDef(ResVReg)
1507                  .addUse(GR.getSPIRVTypeID(ResType));
1508   // Integers of width <= 32 bits should have been caught by the imported patterns.
1509   assert(Imm.getBitWidth() > 32);
1510   addNumImm(Imm, MIB);
1511   return MIB.constrainAllUses(TII, TRI, RBI);
1512 }
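// Worked example for the wide-integer path above (a sketch): a 64-bit
// constant such as 0x1'0000'0000 is emitted as a single OpConstantI whose
// literal is split by addNumImm into 32-bit words, low-order word first:
//   %c = OpConstantI %ulong 0 1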
1513 
1514 bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
1515                                              const SPIRVType *ResType,
1516                                              MachineInstr &I) const {
1517   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
1518       .addDef(ResVReg)
1519       .addUse(GR.getSPIRVTypeID(ResType))
1520       .constrainAllUses(TII, TRI, RBI);
1521 }
1522 
1523 static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
1524   assert(MO.isReg());
1525   const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
1526   if (TypeInst->getOpcode() != SPIRV::ASSIGN_TYPE)
1527     return false;
1528   assert(TypeInst->getOperand(1).isReg());
1529   MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
1530   return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
1531 }
1532 
1533 static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
1534   const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
1535   MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
1536   assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT);
1537   return ImmInst->getOperand(1).getCImm()->getZExtValue();
1538 }
1539 
1540 bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
1541                                                const SPIRVType *ResType,
1542                                                MachineInstr &I) const {
1543   MachineBasicBlock &BB = *I.getParent();
1544   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
1545                  .addDef(ResVReg)
1546                  .addUse(GR.getSPIRVTypeID(ResType))
1547                  // object to insert
1548                  // Object to insert.
1549                  .addUse(I.getOperand(3).getReg())
1550                  // Composite to insert into.
1551   for (unsigned i = 4; i < I.getNumOperands(); i++)
1552     MIB.addImm(foldImm(I.getOperand(i), MRI));
1553   return MIB.constrainAllUses(TII, TRI, RBI);
1554 }
1555 
1556 bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
1557                                                 const SPIRVType *ResType,
1558                                                 MachineInstr &I) const {
1559   MachineBasicBlock &BB = *I.getParent();
1560   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
1561                  .addDef(ResVReg)
1562                  .addUse(GR.getSPIRVTypeID(ResType))
1563                  .addUse(I.getOperand(2).getReg());
1564   for (unsigned i = 3; i < I.getNumOperands(); i++)
1565     MIB.addImm(foldImm(I.getOperand(i), MRI));
1566   return MIB.constrainAllUses(TII, TRI, RBI);
1567 }
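// Sketches of the composite lowerings above, with indices folded from
// G_CONSTANT operands into literal immediates:
//   insertvalue  -> OpCompositeInsert  %ty %obj %agg 1
//   extractvalue -> OpCompositeExtract %ty %agg 1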
1568 
1569 bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
1570                                                const SPIRVType *ResType,
1571                                                MachineInstr &I) const {
1572   if (isImm(I.getOperand(4), MRI))
1573     return selectInsertVal(ResVReg, ResType, I);
1574   MachineBasicBlock &BB = *I.getParent();
1575   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
1576       .addDef(ResVReg)
1577       .addUse(GR.getSPIRVTypeID(ResType))
1578       .addUse(I.getOperand(2).getReg())
1579       .addUse(I.getOperand(3).getReg())
1580       .addUse(I.getOperand(4).getReg())
1581       .constrainAllUses(TII, TRI, RBI);
1582 }
1583 
1584 bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
1585                                                 const SPIRVType *ResType,
1586                                                 MachineInstr &I) const {
1587   if (isImm(I.getOperand(3), MRI))
1588     return selectExtractVal(ResVReg, ResType, I);
1589   MachineBasicBlock &BB = *I.getParent();
1590   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
1591       .addDef(ResVReg)
1592       .addUse(GR.getSPIRVTypeID(ResType))
1593       .addUse(I.getOperand(2).getReg())
1594       .addUse(I.getOperand(3).getReg())
1595       .constrainAllUses(TII, TRI, RBI);
1596 }
1597 
1598 bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
1599                                          const SPIRVType *ResType,
1600                                          MachineInstr &I) const {
1601   const bool IsGEPInBounds = I.getOperand(2).getImm();
1602 
1603   // OpAccessChain could be used for OpenCL, but the SPIRV-LLVM Translator only
1604   // relies on PtrAccessChain, so we'll try not to deviate. For Vulkan, however,
1605   // we have to use Op[InBounds]AccessChain.
1606   const unsigned Opcode = STI.isVulkanEnv()
1607                               ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
1608                                                : SPIRV::OpAccessChain)
1609                               : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain
1610                                                : SPIRV::OpPtrAccessChain);
1611 
1612   auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
1613                  .addDef(ResVReg)
1614                  .addUse(GR.getSPIRVTypeID(ResType))
1615                  // Object to get a pointer to.
1616                  .addUse(I.getOperand(3).getReg());
1617   // Adding indices.
1618   const unsigned StartingIndex =
1619       (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
1620           ? 5
1621           : 4;
1622   for (unsigned i = StartingIndex; i < I.getNumExplicitOperands(); ++i)
1623     Res.addUse(I.getOperand(i).getReg());
1624   return Res.constrainAllUses(TII, TRI, RBI);
1625 }
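// A sketch of the GEP lowering above in an OpenCL environment:
//   %q = getelementptr inbounds i32, ptr %p, i64 %i
// becomes
//   %q = OpInBoundsPtrAccessChain %ptr %p %i
// For Vulkan, Op[InBounds]AccessChain is used instead; it has no leading
// "element" operand, which is why StartingIndex differs above.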
1626 
1627 bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
1628                                                const SPIRVType *ResType,
1629                                                MachineInstr &I) const {
1630   MachineBasicBlock &BB = *I.getParent();
1631   Intrinsic::ID IID = cast<GIntrinsic>(I).getIntrinsicID();
1632   switch (IID) {
1633   case Intrinsic::spv_load:
1634     return selectLoad(ResVReg, ResType, I);
1635   case Intrinsic::spv_store:
1636     return selectStore(I);
1637   case Intrinsic::spv_extractv:
1638     return selectExtractVal(ResVReg, ResType, I);
1639   case Intrinsic::spv_insertv:
1640     return selectInsertVal(ResVReg, ResType, I);
1641   case Intrinsic::spv_extractelt:
1642     return selectExtractElt(ResVReg, ResType, I);
1643   case Intrinsic::spv_insertelt:
1644     return selectInsertElt(ResVReg, ResType, I);
1645   case Intrinsic::spv_gep:
1646     return selectGEP(ResVReg, ResType, I);
1647   case Intrinsic::spv_unref_global:
1648   case Intrinsic::spv_init_global: {
1649     MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
1650     MachineInstr *Init = I.getNumExplicitOperands() > 2
1651                              ? MRI->getVRegDef(I.getOperand(2).getReg())
1652                              : nullptr;
1653     assert(MI);
1654     return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
1655   }
1656   case Intrinsic::spv_undef: {
1657     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
1658                    .addDef(ResVReg)
1659                    .addUse(GR.getSPIRVTypeID(ResType));
1660     return MIB.constrainAllUses(TII, TRI, RBI);
1661   }
1662   case Intrinsic::spv_const_composite: {
1663     // If no values are attached, the composite is a null constant.
1664     bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
1665     unsigned Opcode =
1666         IsNull ? SPIRV::OpConstantNull : SPIRV::OpConstantComposite;
1667     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
1668                    .addDef(ResVReg)
1669                    .addUse(GR.getSPIRVTypeID(ResType));
1670     // Skip the type MD node already used when generating assign.type for this.
1671     if (!IsNull) {
1672       for (unsigned i = I.getNumExplicitDefs() + 1;
1673            i < I.getNumExplicitOperands(); ++i) {
1674         MIB.addUse(I.getOperand(i).getReg());
1675       }
1676     }
1677     return MIB.constrainAllUses(TII, TRI, RBI);
1678   }
1679   case Intrinsic::spv_assign_name: {
1680     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
1681     MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
1682     for (unsigned i = I.getNumExplicitDefs() + 2;
1683          i < I.getNumExplicitOperands(); ++i) {
1684       MIB.addImm(I.getOperand(i).getImm());
1685     }
1686     return MIB.constrainAllUses(TII, TRI, RBI);
1687   }
1688   case Intrinsic::spv_switch: {
1689     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
1690     for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
1691       if (I.getOperand(i).isReg())
1692         MIB.addReg(I.getOperand(i).getReg());
1693       else if (I.getOperand(i).isCImm())
1694         addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
1695       else if (I.getOperand(i).isMBB())
1696         MIB.addMBB(I.getOperand(i).getMBB());
1697       else
1698         llvm_unreachable("Unexpected OpSwitch operand");
1699     }
1700     return MIB.constrainAllUses(TII, TRI, RBI);
1701   }
1702   case Intrinsic::spv_cmpxchg:
1703     return selectAtomicCmpXchg(ResVReg, ResType, I);
1704   case Intrinsic::spv_unreachable:
1705     BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable));
1706     break;
1707   case Intrinsic::spv_alloca:
1708     return selectFrameIndex(ResVReg, ResType, I);
1709   case Intrinsic::spv_alloca_array:
1710     return selectAllocaArray(ResVReg, ResType, I);
1711   case Intrinsic::spv_assume:
1712     if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
1713       BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
1714           .addUse(I.getOperand(1).getReg());
1715     break;
1716   case Intrinsic::spv_expect:
1717     if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
1718       BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
1719           .addDef(ResVReg)
1720           .addUse(GR.getSPIRVTypeID(ResType))
1721           .addUse(I.getOperand(2).getReg())
1722           .addUse(I.getOperand(3).getReg());
1723     break;
1724   case Intrinsic::spv_thread_id:
1725     return selectSpvThreadId(ResVReg, ResType, I);
1726   case Intrinsic::spv_lifetime_start:
1727   case Intrinsic::spv_lifetime_end: {
1728     unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
1729                                                        : SPIRV::OpLifetimeStop;
1730     int64_t Size = I.getOperand(I.getNumExplicitDefs() + 1).getImm();
1731     Register PtrReg = I.getOperand(I.getNumExplicitDefs() + 2).getReg();
1732     unsigned PointeeOpType = GR.getPointeeTypeOp(PtrReg);
1733     bool IsNonvoidPtr = PointeeOpType != 0 && PointeeOpType != SPIRV::OpTypeVoid;
1734     if (Size == -1 || IsNonvoidPtr)
1735       Size = 0;
1736     BuildMI(BB, I, I.getDebugLoc(), TII.get(Op)).addUse(PtrReg).addImm(Size);
1737   } break;
1738   default: {
1739     std::string DiagMsg;
1740     raw_string_ostream OS(DiagMsg);
1741     I.print(OS);
1742     DiagMsg = "Intrinsic selection not implemented: " + DiagMsg;
1743     report_fatal_error(DiagMsg.c_str(), false);
1744   }
1745   }
1746   return true;
1747 }
1748 
1749 bool SPIRVInstructionSelector::selectAllocaArray(Register ResVReg,
1750                                                  const SPIRVType *ResType,
1751                                                  MachineInstr &I) const {
1752   // The allocation instruction had an allocation-size operand that is not
1753   // the constant 1, so lower it to OpVariableLengthArrayINTEL.
1754   MachineBasicBlock &BB = *I.getParent();
1755   return BuildMI(BB, I, I.getDebugLoc(),
1756                  TII.get(SPIRV::OpVariableLengthArrayINTEL))
1757       .addDef(ResVReg)
1758       .addUse(GR.getSPIRVTypeID(ResType))
1759       .addUse(I.getOperand(2).getReg())
1760       .constrainAllUses(TII, TRI, RBI);
1761 }
1762 
1763 bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
1764                                                 const SPIRVType *ResType,
1765                                                 MachineInstr &I) const {
1766   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
1767       .addDef(ResVReg)
1768       .addUse(GR.getSPIRVTypeID(ResType))
1769       .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
1770       .constrainAllUses(TII, TRI, RBI);
1771 }
1772 
1773 bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
1774   // InstructionSelector walks backwards through the instructions. We can use
1775   // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
1776   // first, so we can generate an OpBranchConditional here. If there is no
1777   // G_BRCOND, we just use OpBranch for a regular unconditional branch.
1778   const MachineInstr *PrevI = I.getPrevNode();
1779   MachineBasicBlock &MBB = *I.getParent();
1780   if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
1781     return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
1782         .addUse(PrevI->getOperand(0).getReg())
1783         .addMBB(PrevI->getOperand(1).getMBB())
1784         .addMBB(I.getOperand(0).getMBB())
1785         .constrainAllUses(TII, TRI, RBI);
1786   }
1787   return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
1788       .addMBB(I.getOperand(0).getMBB())
1789       .constrainAllUses(TII, TRI, RBI);
1790 }
1791 
1792 bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
1793   // InstructionSelector walks backwards through the instructions. For an
1794   // explicit conditional branch with no fallthrough, we use both a G_BR and a
1795   // G_BRCOND to create an OpBranchConditional. We should hit G_BR first, and
1796   // generate the OpBranchConditional in selectBranch above.
1797   //
1798   // If an OpBranchConditional has been generated, we simply return, as the work
1799   // is already done. If there is no OpBranchConditional, LLVM must be relying on
1800   // implicit fallthrough to the next basic block, so we need to create an
1801   // OpBranchConditional with an explicit "false" argument pointing to the next
1802   // basic block that LLVM would fall through to.
1803   const MachineInstr *NextI = I.getNextNode();
1804   // Check if this has already been successfully selected.
1805   if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
1806     return true;
1807   // Must be relying on implicit block fallthrough, so generate an
1808   // OpBranchConditional with the "next" basic block as the "false" target.
1809   MachineBasicBlock &MBB = *I.getParent();
1810   unsigned NextMBBNum = MBB.getNextNode()->getNumber();
1811   MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
1812   return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
1813       .addUse(I.getOperand(0).getReg())
1814       .addMBB(I.getOperand(1).getMBB())
1815       .addMBB(NextMBB)
1816       .constrainAllUses(TII, TRI, RBI);
1817 }
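// A sketch of the fallthrough case handled above:
//   G_BRCOND %cond, %bb.true        ; no G_BR follows, falls through
// becomes
//   OpBranchConditional %cond %bb.true %bb.next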
1818 
1819 bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
1820                                          const SPIRVType *ResType,
1821                                          MachineInstr &I) const {
1822   auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
1823                  .addDef(ResVReg)
1824                  .addUse(GR.getSPIRVTypeID(ResType));
1825   const unsigned NumOps = I.getNumOperands();
1826   for (unsigned i = 1; i < NumOps; i += 2) {
1827     MIB.addUse(I.getOperand(i + 0).getReg());
1828     MIB.addMBB(I.getOperand(i + 1).getMBB());
1829   }
1830   return MIB.constrainAllUses(TII, TRI, RBI);
1831 }
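// For illustration, OpPhi preserves G_PHI's (value, predecessor) pairing:
//   %r = G_PHI %a, %bb.1, %b, %bb.2  ->  %r = OpPhi %type %a %bb.1 %b %bb.2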
1832 
1833 bool SPIRVInstructionSelector::selectGlobalValue(
1834     Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
1835   // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
1836   MachineIRBuilder MIRBuilder(I);
1837   const GlobalValue *GV = I.getOperand(1).getGlobal();
1838   Type *GVType = GV->getValueType();
1839   SPIRVType *PointerBaseType;
1840   if (GVType->isArrayTy()) {
1841     SPIRVType *ArrayElementType =
1842         GR.getOrCreateSPIRVType(GVType->getArrayElementType(), MIRBuilder,
1843                                 SPIRV::AccessQualifier::ReadWrite, false);
1844     PointerBaseType = GR.getOrCreateSPIRVArrayType(
1845         ArrayElementType, GVType->getArrayNumElements(), I, TII);
1846   } else {
1847     PointerBaseType = GR.getOrCreateSPIRVType(
1848         GVType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
1849   }
1850   SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
1851       PointerBaseType, I, TII,
1852       addressSpaceToStorageClass(GV->getAddressSpace(), STI));
1853 
1854   std::string GlobalIdent;
1855   if (!GV->hasName()) {
1856     unsigned &ID = UnnamedGlobalIDs[GV];
1857     if (ID == 0)
1858       ID = UnnamedGlobalIDs.size();
1859     GlobalIdent = "__unnamed_" + Twine(ID).str();
1860   } else {
1861     GlobalIdent = GV->getGlobalIdentifier();
1862   }
1863 
1864   // Behaviour of functions as operands depends on availability of the
1865   // corresponding extension (SPV_INTEL_function_pointers):
1866   // - If there is an extension to operate with functions as operands:
1867   // We create a proper constant operand and evaluate a correct type for a
1868   // function pointer.
1869   // - Without the required extension:
1870   // We have functions as operands in tests with blocks of instructions, e.g.
1871   // in transcoding/global_block.ll. These operands are not used and should be
1872   // substituted by zero constants. Their type is always expected to be
1873   // OpTypePointer Function %uchar.
1874   if (isa<Function>(GV)) {
1875     const Constant *ConstVal = GV;
1876     MachineBasicBlock &BB = *I.getParent();
1877     Register NewReg = GR.find(ConstVal, GR.CurMF);
1878     if (!NewReg.isValid()) {
1879       Register NewReg = ResVReg;
1880       GR.add(ConstVal, GR.CurMF, NewReg);
1881       const Function *GVFun =
1882           STI.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)
1883               ? dyn_cast<Function>(GV)
1884               : nullptr;
1885       if (GVFun) {
1886         // References to a function via function pointers generate virtual
1887         // registers without a definition. We will resolve them later, during
1888         // the module analysis stage.
1889         MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1890         Register FuncVReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
1891         MRI->setRegClass(FuncVReg, &SPIRV::IDRegClass);
1892         MachineInstrBuilder MB =
1893             BuildMI(BB, I, I.getDebugLoc(),
1894                     TII.get(SPIRV::OpConstantFunctionPointerINTEL))
1895                 .addDef(NewReg)
1896                 .addUse(GR.getSPIRVTypeID(ResType))
1897                 .addUse(FuncVReg);
1898         // Map the function pointer operand to the Function it references.
1899         GR.recordFunctionPointer(&MB.getInstr()->getOperand(2), GVFun);
1900         return MB.constrainAllUses(TII, TRI, RBI);
1901       }
1902       return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1903           .addDef(NewReg)
1904           .addUse(GR.getSPIRVTypeID(ResType))
1905           .constrainAllUses(TII, TRI, RBI);
1906     }
1907     assert(NewReg != ResVReg);
1908     return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
1909         .addDef(ResVReg)
1910         .addUse(NewReg)
1911         .constrainAllUses(TII, TRI, RBI);
1912   }
1913   auto GlobalVar = cast<GlobalVariable>(GV);
1914   assert(GlobalVar->getName() != "llvm.global.annotations");
1915 
1916   bool HasInit = GlobalVar->hasInitializer() &&
1917                  !isa<UndefValue>(GlobalVar->getInitializer());
1918   // Skip the empty declaration for GVs with initializers until we get the
1919   // decl with the passed initializer.
1920   if (HasInit && !Init)
1921     return true;
1922 
1923   unsigned AddrSpace = GV->getAddressSpace();
1924   SPIRV::StorageClass::StorageClass Storage =
1925       addressSpaceToStorageClass(AddrSpace, STI);
1926   bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage &&
1927                   Storage != SPIRV::StorageClass::Function;
1928   SPIRV::LinkageType::LinkageType LnkType =
1929       (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
1930           ? SPIRV::LinkageType::Import
1931           : (GV->getLinkage() == GlobalValue::LinkOnceODRLinkage &&
1932                      STI.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr)
1933                  ? SPIRV::LinkageType::LinkOnceODR
1934                  : SPIRV::LinkageType::Export);
1935 
1936   Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
1937                                         Storage, Init, GlobalVar->isConstant(),
1938                                         HasLnkTy, LnkType, MIRBuilder, true);
1939   return Reg.isValid();
1940 }
1941 
1942 bool SPIRVInstructionSelector::selectLog10(Register ResVReg,
1943                                            const SPIRVType *ResType,
1944                                            MachineInstr &I) const {
1945   if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
1946     return selectExtInst(ResVReg, ResType, I, CL::log10);
1947   }
1948 
1949   // There is no log10 instruction in the GLSL Extended Instruction set, so it
1950   // is implemented as:
1951   // log10(x) = log2(x) * (1 / log2(10))
1952   //          = log2(x) * 0.30103
1953 
1954   MachineIRBuilder MIRBuilder(I);
1955   MachineBasicBlock &BB = *I.getParent();
1956 
1957   // Build log2(x).
1958   Register VarReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1959   bool Result =
1960       BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
1961           .addDef(VarReg)
1962           .addUse(GR.getSPIRVTypeID(ResType))
1963           .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
1964           .addImm(GL::Log2)
1965           .add(I.getOperand(1))
1966           .constrainAllUses(TII, TRI, RBI);
1967 
1968   // Build 0.30103.
1969   assert(ResType->getOpcode() == SPIRV::OpTypeVector ||
1970          ResType->getOpcode() == SPIRV::OpTypeFloat);
1971   // TODO: Add matrix implementation once supported by the HLSL frontend.
1972   const SPIRVType *SpirvScalarType =
1973       ResType->getOpcode() == SPIRV::OpTypeVector
1974           ? GR.getSPIRVTypeForVReg(ResType->getOperand(1).getReg())
1975           : ResType;
1976   Register ScaleReg =
1977       GR.buildConstantFP(APFloat(0.30103f), MIRBuilder, SpirvScalarType);
1978 
1979   // Multiply log2(x) by 0.30103 to get log10(x) result.
1980   auto Opcode = ResType->getOpcode() == SPIRV::OpTypeVector
1981                     ? SPIRV::OpVectorTimesScalar
1982                     : SPIRV::OpFMulS;
1983   Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
1984                 .addDef(ResVReg)
1985                 .addUse(GR.getSPIRVTypeID(ResType))
1986                 .addUse(VarReg)
1987                 .addUse(ScaleReg)
1988                 .constrainAllUses(TII, TRI, RBI);
1989 
1990   return Result;
1991 }
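// Numeric sanity check for the constant above: for x = 1000, log2(1000) is
// about 9.96578, and 9.96578 * 0.30103 is about 2.99999, i.e. log10(1000) = 3
// to within f32 tolerance.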
1992 
1993 bool SPIRVInstructionSelector::selectSpvThreadId(Register ResVReg,
1994                                                  const SPIRVType *ResType,
1995                                                  MachineInstr &I) const {
1996   // DX intrinsic: @llvm.dx.thread.id(i32)
1997   // ID  Name      Description
1998   // 93  ThreadId  reads the thread ID
1999 
2000   MachineIRBuilder MIRBuilder(I);
2001   const SPIRVType *U32Type = GR.getOrCreateSPIRVIntegerType(32, MIRBuilder);
2002   const SPIRVType *Vec3Ty =
2003       GR.getOrCreateSPIRVVectorType(U32Type, 3, MIRBuilder);
2004   const SPIRVType *PtrType = GR.getOrCreateSPIRVPointerType(
2005       Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);
2006 
2007   // Create a new register for the GlobalInvocationID builtin variable.
2008   Register NewRegister =
2009       MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::IDRegClass);
2010   MIRBuilder.getMRI()->setType(NewRegister, LLT::pointer(0, 32));
2011   GR.assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());
2012 
2013   // Build GlobalInvocationID global variable with the necessary decorations.
2014   Register Variable = GR.buildGlobalVariable(
2015       NewRegister, PtrType,
2016       getLinkStringForBuiltIn(SPIRV::BuiltIn::GlobalInvocationId), nullptr,
2017       SPIRV::StorageClass::Input, nullptr, true, true,
2018       SPIRV::LinkageType::Import, MIRBuilder, false);
2019 
2020   // Create a new register for the loaded value.
2021   MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2022   Register LoadedRegister = MRI->createVirtualRegister(&SPIRV::IDRegClass);
2023   MIRBuilder.getMRI()->setType(LoadedRegister, LLT::pointer(0, 32));
2024   GR.assignSPIRVTypeToVReg(Vec3Ty, LoadedRegister, MIRBuilder.getMF());
2025 
2026   // Load v3uint value from the global variable.
2027   BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
2028       .addDef(LoadedRegister)
2029       .addUse(GR.getSPIRVTypeID(Vec3Ty))
2030       .addUse(Variable);
2031 
2032   // Get the thread ID index. The operand is expected to be a constant
2033   // immediate value, wrapped in a type assignment.
2034   assert(I.getOperand(2).isReg());
2035   Register ThreadIdReg = I.getOperand(2).getReg();
2036   SPIRVType *ConstTy = this->MRI->getVRegDef(ThreadIdReg);
2037   assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
2038          ConstTy->getOperand(1).isReg());
2039   Register ConstReg = ConstTy->getOperand(1).getReg();
2040   const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
2041   assert(Const && Const->getOpcode() == TargetOpcode::G_CONSTANT);
2042   const llvm::APInt &Val = Const->getOperand(1).getCImm()->getValue();
2043   const uint32_t ThreadId = Val.getZExtValue();
2044 
2045   // Extract the thread ID from the loaded vector value.
2046   MachineBasicBlock &BB = *I.getParent();
2047   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
2048                  .addDef(ResVReg)
2049                  .addUse(GR.getSPIRVTypeID(ResType))
2050                  .addUse(LoadedRegister)
2051                  .addImm(ThreadId);
2052   return MIB.constrainAllUses(TII, TRI, RBI);
2053 }
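// A sketch of the thread-id lowering above, assuming the index operand folds
// to the constant 0:
//   %vec = OpLoad %v3uint %GlobalInvocationID_var
//   %id  = OpCompositeExtract %uint %vec 0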
2054 
2055 namespace llvm {
2056 InstructionSelector *
2057 createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
2058                                const SPIRVSubtarget &Subtarget,
2059                                const RegisterBankInfo &RBI) {
2060   return new SPIRVInstructionSelector(TM, Subtarget, RBI);
2061 }
2062 } // namespace llvm
2063