//===- SPIRVInstructionSelector.cpp ------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the targeting of the InstructionSelector class for
// SPIRV.
// TODO: This should be generated by TableGen.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/SPIRVBaseInfo.h"
#include "MCTargetDesc/SPIRVMCTargetDesc.h"
#include "SPIRV.h"
#include "SPIRVGlobalRegistry.h"
#include "SPIRVInstrInfo.h"
#include "SPIRVRegisterBankInfo.h"
#include "SPIRVRegisterInfo.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "spirv-isel"

using namespace llvm;
namespace CL = SPIRV::OpenCLExtInst;
namespace GL = SPIRV::GLSLExtInst;

using ExtInstList =
    std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class SPIRVInstructionSelector : public InstructionSelector {
  const SPIRVSubtarget &STI;
  const SPIRVInstrInfo &TII;
  const SPIRVRegisterInfo &TRI;
  const RegisterBankInfo &RBI;
  SPIRVGlobalRegistry &GR;
  MachineRegisterInfo *MRI;

  /// We need to keep track of the number we give to anonymous global values to
  /// generate the same name every time it is needed.
  mutable DenseMap<const GlobalValue *, unsigned> UnnamedGlobalIDs;

public:
  SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                           const SPIRVSubtarget &ST,
                           const RegisterBankInfo &RBI);
  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override;
  // Common selection code. Instruction-specific selection occurs in spvSelect.
  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

private:
  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // All instruction-specific selection that didn't happen in "select()".
  // It is basically a large switch/case delegating to all the other
  // select* methods.
  bool spvSelect(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectGlobalValue(Register ResVReg, MachineInstr &I,
                         const MachineInstr *Init = nullptr) const;

  bool selectUnOpWithSrc(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I, Register SrcReg,
                         unsigned Opcode) const;
  bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  unsigned Opcode) const;

  bool selectLoad(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectStore(MachineInstr &I) const;

  bool selectMemOperation(Register ResVReg, MachineInstr &I) const;

  bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I, unsigned NewOpcode) const;

  bool selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectFence(MachineInstr &I) const;

  bool selectAddrSpaceCast(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectBitreverse(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectConstVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectCmp(Register ResVReg, const SPIRVType *ResType,
                 unsigned comparisonOpcode, MachineInstr &I) const;

  bool selectICmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectFCmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  void renderImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                   int OpIdx) const;
  void renderFImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                    int OpIdx) const;

  bool selectConst(Register ResVReg, const SPIRVType *ResType, const APInt &Imm,
                   MachineInstr &I) const;

  bool selectSelect(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                    bool IsSigned) const;
  bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  bool IsSigned, unsigned Opcode) const;
  bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                 bool IsSigned) const;

  bool selectTrunc(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectIntToBool(Register IntReg, Register ResVReg, MachineInstr &I,
                       const SPIRVType *intTy, const SPIRVType *boolTy) const;

  bool selectOpUndef(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;
  bool selectIntrinsic(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractVal(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertVal(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractElt(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertElt(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectGEP(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectFrameIndex(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectBranch(MachineInstr &I) const;
  bool selectBranchCond(MachineInstr &I) const;

  bool selectPhi(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst,
                     GL::GLSLExtInst GLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, const ExtInstList &ExtInsts) const;

  bool selectLog10(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  Register buildI32Constant(uint32_t Val, MachineInstr &I,
                            const SPIRVType *ResType = nullptr) const;

  Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildOnesVal(bool AllOnes, const SPIRVType *ResType,
                        MachineInstr &I) const;
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                                                   const SPIRVSubtarget &ST,
                                                   const RegisterBankInfo &RBI)
    : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                       CodeGenCoverage *CoverageInfo,
                                       ProfileSummaryInfo *PSI,
                                       BlockFrequencyInfo *BFI) {
  MRI = &MF.getRegInfo();
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}

static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI);

// Defined in SPIRVLegalizerInfo.cpp.
extern bool isTypeFoldingSupported(unsigned Opcode);

bool SPIRVInstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  unsigned Opcode = I.getOpcode();
  // If it's not a GMIR instruction, we've selected it already.
  if (!isPreISelGenericOpcode(Opcode)) {
    if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed any more.
      auto *Def = MRI->getVRegDef(I.getOperand(1).getReg());
      if (isTypeFoldingSupported(Def->getOpcode())) {
        auto Res = selectImpl(I, *CoverageInfo);
        assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
        if (Res)
          return Res;
      }
      MRI->replaceRegWith(I.getOperand(1).getReg(), I.getOperand(0).getReg());
      I.removeFromParent();
      return true;
    } else if (I.getNumDefs() == 1) {
      // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(I.getOperand(0).getReg(), LLT::scalar(32));
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
    return false;
  }

  // Common code for getting return reg+type, and removing selected instr
  // from parent occurs here. Instr-specific selection happens in spvSelect().
  bool HasDefs = I.getNumDefs() > 0;
  Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
  assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  if (spvSelect(ResVReg, ResType, I)) {
    if (HasDefs) // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(ResVReg, LLT::scalar(32));
    I.removeFromParent();
    return true;
  }
  return false;
}

bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  assert(!isTypeFoldingSupported(I.getOpcode()) ||
         I.getOpcode() == TargetOpcode::G_CONSTANT);
  const unsigned Opcode = I.getOpcode();
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectConstVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMSET:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
  case TargetOpcode::G_SMIN:
    return selectExtInst(ResVReg, ResType, I, CL::s_min, GL::SMin);
  case TargetOpcode::G_UMIN:
    return selectExtInst(ResVReg, ResType, I, CL::u_min, GL::UMin);

  case TargetOpcode::G_SMAX:
    return selectExtInst(ResVReg, ResType, I, CL::s_max, GL::SMax);
  case TargetOpcode::G_UMAX:
    return selectExtInst(ResVReg, ResType, I, CL::u_max, GL::UMax);

  case TargetOpcode::G_FMA:
    return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma);

  case TargetOpcode::G_FPOW:
    return selectExtInst(ResVReg, ResType, I, CL::pow, GL::Pow);
  case TargetOpcode::G_FPOWI:
    return selectExtInst(ResVReg, ResType, I, CL::pown);

  case TargetOpcode::G_FEXP:
    return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
  case TargetOpcode::G_FEXP2:
    return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);

  case TargetOpcode::G_FLOG:
    return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
  case TargetOpcode::G_FLOG2:
    return selectExtInst(ResVReg, ResType, I, CL::log2, GL::Log2);
  case TargetOpcode::G_FLOG10:
    return selectLog10(ResVReg, ResType, I);

  case TargetOpcode::G_FABS:
    return selectExtInst(ResVReg, ResType, I, CL::fabs, GL::FAbs);
  case TargetOpcode::G_ABS:
    return selectExtInst(ResVReg, ResType, I, CL::s_abs, GL::SAbs);

  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmin, GL::FMin);
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMAXIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmax, GL::FMax);

  case TargetOpcode::G_FCOPYSIGN:
    return selectExtInst(ResVReg, ResType, I, CL::copysign);

  case TargetOpcode::G_FCEIL:
    return selectExtInst(ResVReg, ResType, I, CL::ceil, GL::Ceil);
  case TargetOpcode::G_FFLOOR:
    return selectExtInst(ResVReg, ResType, I, CL::floor, GL::Floor);

  case TargetOpcode::G_FCOS:
    return selectExtInst(ResVReg, ResType, I, CL::cos, GL::Cos);
  case TargetOpcode::G_FSIN:
    return selectExtInst(ResVReg, ResType, I, CL::sin, GL::Sin);

  case TargetOpcode::G_FSQRT:
    return selectExtInst(ResVReg, ResType, I, CL::sqrt, GL::Sqrt);

  case TargetOpcode::G_CTTZ:
  case TargetOpcode::G_CTTZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::ctz);
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTLZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::clz);

  case TargetOpcode::G_INTRINSIC_ROUND:
    return selectExtInst(ResVReg, ResType, I, CL::round, GL::Round);
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return selectExtInst(ResVReg, ResType, I, CL::trunc, GL::Trunc);
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FNEARBYINT:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);

  case TargetOpcode::G_SMULH:
    return selectExtInst(ResVReg, ResType, I, CL::s_mul_hi);
  case TargetOpcode::G_UMULH:
    return selectExtInst(ResVReg, ResType, I, CL::u_mul_hi);

  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);
  case TargetOpcode::G_PTR_ADD: {
    // Currently, we get G_PTR_ADD only as a result of translating global
    // variables initialized with constant expressions like GV + Const
    // (see test opencl/basic/progvar_prog_scope_init.ll).
    // TODO: extend the handler once we have other cases.
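    // Illustrative lowering (editor's sketch, not part of the upstream file):
    // a `GV + Const` initializer becomes
    //   %res = OpSpecConstantOp %PtrTy InBoundsPtrAccessChain %GV %zero %Const
    // where %zero is the 32-bit zero index built via buildZerosVal() below.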
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    Register GV = I.getOperand(1).getReg();
    MachineRegisterInfo::def_instr_iterator II = MRI->def_instr_begin(GV);
    assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
            (*II).getOpcode() == TargetOpcode::COPY ||
            (*II).getOpcode() == SPIRV::OpVariable) &&
           isImm(I.getOperand(2), MRI));
    Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addImm(static_cast<uint32_t>(
                       SPIRV::Opcode::InBoundsPtrAccessChain))
                   .addUse(GV)
                   .addUse(Idx)
                   .addUse(I.getOperand(2).getReg());
    return MIB.constrainAllUses(TII, TRI, RBI);
  }

  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  default:
    return false;
  }
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst) const {
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst,
                                             GL::GLSLExtInst GLInst) const {
  ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
                          {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
  return selectExtInst(ResVReg, ResType, I, ExtInsts);
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             const ExtInstList &Insts) const {
  for (const auto &Ex : Insts) {
    SPIRV::InstructionSet::InstructionSet Set = Ex.first;
    uint32_t Opcode = Ex.second;
    if (STI.canUseExtInstSet(Set)) {
      MachineBasicBlock &BB = *I.getParent();
      auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
                     .addDef(ResVReg)
                     .addUse(GR.getSPIRVTypeID(ResType))
                     .addImm(static_cast<uint32_t>(Set))
                     .addImm(Opcode);
      const unsigned NumOps = I.getNumOperands();
      for (unsigned i = 1; i < NumOps; ++i)
        MIB.add(I.getOperand(i));
      return MIB.constrainAllUses(TII, TRI, RBI);
    }
  }
  return false;
}
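// Usage note (editor's illustration): a caller such as
//   selectExtInst(ResVReg, ResType, I, CL::fmin, GL::FMin);
// builds the list {OpenCL.std, GLSL.std.450}, and the loop above picks the
// first set the subtarget supports, so OpenCL.std wins purely by list order.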

bool SPIRVInstructionSelector::selectUnOpWithSrc(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I,
                                                 Register SrcReg,
                                                 unsigned Opcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I,
                                          unsigned Opcode) const {
  return selectUnOpWithSrc(ResVReg, ResType, I, I.getOperand(1).getReg(),
                           Opcode);
}

static SPIRV::Scope::Scope getScope(SyncScope::ID Ord) {
  switch (Ord) {
  case SyncScope::SingleThread:
    return SPIRV::Scope::Invocation;
  case SyncScope::System:
    return SPIRV::Scope::Device;
  default:
    llvm_unreachable("Unsupported synchronization Scope ID.");
  }
}

static void addMemoryOperands(MachineMemOperand *MemOp,
                              MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    MIB.addImm(SpvMemOp);
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
      MIB.addImm(MemOp->getAlign().value());
  }
}

static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (Flags & MachineMemOperand::Flags::MOVolatile)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (Flags & MachineMemOperand::Flags::MONonTemporal)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
    MIB.addImm(SpvMemOp);
}
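// Example (editor's sketch, assuming the standard SPIR-V memory-operand
// encoding Volatile = 0x1, Aligned = 0x2): a volatile load with 4-byte
// alignment yields SpvMemOp == 0x3, so the helper appends the two immediates
// `0x3 4` ("Volatile|Aligned 4") to the OpLoad being built.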

bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(Ptr);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
                 .addUse(Ptr)
                 .addUse(StoreVal);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Register SrcReg = I.getOperand(1).getReg();
  if (I.getOpcode() == TargetOpcode::G_MEMSET) {
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI);
    unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI);
    SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num, I, TII);
    Register Const = GR.getOrCreateConsIntArray(Val, I, ArrTy, TII);
    SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
        ArrTy, I, TII, SPIRV::StorageClass::UniformConstant);
    // TODO: check if we have such GV, add init, use buildGlobalVariable.
    Type *LLVMArrTy = ArrayType::get(
        IntegerType::get(GR.CurMF->getFunction().getContext(), 8), Num);
    GlobalVariable *GV =
        new GlobalVariable(LLVMArrTy, true, GlobalValue::InternalLinkage);
    Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    GR.add(GV, GR.CurMF, VarReg);

    buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {});
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
        .addDef(VarReg)
        .addUse(GR.getSPIRVTypeID(VarTy))
        .addImm(SPIRV::StorageClass::UniformConstant)
        .addUse(Const)
        .constrainAllUses(TII, TRI, RBI);
    SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
        ValTy, I, TII, SPIRV::StorageClass::UniformConstant);
    SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    selectUnOpWithSrc(SrcReg, SourceTy, I, VarReg, SPIRV::OpBitcast);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addUse(I.getOperand(0).getReg())
                 .addUse(SrcReg)
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
    addMemoryOperands(*I.memoperands_begin(), MIB);
  bool Result = MIB.constrainAllUses(TII, TRI, RBI);
  if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg())
    BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
        .addUse(MIB->getOperand(0).getReg());
  return Result;
}

bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(1).getReg();
  // TODO: Changed as it's implemented in the translator. See test/atomicrmw.ll
  // auto ScSem =
  // getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr));
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);

  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Ptr)
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
  AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem, I);
  SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
  uint32_t Scope = static_cast<uint32_t>(getScope(Ord));
  Register ScopeReg = buildI32Constant(Scope, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .constrainAllUses(TII, TRI, RBI);
}
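// Example (editor's sketch): `fence syncscope("singlethread") acquire`
// becomes `OpMemoryBarrier %scope %memsem`, where %scope holds
// Scope::Invocation (4) and %memsem holds MemorySemantics::Acquire (0x2),
// both materialized through buildI32Constant().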

bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  Register ScopeReg;
  Register MemSemEqReg;
  Register MemSemNeqReg;
  Register Ptr = I.getOperand(2).getReg();
  if (!isa<GIntrinsic>(I)) {
    assert(I.hasOneMemOperand());
    const MachineMemOperand *MemOp = *I.memoperands_begin();
    unsigned Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
    ScopeReg = buildI32Constant(Scope, I);

    unsigned ScSem = static_cast<uint32_t>(
        getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
    AtomicOrdering AO = MemOp->getSuccessOrdering();
    unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
    MemSemEqReg = buildI32Constant(MemSemEq, I);
    AtomicOrdering FO = MemOp->getFailureOrdering();
    unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
    MemSemNeqReg =
        MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
  } else {
    ScopeReg = I.getOperand(5).getReg();
    MemSemEqReg = I.getOperand(6).getReg();
    MemSemNeqReg = I.getOperand(7).getReg();
  }

  Register Cmp = I.getOperand(3).getReg();
  Register Val = I.getOperand(4).getReg();
  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  Register ACmpRes = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  const DebugLoc &DL = I.getDebugLoc();
  bool Result =
      BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
          .addDef(ACmpRes)
          .addUse(GR.getSPIRVTypeID(SpvValTy))
          .addUse(Ptr)
          .addUse(ScopeReg)
          .addUse(MemSemEqReg)
          .addUse(MemSemNeqReg)
          .addUse(Val)
          .addUse(Cmp)
          .constrainAllUses(TII, TRI, RBI);
  Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
                .addDef(CmpSuccReg)
                .addUse(GR.getSPIRVTypeID(BoolTy))
                .addUse(ACmpRes)
                .addUse(Cmp)
                .constrainAllUses(TII, TRI, RBI);
  Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(TmpReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(ACmpRes)
                .addUse(GR.getOrCreateUndef(I, ResType, TII))
                .addImm(0)
                .constrainAllUses(TII, TRI, RBI);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(CmpSuccReg)
                .addUse(TmpReg)
                .addImm(1)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}
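// In summary (editor's note): one G_ATOMIC_CMPXCHG expands to four SPIR-V
// instructions above: OpAtomicCompareExchange produces the old value, OpIEqual
// compares it with the expected value, and two OpCompositeInsert ops pack
// {old value, success flag} to mirror LLVM's `{ ty, i1 }` cmpxchg result.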

static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Workgroup:
  case SPIRV::StorageClass::CrossWorkgroup:
  case SPIRV::StorageClass::Function:
    return true;
  default:
    return false;
  }
}

// In SPIR-V, address space casting can only happen to and from the Generic
// storage class, and only Workgroup, CrossWorkgroup, or Function pointers can
// be cast to and from Generic pointers. As such, we can convert e.g. from
// Workgroup to Function by going via a Generic pointer as an intermediary. All
// other combinations can only be done by a bitcast, and are probably not safe.
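// For example (editor's sketch), casting a Workgroup pointer to Function
// storage goes through the intermediary:
//   %tmp = OpPtrCastToGeneric %GenericPtrTy %src
//   %res = OpGenericCastToPtr %FunctionPtrTy %tmp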
bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  // If the AddrSpaceCast has a single user that is an OpConstantComposite, an
  // OpVariable, or an spv_init_global intrinsic, we should select
  // OpSpecConstantOp.
  auto UIs = MRI->use_instructions(ResVReg);
  if (!UIs.empty() && ++UIs.begin() == UIs.end() &&
      (UIs.begin()->getOpcode() == SPIRV::OpConstantComposite ||
       UIs.begin()->getOpcode() == SPIRV::OpVariable ||
       isSpvIntrinsic(*UIs.begin(), Intrinsic::spv_init_global))) {
    Register NewReg = I.getOperand(1).getReg();
    MachineBasicBlock &BB = *I.getParent();
    SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII,
                                             SPIRV::StorageClass::Generic);
    bool Result =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric))
            .addUse(NewReg)
            .constrainAllUses(TII, TRI, RBI);
    return Result;
  }
  Register SrcPtr = I.getOperand(1).getReg();
  SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
  SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);
  SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResVReg);

  // Casting from an eligible pointer to Generic.
  if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  // Casting from Generic to an eligible pointer.
  if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
  // Casting between 2 eligible pointers using Generic as an intermediary.
  if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
    Register Tmp = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
        SrcPtrTy, I, TII, SPIRV::StorageClass::Generic);
    MachineBasicBlock &BB = *I.getParent();
    const DebugLoc &DL = I.getDebugLoc();
    bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
                       .addDef(Tmp)
                       .addUse(GR.getSPIRVTypeID(GenericPtrTy))
                       .addUse(SrcPtr)
                       .constrainAllUses(TII, TRI, RBI);
    return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
                          .addDef(ResVReg)
                          .addUse(GR.getSPIRVTypeID(ResType))
                          .addUse(Tmp)
                          .constrainAllUses(TII, TRI, RBI);
  }
  // TODO: Should this case just be disallowed completely?
  // We're casting between 2 other arbitrary address spaces, so we have to
  // bitcast.
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}

static unsigned getFCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::FCMP_OEQ:
    return SPIRV::OpFOrdEqual;
  case CmpInst::FCMP_OGE:
    return SPIRV::OpFOrdGreaterThanEqual;
  case CmpInst::FCMP_OGT:
    return SPIRV::OpFOrdGreaterThan;
  case CmpInst::FCMP_OLE:
    return SPIRV::OpFOrdLessThanEqual;
  case CmpInst::FCMP_OLT:
    return SPIRV::OpFOrdLessThan;
  case CmpInst::FCMP_ONE:
    return SPIRV::OpFOrdNotEqual;
  case CmpInst::FCMP_ORD:
    return SPIRV::OpOrdered;
  case CmpInst::FCMP_UEQ:
    return SPIRV::OpFUnordEqual;
  case CmpInst::FCMP_UGE:
    return SPIRV::OpFUnordGreaterThanEqual;
  case CmpInst::FCMP_UGT:
    return SPIRV::OpFUnordGreaterThan;
  case CmpInst::FCMP_ULE:
    return SPIRV::OpFUnordLessThanEqual;
  case CmpInst::FCMP_ULT:
    return SPIRV::OpFUnordLessThan;
  case CmpInst::FCMP_UNE:
    return SPIRV::OpFUnordNotEqual;
  case CmpInst::FCMP_UNO:
    return SPIRV::OpUnordered;
  default:
    llvm_unreachable("Unknown predicate type for FCmp");
  }
}

static unsigned getICmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpIEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpINotEqual;
  case CmpInst::ICMP_SGE:
    return SPIRV::OpSGreaterThanEqual;
  case CmpInst::ICMP_SGT:
    return SPIRV::OpSGreaterThan;
  case CmpInst::ICMP_SLE:
    return SPIRV::OpSLessThanEqual;
  case CmpInst::ICMP_SLT:
    return SPIRV::OpSLessThan;
  case CmpInst::ICMP_UGE:
    return SPIRV::OpUGreaterThanEqual;
  case CmpInst::ICMP_UGT:
    return SPIRV::OpUGreaterThan;
  case CmpInst::ICMP_ULE:
    return SPIRV::OpULessThanEqual;
  case CmpInst::ICMP_ULT:
    return SPIRV::OpULessThan;
  default:
    llvm_unreachable("Unknown predicate type for ICmp");
  }
}

static unsigned getPtrCmpOpcode(unsigned Pred) {
  switch (static_cast<CmpInst::Predicate>(Pred)) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpPtrEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpPtrNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for pointer comparison");
  }
}

// Return the logical operation, or abort if none exists.
static unsigned getBoolCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpLogicalEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpLogicalNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for Bool comparison");
  }
}

bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectConstVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // TODO: only const case is supported for now.
  assert(std::all_of(
      I.operands_begin(), I.operands_end(), [this](const MachineOperand &MO) {
        if (MO.isDef())
          return true;
        if (!MO.isReg())
          return false;
        SPIRVType *ConstTy = this->MRI->getVRegDef(MO.getReg());
        assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
               ConstTy->getOperand(1).isReg());
        Register ConstReg = ConstTy->getOperand(1).getReg();
        const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
        assert(Const);
        return (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
                Const->getOpcode() == TargetOpcode::G_FCONSTANT);
      }));

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(SPIRV::OpConstantComposite))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
                                         const SPIRVType *ResType,
                                         unsigned CmpOpc,
                                         MachineInstr &I) const {
  Register Cmp0 = I.getOperand(2).getReg();
  Register Cmp1 = I.getOperand(3).getReg();
  assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
             GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
         "CMP operands should have the same type");
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Cmp0)
      .addUse(Cmp1)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  auto Pred = I.getOperand(1).getPredicate();
  unsigned CmpOpc;

  Register CmpOperand = I.getOperand(2).getReg();
  if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
    CmpOpc = getPtrCmpOpcode(Pred);
  else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
    CmpOpc = getBoolCmpOpcode(Pred);
  else
    CmpOpc = getICmpOpcode(Pred);
  return selectCmp(ResVReg, ResType, CmpOpc, I);
}

void SPIRVInstructionSelector::renderFImm32(MachineInstrBuilder &MIB,
                                            const MachineInstr &I,
                                            int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
         "Expected G_FCONSTANT");
  const ConstantFP *FPImm = I.getOperand(1).getFPImm();
  addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
}

void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
                                           const MachineInstr &I,
                                           int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
}

Register
SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
                                           const SPIRVType *ResType) const {
  Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
  const SPIRVType *SpvI32Ty =
      ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
  // Find a constant in DT or build a new one.
  auto ConstInt = ConstantInt::get(LLVMTy, Val);
  Register NewReg = GR.find(ConstInt, GR.CurMF);
  if (!NewReg.isValid()) {
    NewReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    GR.add(ConstInt, GR.CurMF, NewReg);
    MachineInstr *MI;
    MachineBasicBlock &BB = *I.getParent();
    if (Val == 0) {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
               .addDef(NewReg)
               .addUse(GR.getSPIRVTypeID(SpvI32Ty));
    } else {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
               .addDef(NewReg)
               .addUse(GR.getSPIRVTypeID(SpvI32Ty))
               .addImm(APInt(32, Val).getZExtValue());
    }
    constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
  }
  return NewReg;
}
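// Usage note (editor's illustration): callers like selectFence() simply do
//   Register MemSemReg = buildI32Constant(MemSem, I);
// repeated requests for the same value are deduplicated via GR.find(), and a
// zero value is emitted as OpConstantNull rather than OpConstantI.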

bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
  return selectCmp(ResVReg, ResType, CmpOp, I);
}

Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConsIntVector(0, I, ResType, TII);
  return GR.getOrCreateConstInt(0, I, ResType, TII);
}

Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
  APInt One =
      AllOnes ? APInt::getAllOnes(BitWidth) : APInt::getOneBitSet(BitWidth, 0);
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConsIntVector(One.getZExtValue(), I, ResType, TII);
  return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII);
}

bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I,
                                            bool IsSigned) const {
  // To extend a bool, we need to use OpSelect between constants.
  Register ZeroReg = buildZerosVal(ResType, I);
  Register OneReg = buildOnesVal(IsSigned, ResType, I);
  bool IsScalarBool =
      GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
  unsigned Opcode =
      IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .addUse(OneReg)
      .addUse(ZeroReg)
      .constrainAllUses(TII, TRI, RBI);
}
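// Example (editor's sketch): sign-extending an i1 %c to i32 becomes
//   %res = OpSelect %i32 %c %all_ones %zero
// while a zero-extension selects 1 instead of -1 (buildOnesVal's AllOnes
// flag), matching the usual sext/zext semantics for booleans.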

bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I, bool IsSigned,
                                          unsigned Opcode) const {
  Register SrcReg = I.getOperand(1).getReg();
  // We can convert a bool value directly to a float type without
  // OpConvert*ToF; however, the translator generates OpSelect+OpConvert*ToF,
  // so we do the same.
  if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
    unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
    SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
    if (ResType->getOpcode() == SPIRV::OpTypeVector) {
      const unsigned NumElts = ResType->getOperand(2).getImm();
      TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
    }
    SrcReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    selectSelect(SrcReg, TmpType, I, false);
  }
  return selectUnOpWithSrc(ResVReg, ResType, I, SrcReg, Opcode);
}

bool SPIRVInstructionSelector::selectExt(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I, bool IsSigned) const {
  if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool))
    return selectSelect(ResVReg, ResType, I, IsSigned);
  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}

bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
                                               Register ResVReg,
                                               MachineInstr &I,
                                               const SPIRVType *IntTy,
                                               const SPIRVType *BoolTy) const {
  // To truncate to a bool, we AND the value with 1 (OpBitwiseAnd) and then
  // compare the result with zero (OpINotEqual).
  Register BitIntReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
  unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
  Register Zero = buildZerosVal(IntTy, I);
  Register One = buildOnesVal(false, IntTy, I);
  MachineBasicBlock &BB = *I.getParent();
  BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(BitIntReg)
      .addUse(GR.getSPIRVTypeID(IntTy))
      .addUse(IntReg)
      .addUse(One)
      .constrainAllUses(TII, TRI, RBI);
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(BoolTy))
      .addUse(BitIntReg)
      .addUse(Zero)
      .constrainAllUses(TII, TRI, RBI);
}
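// Example (editor's sketch): truncating an i32 %v to i1 becomes
//   %bit = OpBitwiseAndS %i32 %v %one
//   %res = OpINotEqual %bool %bit %zero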

bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool)) {
    Register IntReg = I.getOperand(1).getReg();
    const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
    return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType);
  }
  bool IsSigned = GR.isScalarOrVectorSigned(ResType);
  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}

bool SPIRVInstructionSelector::selectConst(Register ResVReg,
                                           const SPIRVType *ResType,
                                           const APInt &Imm,
                                           MachineInstr &I) const {
  unsigned TyOpcode = ResType->getOpcode();
  assert(TyOpcode != SPIRV::OpTypePointer || Imm.isZero());
  MachineBasicBlock &BB = *I.getParent();
  if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
      Imm.isZero())
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
        .addDef(ResVReg)
        .addUse(GR.getSPIRVTypeID(ResType))
        .constrainAllUses(TII, TRI, RBI);
  if (TyOpcode == SPIRV::OpTypeInt) {
    assert(Imm.getBitWidth() <= 64 && "Unsupported integer width!");
    Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII);
    if (Reg == ResVReg)
      return true;
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(Reg)
        .constrainAllUses(TII, TRI, RBI);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  // <=32-bit integers should be caught by the tblgen-erated patterns.
  assert(Imm.getBitWidth() > 32);
  addNumImm(Imm, MIB);
  return MIB.constrainAllUses(TII, TRI, RBI);
}
1222 
bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}

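// Returns true if the operand's virtual register is ultimately defined by a
// G_CONSTANT, reached through the ASSIGN_TYPE pseudo that wraps every typed
// value; i.e. the operand can be folded to an immediate.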
static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
  assert(MO.isReg());
  const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
  if (TypeInst->getOpcode() != SPIRV::ASSIGN_TYPE)
    return false;
  assert(TypeInst->getOperand(1).isReg());
  MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
  return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
}

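// Extracts the immediate behind MO. Callers must check isImm() first: this
// asserts, rather than verifies, that the def chain ends in a G_CONSTANT.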
static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
  const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
  MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
  assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT);
  return ImmInst->getOperand(1).getCImm()->getZExtValue();
}

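// Selects a constant-index insertion into a composite, emitting e.g.
//   %res = OpCompositeInsert %type %object %composite <literal indices...>
// Intrinsic operand layout: 2 = composite, 3 = object to insert, 4.. = the
// index chain, each foldable to an immediate.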
bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 // object to insert
                 .addUse(I.getOperand(3).getReg())
                 // composite to insert into
                 .addUse(I.getOperand(2).getReg());
  for (unsigned i = 4; i < I.getNumOperands(); i++)
    MIB.addImm(foldImm(I.getOperand(i), MRI));
  return MIB.constrainAllUses(TII, TRI, RBI);
}

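// Selects a constant-index extraction (OpCompositeExtract): operand 2 is the
// composite, operands 3.. are the literal indices.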
bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(I.getOperand(2).getReg());
  for (unsigned i = 3; i < I.getNumOperands(); i++)
    MIB.addImm(foldImm(I.getOperand(i), MRI));
  return MIB.constrainAllUses(TII, TRI, RBI);
}

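// insertelement with a compile-time-constant index degenerates to
// OpCompositeInsert; only a dynamic index needs OpVectorInsertDynamic.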
bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (isImm(I.getOperand(4), MRI))
    return selectInsertVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .addUse(I.getOperand(4).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

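// Same dispatch for extraction: a constant index folds to OpCompositeExtract,
// a dynamic one selects OpVectorExtractDynamic.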
bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  if (isImm(I.getOperand(3), MRI))
    return selectExtractVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

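// Lowers spv_gep: operand 2 carries the GEP's inbounds flag as an immediate,
// operand 3 is the base pointer, and the remaining register operands are the
// indices.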
bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const bool IsGEPInBounds = I.getOperand(2).getImm();

  // OpAccessChain could be used for OpenCL, but the SPIRV-LLVM Translator only
  // relies on PtrAccessChain, so we'll try not to deviate. For Vulkan however,
  // we have to use Op[InBounds]AccessChain.
  const unsigned Opcode = STI.isVulkanEnv()
                              ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
                                               : SPIRV::OpAccessChain)
                              : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain
                                               : SPIRV::OpPtrAccessChain);

  auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 // Object to get a pointer to.
                 .addUse(I.getOperand(3).getReg());
  // Add the indices. The Op[InBounds]AccessChain forms have no initial
  // "Element" operand, so the GEP's first index (operand 4) is skipped for
  // them; the PtrAccessChain forms consume it.
  const unsigned StartingIndex =
      (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
          ? 5
          : 4;
  for (unsigned i = StartingIndex; i < I.getNumExplicitOperands(); ++i)
    Res.addUse(I.getOperand(i).getReg());
  return Res.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  switch (cast<GIntrinsic>(I).getIntrinsicID()) {
  case Intrinsic::spv_load:
    return selectLoad(ResVReg, ResType, I);
  case Intrinsic::spv_store:
    return selectStore(I);
  case Intrinsic::spv_extractv:
    return selectExtractVal(ResVReg, ResType, I);
  case Intrinsic::spv_insertv:
    return selectInsertVal(ResVReg, ResType, I);
  case Intrinsic::spv_extractelt:
    return selectExtractElt(ResVReg, ResType, I);
  case Intrinsic::spv_insertelt:
    return selectInsertElt(ResVReg, ResType, I);
  case Intrinsic::spv_gep:
    return selectGEP(ResVReg, ResType, I);
  case Intrinsic::spv_unref_global:
  case Intrinsic::spv_init_global: {
    MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
    MachineInstr *Init = I.getNumExplicitOperands() > 2
                             ? MRI->getVRegDef(I.getOperand(2).getReg())
                             : nullptr;
    assert(MI);
    return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
  }
  case Intrinsic::spv_undef: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType));
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_const_composite: {
    // If no values are attached, the composite is a null constant.
    bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
    unsigned Opcode =
        IsNull ? SPIRV::OpConstantNull : SPIRV::OpConstantComposite;
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType));
    // Skip the type metadata node that was already consumed when generating
    // assign.type for this composite.
    if (!IsNull) {
      for (unsigned i = I.getNumExplicitDefs() + 1;
           i < I.getNumExplicitOperands(); ++i) {
        MIB.addUse(I.getOperand(i).getReg());
      }
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_assign_name: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
    MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
    for (unsigned i = I.getNumExplicitDefs() + 2;
         i < I.getNumExplicitOperands(); ++i) {
      MIB.addImm(I.getOperand(i).getImm());
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_switch: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
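    // Operands arrive in mixed form: the selector and jump targets are
    // registers/MBBs, while case literals come as CImm and are emitted via
    // addNumImm.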
    for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
      if (I.getOperand(i).isReg())
        MIB.addReg(I.getOperand(i).getReg());
      else if (I.getOperand(i).isCImm())
        addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
      else if (I.getOperand(i).isMBB())
        MIB.addMBB(I.getOperand(i).getMBB());
      else
        llvm_unreachable("Unexpected OpSwitch operand");
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_cmpxchg:
    return selectAtomicCmpXchg(ResVReg, ResType, I);
  case Intrinsic::spv_unreachable:
    BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable));
    break;
  case Intrinsic::spv_alloca:
    return selectFrameIndex(ResVReg, ResType, I);
  case Intrinsic::spv_assume:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
          .addUse(I.getOperand(1).getReg());
    break;
  case Intrinsic::spv_expect:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
          .addDef(ResVReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addUse(I.getOperand(2).getReg())
          .addUse(I.getOperand(3).getReg());
    break;
  default:
    llvm_unreachable("Intrinsic selection not implemented");
  }
  return true;
}

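// spv_alloca becomes an OpVariable in the Function storage class: SPIR-V has
// no frame indices, so stack objects are just function-local variables.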
bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. We can use
  // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
  // first, so we can generate the OpBranchConditional here. If there is no
  // G_BRCOND, we just use OpBranch for a regular unconditional branch.
  const MachineInstr *PrevI = I.getPrevNode();
  MachineBasicBlock &MBB = *I.getParent();
  if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
    return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
        .addUse(PrevI->getOperand(0).getReg())
        .addMBB(PrevI->getOperand(1).getMBB())
        .addMBB(I.getOperand(0).getMBB())
        .constrainAllUses(TII, TRI, RBI);
  }
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
      .addMBB(I.getOperand(0).getMBB())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. For an
  // explicit conditional branch with no fallthrough, we use both a G_BR and a
  // G_BRCOND to create an OpBranchConditional. We should hit G_BR first, and
  // generate the OpBranchConditional in selectBranch above.
  //
  // If an OpBranchConditional has been generated, we simply return, as the
  // work is already done. If there is no OpBranchConditional, LLVM must be
  // relying on implicit fallthrough to the next basic block, so we need to
  // create an OpBranchConditional with an explicit "false" argument pointing
  // to the next basic block that LLVM would fall through to.
  const MachineInstr *NextI = I.getNextNode();
  // Check if this has already been successfully selected.
  if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
    return true;
  // Must be relying on implicit block fallthrough, so generate an
  // OpBranchConditional with the "next" basic block as the "false" target.
  MachineBasicBlock &MBB = *I.getParent();
  MachineBasicBlock *NextMBB = MBB.getNextNode();
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
      .addUse(I.getOperand(0).getReg())
      .addMBB(I.getOperand(1).getMBB())
      .addMBB(NextMBB)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  const unsigned NumOps = I.getNumOperands();
  for (unsigned i = 1; i < NumOps; i += 2) {
    MIB.addUse(I.getOperand(i + 0).getReg());
    MIB.addMBB(I.getOperand(i + 1).getMBB());
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

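// Materializes a global value. Functions used as values become either an
// OpConstantFunctionPointerINTEL (with SPV_INTEL_function_pointers) or a
// placeholder OpConstantNull; all other globals become an OpVariable whose
// storage class is derived from the address space.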
bool SPIRVInstructionSelector::selectGlobalValue(
    Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
  // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
  MachineIRBuilder MIRBuilder(I);
  const GlobalValue *GV = I.getOperand(1).getGlobal();
  Type *GVType = GV->getValueType();
  SPIRVType *PointerBaseType;
  if (GVType->isArrayTy()) {
    SPIRVType *ArrayElementType =
        GR.getOrCreateSPIRVType(GVType->getArrayElementType(), MIRBuilder,
                                SPIRV::AccessQualifier::ReadWrite, false);
    PointerBaseType = GR.getOrCreateSPIRVArrayType(
        ArrayElementType, GVType->getArrayNumElements(), I, TII);
  } else {
    PointerBaseType = GR.getOrCreateSPIRVType(
        GVType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
  }
  SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
      PointerBaseType, I, TII,
      addressSpaceToStorageClass(GV->getAddressSpace()));

  std::string GlobalIdent;
  if (!GV->hasName()) {
    unsigned &ID = UnnamedGlobalIDs[GV];
    if (ID == 0)
      ID = UnnamedGlobalIDs.size();
    GlobalIdent = "__unnamed_" + Twine(ID).str();
  } else {
    GlobalIdent = GV->getGlobalIdentifier();
  }

  // How a function behaves as an operand depends on the availability of the
  // SPV_INTEL_function_pointers extension:
  // - With the extension, we create a proper constant operand and compute the
  //   correct type for the function pointer.
  // - Without it, functions still appear as operands in tests with blocks of
  //   instructions, e.g. in transcoding/global_block.ll. Those operands are
  //   never used and are substituted by zero constants. Their type is
  //   expected to always be OpTypePointer Function %uchar.
  if (isa<Function>(GV)) {
    const Constant *ConstVal = GV;
    MachineBasicBlock &BB = *I.getParent();
    Register NewReg = GR.find(ConstVal, GR.CurMF);
    if (!NewReg.isValid()) {
      // First reference: record ResVReg as the canonical register for this
      // constant.
      GR.add(ConstVal, GR.CurMF, ResVReg);
      const Function *GVFun =
          STI.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)
              ? dyn_cast<Function>(GV)
              : nullptr;
      if (GVFun) {
        // References to a function via function pointers generate virtual
        // registers without a definition. They are resolved later, during the
        // module analysis stage.
        MachineRegisterInfo *MRI = MIRBuilder.getMRI();
        Register FuncVReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
        MRI->setRegClass(FuncVReg, &SPIRV::IDRegClass);
        MachineInstrBuilder MB =
            BuildMI(BB, I, I.getDebugLoc(),
                    TII.get(SPIRV::OpConstantFunctionPointerINTEL))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(FuncVReg);
        // Map the function-pointer operand to the Function it references.
        GR.recordFunctionPointer(&MB.getInstr()->getOperand(2), GVFun);
        return MB.constrainAllUses(TII, TRI, RBI);
      }
      return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
          .addDef(ResVReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .constrainAllUses(TII, TRI, RBI);
    }
    assert(NewReg != ResVReg);
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(NewReg)
        .constrainAllUses(TII, TRI, RBI);
  }
  auto GlobalVar = cast<GlobalVariable>(GV);
  assert(GlobalVar->getName() != "llvm.global.annotations");

  bool HasInit = GlobalVar->hasInitializer() &&
                 !isa<UndefValue>(GlobalVar->getInitializer());
  // Skip the empty declaration for GVs with initializers until we get the
  // declaration that carries the passed initializer.
  if (HasInit && !Init)
    return true;

  unsigned AddrSpace = GV->getAddressSpace();
  SPIRV::StorageClass::StorageClass Storage =
      addressSpaceToStorageClass(AddrSpace);
  bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage &&
                  Storage != SPIRV::StorageClass::Function;
  SPIRV::LinkageType::LinkageType LnkType =
      (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
          ? SPIRV::LinkageType::Import
          : (GV->getLinkage() == GlobalValue::LinkOnceODRLinkage &&
                     STI.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr)
                 ? SPIRV::LinkageType::LinkOnceODR
                 : SPIRV::LinkageType::Export);

  Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
                                        Storage, Init, GlobalVar->isConstant(),
                                        HasLnkTy, LnkType, MIRBuilder, true);
  return Reg.isValid();
}

bool SPIRVInstructionSelector::selectLog10(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
    return selectExtInst(ResVReg, ResType, I, CL::log10);
  }

  // There is no log10 instruction in the GLSL Extended Instruction set, so it
  // is implemented as:
  // log10(x) = log2(x) * (1 / log2(10))
  //          = log2(x) * 0.30103

  MachineIRBuilder MIRBuilder(I);
  MachineBasicBlock &BB = *I.getParent();

  // Build log2(x).
  Register VarReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  bool Result =
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
          .addDef(VarReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
          .addImm(GL::Log2)
          .add(I.getOperand(1))
          .constrainAllUses(TII, TRI, RBI);

  // Build 0.30103.
  assert(ResType->getOpcode() == SPIRV::OpTypeVector ||
         ResType->getOpcode() == SPIRV::OpTypeFloat);
  // TODO: Add matrix implementation once supported by the HLSL frontend.
  const SPIRVType *SpirvScalarType =
      ResType->getOpcode() == SPIRV::OpTypeVector
          ? GR.getSPIRVTypeForVReg(ResType->getOperand(1).getReg())
          : ResType;
  Register ScaleReg =
      GR.buildConstantFP(APFloat(0.30103f), MIRBuilder, SpirvScalarType);

  // Multiply log2(x) by 0.30103 to get the log10(x) result.
  auto Opcode = ResType->getOpcode() == SPIRV::OpTypeVector
                    ? SPIRV::OpVectorTimesScalar
                    : SPIRV::OpFMulS;
  Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(VarReg)
                .addUse(ScaleReg)
                .constrainAllUses(TII, TRI, RBI);

  return Result;
}

namespace llvm {
InstructionSelector *
createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                               const SPIRVSubtarget &Subtarget,
                               const RegisterBankInfo &RBI) {
  return new SPIRVInstructionSelector(TM, Subtarget, RBI);
}
} // namespace llvm