//===- SPIRVInstructionSelector.cpp ------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the targeting of the InstructionSelector class for
// SPIRV.
// TODO: This should be generated by TableGen.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/SPIRVBaseInfo.h"
#include "MCTargetDesc/SPIRVMCTargetDesc.h"
#include "SPIRV.h"
#include "SPIRVGlobalRegistry.h"
#include "SPIRVInstrInfo.h"
#include "SPIRVRegisterBankInfo.h"
#include "SPIRVRegisterInfo.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/Support/Debug.h"

namespace llvm {

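// Caches the sync-scope IDs that the SPIR-V backend recognizes, so that
// getScope() below can map an instruction's SyncScope::ID to a SPIR-V scope
// without re-querying the LLVMContext for every atomic or fence instruction.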
class SPIRVMachineModuleInfo : public MachineModuleInfoImpl {
public:
  SyncScope::ID Work_ItemSSID;
  SyncScope::ID WorkGroupSSID;
  SyncScope::ID DeviceSSID;
  SyncScope::ID AllSVMDevicesSSID;
  SyncScope::ID SubGroupSSID;

  SPIRVMachineModuleInfo(const MachineModuleInfo &MMI) {
    LLVMContext &CTX = MMI.getModule()->getContext();
    Work_ItemSSID = CTX.getOrInsertSyncScopeID("work_item");
    WorkGroupSSID = CTX.getOrInsertSyncScopeID("workgroup");
    DeviceSSID = CTX.getOrInsertSyncScopeID("device");
    AllSVMDevicesSSID = CTX.getOrInsertSyncScopeID("all_svm_devices");
    SubGroupSSID = CTX.getOrInsertSyncScopeID("sub_group");
  }
};

} // end namespace llvm

#define DEBUG_TYPE "spirv-isel"

using namespace llvm;
namespace CL = SPIRV::OpenCLExtInst;
namespace GL = SPIRV::GLSLExtInst;

using ExtInstList =
    std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class SPIRVInstructionSelector : public InstructionSelector {
  const SPIRVSubtarget &STI;
  const SPIRVInstrInfo &TII;
  const SPIRVRegisterInfo &TRI;
  const RegisterBankInfo &RBI;
  SPIRVGlobalRegistry &GR;
  MachineRegisterInfo *MRI;
  SPIRVMachineModuleInfo *MMI = nullptr;

  /// We need to keep track of the number we give to anonymous global values so
  /// that we can generate the same name every time it is needed.
  mutable DenseMap<const GlobalValue *, unsigned> UnnamedGlobalIDs;

public:
  SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                           const SPIRVSubtarget &ST,
                           const RegisterBankInfo &RBI);
  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override;
  // Common selection code. Instruction-specific selection occurs in spvSelect.
  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

private:
  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // All instruction-specific selection that didn't happen in "select()".
  // This is basically a large switch/case delegating to all the other select
  // methods.
  bool spvSelect(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectGlobalValue(Register ResVReg, MachineInstr &I,
                         const MachineInstr *Init = nullptr) const;

  bool selectUnOpWithSrc(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I, Register SrcReg,
                         unsigned Opcode) const;
  bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  unsigned Opcode) const;

  bool selectBitcast(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;

  bool selectLoad(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectStore(MachineInstr &I) const;

  bool selectStackSave(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectStackRestore(MachineInstr &I) const;

  bool selectMemOperation(Register ResVReg, MachineInstr &I) const;

  bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I, unsigned NewOpcode,
                       unsigned NegateOpcode = 0) const;

  bool selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectFence(MachineInstr &I) const;

  bool selectAddrSpaceCast(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectBitreverse(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectConstVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;
  bool selectSplatVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectCmp(Register ResVReg, const SPIRVType *ResType,
                 unsigned comparisonOpcode, MachineInstr &I) const;

  bool selectICmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectFCmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  void renderImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                   int OpIdx) const;
  void renderFImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                    int OpIdx) const;

  bool selectConst(Register ResVReg, const SPIRVType *ResType, const APInt &Imm,
                   MachineInstr &I) const;

  bool selectSelect(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                    bool IsSigned) const;
  bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  bool IsSigned, unsigned Opcode) const;
  bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                 bool IsSigned) const;

  bool selectTrunc(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectIntToBool(Register IntReg, Register ResVReg, MachineInstr &I,
                       const SPIRVType *intTy, const SPIRVType *boolTy) const;

  bool selectOpUndef(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;
  bool selectFreeze(Register ResVReg, const SPIRVType *ResType,
                    MachineInstr &I) const;
  bool selectIntrinsic(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractVal(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertVal(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractElt(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertElt(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectGEP(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectFrameIndex(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectAllocaArray(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectBranch(MachineInstr &I) const;
  bool selectBranchCond(MachineInstr &I) const;

  bool selectPhi(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst,
                     GL::GLSLExtInst GLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, const ExtInstList &ExtInsts) const;

  bool selectLog10(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectSpvThreadId(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectUnmergeValues(MachineInstr &I) const;

  Register buildI32Constant(uint32_t Val, MachineInstr &I,
                            const SPIRVType *ResType = nullptr) const;

  Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildOnesVal(bool AllOnes, const SPIRVType *ResType,
                        MachineInstr &I) const;
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                                                   const SPIRVSubtarget &ST,
                                                   const RegisterBankInfo &RBI)
    : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                       CodeGenCoverage *CoverageInfo,
                                       ProfileSummaryInfo *PSI,
                                       BlockFrequencyInfo *BFI) {
  MMI = &MF.getMMI().getObjFileInfo<SPIRVMachineModuleInfo>();
  MRI = &MF.getRegInfo();
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}

static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI);

// Defined in SPIRVLegalizerInfo.cpp.
extern bool isTypeFoldingSupported(unsigned Opcode);

bool SPIRVInstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  const unsigned Opcode = I.getOpcode();
  // If it's not a GMIR instruction, we've selected it already.
  if (!isPreISelGenericOpcode(Opcode)) {
    if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed any more.
      auto *Def = MRI->getVRegDef(I.getOperand(1).getReg());
      if (isTypeFoldingSupported(Def->getOpcode())) {
        bool Res = selectImpl(I, *CoverageInfo);
        assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
        if (Res)
          return Res;
      }
      MRI->replaceRegWith(I.getOperand(1).getReg(), I.getOperand(0).getReg());
      I.removeFromParent();
      return true;
    } else if (I.getNumDefs() == 1) {
      // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(I.getOperand(0).getReg(), LLT::scalar(32));
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
    return false;
  }

  // Common code for getting the return register and type, and for removing
  // the selected instruction from its parent, lives here. Instruction-specific
  // selection happens in spvSelect().
  bool HasDefs = I.getNumDefs() > 0;
  Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
  assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  if (spvSelect(ResVReg, ResType, I)) {
    if (HasDefs) // Make all vregs 32 bits (for SPIR-V IDs).
      for (unsigned i = 0; i < I.getNumDefs(); ++i)
        MRI->setType(I.getOperand(i).getReg(), LLT::scalar(32));
    I.removeFromParent();
    return true;
  }
  return false;
}
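
// A note on the ASSIGN_TYPE pseudo consumed in select() above: it pairs a
// generic vreg with its SPIRV type, roughly
//   %val = ASSIGN_TYPE %src, %spirv_type
// (a sketch of the GMIR shape, not verbatim). Once the foldable definition
// has been selected, the pseudo is erased by rewriting every occurrence of
// %src to %val.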

bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const unsigned Opcode = I.getOpcode();
  if (isTypeFoldingSupported(Opcode) && Opcode != TargetOpcode::G_CONSTANT)
    return selectImpl(I, *CoverageInfo);
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);
  case TargetOpcode::G_FREEZE:
    return selectFreeze(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectConstVector(ResVReg, ResType, I);
  case TargetOpcode::G_SPLAT_VECTOR:
    return selectSplatVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMSET:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
  case TargetOpcode::G_SMIN:
    return selectExtInst(ResVReg, ResType, I, CL::s_min, GL::SMin);
  case TargetOpcode::G_UMIN:
    return selectExtInst(ResVReg, ResType, I, CL::u_min, GL::UMin);

  case TargetOpcode::G_SMAX:
    return selectExtInst(ResVReg, ResType, I, CL::s_max, GL::SMax);
  case TargetOpcode::G_UMAX:
    return selectExtInst(ResVReg, ResType, I, CL::u_max, GL::UMax);

  case TargetOpcode::G_FMA:
    return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma);

  case TargetOpcode::G_FPOW:
    return selectExtInst(ResVReg, ResType, I, CL::pow, GL::Pow);
  case TargetOpcode::G_FPOWI:
    return selectExtInst(ResVReg, ResType, I, CL::pown);

  case TargetOpcode::G_FEXP:
    return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
  case TargetOpcode::G_FEXP2:
    return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);

  case TargetOpcode::G_FLOG:
    return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
  case TargetOpcode::G_FLOG2:
    return selectExtInst(ResVReg, ResType, I, CL::log2, GL::Log2);
  case TargetOpcode::G_FLOG10:
    return selectLog10(ResVReg, ResType, I);

  case TargetOpcode::G_FABS:
    return selectExtInst(ResVReg, ResType, I, CL::fabs, GL::FAbs);
  case TargetOpcode::G_ABS:
    return selectExtInst(ResVReg, ResType, I, CL::s_abs, GL::SAbs);

  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmin, GL::FMin);
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMAXIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmax, GL::FMax);

  case TargetOpcode::G_FCOPYSIGN:
    return selectExtInst(ResVReg, ResType, I, CL::copysign);

  case TargetOpcode::G_FCEIL:
    return selectExtInst(ResVReg, ResType, I, CL::ceil, GL::Ceil);
  case TargetOpcode::G_FFLOOR:
    return selectExtInst(ResVReg, ResType, I, CL::floor, GL::Floor);

  case TargetOpcode::G_FCOS:
    return selectExtInst(ResVReg, ResType, I, CL::cos, GL::Cos);
  case TargetOpcode::G_FSIN:
    return selectExtInst(ResVReg, ResType, I, CL::sin, GL::Sin);

  case TargetOpcode::G_FSQRT:
    return selectExtInst(ResVReg, ResType, I, CL::sqrt, GL::Sqrt);

  case TargetOpcode::G_CTTZ:
  case TargetOpcode::G_CTTZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::ctz);
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTLZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::clz);

  case TargetOpcode::G_INTRINSIC_ROUND:
    return selectExtInst(ResVReg, ResType, I, CL::round, GL::Round);
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return selectExtInst(ResVReg, ResType, I, CL::trunc, GL::Trunc);
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FNEARBYINT:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);

  case TargetOpcode::G_SMULH:
    return selectExtInst(ResVReg, ResType, I, CL::s_mul_hi);
  case TargetOpcode::G_UMULH:
    return selectExtInst(ResVReg, ResType, I, CL::u_mul_hi);

  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectBitcast(ResVReg, ResType, I);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);
  case TargetOpcode::G_PTR_ADD: {
    // Currently, we get G_PTR_ADD only as a result of translating
    // global variables, initialized with constant expressions like GV + Const
    // (see test opencl/basic/progvar_prog_scope_init.ll).
    // TODO: extend the handler once we have other cases.
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    Register GV = I.getOperand(1).getReg();
    MachineRegisterInfo::def_instr_iterator II = MRI->def_instr_begin(GV);
    (void)II;
    assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
            (*II).getOpcode() == TargetOpcode::COPY ||
            (*II).getOpcode() == SPIRV::OpVariable) &&
           isImm(I.getOperand(2), MRI));
    Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addImm(static_cast<uint32_t>(
                       SPIRV::Opcode::InBoundsPtrAccessChain))
                   .addUse(GV)
                   .addUse(Idx)
                   .addUse(I.getOperand(2).getReg());
    return MIB.constrainAllUses(TII, TRI, RBI);
  }

  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_ATOMICRMW_FADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT);
  case TargetOpcode::G_ATOMICRMW_FSUB:
    // Translate G_ATOMICRMW_FSUB to OpAtomicFAddEXT with a negated value
    // operand.
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT,
                           SPIRV::OpFNegate);
  case TargetOpcode::G_ATOMICRMW_FMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMinEXT);
  case TargetOpcode::G_ATOMICRMW_FMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMaxEXT);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  case TargetOpcode::G_STACKSAVE:
    return selectStackSave(ResVReg, ResType, I);
  case TargetOpcode::G_STACKRESTORE:
    return selectStackRestore(I);

  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I);

  default:
    return false;
  }
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst) const {
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst,
                                             GL::GLSLExtInst GLInst) const {
  ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
                          {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
  return selectExtInst(ResVReg, ResType, I, ExtInsts);
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             const ExtInstList &Insts) const {
  for (const auto &Ex : Insts) {
    SPIRV::InstructionSet::InstructionSet Set = Ex.first;
    uint32_t Opcode = Ex.second;
    if (STI.canUseExtInstSet(Set)) {
      MachineBasicBlock &BB = *I.getParent();
      auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
                     .addDef(ResVReg)
                     .addUse(GR.getSPIRVTypeID(ResType))
                     .addImm(static_cast<uint32_t>(Set))
                     .addImm(Opcode);
      const unsigned NumOps = I.getNumOperands();
      for (unsigned i = 1; i < NumOps; ++i)
        MIB.add(I.getOperand(i));
      return MIB.constrainAllUses(TII, TRI, RBI);
    }
  }
  return false;
}
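
// For illustration (not verbatim output), a G_FSQRT over f32 selected through
// the extended-instruction list above becomes a single OpExtInst, e.g. in
// SPIR-V assembly:
//   %res = OpExtInst %f32 %opencl_std sqrt %x     ; OpenCL.std flavor
//   %res = OpExtInst %f32 %glsl_std_450 Sqrt %x   ; GLSL.std.450 flavor
// The first instruction set the subtarget reports as usable wins.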

bool SPIRVInstructionSelector::selectUnOpWithSrc(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I,
                                                 Register SrcReg,
                                                 unsigned Opcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I,
                                          unsigned Opcode) const {
  return selectUnOpWithSrc(ResVReg, ResType, I, I.getOperand(1).getReg(),
                           Opcode);
}

bool SPIRVInstructionSelector::selectBitcast(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  Register OpReg = I.getOperand(1).getReg();
  SPIRVType *OpType = OpReg.isValid() ? GR.getSPIRVTypeForVReg(OpReg) : nullptr;
  if (!GR.isBitcastCompatible(ResType, OpType))
    report_fatal_error("incompatible result and operand types in a bitcast");
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}

static SPIRV::Scope::Scope getScope(SyncScope::ID Ord,
                                    SPIRVMachineModuleInfo *MMI) {
  if (Ord == SyncScope::SingleThread || Ord == MMI->Work_ItemSSID)
    return SPIRV::Scope::Invocation;
  else if (Ord == SyncScope::System || Ord == MMI->DeviceSSID)
    return SPIRV::Scope::Device;
  else if (Ord == MMI->WorkGroupSSID)
    return SPIRV::Scope::Workgroup;
  else if (Ord == MMI->AllSVMDevicesSSID)
    return SPIRV::Scope::CrossDevice;
  else if (Ord == MMI->SubGroupSSID)
    return SPIRV::Scope::Subgroup;
  else
    // The OpenCL approach is: "The functions that do not have memory_scope
    // argument have the same semantics as the corresponding functions with
    // the memory_scope argument set to memory_scope_device." See:
    // https://registry.khronos.org/OpenCL/specs/3.0-unified/html/OpenCL_C.html#atomic-functions
    // In our case, if the scope is unknown, we assume the SPIR-V code is to be
    // consumed in an OpenCL environment, take the same approach, and set the
    // scope to memory_scope_device.
    return SPIRV::Scope::Device;
}
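
// As a concrete example of the mapping above, an IR instruction such as
//   fence syncscope("workgroup") acquire
// carries the "workgroup" sync-scope ID and therefore resolves to
// SPIRV::Scope::Workgroup, while a plain `fence acquire` (SyncScope::System)
// resolves to SPIRV::Scope::Device.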

static void addMemoryOperands(MachineMemOperand *MemOp,
                              MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    MIB.addImm(SpvMemOp);
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
      MIB.addImm(MemOp->getAlign().value());
  }
}

static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (Flags & MachineMemOperand::Flags::MOVolatile)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (Flags & MachineMemOperand::Flags::MONonTemporal)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
    MIB.addImm(SpvMemOp);
}
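
// Note that Align::value() is always at least 1, so the MachineMemOperand
// overload above tags every load/store as Aligned and appends the alignment
// literal. A volatile 4-byte-aligned load thus ends up roughly as
//   %v = OpLoad %type %ptr Volatile|Aligned 4
// (an illustrative SPIR-V assembly sketch, not verbatim output).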

bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(Ptr);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
                 .addUse(Ptr)
                 .addUse(StoreVal);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackSave(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stacksave intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSaveMemoryINTEL))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stackrestore intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  if (!I.getOperand(0).isReg())
    return false;
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpRestoreMemoryINTEL))
      .addUse(I.getOperand(0).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Register SrcReg = I.getOperand(1).getReg();
  if (I.getOpcode() == TargetOpcode::G_MEMSET) {
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI);
    unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI);
    SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num, I, TII);
    Register Const = GR.getOrCreateConsIntArray(Val, I, ArrTy, TII);
    SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
        ArrTy, I, TII, SPIRV::StorageClass::UniformConstant);
    // TODO: check if we have such GV, add init, use buildGlobalVariable.
    Function &CurFunction = GR.CurMF->getFunction();
    Type *LLVMArrTy =
        ArrayType::get(IntegerType::get(CurFunction.getContext(), 8), Num);
    // Module takes ownership of the global var.
    GlobalVariable *GV = new GlobalVariable(*CurFunction.getParent(), LLVMArrTy,
                                            true, GlobalValue::InternalLinkage,
                                            Constant::getNullValue(LLVMArrTy));
    Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    GR.add(GV, GR.CurMF, VarReg);

    buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {});
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
        .addDef(VarReg)
        .addUse(GR.getSPIRVTypeID(VarTy))
        .addImm(SPIRV::StorageClass::UniformConstant)
        .addUse(Const)
        .constrainAllUses(TII, TRI, RBI);
    SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
        ValTy, I, TII, SPIRV::StorageClass::UniformConstant);
    SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    selectUnOpWithSrc(SrcReg, SourceTy, I, VarReg, SPIRV::OpBitcast);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addUse(I.getOperand(0).getReg())
                 .addUse(SrcReg)
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
    addMemoryOperands(*I.memoperands_begin(), MIB);
  bool Result = MIB.constrainAllUses(TII, TRI, RBI);
  if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg())
    BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
        .addUse(MIB->getOperand(0).getReg());
  return Result;
}
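
// In outline, the G_MEMSET path above materializes the fill pattern as a
// module-level constant array and then reuses the generic copy machinery:
//   %arr = OpVariable %ptr_UniformConstant_arr UniformConstant %init
//   %src = OpBitcast %ptr_UniformConstant_i8 %arr
//          OpCopyMemorySized %dst %src %num
// (an illustrative SPIR-V assembly sketch; the %names are made up).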

bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode,
                                               unsigned NegateOpcode) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope =
      static_cast<uint32_t>(getScope(MemOp->getSyncScopeID(), MMI));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(1).getReg();
  // TODO: Changed as it's implemented in the translator. See test/atomicrmw.ll
  // auto ScSem =
  // getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr));
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);

  bool Result = false;
  Register ValueReg = I.getOperand(2).getReg();
  if (NegateOpcode != 0) {
    // Translation with a negated value operand was requested.
    Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    Result |= selectUnOpWithSrc(TmpReg, ResType, I, ValueReg, NegateOpcode);
    ValueReg = TmpReg;
  }

  Result |= BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(Ptr)
                .addUse(ScopeReg)
                .addUse(MemSemReg)
                .addUse(ValueReg)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}
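
// Example of the NegateOpcode path: `atomicrmw fsub ptr %p, float %v` has no
// direct SPIR-V counterpart, so spvSelect() routes it here with
// NewOpcode = OpAtomicFAddEXT and NegateOpcode = OpFNegate, producing
//   %neg = OpFNegate %f32 %v
//   %res = OpAtomicFAddEXT %f32 %p %scope %semantics %neg
// (illustrative SPIR-V assembly, not verbatim output).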

bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
  unsigned ArgI = I.getNumOperands() - 1;
  Register SrcReg =
      I.getOperand(ArgI).isReg() ? I.getOperand(ArgI).getReg() : Register(0);
  SPIRVType *DefType =
      SrcReg.isValid() ? GR.getSPIRVTypeForVReg(SrcReg) : nullptr;
  if (!DefType || DefType->getOpcode() != SPIRV::OpTypeVector)
    report_fatal_error(
        "cannot select G_UNMERGE_VALUES with a non-vector argument");

  SPIRVType *ScalarType =
      GR.getSPIRVTypeForVReg(DefType->getOperand(1).getReg());
  MachineBasicBlock &BB = *I.getParent();
  bool Res = false;
  for (unsigned i = 0; i < I.getNumDefs(); ++i) {
    Register ResVReg = I.getOperand(i).getReg();
    SPIRVType *ResType = GR.getSPIRVTypeForVReg(ResVReg);
    if (!ResType) {
      // There were no "assign type" actions; let's fix this now.
      ResType = ScalarType;
      MRI->setRegClass(ResVReg, &SPIRV::IDRegClass);
      MRI->setType(ResVReg, LLT::scalar(GR.getScalarOrVectorBitWidth(ResType)));
      GR.assignSPIRVTypeToVReg(ResType, ResVReg, *GR.CurMF);
    }
    auto MIB =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(SrcReg)
            .addImm(static_cast<int64_t>(i));
    Res |= MIB.constrainAllUses(TII, TRI, RBI);
  }
  return Res;
}

bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
  AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem, I);
  SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
  uint32_t Scope = static_cast<uint32_t>(getScope(Ord, MMI));
  Register ScopeReg = buildI32Constant(Scope, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .constrainAllUses(TII, TRI, RBI);
}
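
// For instance, selectFence() above turns `fence syncscope("workgroup")
// acquire` into
//   OpMemoryBarrier %workgroup_scope %acquire_semantics
// where both operands are i32 constants built by buildI32Constant() (a
// sketch; the concrete semantics mask comes from getMemSemantics()).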

bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  Register ScopeReg;
  Register MemSemEqReg;
  Register MemSemNeqReg;
  Register Ptr = I.getOperand(2).getReg();
  if (!isa<GIntrinsic>(I)) {
    assert(I.hasOneMemOperand());
    const MachineMemOperand *MemOp = *I.memoperands_begin();
    unsigned Scope =
        static_cast<uint32_t>(getScope(MemOp->getSyncScopeID(), MMI));
    ScopeReg = buildI32Constant(Scope, I);

    unsigned ScSem = static_cast<uint32_t>(
        getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
    AtomicOrdering AO = MemOp->getSuccessOrdering();
    unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
    MemSemEqReg = buildI32Constant(MemSemEq, I);
    AtomicOrdering FO = MemOp->getFailureOrdering();
    unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
    MemSemNeqReg =
        MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
  } else {
    ScopeReg = I.getOperand(5).getReg();
    MemSemEqReg = I.getOperand(6).getReg();
    MemSemNeqReg = I.getOperand(7).getReg();
  }

  Register Cmp = I.getOperand(3).getReg();
  Register Val = I.getOperand(4).getReg();
  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  Register ACmpRes = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  const DebugLoc &DL = I.getDebugLoc();
  bool Result =
      BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
          .addDef(ACmpRes)
          .addUse(GR.getSPIRVTypeID(SpvValTy))
          .addUse(Ptr)
          .addUse(ScopeReg)
          .addUse(MemSemEqReg)
          .addUse(MemSemNeqReg)
          .addUse(Val)
          .addUse(Cmp)
          .constrainAllUses(TII, TRI, RBI);
  Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
                .addDef(CmpSuccReg)
                .addUse(GR.getSPIRVTypeID(BoolTy))
                .addUse(ACmpRes)
                .addUse(Cmp)
                .constrainAllUses(TII, TRI, RBI);
  Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(TmpReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(ACmpRes)
                .addUse(GR.getOrCreateUndef(I, ResType, TII))
                .addImm(0)
                .constrainAllUses(TII, TRI, RBI);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(CmpSuccReg)
                .addUse(TmpReg)
                .addImm(1)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}
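
// The sequence above mirrors the IR contract of cmpxchg, whose result is a
// {value, success} pair: OpAtomicCompareExchange yields only the old value,
// so the success flag is recomputed with OpIEqual and both pieces are packed
// into the result struct via two OpCompositeInserts (indices 0 and 1).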

static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Workgroup:
  case SPIRV::StorageClass::CrossWorkgroup:
  case SPIRV::StorageClass::Function:
    return true;
  default:
    return false;
  }
}

static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::DeviceOnlyINTEL:
  case SPIRV::StorageClass::HostOnlyINTEL:
    return true;
  default:
    return false;
  }
}

// In SPIR-V, address space casting can only happen to and from the Generic
// storage class, and only Workgroup, CrossWorkgroup, or Function pointers may
// be cast to and from Generic pointers. As such, we can convert e.g. from
// Workgroup to Function by going via a Generic pointer as an intermediary. All
// other combinations can only be done by a bitcast, and are probably not safe.
bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
997                                                    MachineInstr &I) const {
998   // If the AddrSpaceCast user is single and in OpConstantComposite or
999   // OpVariable, we should select OpSpecConstantOp.
1000   auto UIs = MRI->use_instructions(ResVReg);
1001   if (!UIs.empty() && ++UIs.begin() == UIs.end() &&
1002       (UIs.begin()->getOpcode() == SPIRV::OpConstantComposite ||
1003        UIs.begin()->getOpcode() == SPIRV::OpVariable ||
1004        isSpvIntrinsic(*UIs.begin(), Intrinsic::spv_init_global))) {
1005     Register NewReg = I.getOperand(1).getReg();
1006     MachineBasicBlock &BB = *I.getParent();
1007     SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
1008     ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII,
1009                                              SPIRV::StorageClass::Generic);
1010     bool Result =
1011         BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
1012             .addDef(ResVReg)
1013             .addUse(GR.getSPIRVTypeID(ResType))
1014             .addImm(static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric))
1015             .addUse(NewReg)
1016             .constrainAllUses(TII, TRI, RBI);
1017     return Result;
1018   }
1019   Register SrcPtr = I.getOperand(1).getReg();
1020   SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
1021   SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);
1022   SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResVReg);
1023 
1024   // don't generate a cast between identical storage classes
1025   if (SrcSC == DstSC)
1026     return true;
1027 
1028   // Casting from an eligible pointer to Generic.
1029   if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
1030     return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
1031   // Casting from Generic to an eligible pointer.
1032   if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
1033     return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
1034   // Casting between 2 eligible pointers using Generic as an intermediary.
1035   if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
1036     Register Tmp = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1037     SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
1038         SrcPtrTy, I, TII, SPIRV::StorageClass::Generic);
1039     MachineBasicBlock &BB = *I.getParent();
1040     const DebugLoc &DL = I.getDebugLoc();
1041     bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
1042                        .addDef(Tmp)
1043                        .addUse(GR.getSPIRVTypeID(GenericPtrTy))
1044                        .addUse(SrcPtr)
1045                        .constrainAllUses(TII, TRI, RBI);
1046     return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
1047                           .addDef(ResVReg)
1048                           .addUse(GR.getSPIRVTypeID(ResType))
1049                           .addUse(Tmp)
1050                           .constrainAllUses(TII, TRI, RBI);
1051   }
1052 
1053   // Check if instructions from the SPV_INTEL_usm_storage_classes extension may
1054   // be applied
1055   if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::CrossWorkgroup)
1056     return selectUnOp(ResVReg, ResType, I,
1057                       SPIRV::OpPtrCastToCrossWorkgroupINTEL);
1058   if (SrcSC == SPIRV::StorageClass::CrossWorkgroup && isUSMStorageClass(DstSC))
1059     return selectUnOp(ResVReg, ResType, I,
1060                       SPIRV::OpCrossWorkgroupCastToPtrINTEL);
1061 
  // TODO: Should this case just be disallowed completely?
  // We're casting between two other arbitrary address spaces, so we have to
  // bitcast.
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}
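
// Worked example of the intermediary path above: casting a Workgroup pointer
// to a Function pointer emits
//   %tmp = OpPtrCastToGeneric %ptr_Generic %src
//   %res = OpGenericCastToPtr %ptr_Function %tmp
// (an illustrative SPIR-V assembly sketch; the %names are made up).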

static unsigned getFCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::FCMP_OEQ:
    return SPIRV::OpFOrdEqual;
  case CmpInst::FCMP_OGE:
    return SPIRV::OpFOrdGreaterThanEqual;
  case CmpInst::FCMP_OGT:
    return SPIRV::OpFOrdGreaterThan;
  case CmpInst::FCMP_OLE:
    return SPIRV::OpFOrdLessThanEqual;
  case CmpInst::FCMP_OLT:
    return SPIRV::OpFOrdLessThan;
  case CmpInst::FCMP_ONE:
    return SPIRV::OpFOrdNotEqual;
  case CmpInst::FCMP_ORD:
    return SPIRV::OpOrdered;
  case CmpInst::FCMP_UEQ:
    return SPIRV::OpFUnordEqual;
  case CmpInst::FCMP_UGE:
    return SPIRV::OpFUnordGreaterThanEqual;
  case CmpInst::FCMP_UGT:
    return SPIRV::OpFUnordGreaterThan;
  case CmpInst::FCMP_ULE:
    return SPIRV::OpFUnordLessThanEqual;
  case CmpInst::FCMP_ULT:
    return SPIRV::OpFUnordLessThan;
  case CmpInst::FCMP_UNE:
    return SPIRV::OpFUnordNotEqual;
  case CmpInst::FCMP_UNO:
    return SPIRV::OpUnordered;
  default:
    llvm_unreachable("Unknown predicate type for FCmp");
  }
}

static unsigned getICmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpIEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpINotEqual;
  case CmpInst::ICMP_SGE:
    return SPIRV::OpSGreaterThanEqual;
  case CmpInst::ICMP_SGT:
    return SPIRV::OpSGreaterThan;
  case CmpInst::ICMP_SLE:
    return SPIRV::OpSLessThanEqual;
  case CmpInst::ICMP_SLT:
    return SPIRV::OpSLessThan;
  case CmpInst::ICMP_UGE:
    return SPIRV::OpUGreaterThanEqual;
  case CmpInst::ICMP_UGT:
    return SPIRV::OpUGreaterThan;
  case CmpInst::ICMP_ULE:
    return SPIRV::OpULessThanEqual;
  case CmpInst::ICMP_ULT:
    return SPIRV::OpULessThan;
  default:
    llvm_unreachable("Unknown predicate type for ICmp");
  }
}

static unsigned getPtrCmpOpcode(unsigned Pred) {
  switch (static_cast<CmpInst::Predicate>(Pred)) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpPtrEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpPtrNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for pointer comparison");
  }
}

// Return the logical operation, or abort if none exists.
static unsigned getBoolCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpLogicalEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpLogicalNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for Bool comparison");
  }
}

bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectFreeze(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I) const {
  // There is no way to implement `freeze` correctly without support on the
  // SPIR-V standard side, but we may at least address the simple (static) case
  // when the presence of an undef/poison value is obvious. The main benefit of
  // even incomplete `freeze` support is preventing the translation from
  // crashing due to lack of support on the legalization and instruction
  // selection steps.
  if (!I.getOperand(0).isReg() || !I.getOperand(1).isReg())
    return false;
  Register OpReg = I.getOperand(1).getReg();
  if (MachineInstr *Def = MRI->getVRegDef(OpReg)) {
    Register Reg;
    switch (Def->getOpcode()) {
    case SPIRV::ASSIGN_TYPE:
      if (MachineInstr *AssignToDef =
              MRI->getVRegDef(Def->getOperand(1).getReg())) {
        if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
          Reg = Def->getOperand(2).getReg();
      }
      break;
    case SPIRV::OpUndef:
      Reg = Def->getOperand(1).getReg();
      break;
    }
    unsigned DestOpCode;
    if (Reg.isValid()) {
      DestOpCode = SPIRV::OpConstantNull;
    } else {
      DestOpCode = TargetOpcode::COPY;
      Reg = OpReg;
    }
    return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(DestOpCode))
        .addDef(I.getOperand(0).getReg())
        .addUse(Reg)
        .constrainAllUses(TII, TRI, RBI);
  }
  return false;
}

bool SPIRVInstructionSelector::selectConstVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // TODO: only const case is supported for now.
  assert(std::all_of(
      I.operands_begin(), I.operands_end(), [this](const MachineOperand &MO) {
        if (MO.isDef())
          return true;
        if (!MO.isReg())
          return false;
        SPIRVType *ConstTy = this->MRI->getVRegDef(MO.getReg());
        assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
               ConstTy->getOperand(1).isReg());
        Register ConstReg = ConstTy->getOperand(1).getReg();
        const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
        assert(Const);
        return (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
                Const->getOpcode() == TargetOpcode::G_FCONSTANT);
      }));

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(SPIRV::OpConstantComposite))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  return MIB.constrainAllUses(TII, TRI, RBI);
}

static unsigned getArrayComponentCount(MachineRegisterInfo *MRI,
                                       const SPIRVType *ResType) {
  Register OpReg = ResType->getOperand(2).getReg();
  SPIRVType *OpDef = MRI->getVRegDef(OpReg);
  if (!OpDef)
    return 0;
  if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
      OpDef->getOperand(1).isReg()) {
    if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
      OpDef = RefDef;
  }
  unsigned N = OpDef->getOpcode() == TargetOpcode::G_CONSTANT
                   ? OpDef->getOperand(1).getCImm()->getValue().getZExtValue()
                   : 0;
  return N;
}
1251 
1252 bool SPIRVInstructionSelector::selectSplatVector(Register ResVReg,
1253                                                  const SPIRVType *ResType,
1254                                                  MachineInstr &I) const {
1255   unsigned N = 0;
1256   if (ResType->getOpcode() == SPIRV::OpTypeVector)
1257     N = GR.getScalarOrVectorComponentCount(ResType);
1258   else if (ResType->getOpcode() == SPIRV::OpTypeArray)
1259     N = getArrayComponentCount(MRI, ResType);
1260   else
1261     report_fatal_error("Cannot select G_SPLAT_VECTOR with a non-vector result");
1262 
1263   unsigned OpIdx = I.getNumExplicitDefs();
1264   if (!I.getOperand(OpIdx).isReg())
1265     report_fatal_error("Unexpected argument in G_SPLAT_VECTOR");
1266 
1267   // Check whether we can construct a constant vector.
1268   Register OpReg = I.getOperand(OpIdx).getReg();
1269   bool IsConst = false;
1270   if (SPIRVType *OpDef = MRI->getVRegDef(OpReg)) {
1271     if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
1272         OpDef->getOperand(1).isReg()) {
1273       if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
1274         OpDef = RefDef;
1275     }
1276     IsConst = OpDef->getOpcode() == TargetOpcode::G_CONSTANT ||
1277               OpDef->getOpcode() == TargetOpcode::G_FCONSTANT;
1278   }
1279 
1280   if (!IsConst && N < 2)
1281     report_fatal_error(
1282         "There must be at least two constituent operands in a vector");
1283 
1284   auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
1285                      TII.get(IsConst ? SPIRV::OpConstantComposite
1286                                      : SPIRV::OpCompositeConstruct))
1287                  .addDef(ResVReg)
1288                  .addUse(GR.getSPIRVTypeID(ResType));
1289   for (unsigned i = 0; i < N; ++i)
1290     MIB.addUse(OpReg);
1291   return MIB.constrainAllUses(TII, TRI, RBI);
1292 }
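// Hedged sketch of the two outcomes above (ids hypothetical): a splat of a
// constant repeats the operand N times in a constant composite,
//   %v = OpConstantComposite %v4uint %c %c %c %c
// while a splat of a non-constant value falls back to
//   %v = OpCompositeConstruct %v4uint %x %x %x %x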
1293 
1294 bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
1295                                          const SPIRVType *ResType,
1296                                          unsigned CmpOpc,
1297                                          MachineInstr &I) const {
1298   Register Cmp0 = I.getOperand(2).getReg();
1299   Register Cmp1 = I.getOperand(3).getReg();
1300   assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
1301              GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
1302          "CMP operands should have the same type");
1303   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
1304       .addDef(ResVReg)
1305       .addUse(GR.getSPIRVTypeID(ResType))
1306       .addUse(Cmp0)
1307       .addUse(Cmp1)
1308       .constrainAllUses(TII, TRI, RBI);
1309 }
1310 
1311 bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
1312                                           const SPIRVType *ResType,
1313                                           MachineInstr &I) const {
1314   auto Pred = I.getOperand(1).getPredicate();
1315   unsigned CmpOpc;
1316 
1317   Register CmpOperand = I.getOperand(2).getReg();
1318   if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
1319     CmpOpc = getPtrCmpOpcode(Pred);
1320   else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
1321     CmpOpc = getBoolCmpOpcode(Pred);
1322   else
1323     CmpOpc = getICmpOpcode(Pred);
1324   return selectCmp(ResVReg, ResType, CmpOpc, I);
1325 }
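// Hedged illustration of the dispatch above: pointer operands go through
// getPtrCmpOpcode, bools through getBoolCmpOpcode, and plain integers take a
// signedness-aware opcode from getICmpOpcode, e.g. (assumed mapping)
//   %r = OpSLessThan %bool %a %b   ; icmp slt i32 %a, %b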
1326 
1327 void SPIRVInstructionSelector::renderFImm32(MachineInstrBuilder &MIB,
1328                                             const MachineInstr &I,
1329                                             int OpIdx) const {
1330   assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
1331          "Expected G_FCONSTANT");
1332   const ConstantFP *FPImm = I.getOperand(1).getFPImm();
1333   addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
1334 }
1335 
1336 void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
1337                                            const MachineInstr &I,
1338                                            int OpIdx) const {
1339   assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1340          "Expected G_CONSTANT");
1341   addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
1342 }
1343 
1344 Register
1345 SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
1346                                            const SPIRVType *ResType) const {
1347   Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
1348   const SPIRVType *SpvI32Ty =
1349       ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
1350   // Find the constant in the duplicates tracker (DT) or build a new one.
1351   auto ConstInt = ConstantInt::get(LLVMTy, Val);
1352   Register NewReg = GR.find(ConstInt, GR.CurMF);
1353   if (!NewReg.isValid()) {
1354     NewReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
1355     GR.add(ConstInt, GR.CurMF, NewReg);
1356     MachineInstr *MI;
1357     MachineBasicBlock &BB = *I.getParent();
1358     if (Val == 0) {
1359       MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1360                .addDef(NewReg)
1361                .addUse(GR.getSPIRVTypeID(SpvI32Ty));
1362     } else {
1363       MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
1364                .addDef(NewReg)
1365                .addUse(GR.getSPIRVTypeID(SpvI32Ty))
1366                .addImm(APInt(32, Val).getZExtValue());
1367     }
1368     constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
1369   }
1370   return NewReg;
1371 }
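// Minimal usage sketch (values hypothetical): buildI32Constant(0, I) yields a
// register defined by `OpConstantNull %uint`, buildI32Constant(42, I) one
// defined by `OpConstantI %uint 42`; repeated requests for the same value are
// deduplicated through GR.find/GR.add rather than re-emitted.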
1372 
1373 bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
1374                                           const SPIRVType *ResType,
1375                                           MachineInstr &I) const {
1376   unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
1377   return selectCmp(ResVReg, ResType, CmpOp, I);
1378 }
1379 
1380 Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
1381                                                  MachineInstr &I) const {
1382   if (ResType->getOpcode() == SPIRV::OpTypeVector)
1383     return GR.getOrCreateConsIntVector(0, I, ResType, TII);
1384   return GR.getOrCreateConstInt(0, I, ResType, TII);
1385 }
1386 
1387 Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
1388                                                 const SPIRVType *ResType,
1389                                                 MachineInstr &I) const {
1390   unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
1391   APInt One =
1392       AllOnes ? APInt::getAllOnes(BitWidth) : APInt::getOneBitSet(BitWidth, 0);
1393   if (ResType->getOpcode() == SPIRV::OpTypeVector)
1394     return GR.getOrCreateConsIntVector(One.getZExtValue(), I, ResType, TII);
1395   return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII);
1396 }
1397 
1398 bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
1399                                             const SPIRVType *ResType,
1400                                             MachineInstr &I,
1401                                             bool IsSigned) const {
1402   // To extend a bool, we need to use OpSelect between constants.
1403   Register ZeroReg = buildZerosVal(ResType, I);
1404   Register OneReg = buildOnesVal(IsSigned, ResType, I);
1405   bool IsScalarBool =
1406       GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
1407   unsigned Opcode =
1408       IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
1409   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
1410       .addDef(ResVReg)
1411       .addUse(GR.getSPIRVTypeID(ResType))
1412       .addUse(I.getOperand(1).getReg())
1413       .addUse(OneReg)
1414       .addUse(ZeroReg)
1415       .constrainAllUses(TII, TRI, RBI);
1416 }
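// Rough shape of the bool extension above (constant ids assumed):
//   zext: %r = OpSelect %uint %cond %uint_1 %uint_0
//   sext: %r = OpSelect %uint %cond %uint_4294967295 %uint_0
// with OpSelectSIVCond used when the result type is a vector.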
1417 
1418 bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
1419                                           const SPIRVType *ResType,
1420                                           MachineInstr &I, bool IsSigned,
1421                                           unsigned Opcode) const {
1422   Register SrcReg = I.getOperand(1).getReg();
1423   // We can convert a bool value directly to a float type without OpConvert*ToF;
1424   // however, the translator generates OpSelect+OpConvert*ToF, so we do the same.
1425   if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
1426     unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
1427     SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
1428     if (ResType->getOpcode() == SPIRV::OpTypeVector) {
1429       const unsigned NumElts = ResType->getOperand(2).getImm();
1430       TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
1431     }
1432     SrcReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1433     selectSelect(SrcReg, TmpType, I, false);
1434   }
1435   return selectUnOpWithSrc(ResVReg, ResType, I, SrcReg, Opcode);
1436 }
1437 
1438 bool SPIRVInstructionSelector::selectExt(Register ResVReg,
1439                                          const SPIRVType *ResType,
1440                                          MachineInstr &I, bool IsSigned) const {
1441   if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool))
1442     return selectSelect(ResVReg, ResType, I, IsSigned);
1443   unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
1444   return selectUnOp(ResVReg, ResType, I, Opcode);
1445 }
1446 
1447 bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
1448                                                Register ResVReg,
1449                                                MachineInstr &I,
1450                                                const SPIRVType *IntTy,
1451                                                const SPIRVType *BoolTy) const {
1452   // To truncate to a bool, we AND the value with 1, then OpINotEqual with zero.
1453   Register BitIntReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1454   bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
1455   unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
1456   Register Zero = buildZerosVal(IntTy, I);
1457   Register One = buildOnesVal(false, IntTy, I);
1458   MachineBasicBlock &BB = *I.getParent();
1459   BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
1460       .addDef(BitIntReg)
1461       .addUse(GR.getSPIRVTypeID(IntTy))
1462       .addUse(IntReg)
1463       .addUse(One)
1464       .constrainAllUses(TII, TRI, RBI);
1465   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
1466       .addDef(ResVReg)
1467       .addUse(GR.getSPIRVTypeID(BoolTy))
1468       .addUse(BitIntReg)
1469       .addUse(Zero)
1470       .constrainAllUses(TII, TRI, RBI);
1471 }
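// Rough shape of the emitted truncation (register names hypothetical):
//   %masked = OpBitwiseAnd %uint %value %uint_1
//   %res    = OpINotEqual %bool %masked %uint_0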
1472 
1473 bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
1474                                            const SPIRVType *ResType,
1475                                            MachineInstr &I) const {
1476   if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool)) {
1477     Register IntReg = I.getOperand(1).getReg();
1478     const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
1479     return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType);
1480   }
1481   bool IsSigned = GR.isScalarOrVectorSigned(ResType);
1482   unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
1483   return selectUnOp(ResVReg, ResType, I, Opcode);
1484 }
1485 
1486 bool SPIRVInstructionSelector::selectConst(Register ResVReg,
1487                                            const SPIRVType *ResType,
1488                                            const APInt &Imm,
1489                                            MachineInstr &I) const {
1490   unsigned TyOpcode = ResType->getOpcode();
1491   assert(TyOpcode != SPIRV::OpTypePointer || Imm.isZero());
1492   MachineBasicBlock &BB = *I.getParent();
1493   if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
1494       Imm.isZero())
1495     return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1496         .addDef(ResVReg)
1497         .addUse(GR.getSPIRVTypeID(ResType))
1498         .constrainAllUses(TII, TRI, RBI);
1499   if (TyOpcode == SPIRV::OpTypeInt) {
1500     assert(Imm.getBitWidth() <= 64 && "Unsupported integer width!");
1501     Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII);
1502     if (Reg == ResVReg)
1503       return true;
1504     return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
1505         .addDef(ResVReg)
1506         .addUse(Reg)
1507         .constrainAllUses(TII, TRI, RBI);
1508   }
1509   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
1510                  .addDef(ResVReg)
1511                  .addUse(GR.getSPIRVTypeID(ResType));
1512   // <=32-bit integers should be caught by the sdag pattern.
1513   assert(Imm.getBitWidth() > 32);
1514   addNumImm(Imm, MIB);
1515   return MIB.constrainAllUses(TII, TRI, RBI);
1516 }
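// Hedged examples of the constant forms selected above (ids hypothetical):
//   null pointer/event: %c = OpConstantNull %ptr
//   wide integer:       %c = OpConstantI %ulong <low word> <high word>
// where addNumImm is expected to split a >32-bit immediate into 32-bit words
// per the SPIR-V literal encoding.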
1517 
1518 bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
1519                                              const SPIRVType *ResType,
1520                                              MachineInstr &I) const {
1521   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
1522       .addDef(ResVReg)
1523       .addUse(GR.getSPIRVTypeID(ResType))
1524       .constrainAllUses(TII, TRI, RBI);
1525 }
1526 
1527 static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
1528   assert(MO.isReg());
1529   const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
1530   if (TypeInst->getOpcode() != SPIRV::ASSIGN_TYPE)
1531     return false;
1532   assert(TypeInst->getOperand(1).isReg());
1533   MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
1534   return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
1535 }
1536 
1537 static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
1538   const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
1539   MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
1540   assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT);
1541   return ImmInst->getOperand(1).getCImm()->getZExtValue();
1542 }
1543 
1544 bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
1545                                                const SPIRVType *ResType,
1546                                                MachineInstr &I) const {
1547   MachineBasicBlock &BB = *I.getParent();
1548   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
1549                  .addDef(ResVReg)
1550                  .addUse(GR.getSPIRVTypeID(ResType))
1551                  // Object to insert.
1552                  .addUse(I.getOperand(3).getReg())
1553                  // Composite to insert into.
1554                  .addUse(I.getOperand(2).getReg());
1555   for (unsigned i = 4; i < I.getNumOperands(); i++)
1556     MIB.addImm(foldImm(I.getOperand(i), MRI));
1557   return MIB.constrainAllUses(TII, TRI, RBI);
1558 }
1559 
1560 bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
1561                                                 const SPIRVType *ResType,
1562                                                 MachineInstr &I) const {
1563   MachineBasicBlock &BB = *I.getParent();
1564   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
1565                  .addDef(ResVReg)
1566                  .addUse(GR.getSPIRVTypeID(ResType))
1567                  .addUse(I.getOperand(2).getReg());
1568   for (unsigned i = 3; i < I.getNumOperands(); i++)
1569     MIB.addImm(foldImm(I.getOperand(i), MRI));
1570   return MIB.constrainAllUses(TII, TRI, RBI);
1571 }
1572 
1573 bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
1574                                                const SPIRVType *ResType,
1575                                                MachineInstr &I) const {
1576   if (isImm(I.getOperand(4), MRI))
1577     return selectInsertVal(ResVReg, ResType, I);
1578   MachineBasicBlock &BB = *I.getParent();
1579   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
1580       .addDef(ResVReg)
1581       .addUse(GR.getSPIRVTypeID(ResType))
1582       .addUse(I.getOperand(2).getReg())
1583       .addUse(I.getOperand(3).getReg())
1584       .addUse(I.getOperand(4).getReg())
1585       .constrainAllUses(TII, TRI, RBI);
1586 }
1587 
1588 bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
1589                                                 const SPIRVType *ResType,
1590                                                 MachineInstr &I) const {
1591   if (isImm(I.getOperand(3), MRI))
1592     return selectExtractVal(ResVReg, ResType, I);
1593   MachineBasicBlock &BB = *I.getParent();
1594   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
1595       .addDef(ResVReg)
1596       .addUse(GR.getSPIRVTypeID(ResType))
1597       .addUse(I.getOperand(2).getReg())
1598       .addUse(I.getOperand(3).getReg())
1599       .constrainAllUses(TII, TRI, RBI);
1600 }
1601 
1602 bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
1603                                          const SPIRVType *ResType,
1604                                          MachineInstr &I) const {
1605   const bool IsGEPInBounds = I.getOperand(2).getImm();
1606 
1607   // OpAccessChain could be used for OpenCL, but the SPIRV-LLVM Translator only
1608   // relies on PtrAccessChain, so we'll try not to deviate. For Vulkan, however,
1609   // we have to use Op[InBounds]AccessChain.
1610   const unsigned Opcode = STI.isVulkanEnv()
1611                               ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
1612                                                : SPIRV::OpAccessChain)
1613                               : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain
1614                                                : SPIRV::OpPtrAccessChain);
1615 
1616   auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
1617                  .addDef(ResVReg)
1618                  .addUse(GR.getSPIRVTypeID(ResType))
1619                  // Object to get a pointer to.
1620                  .addUse(I.getOperand(3).getReg());
1621   // Adding indices.
1622   const unsigned StartingIndex =
1623       (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
1624           ? 5
1625           : 4;
1626   for (unsigned i = StartingIndex; i < I.getNumExplicitOperands(); ++i)
1627     Res.addUse(I.getOperand(i).getReg());
1628   return Res.constrainAllUses(TII, TRI, RBI);
1629 }
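// Illustrative operand layout (a sketch; ids hypothetical):
//   %r = OpPtrAccessChain %ptr_ty %base %element_idx %idx0 %idx1 ...
// For Op[InBounds]AccessChain the leading element index is not part of the
// instruction, which is why copying starts at operand 5 instead of 4.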
1630 
1631 bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
1632                                                const SPIRVType *ResType,
1633                                                MachineInstr &I) const {
1634   MachineBasicBlock &BB = *I.getParent();
1635   Intrinsic::ID IID = cast<GIntrinsic>(I).getIntrinsicID();
1636   switch (IID) {
1637   case Intrinsic::spv_load:
1638     return selectLoad(ResVReg, ResType, I);
1639   case Intrinsic::spv_store:
1640     return selectStore(I);
1641   case Intrinsic::spv_extractv:
1642     return selectExtractVal(ResVReg, ResType, I);
1643   case Intrinsic::spv_insertv:
1644     return selectInsertVal(ResVReg, ResType, I);
1645   case Intrinsic::spv_extractelt:
1646     return selectExtractElt(ResVReg, ResType, I);
1647   case Intrinsic::spv_insertelt:
1648     return selectInsertElt(ResVReg, ResType, I);
1649   case Intrinsic::spv_gep:
1650     return selectGEP(ResVReg, ResType, I);
1651   case Intrinsic::spv_unref_global:
1652   case Intrinsic::spv_init_global: {
1653     MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
1654     MachineInstr *Init = I.getNumExplicitOperands() > 2
1655                              ? MRI->getVRegDef(I.getOperand(2).getReg())
1656                              : nullptr;
1657     assert(MI);
1658     return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
1659   }
1660   case Intrinsic::spv_undef: {
1661     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
1662                    .addDef(ResVReg)
1663                    .addUse(GR.getSPIRVTypeID(ResType));
1664     return MIB.constrainAllUses(TII, TRI, RBI);
1665   }
1666   case Intrinsic::spv_const_composite: {
1667     // If no values are attached, the composite is a null constant.
1668     bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
1669     unsigned Opcode =
1670         IsNull ? SPIRV::OpConstantNull : SPIRV::OpConstantComposite;
1671     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
1672                    .addDef(ResVReg)
1673                    .addUse(GR.getSPIRVTypeID(ResType));
1674     // Skip the type MD node we already used when generating assign.type.
1675     if (!IsNull) {
1676       for (unsigned i = I.getNumExplicitDefs() + 1;
1677            i < I.getNumExplicitOperands(); ++i) {
1678         MIB.addUse(I.getOperand(i).getReg());
1679       }
1680     }
1681     return MIB.constrainAllUses(TII, TRI, RBI);
1682   }
1683   case Intrinsic::spv_assign_name: {
1684     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
1685     MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
1686     for (unsigned i = I.getNumExplicitDefs() + 2;
1687          i < I.getNumExplicitOperands(); ++i) {
1688       MIB.addImm(I.getOperand(i).getImm());
1689     }
1690     return MIB.constrainAllUses(TII, TRI, RBI);
1691   }
1692   case Intrinsic::spv_switch: {
1693     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
1694     for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
1695       if (I.getOperand(i).isReg())
1696         MIB.addReg(I.getOperand(i).getReg());
1697       else if (I.getOperand(i).isCImm())
1698         addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
1699       else if (I.getOperand(i).isMBB())
1700         MIB.addMBB(I.getOperand(i).getMBB());
1701       else
1702         llvm_unreachable("Unexpected OpSwitch operand");
1703     }
1704     return MIB.constrainAllUses(TII, TRI, RBI);
1705   }
1706   case Intrinsic::spv_cmpxchg:
1707     return selectAtomicCmpXchg(ResVReg, ResType, I);
1708   case Intrinsic::spv_unreachable:
1709     BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable));
1710     break;
1711   case Intrinsic::spv_alloca:
1712     return selectFrameIndex(ResVReg, ResType, I);
1713   case Intrinsic::spv_alloca_array:
1714     return selectAllocaArray(ResVReg, ResType, I);
1715   case Intrinsic::spv_assume:
1716     if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
1717       BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
1718           .addUse(I.getOperand(1).getReg());
1719     break;
1720   case Intrinsic::spv_expect:
1721     if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
1722       BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
1723           .addDef(ResVReg)
1724           .addUse(GR.getSPIRVTypeID(ResType))
1725           .addUse(I.getOperand(2).getReg())
1726           .addUse(I.getOperand(3).getReg());
1727     break;
1728   case Intrinsic::spv_thread_id:
1729     return selectSpvThreadId(ResVReg, ResType, I);
1730   case Intrinsic::spv_lifetime_start:
1731   case Intrinsic::spv_lifetime_end: {
1732     unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
1733                                                        : SPIRV::OpLifetimeStop;
1734     int64_t Size = I.getOperand(I.getNumExplicitDefs() + 1).getImm();
1735     Register PtrReg = I.getOperand(I.getNumExplicitDefs() + 2).getReg();
1736     unsigned PointeeOpType = GR.getPointeeTypeOp(PtrReg);
1737     bool IsNonvoidPtr = PointeeOpType != 0 && PointeeOpType != SPIRV::OpTypeVoid;
1738     if (Size == -1 || IsNonvoidPtr)
1739       Size = 0;
1740     BuildMI(BB, I, I.getDebugLoc(), TII.get(Op)).addUse(PtrReg).addImm(Size);
1741   } break;
1742   default: {
1743     std::string DiagMsg;
1744     raw_string_ostream OS(DiagMsg);
1745     I.print(OS);
1746     DiagMsg = "Intrinsic selection not implemented: " + DiagMsg;
1747     report_fatal_error(DiagMsg.c_str(), false);
1748   }
1749   }
1750   return true;
1751 }
1752 
1753 bool SPIRVInstructionSelector::selectAllocaArray(Register ResVReg,
1754                                                  const SPIRVType *ResType,
1755                                                  MachineInstr &I) const {
1756   // This is reached when the allocation instruction has an allocation size
1757   // parameter that is not equal to 1.
1758   MachineBasicBlock &BB = *I.getParent();
1759   return BuildMI(BB, I, I.getDebugLoc(),
1760                  TII.get(SPIRV::OpVariableLengthArrayINTEL))
1761       .addDef(ResVReg)
1762       .addUse(GR.getSPIRVTypeID(ResType))
1763       .addUse(I.getOperand(2).getReg())
1764       .constrainAllUses(TII, TRI, RBI);
1765 }
1766 
1767 bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
1768                                                 const SPIRVType *ResType,
1769                                                 MachineInstr &I) const {
1770   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
1771       .addDef(ResVReg)
1772       .addUse(GR.getSPIRVTypeID(ResType))
1773       .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
1774       .constrainAllUses(TII, TRI, RBI);
1775 }
1776 
1777 bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
1778   // InstructionSelector walks backwards through the instructions. We can use
1779   // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
1780   // first, so we can generate an OpBranchConditional here. If there is no
1781   // G_BRCOND, we just use OpBranch for a regular unconditional branch.
1782   const MachineInstr *PrevI = I.getPrevNode();
1783   MachineBasicBlock &MBB = *I.getParent();
1784   if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
1785     return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
1786         .addUse(PrevI->getOperand(0).getReg())
1787         .addMBB(PrevI->getOperand(1).getMBB())
1788         .addMBB(I.getOperand(0).getMBB())
1789         .constrainAllUses(TII, TRI, RBI);
1790   }
1791   return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
1792       .addMBB(I.getOperand(0).getMBB())
1793       .constrainAllUses(TII, TRI, RBI);
1794 }
1795 
1796 bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
1797   // InstructionSelector walks backwards through the instructions. For an
1798   // explicit conditional branch with no fallthrough, we use both a G_BR and a
1799   // G_BRCOND to create an OpBranchConditional. We should hit G_BR first, and
1800   // generate the OpBranchConditional in selectBranch above.
1801   //
1802   // If an OpBranchConditional has been generated, we simply return, as the work
1803   // is already done. If there is no OpBranchConditional, LLVM must be relying on
1804   // implicit fallthrough to the next basic block, so we need to create an
1805   // OpBranchConditional with an explicit "false" argument pointing to the next
1806   // basic block that LLVM would fall through to.
1807   const MachineInstr *NextI = I.getNextNode();
1808   // Check if this has already been successfully selected.
1809   if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
1810     return true;
1811   // Must be relying on implicit block fallthrough, so generate an
1812   // OpBranchConditional with the "next" basic block as the "false" target.
1813   MachineBasicBlock &MBB = *I.getParent();
1814   unsigned NextMBBNum = MBB.getNextNode()->getNumber();
1815   MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
1816   return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
1817       .addUse(I.getOperand(0).getReg())
1818       .addMBB(I.getOperand(1).getMBB())
1819       .addMBB(NextMBB)
1820       .constrainAllUses(TII, TRI, RBI);
1821 }
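// Sketch of the fallthrough case (block names hypothetical): for MIR like
//   G_BRCOND %cond, %bb.then   ; no trailing G_BR in this block
// the code above emits, roughly,
//   OpBranchConditional %cond %then %next
// where %next is the textually following basic block.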
1822 
1823 bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
1824                                          const SPIRVType *ResType,
1825                                          MachineInstr &I) const {
1826   auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
1827                  .addDef(ResVReg)
1828                  .addUse(GR.getSPIRVTypeID(ResType));
1829   const unsigned NumOps = I.getNumOperands();
1830   for (unsigned i = 1; i < NumOps; i += 2) {
1831     MIB.addUse(I.getOperand(i + 0).getReg());
1832     MIB.addMBB(I.getOperand(i + 1).getMBB());
1833   }
1834   return MIB.constrainAllUses(TII, TRI, RBI);
1835 }
1836 
1837 bool SPIRVInstructionSelector::selectGlobalValue(
1838     Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
1839   // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
1840   MachineIRBuilder MIRBuilder(I);
1841   const GlobalValue *GV = I.getOperand(1).getGlobal();
1842   Type *GVType = GV->getValueType();
1843   SPIRVType *PointerBaseType;
1844   if (GVType->isArrayTy()) {
1845     SPIRVType *ArrayElementType =
1846         GR.getOrCreateSPIRVType(GVType->getArrayElementType(), MIRBuilder,
1847                                 SPIRV::AccessQualifier::ReadWrite, false);
1848     PointerBaseType = GR.getOrCreateSPIRVArrayType(
1849         ArrayElementType, GVType->getArrayNumElements(), I, TII);
1850   } else {
1851     PointerBaseType = GR.getOrCreateSPIRVType(
1852         GVType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
1853   }
1854   SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
1855       PointerBaseType, I, TII,
1856       addressSpaceToStorageClass(GV->getAddressSpace(), STI));
1857 
1858   std::string GlobalIdent;
1859   if (!GV->hasName()) {
1860     unsigned &ID = UnnamedGlobalIDs[GV];
1861     if (ID == 0)
1862       ID = UnnamedGlobalIDs.size();
1863     GlobalIdent = "__unnamed_" + Twine(ID).str();
1864   } else {
1865     GlobalIdent = GV->getGlobalIdentifier();
1866   }
1867 
1868   // The behavior of functions as operands depends on the availability of the
1869   // corresponding extension (SPV_INTEL_function_pointers):
1870   // - If the extension to operate with functions as operands is available:
1871   // we create a proper constant operand and evaluate the correct type for the
1872   // function pointer.
1873   // - Without the required extension:
1874   // we have functions as operands in tests with blocks of instructions, e.g. in
1875   // transcoding/global_block.ll. Those operands are not used and should be
1876   // substituted by zero constants. Their type is always expected to be
1877   // OpTypePointer Function %uchar.
1878   if (isa<Function>(GV)) {
1879     const Constant *ConstVal = GV;
1880     MachineBasicBlock &BB = *I.getParent();
1881     Register NewReg = GR.find(ConstVal, GR.CurMF);
1882     if (!NewReg.isValid()) {
1883       Register NewReg = ResVReg;
1884       GR.add(ConstVal, GR.CurMF, NewReg);
1885       const Function *GVFun =
1886           STI.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)
1887               ? dyn_cast<Function>(GV)
1888               : nullptr;
1889       if (GVFun) {
1890         // References to a function via function pointers generate virtual
1891         // registers without a definition. We will resolve them later, during
1892         // the module analysis stage.
1893         MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1894         Register FuncVReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
1895         MRI->setRegClass(FuncVReg, &SPIRV::IDRegClass);
1896         MachineInstrBuilder MB =
1897             BuildMI(BB, I, I.getDebugLoc(),
1898                     TII.get(SPIRV::OpConstantFunctionPointerINTEL))
1899                 .addDef(NewReg)
1900                 .addUse(GR.getSPIRVTypeID(ResType))
1901                 .addUse(FuncVReg);
1902         // mapping the function pointer to the used Function
1903         // Map the function pointer to the Function it refers to.
1904         return MB.constrainAllUses(TII, TRI, RBI);
1905       }
1906       return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1907           .addDef(NewReg)
1908           .addUse(GR.getSPIRVTypeID(ResType))
1909           .constrainAllUses(TII, TRI, RBI);
1910     }
1911     assert(NewReg != ResVReg);
1912     return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
1913         .addDef(ResVReg)
1914         .addUse(NewReg)
1915         .constrainAllUses(TII, TRI, RBI);
1916   }
1917   auto GlobalVar = cast<GlobalVariable>(GV);
1918   assert(GlobalVar->getName() != "llvm.global.annotations");
1919 
1920   bool HasInit = GlobalVar->hasInitializer() &&
1921                  !isa<UndefValue>(GlobalVar->getInitializer());
1922   // Skip the empty declaration for GVs with initializers until we get the
1923   // declaration with the initializer passed in.
1924   if (HasInit && !Init)
1925     return true;
1926 
1927   unsigned AddrSpace = GV->getAddressSpace();
1928   SPIRV::StorageClass::StorageClass Storage =
1929       addressSpaceToStorageClass(AddrSpace, STI);
1930   bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage &&
1931                   Storage != SPIRV::StorageClass::Function;
1932   SPIRV::LinkageType::LinkageType LnkType =
1933       (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
1934           ? SPIRV::LinkageType::Import
1935           : (GV->getLinkage() == GlobalValue::LinkOnceODRLinkage &&
1936                      STI.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr)
1937                  ? SPIRV::LinkageType::LinkOnceODR
1938                  : SPIRV::LinkageType::Export);
1939 
1940   Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
1941                                         Storage, Init, GlobalVar->isConstant(),
1942                                         HasLnkTy, LnkType, MIRBuilder, true);
1943   return Reg.isValid();
1944 }
1945 
1946 bool SPIRVInstructionSelector::selectLog10(Register ResVReg,
1947                                            const SPIRVType *ResType,
1948                                            MachineInstr &I) const {
1949   if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
1950     return selectExtInst(ResVReg, ResType, I, CL::log10);
1951   }
1952 
1953   // There is no log10 instruction in the GLSL Extended Instruction set, so it
1954   // is implemented as:
1955   // log10(x) = log2(x) * (1 / log2(10))
1956   //          = log2(x) * 0.30103
1957 
1958   MachineIRBuilder MIRBuilder(I);
1959   MachineBasicBlock &BB = *I.getParent();
1960 
1961   // Build log2(x).
1962   Register VarReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1963   bool Result =
1964       BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
1965           .addDef(VarReg)
1966           .addUse(GR.getSPIRVTypeID(ResType))
1967           .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
1968           .addImm(GL::Log2)
1969           .add(I.getOperand(1))
1970           .constrainAllUses(TII, TRI, RBI);
1971 
1972   // Build 0.30103.
1973   assert(ResType->getOpcode() == SPIRV::OpTypeVector ||
1974          ResType->getOpcode() == SPIRV::OpTypeFloat);
1975   // TODO: Add matrix implementation once supported by the HLSL frontend.
1976   const SPIRVType *SpirvScalarType =
1977       ResType->getOpcode() == SPIRV::OpTypeVector
1978           ? GR.getSPIRVTypeForVReg(ResType->getOperand(1).getReg())
1979           : ResType;
1980   Register ScaleReg =
1981       GR.buildConstantFP(APFloat(0.30103f), MIRBuilder, SpirvScalarType);
1982 
1983   // Multiply log2(x) by 0.30103 to get log10(x) result.
1984   auto Opcode = ResType->getOpcode() == SPIRV::OpTypeVector
1985                     ? SPIRV::OpVectorTimesScalar
1986                     : SPIRV::OpFMulS;
1987   Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
1988                 .addDef(ResVReg)
1989                 .addUse(GR.getSPIRVTypeID(ResType))
1990                 .addUse(VarReg)
1991                 .addUse(ScaleReg)
1992                 .constrainAllUses(TII, TRI, RBI);
1993 
1994   return Result;
1995 }
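// Rough shape of the GLSL expansion above (ids hypothetical):
//   %log2 = OpExtInst %float %glsl_std_450 Log2 %x
//   %res  = OpFMul %float %log2 %float_0_30103   ; OpVectorTimesScalar for
//                                                ; vector result types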
1996 
1997 bool SPIRVInstructionSelector::selectSpvThreadId(Register ResVReg,
1998                                                  const SPIRVType *ResType,
1999                                                  MachineInstr &I) const {
2000   // DX intrinsic: @llvm.dx.thread.id(i32)
2001   // ID  Name      Description
2002   // 93  ThreadId  reads the thread ID
2003 
2004   MachineIRBuilder MIRBuilder(I);
2005   const SPIRVType *U32Type = GR.getOrCreateSPIRVIntegerType(32, MIRBuilder);
2006   const SPIRVType *Vec3Ty =
2007       GR.getOrCreateSPIRVVectorType(U32Type, 3, MIRBuilder);
2008   const SPIRVType *PtrType = GR.getOrCreateSPIRVPointerType(
2009       Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);
2010 
2011   // Create a new register for the GlobalInvocationID builtin variable.
2012   Register NewRegister =
2013       MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::IDRegClass);
2014   MIRBuilder.getMRI()->setType(NewRegister, LLT::pointer(0, 32));
2015   GR.assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());
2016 
2017   // Build GlobalInvocationID global variable with the necessary decorations.
2018   Register Variable = GR.buildGlobalVariable(
2019       NewRegister, PtrType,
2020       getLinkStringForBuiltIn(SPIRV::BuiltIn::GlobalInvocationId), nullptr,
2021       SPIRV::StorageClass::Input, nullptr, true, true,
2022       SPIRV::LinkageType::Import, MIRBuilder, false);
2023 
2024   // Create a new register for the loaded value.
2025   MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2026   Register LoadedRegister = MRI->createVirtualRegister(&SPIRV::IDRegClass);
2027   MIRBuilder.getMRI()->setType(LoadedRegister, LLT::pointer(0, 32));
2028   GR.assignSPIRVTypeToVReg(Vec3Ty, LoadedRegister, MIRBuilder.getMF());
2029 
2030   // Load v3uint value from the global variable.
2031   BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
2032       .addDef(LoadedRegister)
2033       .addUse(GR.getSPIRVTypeID(Vec3Ty))
2034       .addUse(Variable);
2035 
2036   // Get the Thread ID index. The operand is expected to be a constant
2037   // immediate value, wrapped in a type assignment.
2038   assert(I.getOperand(2).isReg());
2039   Register ThreadIdReg = I.getOperand(2).getReg();
2040   SPIRVType *ConstTy = this->MRI->getVRegDef(ThreadIdReg);
2041   assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
2042          ConstTy->getOperand(1).isReg());
2043   Register ConstReg = ConstTy->getOperand(1).getReg();
2044   const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
2045   assert(Const && Const->getOpcode() == TargetOpcode::G_CONSTANT);
2046   const llvm::APInt &Val = Const->getOperand(1).getCImm()->getValue();
2047   const uint32_t ThreadId = Val.getZExtValue();
2048 
2049   // Extract the thread ID from the loaded vector value.
2050   MachineBasicBlock &BB = *I.getParent();
2051   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
2052                  .addDef(ResVReg)
2053                  .addUse(GR.getSPIRVTypeID(ResType))
2054                  .addUse(LoadedRegister)
2055                  .addImm(ThreadId);
2056   return MIB.constrainAllUses(TII, TRI, RBI);
2057 }
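// Condensed sketch of the sequence built above (ids hypothetical):
//   %gid_var = OpVariable %ptr_Input_v3uint Input  ; BuiltIn GlobalInvocationId
//   %gid     = OpLoad %v3uint %gid_var
//   %tid     = OpCompositeExtract %uint %gid <dimension index>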
2058 
2059 namespace llvm {
2060 InstructionSelector *
2061 createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
2062                                const SPIRVSubtarget &Subtarget,
2063                                const RegisterBankInfo &RBI) {
2064   return new SPIRVInstructionSelector(TM, Subtarget, RBI);
2065 }
2066 } // namespace llvm
2067