//===- SPIRVInstructionSelector.cpp ------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the targeting of the InstructionSelector class for
// SPIRV.
// TODO: This should be generated by TableGen.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/SPIRVBaseInfo.h"
#include "MCTargetDesc/SPIRVMCTargetDesc.h"
#include "SPIRV.h"
#include "SPIRVGlobalRegistry.h"
#include "SPIRVInstrInfo.h"
#include "SPIRVRegisterBankInfo.h"
#include "SPIRVRegisterInfo.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "spirv-isel"

using namespace llvm;
namespace CL = SPIRV::OpenCLExtInst;
namespace GL = SPIRV::GLSLExtInst;

using ExtInstList =
    std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class SPIRVInstructionSelector : public InstructionSelector {
  const SPIRVSubtarget &STI;
  const SPIRVInstrInfo &TII;
  const SPIRVRegisterInfo &TRI;
  const RegisterBankInfo &RBI;
  SPIRVGlobalRegistry &GR;
  MachineRegisterInfo *MRI;
  MachineFunction *HasVRegsReset = nullptr;

  /// We need to keep track of the number we give to anonymous global values to
  /// generate the same name every time this is needed.
  mutable DenseMap<const GlobalValue *, unsigned> UnnamedGlobalIDs;

public:
  SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                           const SPIRVSubtarget &ST,
                           const RegisterBankInfo &RBI);
  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override;
  // Common selection code. Instruction-specific selection occurs in spvSelect.
  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

private:
  void resetVRegsType(MachineFunction &MF);

  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // All instruction-specific selection that didn't happen in "select()".
  // It is basically a large switch/case delegating to all the other select
  // methods.
  bool spvSelect(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectGlobalValue(Register ResVReg, MachineInstr &I,
                         const MachineInstr *Init = nullptr) const;

  bool selectUnOpWithSrc(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I, Register SrcReg,
                         unsigned Opcode) const;
  bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  unsigned Opcode) const;

  bool selectBitcast(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;

  bool selectLoad(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectStore(MachineInstr &I) const;

  bool selectStackSave(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectStackRestore(MachineInstr &I) const;

  bool selectMemOperation(Register ResVReg, MachineInstr &I) const;

  bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I, unsigned NewOpcode,
                       unsigned NegateOpcode = 0) const;

  bool selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectFence(MachineInstr &I) const;

  bool selectAddrSpaceCast(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectAnyOrAll(Register ResVReg, const SPIRVType *ResType,
                      MachineInstr &I, unsigned OpType) const;

  bool selectAll(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectAny(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectBitreverse(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectBuildVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;
  bool selectSplatVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectCmp(Register ResVReg, const SPIRVType *ResType,
                 unsigned comparisonOpcode, MachineInstr &I) const;
  bool selectCross(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;
  bool selectICmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectFCmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  bool selectSign(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  bool selectFloatDot(Register ResVReg, const SPIRVType *ResType,
                      MachineInstr &I) const;

  bool selectOverflowArith(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I, unsigned Opcode) const;

  bool selectIntegerDot(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  void renderImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                   int OpIdx) const;
  void renderFImm64(MachineInstrBuilder &MIB, const MachineInstr &I,
                    int OpIdx) const;

  bool selectConst(Register ResVReg, const SPIRVType *ResType, const APInt &Imm,
                   MachineInstr &I) const;

  bool selectSelect(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                    bool IsSigned) const;
  bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  bool IsSigned, unsigned Opcode) const;
  bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                 bool IsSigned) const;

  bool selectTrunc(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectIntToBool(Register IntReg, Register ResVReg, MachineInstr &I,
                       const SPIRVType *intTy, const SPIRVType *boolTy) const;

  bool selectOpUndef(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;
  bool selectFreeze(Register ResVReg, const SPIRVType *ResType,
                    MachineInstr &I) const;
  bool selectIntrinsic(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractVal(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertVal(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractElt(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertElt(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectGEP(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectFrameIndex(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectAllocaArray(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectBranch(MachineInstr &I) const;
  bool selectBranchCond(MachineInstr &I) const;

  bool selectPhi(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst,
                     GL::GLSLExtInst GLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, const ExtInstList &ExtInsts) const;

  bool selectLog10(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectSaturate(Register ResVReg, const SPIRVType *ResType,
                      MachineInstr &I) const;

  bool selectSpvThreadId(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectWaveReadLaneAt(Register ResVReg, const SPIRVType *ResType,
                            MachineInstr &I) const;

  bool selectUnmergeValues(MachineInstr &I) const;

  void selectHandleFromBinding(Register &ResVReg, const SPIRVType *ResType,
                               MachineInstr &I) const;

  // Utilities
  Register buildI32Constant(uint32_t Val, MachineInstr &I,
                            const SPIRVType *ResType = nullptr) const;

  Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildZerosValF(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildOnesVal(bool AllOnes, const SPIRVType *ResType,
                        MachineInstr &I) const;
  Register buildOnesValF(const SPIRVType *ResType, MachineInstr &I) const;

  bool wrapIntoSpecConstantOp(MachineInstr &I,
                              SmallVector<Register> &CompositeArgs) const;

  Register getUcharPtrTypeReg(MachineInstr &I,
                              SPIRV::StorageClass::StorageClass SC) const;
  MachineInstrBuilder buildSpecConstantOp(MachineInstr &I, Register Dest,
                                          Register Src, Register DestType,
                                          uint32_t Opcode) const;
  MachineInstrBuilder buildConstGenericPtr(MachineInstr &I, Register SrcPtr,
                                           SPIRVType *SrcPtrTy) const;
  Register buildPointerToResource(const SPIRVType *ResType, uint32_t Set,
                                  uint32_t Binding, uint32_t ArraySize,
                                  MachineIRBuilder MIRBuilder) const;
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                                                   const SPIRVSubtarget &ST,
                                                   const RegisterBankInfo &RBI)
    : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                       CodeGenCoverage *CoverageInfo,
                                       ProfileSummaryInfo *PSI,
                                       BlockFrequencyInfo *BFI) {
  MRI = &MF.getRegInfo();
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}

// Ensure that register classes correspond to pattern matching rules.
void SPIRVInstructionSelector::resetVRegsType(MachineFunction &MF) {
  if (HasVRegsReset == &MF)
    return;
  HasVRegsReset = &MF;

  MachineRegisterInfo &MRI = MF.getRegInfo();
  for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
    Register Reg = Register::index2VirtReg(I);
    LLT RegType = MRI.getType(Reg);
    if (RegType.isScalar())
      MRI.setType(Reg, LLT::scalar(64));
    else if (RegType.isPointer())
      MRI.setType(Reg, LLT::pointer(0, 64));
    else if (RegType.isVector())
      MRI.setType(Reg, LLT::fixed_vector(2, LLT::scalar(64)));
  }
  for (const auto &MBB : MF) {
    for (const auto &MI : MBB) {
      if (MI.getOpcode() != SPIRV::ASSIGN_TYPE)
        continue;
      Register DstReg = MI.getOperand(0).getReg();
      LLT DstType = MRI.getType(DstReg);
      Register SrcReg = MI.getOperand(1).getReg();
      LLT SrcType = MRI.getType(SrcReg);
      if (DstType != SrcType)
        MRI.setType(DstReg, MRI.getType(SrcReg));

      const TargetRegisterClass *DstRC = MRI.getRegClassOrNull(DstReg);
      const TargetRegisterClass *SrcRC = MRI.getRegClassOrNull(SrcReg);
      if (DstRC != SrcRC && SrcRC)
        MRI.setRegClass(DstReg, SrcRC);
    }
  }
}
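
// For illustration (not in the original source): after resetVRegsType, every
// scalar vreg has type s64, every pointer vreg p0 with 64-bit width, and
// every vector vreg <2 x s64>, regardless of the original LLT. A vreg created
// as LLT::scalar(32), for example, is retyped to LLT::scalar(64) so that the
// tblgen-erated patterns, which are written against these canonical SPIR-V ID
// types, can match uniformly.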

static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI);

// Defined in SPIRVLegalizerInfo.cpp.
extern bool isTypeFoldingSupported(unsigned Opcode);

bool SPIRVInstructionSelector::select(MachineInstr &I) {
  resetVRegsType(*I.getParent()->getParent());

  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  const unsigned Opcode = I.getOpcode();
  // If it's not a GMIR instruction, we've selected it already.
  if (!isPreISelGenericOpcode(Opcode)) {
    if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed anymore.
      Register DstReg = I.getOperand(0).getReg();
      Register SrcReg = I.getOperand(1).getReg();
      auto *Def = MRI->getVRegDef(SrcReg);
      if (isTypeFoldingSupported(Def->getOpcode())) {
        bool Res = selectImpl(I, *CoverageInfo);
        LLVM_DEBUG({
          if (!Res && Def->getOpcode() != TargetOpcode::G_CONSTANT) {
            dbgs() << "Unexpected pattern in ASSIGN_TYPE.\nInstruction: ";
            I.print(dbgs());
          }
        });
        assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
        if (Res)
          return Res;
      }
      MRI->setRegClass(SrcReg, MRI->getRegClass(DstReg));
      MRI->replaceRegWith(SrcReg, DstReg);
      I.removeFromParent();
      return true;
    } else if (I.getNumDefs() == 1) {
      // Make all vregs 64 bits (for SPIR-V IDs).
      MRI->setType(I.getOperand(0).getReg(), LLT::scalar(64));
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
    return false;
  }

  // Common code for getting the return reg+type and removing the selected
  // instruction from its parent occurs here. Instruction-specific selection
  // happens in spvSelect().
  bool HasDefs = I.getNumDefs() > 0;
  Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
  assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  if (spvSelect(ResVReg, ResType, I)) {
    if (HasDefs) // Make all vregs 64 bits (for SPIR-V IDs).
      for (unsigned i = 0; i < I.getNumDefs(); ++i)
        MRI->setType(I.getOperand(i).getReg(), LLT::scalar(64));
    I.removeFromParent();
    return true;
  }
  return false;
}

static bool mayApplyGenericSelection(unsigned Opcode) {
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return false;
  case TargetOpcode::G_SADDO:
  case TargetOpcode::G_SSUBO:
    return true;
  }
  return isTypeFoldingSupported(Opcode);
}

bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const unsigned Opcode = I.getOpcode();
  if (mayApplyGenericSelection(Opcode))
    return selectImpl(I, *CoverageInfo);
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);
  case TargetOpcode::G_FREEZE:
    return selectFreeze(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectBuildVector(ResVReg, ResType, I);
  case TargetOpcode::G_SPLAT_VECTOR:
    return selectSplatVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMSET:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
  case TargetOpcode::G_SMIN:
    return selectExtInst(ResVReg, ResType, I, CL::s_min, GL::SMin);
  case TargetOpcode::G_UMIN:
    return selectExtInst(ResVReg, ResType, I, CL::u_min, GL::UMin);

  case TargetOpcode::G_SMAX:
    return selectExtInst(ResVReg, ResType, I, CL::s_max, GL::SMax);
  case TargetOpcode::G_UMAX:
    return selectExtInst(ResVReg, ResType, I, CL::u_max, GL::UMax);

  case TargetOpcode::G_FMA:
    return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma);

  case TargetOpcode::G_FPOW:
    return selectExtInst(ResVReg, ResType, I, CL::pow, GL::Pow);
  case TargetOpcode::G_FPOWI:
    return selectExtInst(ResVReg, ResType, I, CL::pown);

  case TargetOpcode::G_FEXP:
    return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
  case TargetOpcode::G_FEXP2:
    return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);

  case TargetOpcode::G_FLOG:
    return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
  case TargetOpcode::G_FLOG2:
    return selectExtInst(ResVReg, ResType, I, CL::log2, GL::Log2);
  case TargetOpcode::G_FLOG10:
    return selectLog10(ResVReg, ResType, I);

  case TargetOpcode::G_FABS:
    return selectExtInst(ResVReg, ResType, I, CL::fabs, GL::FAbs);
  case TargetOpcode::G_ABS:
    return selectExtInst(ResVReg, ResType, I, CL::s_abs, GL::SAbs);

  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmin, GL::NMin);
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMAXIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmax, GL::NMax);

  case TargetOpcode::G_FCOPYSIGN:
    return selectExtInst(ResVReg, ResType, I, CL::copysign);

  case TargetOpcode::G_FCEIL:
    return selectExtInst(ResVReg, ResType, I, CL::ceil, GL::Ceil);
  case TargetOpcode::G_FFLOOR:
    return selectExtInst(ResVReg, ResType, I, CL::floor, GL::Floor);

  case TargetOpcode::G_FCOS:
    return selectExtInst(ResVReg, ResType, I, CL::cos, GL::Cos);
  case TargetOpcode::G_FSIN:
    return selectExtInst(ResVReg, ResType, I, CL::sin, GL::Sin);
  case TargetOpcode::G_FTAN:
    return selectExtInst(ResVReg, ResType, I, CL::tan, GL::Tan);
  case TargetOpcode::G_FACOS:
    return selectExtInst(ResVReg, ResType, I, CL::acos, GL::Acos);
  case TargetOpcode::G_FASIN:
    return selectExtInst(ResVReg, ResType, I, CL::asin, GL::Asin);
  case TargetOpcode::G_FATAN:
    return selectExtInst(ResVReg, ResType, I, CL::atan, GL::Atan);
  case TargetOpcode::G_FATAN2:
    return selectExtInst(ResVReg, ResType, I, CL::atan2, GL::Atan2);
  case TargetOpcode::G_FCOSH:
    return selectExtInst(ResVReg, ResType, I, CL::cosh, GL::Cosh);
  case TargetOpcode::G_FSINH:
    return selectExtInst(ResVReg, ResType, I, CL::sinh, GL::Sinh);
  case TargetOpcode::G_FTANH:
    return selectExtInst(ResVReg, ResType, I, CL::tanh, GL::Tanh);

  case TargetOpcode::G_FSQRT:
    return selectExtInst(ResVReg, ResType, I, CL::sqrt, GL::Sqrt);

  case TargetOpcode::G_CTTZ:
  case TargetOpcode::G_CTTZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::ctz);
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTLZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::clz);

  case TargetOpcode::G_INTRINSIC_ROUND:
    return selectExtInst(ResVReg, ResType, I, CL::round, GL::Round);
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return selectExtInst(ResVReg, ResType, I, CL::trunc, GL::Trunc);
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FNEARBYINT:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);

  case TargetOpcode::G_SMULH:
    return selectExtInst(ResVReg, ResType, I, CL::s_mul_hi);
  case TargetOpcode::G_UMULH:
    return selectExtInst(ResVReg, ResType, I, CL::u_mul_hi);

  case TargetOpcode::G_SADDSAT:
    return selectExtInst(ResVReg, ResType, I, CL::s_add_sat);
  case TargetOpcode::G_UADDSAT:
    return selectExtInst(ResVReg, ResType, I, CL::u_add_sat);
  case TargetOpcode::G_SSUBSAT:
    return selectExtInst(ResVReg, ResType, I, CL::s_sub_sat);
  case TargetOpcode::G_USUBSAT:
    return selectExtInst(ResVReg, ResType, I, CL::u_sub_sat);

  case TargetOpcode::G_UADDO:
    return selectOverflowArith(ResVReg, ResType, I,
                               ResType->getOpcode() == SPIRV::OpTypeVector
                                   ? SPIRV::OpIAddCarryV
                                   : SPIRV::OpIAddCarryS);
  case TargetOpcode::G_USUBO:
    return selectOverflowArith(ResVReg, ResType, I,
                               ResType->getOpcode() == SPIRV::OpTypeVector
                                   ? SPIRV::OpISubBorrowV
                                   : SPIRV::OpISubBorrowS);
  case TargetOpcode::G_UMULO:
    return selectOverflowArith(ResVReg, ResType, I, SPIRV::OpUMulExtended);
  case TargetOpcode::G_SMULO:
    return selectOverflowArith(ResVReg, ResType, I, SPIRV::OpSMulExtended);

  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectBitcast(ResVReg, ResType, I);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);
  case TargetOpcode::G_PTR_ADD: {
    // Currently, G_PTR_ADD is applied only to global variables.
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    Register GV = I.getOperand(1).getReg();
    MachineRegisterInfo::def_instr_iterator II = MRI->def_instr_begin(GV);
    (void)II;
    assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
            (*II).getOpcode() == TargetOpcode::COPY ||
            (*II).getOpcode() == SPIRV::OpVariable) &&
           isImm(I.getOperand(2), MRI));
    // It may be the initialization of a global variable.
    bool IsGVInit = false;
    for (MachineRegisterInfo::use_instr_iterator
             UseIt = MRI->use_instr_begin(I.getOperand(0).getReg()),
             UseEnd = MRI->use_instr_end();
         UseIt != UseEnd; UseIt = std::next(UseIt)) {
      if ((*UseIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
          (*UseIt).getOpcode() == SPIRV::OpVariable) {
        IsGVInit = true;
        break;
      }
    }
    MachineBasicBlock &BB = *I.getParent();
    if (!IsGVInit) {
      SPIRVType *GVType = GR.getSPIRVTypeForVReg(GV);
      SPIRVType *GVPointeeType = GR.getPointeeType(GVType);
      SPIRVType *ResPointeeType = GR.getPointeeType(ResType);
      if (GVPointeeType && ResPointeeType && GVPointeeType != ResPointeeType) {
        // Build a new virtual register that is associated with the required
        // data type.
        Register NewVReg = MRI->createGenericVirtualRegister(MRI->getType(GV));
        MRI->setRegClass(NewVReg, MRI->getRegClass(GV));
        // Having a correctly typed base, we are ready to build the actually
        // required GEP. It may not be a constant though, because all operands
        // of OpSpecConstantOp must originate from other constant instructions,
        // and only the AccessChain named opcodes accept a global OpVariable
        // instruction. We can't use an AccessChain opcode because of the type
        // mismatch between result and base types.
        if (!GR.isBitcastCompatible(ResType, GVType))
          report_fatal_error(
              "incompatible result and operand types in a bitcast");
        Register ResTypeReg = GR.getSPIRVTypeID(ResType);
        MachineInstrBuilder MIB =
            BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitcast))
                .addDef(NewVReg)
                .addUse(ResTypeReg)
                .addUse(GV);
        return MIB.constrainAllUses(TII, TRI, RBI) &&
               BuildMI(BB, I, I.getDebugLoc(),
                       TII.get(STI.isVulkanEnv()
                                   ? SPIRV::OpInBoundsAccessChain
                                   : SPIRV::OpInBoundsPtrAccessChain))
                   .addDef(ResVReg)
                   .addUse(ResTypeReg)
                   .addUse(NewVReg)
                   .addUse(I.getOperand(2).getReg())
                   .constrainAllUses(TII, TRI, RBI);
      } else {
        return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(
                static_cast<uint32_t>(SPIRV::Opcode::InBoundsPtrAccessChain))
            .addUse(GV)
            .addUse(I.getOperand(2).getReg())
            .constrainAllUses(TII, TRI, RBI);
      }
    }
    // It's possible to translate G_PTR_ADD to OpSpecConstantOp: either to
    // initialize a global variable with a constant expression (e.g., the test
    // case opencl/basic/progvar_prog_scope_init.ll), or for another use case
    // that requires a constant pointer expression.
    Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addImm(static_cast<uint32_t>(
                       SPIRV::Opcode::InBoundsPtrAccessChain))
                   .addUse(GV)
                   .addUse(Idx)
                   .addUse(I.getOperand(2).getReg());
    return MIB.constrainAllUses(TII, TRI, RBI);
  }

  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_ATOMICRMW_FADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT);
  case TargetOpcode::G_ATOMICRMW_FSUB:
    // Translate G_ATOMICRMW_FSUB to OpAtomicFAddEXT with a negated value
    // operand.
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT,
                           SPIRV::OpFNegate);
  case TargetOpcode::G_ATOMICRMW_FMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMinEXT);
  case TargetOpcode::G_ATOMICRMW_FMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMaxEXT);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  case TargetOpcode::G_STACKSAVE:
    return selectStackSave(ResVReg, ResType, I);
  case TargetOpcode::G_STACKRESTORE:
    return selectStackRestore(I);

  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I);

  // Discard generic opcodes for intrinsics that we do not expect to represent
  // actual code after lowering, or that are not implemented but should not
  // crash when found in a user's LLVM IR input.
  case TargetOpcode::G_TRAP:
  case TargetOpcode::G_DEBUGTRAP:
  case TargetOpcode::G_UBSANTRAP:
  case TargetOpcode::DBG_LABEL:
    return true;

  default:
    return false;
  }
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst) const {
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst,
                                             GL::GLSLExtInst GLInst) const {
  ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
                          {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
  return selectExtInst(ResVReg, ResType, I, ExtInsts);
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             const ExtInstList &Insts) const {
  for (const auto &Ex : Insts) {
    SPIRV::InstructionSet::InstructionSet Set = Ex.first;
    uint32_t Opcode = Ex.second;
    if (STI.canUseExtInstSet(Set)) {
      MachineBasicBlock &BB = *I.getParent();
      auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
                     .addDef(ResVReg)
                     .addUse(GR.getSPIRVTypeID(ResType))
                     .addImm(static_cast<uint32_t>(Set))
                     .addImm(Opcode);
      const unsigned NumOps = I.getNumOperands();
      unsigned Index = 1;
      if (Index < NumOps &&
          I.getOperand(Index).getType() ==
              MachineOperand::MachineOperandType::MO_IntrinsicID)
        Index = 2;
      for (; Index < NumOps; ++Index)
        MIB.add(I.getOperand(Index));
      return MIB.constrainAllUses(TII, TRI, RBI);
    }
  }
  return false;
}
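
// Illustrative sketch (not from the original source): G_FMA is selected via
// {OpenCL_std, CL::fma} on OpenCL targets, so the emitted OpExtInst
// disassembles roughly as
//   %res = OpExtInst %float %1 fma %a %b %c
// where %1 is the result of OpExtInstImport "OpenCL.std". The first
// instruction set in Insts that the subtarget supports wins; if none is
// supported, selection fails.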

bool SPIRVInstructionSelector::selectUnOpWithSrc(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I,
                                                 Register SrcReg,
                                                 unsigned Opcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I,
                                          unsigned Opcode) const {
  if (STI.isOpenCLEnv() && I.getOperand(1).isReg()) {
    Register SrcReg = I.getOperand(1).getReg();
    bool IsGV = false;
    for (MachineRegisterInfo::def_instr_iterator DefIt =
             MRI->def_instr_begin(SrcReg);
         DefIt != MRI->def_instr_end(); DefIt = std::next(DefIt)) {
      if ((*DefIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
        IsGV = true;
        break;
      }
    }
    if (IsGV) {
      uint32_t SpecOpcode = 0;
      switch (Opcode) {
      case SPIRV::OpConvertPtrToU:
        SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertPtrToU);
        break;
      case SPIRV::OpConvertUToPtr:
        SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertUToPtr);
        break;
      }
      if (SpecOpcode)
        return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                       TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(SpecOpcode)
            .addUse(SrcReg)
            .constrainAllUses(TII, TRI, RBI);
    }
  }
  return selectUnOpWithSrc(ResVReg, ResType, I, I.getOperand(1).getReg(),
                           Opcode);
}
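
// Illustrative sketch (not from the original source): on an OpenCL target, a
// G_PTRTOINT whose source is defined by G_GLOBAL_VALUE is folded into a
// constant expression,
//   %res = OpSpecConstantOp %ulong ConvertPtrToU %global_var
// instead of a runtime OpConvertPtrToU, so the result stays usable in
// constant initializers.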

bool SPIRVInstructionSelector::selectBitcast(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  Register OpReg = I.getOperand(1).getReg();
  SPIRVType *OpType = OpReg.isValid() ? GR.getSPIRVTypeForVReg(OpReg) : nullptr;
  if (!GR.isBitcastCompatible(ResType, OpType))
    report_fatal_error("incompatible result and operand types in a bitcast");
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}

static void addMemoryOperands(MachineMemOperand *MemOp,
                              MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    MIB.addImm(SpvMemOp);
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
      MIB.addImm(MemOp->getAlign().value());
  }
}

static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (Flags & MachineMemOperand::Flags::MOVolatile)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (Flags & MachineMemOperand::Flags::MONonTemporal)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
    MIB.addImm(SpvMemOp);
}
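
// Illustrative sketch (not from the original source): for a volatile load
// with 4-byte alignment, the MachineMemOperand overload appends the mask
// Volatile|Aligned followed by the literal 4, so the final instruction
// disassembles roughly as
//   %res = OpLoad %float %ptr Volatile|Aligned 4
// Note that Align::value() is at least 1, so any access carrying a memory
// operand gets the Aligned bit here.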

bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(Ptr);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
                 .addUse(Ptr)
                 .addUse(StoreVal);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackSave(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stacksave intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSaveMemoryINTEL))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stackrestore intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  if (!I.getOperand(0).isReg())
    return false;
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpRestoreMemoryINTEL))
      .addUse(I.getOperand(0).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Register SrcReg = I.getOperand(1).getReg();
  if (I.getOpcode() == TargetOpcode::G_MEMSET) {
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI);
    unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI);
    SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num, I, TII);
    Register Const = GR.getOrCreateConstIntArray(Val, Num, I, ArrTy, TII);
    SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
        ArrTy, I, TII, SPIRV::StorageClass::UniformConstant);
    // TODO: check if we have such GV, add init, use buildGlobalVariable.
    Function &CurFunction = GR.CurMF->getFunction();
    Type *LLVMArrTy =
        ArrayType::get(IntegerType::get(CurFunction.getContext(), 8), Num);
    // Module takes ownership of the global var.
    GlobalVariable *GV = new GlobalVariable(*CurFunction.getParent(), LLVMArrTy,
                                            true, GlobalValue::InternalLinkage,
                                            Constant::getNullValue(LLVMArrTy));
    Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
    GR.add(GV, GR.CurMF, VarReg);

    buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {});
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
        .addDef(VarReg)
        .addUse(GR.getSPIRVTypeID(VarTy))
        .addImm(SPIRV::StorageClass::UniformConstant)
        .addUse(Const)
        .constrainAllUses(TII, TRI, RBI);
    SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
        ValTy, I, TII, SPIRV::StorageClass::UniformConstant);
    SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
    selectUnOpWithSrc(SrcReg, SourceTy, I, VarReg, SPIRV::OpBitcast);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addUse(I.getOperand(0).getReg())
                 .addUse(SrcReg)
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
    addMemoryOperands(*I.memoperands_begin(), MIB);
  bool Result = MIB.constrainAllUses(TII, TRI, RBI);
  if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg())
    BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
        .addUse(MIB->getOperand(0).getReg());
  return Result;
}
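
// Illustrative sketch (not from the original source): a G_MEMSET such as
// memset(p, 42, 16) is modeled as a copy from a constant byte array: a
// 16-element i8 array constant in UniformConstant storage is materialized as
// an initialized OpVariable, bitcast to an i8 pointer, and then copied with
//   OpCopyMemorySized %p %casted_var %16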

bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode,
                                               unsigned NegateOpcode) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope = static_cast<uint32_t>(getMemScope(
      GR.CurMF->getFunction().getContext(), MemOp->getSyncScopeID()));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(1).getReg();
  // TODO: Changed as it's implemented in the translator. See test/atomicrmw.ll
  // auto ScSem =
  // getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr));
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);

  // Accumulate with &= so a failed sub-selection cannot be masked by a later
  // successful one.
  bool Result = true;
  Register ValueReg = I.getOperand(2).getReg();
  if (NegateOpcode != 0) {
    // A translation with a negated value operand was requested.
    Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
    Result &= selectUnOpWithSrc(TmpReg, ResType, I, ValueReg, NegateOpcode);
    ValueReg = TmpReg;
  }

  Result &= BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(Ptr)
                .addUse(ScopeReg)
                .addUse(MemSemReg)
                .addUse(ValueReg)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}
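
// Illustrative sketch (not from the original source): an `atomicrmw add`
// with device scope and acq_rel ordering lowers roughly to
//   %scope = OpConstant %uint 1   ; Scope Device
//   %sem   = OpConstant %uint 8   ; Semantics AcquireRelease
//   %res   = OpAtomicIAdd %uint %ptr %scope %sem %val
// with scope and semantics passed as ids of i32 constants built above.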

bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
  unsigned ArgI = I.getNumOperands() - 1;
  Register SrcReg =
      I.getOperand(ArgI).isReg() ? I.getOperand(ArgI).getReg() : Register(0);
  SPIRVType *DefType =
      SrcReg.isValid() ? GR.getSPIRVTypeForVReg(SrcReg) : nullptr;
  if (!DefType || DefType->getOpcode() != SPIRV::OpTypeVector)
    report_fatal_error(
        "cannot select G_UNMERGE_VALUES with a non-vector argument");

  SPIRVType *ScalarType =
      GR.getSPIRVTypeForVReg(DefType->getOperand(1).getReg());
  MachineBasicBlock &BB = *I.getParent();
  bool Res = true;
  for (unsigned i = 0; i < I.getNumDefs(); ++i) {
    Register ResVReg = I.getOperand(i).getReg();
    SPIRVType *ResType = GR.getSPIRVTypeForVReg(ResVReg);
    if (!ResType) {
      // There were no "assign type" actions, so let's fix this now.
      ResType = ScalarType;
      MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
      MRI->setType(ResVReg, LLT::scalar(GR.getScalarOrVectorBitWidth(ResType)));
      GR.assignSPIRVTypeToVReg(ResType, ResVReg, *GR.CurMF);
    }
    auto MIB =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(SrcReg)
            .addImm(static_cast<int64_t>(i));
    Res &= MIB.constrainAllUses(TII, TRI, RBI);
  }
  return Res;
}
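
// Illustrative sketch (not from the original source): unmerging a
// two-element vector, %lo, %hi = G_UNMERGE_VALUES %vec, becomes one
// OpCompositeExtract per scalar definition:
//   %lo = OpCompositeExtract %float %vec 0
//   %hi = OpCompositeExtract %float %vec 1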

bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
  AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem, I);
  SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
  uint32_t Scope = static_cast<uint32_t>(
      getMemScope(GR.CurMF->getFunction().getContext(), Ord));
  Register ScopeReg = buildI32Constant(Scope, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .constrainAllUses(TII, TRI, RBI);
}
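
// Illustrative sketch (not from the original source): an IR `fence seq_cst`
// lowers roughly to
//   OpMemoryBarrier %scope %sem
// where %scope and %sem are i32 constants holding the memory scope and the
// SequentiallyConsistent memory-semantics mask derived above.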

bool SPIRVInstructionSelector::selectOverflowArith(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I,
                                                   unsigned Opcode) const {
  Type *ResTy = nullptr;
  StringRef ResName;
  if (!GR.findValueAttrs(&I, ResTy, ResName))
    report_fatal_error(
        "Not enough info to select the arithmetic with overflow instruction");
  if (!ResTy || !ResTy->isStructTy())
    report_fatal_error("Expected a struct type result for the arithmetic "
                       "with overflow instruction");
  // "Result Type must be from OpTypeStruct. The struct must have two members,
  // and the two members must be the same type."
  Type *ResElemTy = cast<StructType>(ResTy)->getElementType(0);
  ResTy = StructType::create(SmallVector<Type *, 2>{ResElemTy, ResElemTy});
  // Build SPIR-V types and constant(s) if needed.
  MachineIRBuilder MIRBuilder(I);
  SPIRVType *StructType = GR.getOrCreateSPIRVType(
      ResTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
  assert(I.getNumDefs() > 1 && "Not enough operands");
  SPIRVType *BoolType = GR.getOrCreateSPIRVBoolType(I, TII);
  unsigned N = GR.getScalarOrVectorComponentCount(ResType);
  if (N > 1)
    BoolType = GR.getOrCreateSPIRVVectorType(BoolType, N, I, TII);
  Register BoolTypeReg = GR.getSPIRVTypeID(BoolType);
  Register ZeroReg = buildZerosVal(ResType, I);
  // A new virtual register to store the result struct.
  Register StructVReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
  MRI->setRegClass(StructVReg, &SPIRV::IDRegClass);
  // Build the result name if needed.
  if (!ResName.empty())
    buildOpName(StructVReg, ResName, MIRBuilder);
  // Build the arithmetic with overflow instruction.
  MachineBasicBlock &BB = *I.getParent();
  auto MIB =
      BuildMI(BB, MIRBuilder.getInsertPt(), I.getDebugLoc(), TII.get(Opcode))
          .addDef(StructVReg)
          .addUse(GR.getSPIRVTypeID(StructType));
  for (unsigned i = I.getNumDefs(); i < I.getNumOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  bool Status = MIB.constrainAllUses(TII, TRI, RBI);
  // Build instructions to extract fields of the instruction's result.
  // A new virtual register to store the higher part of the result struct.
  Register HigherVReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
  MRI->setRegClass(HigherVReg, &SPIRV::iIDRegClass);
  for (unsigned i = 0; i < I.getNumDefs(); ++i) {
    auto MIB =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
            .addDef(i == 1 ? HigherVReg : I.getOperand(i).getReg())
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(StructVReg)
            .addImm(i);
    Status &= MIB.constrainAllUses(TII, TRI, RBI);
  }
  // Build a boolean value from the higher part.
  Status &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
                .addDef(I.getOperand(1).getReg())
                .addUse(BoolTypeReg)
                .addUse(HigherVReg)
                .addUse(ZeroReg)
                .constrainAllUses(TII, TRI, RBI);
  return Status;
}
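
// Illustrative sketch (not from the original source): for a scalar
// llvm.uadd.with.overflow.i32 this emits roughly
//   %pair  = OpIAddCarry %struct %a %b
//   %sum   = OpCompositeExtract %uint %pair 0
//   %carry = OpCompositeExtract %uint %pair 1
//   %ovf   = OpINotEqual %bool %carry %zero
// where %struct is an OpTypeStruct with two identical integer members.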
1170 
1171 bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
1172                                                    const SPIRVType *ResType,
1173                                                    MachineInstr &I) const {
1174   Register ScopeReg;
1175   Register MemSemEqReg;
1176   Register MemSemNeqReg;
1177   Register Ptr = I.getOperand(2).getReg();
1178   if (!isa<GIntrinsic>(I)) {
1179     assert(I.hasOneMemOperand());
1180     const MachineMemOperand *MemOp = *I.memoperands_begin();
1181     unsigned Scope = static_cast<uint32_t>(getMemScope(
1182         GR.CurMF->getFunction().getContext(), MemOp->getSyncScopeID()));
1183     ScopeReg = buildI32Constant(Scope, I);
1184 
1185     unsigned ScSem = static_cast<uint32_t>(
1186         getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
1187     AtomicOrdering AO = MemOp->getSuccessOrdering();
1188     unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
1189     MemSemEqReg = buildI32Constant(MemSemEq, I);
1190     AtomicOrdering FO = MemOp->getFailureOrdering();
1191     unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
1192     MemSemNeqReg =
1193         MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
1194   } else {
1195     ScopeReg = I.getOperand(5).getReg();
1196     MemSemEqReg = I.getOperand(6).getReg();
1197     MemSemNeqReg = I.getOperand(7).getReg();
1198   }
1199 
1200   Register Cmp = I.getOperand(3).getReg();
1201   Register Val = I.getOperand(4).getReg();
1202   SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
1203   Register ACmpRes = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1204   const DebugLoc &DL = I.getDebugLoc();
1205   bool Result =
1206       BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
1207           .addDef(ACmpRes)
1208           .addUse(GR.getSPIRVTypeID(SpvValTy))
1209           .addUse(Ptr)
1210           .addUse(ScopeReg)
1211           .addUse(MemSemEqReg)
1212           .addUse(MemSemNeqReg)
1213           .addUse(Val)
1214           .addUse(Cmp)
1215           .constrainAllUses(TII, TRI, RBI);
1216   Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1217   SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
1218   Result &= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
1219                 .addDef(CmpSuccReg)
1220                 .addUse(GR.getSPIRVTypeID(BoolTy))
1221                 .addUse(ACmpRes)
1222                 .addUse(Cmp)
1223                 .constrainAllUses(TII, TRI, RBI);
1224   Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1225   Result &= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
1226                 .addDef(TmpReg)
1227                 .addUse(GR.getSPIRVTypeID(ResType))
1228                 .addUse(ACmpRes)
1229                 .addUse(GR.getOrCreateUndef(I, ResType, TII))
1230                 .addImm(0)
1231                 .constrainAllUses(TII, TRI, RBI);
1232   Result &= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
1233                 .addDef(ResVReg)
1234                 .addUse(GR.getSPIRVTypeID(ResType))
1235                 .addUse(CmpSuccReg)
1236                 .addUse(TmpReg)
1237                 .addImm(1)
1238                 .constrainAllUses(TII, TRI, RBI);
1239   return Result;
1240 }
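// A rough sketch of the sequence emitted above for a cmpxchg returning the
// {value, success} pair (the result IDs below are illustrative only):
//   %old = OpAtomicCompareExchange %i32 %ptr %scope %eq_sem %neq_sem %val %cmp
//   %ok  = OpIEqual %bool %old %cmp
//   %tmp = OpCompositeInsert %pair %old %undef 0
//   %res = OpCompositeInsert %pair %ok %tmp 1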
1241 
1242 static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
1243   switch (SC) {
1244   case SPIRV::StorageClass::Workgroup:
1245   case SPIRV::StorageClass::CrossWorkgroup:
1246   case SPIRV::StorageClass::Function:
1247     return true;
1248   default:
1249     return false;
1250   }
1251 }
1252 
1253 static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC) {
1254   switch (SC) {
1255   case SPIRV::StorageClass::DeviceOnlyINTEL:
1256   case SPIRV::StorageClass::HostOnlyINTEL:
1257     return true;
1258   default:
1259     return false;
1260   }
1261 }
1262 
1263 // Returns true if ResVReg is referred to only from global variables and OpName instructions.
1264 static bool isASCastInGVar(MachineRegisterInfo *MRI, Register ResVReg) {
1265   bool IsGRef = false;
1266   bool IsAllowedRefs =
1267       std::all_of(MRI->use_instr_begin(ResVReg), MRI->use_instr_end(),
1268                   [&IsGRef](auto const &It) {
1269                     unsigned Opcode = It.getOpcode();
1270                     if (Opcode == SPIRV::OpConstantComposite ||
1271                         Opcode == SPIRV::OpVariable ||
1272                         isSpvIntrinsic(It, Intrinsic::spv_init_global))
1273                       return IsGRef = true;
1274                     return Opcode == SPIRV::OpName;
1275                   });
1276   return IsAllowedRefs && IsGRef;
1277 }
1278 
1279 Register SPIRVInstructionSelector::getUcharPtrTypeReg(
1280     MachineInstr &I, SPIRV::StorageClass::StorageClass SC) const {
1281   return GR.getSPIRVTypeID(GR.getOrCreateSPIRVPointerType(
1282       GR.getOrCreateSPIRVIntegerType(8, I, TII), I, TII, SC));
1283 }
1284 
1285 MachineInstrBuilder
1286 SPIRVInstructionSelector::buildSpecConstantOp(MachineInstr &I, Register Dest,
1287                                               Register Src, Register DestType,
1288                                               uint32_t Opcode) const {
1289   return BuildMI(*I.getParent(), I, I.getDebugLoc(),
1290                  TII.get(SPIRV::OpSpecConstantOp))
1291       .addDef(Dest)
1292       .addUse(DestType)
1293       .addImm(Opcode)
1294       .addUse(Src);
1295 }
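// For reference, buildSpecConstantOp produces an instruction of the shape
//   %dst = OpSpecConstantOp %dst_type <opcode literal> %src
// where the literal is the numeric value of the wrapped SPIR-V opcode; this
// file passes PtrCastToGeneric, GenericCastToPtr, and Bitcast below.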
1296 
1297 MachineInstrBuilder
1298 SPIRVInstructionSelector::buildConstGenericPtr(MachineInstr &I, Register SrcPtr,
1299                                                SPIRVType *SrcPtrTy) const {
1300   SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
1301       GR.getPointeeType(SrcPtrTy), I, TII, SPIRV::StorageClass::Generic);
1302   Register Tmp = MRI->createVirtualRegister(&SPIRV::pIDRegClass);
1303   MRI->setType(Tmp, LLT::pointer(storageClassToAddressSpace(
1304                                      SPIRV::StorageClass::Generic),
1305                                  GR.getPointerSize()));
1306   MachineFunction *MF = I.getParent()->getParent();
1307   GR.assignSPIRVTypeToVReg(GenericPtrTy, Tmp, *MF);
1308   MachineInstrBuilder MIB = buildSpecConstantOp(
1309       I, Tmp, SrcPtr, GR.getSPIRVTypeID(GenericPtrTy),
1310       static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric));
1311   GR.add(MIB.getInstr(), MF, Tmp);
1312   return MIB;
1313 }
1314 
1315 // In SPIR-V address space casting can only happen to and from the Generic
1316 // storage class. We can also only cast Workgroup, CrossWorkgroup, or Function
1317 // pointers to and from Generic pointers. As such, we can convert e.g. from
1318 // Workgroup to Function by going via a Generic pointer as an intermediary. All
1319 // other combinations can only be done by a bitcast, and are probably not safe.
1320 bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
1321                                                    const SPIRVType *ResType,
1322                                                    MachineInstr &I) const {
1323   MachineBasicBlock &BB = *I.getParent();
1324   const DebugLoc &DL = I.getDebugLoc();
1325 
1326   Register SrcPtr = I.getOperand(1).getReg();
1327   SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
1328 
1329   // don't generate a cast for a null that may be represented by OpTypeInt
1330   if (SrcPtrTy->getOpcode() != SPIRV::OpTypePointer ||
1331       ResType->getOpcode() != SPIRV::OpTypePointer)
1332     return BuildMI(BB, I, DL, TII.get(TargetOpcode::COPY))
1333         .addDef(ResVReg)
1334         .addUse(SrcPtr)
1335         .constrainAllUses(TII, TRI, RBI);
1336 
1337   SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtrTy);
1338   SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResType);
1339 
1340   if (isASCastInGVar(MRI, ResVReg)) {
1341     // AddrSpaceCast uses within OpVariable and OpConstantComposite instructions
1342     // are expressed by OpSpecConstantOp with an Opcode.
1343     // TODO: maybe insert a check whether the Kernel capability was declared and
1344     // so PtrCastToGeneric/GenericCastToPtr are available.
1345     unsigned SpecOpcode =
1346         DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC)
1347             ? static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric)
1348             : (SrcSC == SPIRV::StorageClass::Generic &&
1349                        isGenericCastablePtr(DstSC)
1350                    ? static_cast<uint32_t>(SPIRV::Opcode::GenericCastToPtr)
1351                    : 0);
1352     // TODO: OpConstantComposite expects i8*, so we are forced to forget a
1353     // correct value of ResType and use general i8* instead. Maybe this should
1354     // be addressed in the emit-intrinsic step to infer a correct
1355     // OpConstantComposite type.
1356     if (SpecOpcode) {
1357       return buildSpecConstantOp(I, ResVReg, SrcPtr,
1358                                  getUcharPtrTypeReg(I, DstSC), SpecOpcode)
1359           .constrainAllUses(TII, TRI, RBI);
1360     } else if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
1361       MachineInstrBuilder MIB = buildConstGenericPtr(I, SrcPtr, SrcPtrTy);
1362       return MIB.constrainAllUses(TII, TRI, RBI) &&
1363              buildSpecConstantOp(
1364                  I, ResVReg, MIB->getOperand(0).getReg(),
1365                  getUcharPtrTypeReg(I, DstSC),
1366                  static_cast<uint32_t>(SPIRV::Opcode::GenericCastToPtr))
1367                  .constrainAllUses(TII, TRI, RBI);
1368     }
1369   }
1370 
1371   // don't generate a cast between identical storage classes
1372   if (SrcSC == DstSC)
1373     return BuildMI(BB, I, DL, TII.get(TargetOpcode::COPY))
1374         .addDef(ResVReg)
1375         .addUse(SrcPtr)
1376         .constrainAllUses(TII, TRI, RBI);
1377 
1378   // Casting from an eligible pointer to Generic.
1379   if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
1380     return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
1381   // Casting from Generic to an eligible pointer.
1382   if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
1383     return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
1384   // Casting between 2 eligible pointers using Generic as an intermediary.
1385   if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
1386     Register Tmp = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1387     SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
1388         GR.getPointeeType(SrcPtrTy), I, TII, SPIRV::StorageClass::Generic);
1389     bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
1390                        .addDef(Tmp)
1391                        .addUse(GR.getSPIRVTypeID(GenericPtrTy))
1392                        .addUse(SrcPtr)
1393                        .constrainAllUses(TII, TRI, RBI);
1394     return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
1395                           .addDef(ResVReg)
1396                           .addUse(GR.getSPIRVTypeID(ResType))
1397                           .addUse(Tmp)
1398                           .constrainAllUses(TII, TRI, RBI);
1399   }
1400 
1401   // Check if instructions from the SPV_INTEL_usm_storage_classes extension may
1402   // be applied
1403   if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::CrossWorkgroup)
1404     return selectUnOp(ResVReg, ResType, I,
1405                       SPIRV::OpPtrCastToCrossWorkgroupINTEL);
1406   if (SrcSC == SPIRV::StorageClass::CrossWorkgroup && isUSMStorageClass(DstSC))
1407     return selectUnOp(ResVReg, ResType, I,
1408                       SPIRV::OpCrossWorkgroupCastToPtrINTEL);
1409   if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::Generic)
1410     return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
1411   if (SrcSC == SPIRV::StorageClass::Generic && isUSMStorageClass(DstSC))
1412     return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
1413 
1414   // Bitcast for pointers requires that the address spaces match.
1415   return false;
1416 }
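// For example, a Workgroup-to-Function cast has no single SPIR-V opcode, so
// the path above emits a two-step sequence (illustrative IDs):
//   %tmp = OpPtrCastToGeneric %generic_ptr_ty %src
//   %res = OpGenericCastToPtr %function_ptr_ty %tmp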
1417 
1418 static unsigned getFCmpOpcode(unsigned PredNum) {
1419   auto Pred = static_cast<CmpInst::Predicate>(PredNum);
1420   switch (Pred) {
1421   case CmpInst::FCMP_OEQ:
1422     return SPIRV::OpFOrdEqual;
1423   case CmpInst::FCMP_OGE:
1424     return SPIRV::OpFOrdGreaterThanEqual;
1425   case CmpInst::FCMP_OGT:
1426     return SPIRV::OpFOrdGreaterThan;
1427   case CmpInst::FCMP_OLE:
1428     return SPIRV::OpFOrdLessThanEqual;
1429   case CmpInst::FCMP_OLT:
1430     return SPIRV::OpFOrdLessThan;
1431   case CmpInst::FCMP_ONE:
1432     return SPIRV::OpFOrdNotEqual;
1433   case CmpInst::FCMP_ORD:
1434     return SPIRV::OpOrdered;
1435   case CmpInst::FCMP_UEQ:
1436     return SPIRV::OpFUnordEqual;
1437   case CmpInst::FCMP_UGE:
1438     return SPIRV::OpFUnordGreaterThanEqual;
1439   case CmpInst::FCMP_UGT:
1440     return SPIRV::OpFUnordGreaterThan;
1441   case CmpInst::FCMP_ULE:
1442     return SPIRV::OpFUnordLessThanEqual;
1443   case CmpInst::FCMP_ULT:
1444     return SPIRV::OpFUnordLessThan;
1445   case CmpInst::FCMP_UNE:
1446     return SPIRV::OpFUnordNotEqual;
1447   case CmpInst::FCMP_UNO:
1448     return SPIRV::OpUnordered;
1449   default:
1450     llvm_unreachable("Unknown predicate type for FCmp");
1451   }
1452 }
1453 
1454 static unsigned getICmpOpcode(unsigned PredNum) {
1455   auto Pred = static_cast<CmpInst::Predicate>(PredNum);
1456   switch (Pred) {
1457   case CmpInst::ICMP_EQ:
1458     return SPIRV::OpIEqual;
1459   case CmpInst::ICMP_NE:
1460     return SPIRV::OpINotEqual;
1461   case CmpInst::ICMP_SGE:
1462     return SPIRV::OpSGreaterThanEqual;
1463   case CmpInst::ICMP_SGT:
1464     return SPIRV::OpSGreaterThan;
1465   case CmpInst::ICMP_SLE:
1466     return SPIRV::OpSLessThanEqual;
1467   case CmpInst::ICMP_SLT:
1468     return SPIRV::OpSLessThan;
1469   case CmpInst::ICMP_UGE:
1470     return SPIRV::OpUGreaterThanEqual;
1471   case CmpInst::ICMP_UGT:
1472     return SPIRV::OpUGreaterThan;
1473   case CmpInst::ICMP_ULE:
1474     return SPIRV::OpULessThanEqual;
1475   case CmpInst::ICMP_ULT:
1476     return SPIRV::OpULessThan;
1477   default:
1478     llvm_unreachable("Unknown predicate type for ICmp");
1479   }
1480 }
1481 
1482 static unsigned getPtrCmpOpcode(unsigned Pred) {
1483   switch (static_cast<CmpInst::Predicate>(Pred)) {
1484   case CmpInst::ICMP_EQ:
1485     return SPIRV::OpPtrEqual;
1486   case CmpInst::ICMP_NE:
1487     return SPIRV::OpPtrNotEqual;
1488   default:
1489     llvm_unreachable("Unknown predicate type for pointer comparison");
1490   }
1491 }
1492 
1493 // Return the logical operation, or abort if none exists.
1494 static unsigned getBoolCmpOpcode(unsigned PredNum) {
1495   auto Pred = static_cast<CmpInst::Predicate>(PredNum);
1496   switch (Pred) {
1497   case CmpInst::ICMP_EQ:
1498     return SPIRV::OpLogicalEqual;
1499   case CmpInst::ICMP_NE:
1500     return SPIRV::OpLogicalNotEqual;
1501   default:
1502     llvm_unreachable("Unknown predicate type for Bool comparison");
1503   }
1504 }
1505 
1506 static APFloat getZeroFP(const Type *LLVMFloatTy) {
1507   if (!LLVMFloatTy)
1508     return APFloat::getZero(APFloat::IEEEsingle());
1509   switch (LLVMFloatTy->getScalarType()->getTypeID()) {
1510   case Type::HalfTyID:
1511     return APFloat::getZero(APFloat::IEEEhalf());
1512   default:
1513   case Type::FloatTyID:
1514     return APFloat::getZero(APFloat::IEEEsingle());
1515   case Type::DoubleTyID:
1516     return APFloat::getZero(APFloat::IEEEdouble());
1517   }
1518 }
1519 
1520 static APFloat getOneFP(const Type *LLVMFloatTy) {
1521   if (!LLVMFloatTy)
1522     return APFloat::getOne(APFloat::IEEEsingle());
1523   switch (LLVMFloatTy->getScalarType()->getTypeID()) {
1524   case Type::HalfTyID:
1525     return APFloat::getOne(APFloat::IEEEhalf());
1526   default:
1527   case Type::FloatTyID:
1528     return APFloat::getOne(APFloat::IEEEsingle());
1529   case Type::DoubleTyID:
1530     return APFloat::getOne(APFloat::IEEEdouble());
1531   }
1532 }
1533 
1534 bool SPIRVInstructionSelector::selectAnyOrAll(Register ResVReg,
1535                                               const SPIRVType *ResType,
1536                                               MachineInstr &I,
1537                                               unsigned OpAnyOrAll) const {
1538   assert(I.getNumOperands() == 3);
1539   assert(I.getOperand(2).isReg());
1540   MachineBasicBlock &BB = *I.getParent();
1541   Register InputRegister = I.getOperand(2).getReg();
1542   SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);
1543 
1544   if (!InputType)
1545     report_fatal_error("Input Type could not be determined.");
1546 
1547   bool IsBoolTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeBool);
1548   bool IsVectorTy = InputType->getOpcode() == SPIRV::OpTypeVector;
1549   if (IsBoolTy && !IsVectorTy) {
1550     assert(ResVReg == I.getOperand(0).getReg());
1551     return BuildMI(*I.getParent(), I, I.getDebugLoc(),
1552                    TII.get(TargetOpcode::COPY))
1553         .addDef(ResVReg)
1554         .addUse(InputRegister)
1555         .constrainAllUses(TII, TRI, RBI);
1556   }
1557 
1558   bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
1559   unsigned SpirvNotEqualId =
1560       IsFloatTy ? SPIRV::OpFOrdNotEqual : SPIRV::OpINotEqual;
1561   SPIRVType *SpvBoolScalarTy = GR.getOrCreateSPIRVBoolType(I, TII);
1562   SPIRVType *SpvBoolTy = SpvBoolScalarTy;
1563   Register NotEqualReg = ResVReg;
1564 
1565   if (IsVectorTy) {
1566     NotEqualReg = IsBoolTy ? InputRegister
1567                            : MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1568     const unsigned NumElts = InputType->getOperand(2).getImm();
1569     SpvBoolTy = GR.getOrCreateSPIRVVectorType(SpvBoolTy, NumElts, I, TII);
1570   }
1571 
1572   if (!IsBoolTy) {
1573     Register ConstZeroReg =
1574         IsFloatTy ? buildZerosValF(InputType, I) : buildZerosVal(InputType, I);
1575 
1576     BuildMI(BB, I, I.getDebugLoc(), TII.get(SpirvNotEqualId))
1577         .addDef(NotEqualReg)
1578         .addUse(GR.getSPIRVTypeID(SpvBoolTy))
1579         .addUse(InputRegister)
1580         .addUse(ConstZeroReg)
1581         .constrainAllUses(TII, TRI, RBI);
1582   }
1583 
1584   if (!IsVectorTy)
1585     return true;
1586 
1587   return BuildMI(BB, I, I.getDebugLoc(), TII.get(OpAnyOrAll))
1588       .addDef(ResVReg)
1589       .addUse(GR.getSPIRVTypeID(SpvBoolScalarTy))
1590       .addUse(NotEqualReg)
1591       .constrainAllUses(TII, TRI, RBI);
1592 }
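// Sketch of the expansion for any(<N x i32> %v) (illustrative IDs); all()
// differs only in the final reduction opcode:
//   %ne  = OpINotEqual %vNbool %v %zeros   ; turn the input into a bool vector
//   %res = OpAny %bool %ne                 ; reduce the bool vector
// Float inputs use OpFOrdNotEqual instead, and already-boolean vector inputs
// skip the comparison.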
1593 
1594 bool SPIRVInstructionSelector::selectAll(Register ResVReg,
1595                                          const SPIRVType *ResType,
1596                                          MachineInstr &I) const {
1597   return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAll);
1598 }
1599 
1600 bool SPIRVInstructionSelector::selectAny(Register ResVReg,
1601                                          const SPIRVType *ResType,
1602                                          MachineInstr &I) const {
1603   return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAny);
1604 }
1605 
1606 // Select the OpDot instruction for the given floating-point dot product.
1607 bool SPIRVInstructionSelector::selectFloatDot(Register ResVReg,
1608                                               const SPIRVType *ResType,
1609                                               MachineInstr &I) const {
1610   assert(I.getNumOperands() == 4);
1611   assert(I.getOperand(2).isReg());
1612   assert(I.getOperand(3).isReg());
1613 
1614   [[maybe_unused]] SPIRVType *VecType =
1615       GR.getSPIRVTypeForVReg(I.getOperand(2).getReg());
1616 
1617   assert(VecType->getOpcode() == SPIRV::OpTypeVector &&
1618          GR.getScalarOrVectorComponentCount(VecType) > 1 &&
1619          "dot product requires a vector of at least 2 components");
1620 
1621   [[maybe_unused]] SPIRVType *EltType =
1622       GR.getSPIRVTypeForVReg(VecType->getOperand(1).getReg());
1623 
1624   assert(EltType->getOpcode() == SPIRV::OpTypeFloat);
1625 
1626   MachineBasicBlock &BB = *I.getParent();
1627   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpDot))
1628       .addDef(ResVReg)
1629       .addUse(GR.getSPIRVTypeID(ResType))
1630       .addUse(I.getOperand(2).getReg())
1631       .addUse(I.getOperand(3).getReg())
1632       .constrainAllUses(TII, TRI, RBI);
1633 }
1634 
1635 // Since pre-1.6 SPIR-V has no integer dot product instruction,
1636 // expand by multiplying the vectors elementwise and summing the results.
1637 bool SPIRVInstructionSelector::selectIntegerDot(Register ResVReg,
1638                                                 const SPIRVType *ResType,
1639                                                 MachineInstr &I) const {
1640   assert(I.getNumOperands() == 4);
1641   assert(I.getOperand(2).isReg());
1642   assert(I.getOperand(3).isReg());
1643   MachineBasicBlock &BB = *I.getParent();
1644 
1645   // Multiply the vectors, then sum the results
1646   Register Vec0 = I.getOperand(2).getReg();
1647   Register Vec1 = I.getOperand(3).getReg();
1648   Register TmpVec = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1649   SPIRVType *VecType = GR.getSPIRVTypeForVReg(Vec0);
1650 
1651   bool Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIMulV))
1652                     .addDef(TmpVec)
1653                     .addUse(GR.getSPIRVTypeID(VecType))
1654                     .addUse(Vec0)
1655                     .addUse(Vec1)
1656                     .constrainAllUses(TII, TRI, RBI);
1657 
1658   assert(VecType->getOpcode() == SPIRV::OpTypeVector &&
1659          GR.getScalarOrVectorComponentCount(VecType) > 1 &&
1660          "dot product requires a vector of at least 2 components");
1661 
1662   Register Res = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1663   Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
1664                 .addDef(Res)
1665                 .addUse(GR.getSPIRVTypeID(ResType))
1666                 .addUse(TmpVec)
1667                 .addImm(0)
1668                 .constrainAllUses(TII, TRI, RBI);
1669 
1670   for (unsigned i = 1; i < GR.getScalarOrVectorComponentCount(VecType); i++) {
1671     Register Elt = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1672 
1673     Result &=
1674         BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
1675             .addDef(Elt)
1676             .addUse(GR.getSPIRVTypeID(ResType))
1677             .addUse(TmpVec)
1678             .addImm(i)
1679             .constrainAllUses(TII, TRI, RBI);
1680 
1681     Register Sum = i < GR.getScalarOrVectorComponentCount(VecType) - 1
1682                        ? MRI->createVirtualRegister(&SPIRV::IDRegClass)
1683                        : ResVReg;
1684 
1685     Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIAddS))
1686                   .addDef(Sum)
1687                   .addUse(GR.getSPIRVTypeID(ResType))
1688                   .addUse(Res)
1689                   .addUse(Elt)
1690                   .constrainAllUses(TII, TRI, RBI);
1691     Res = Sum;
1692   }
1693 
1694   return Result;
1695 }
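// Sketch of the expansion for a 3-component integer dot product, written with
// the selector's vector/scalar mnemonics (illustrative IDs):
//   %mul = OpIMulV %v3i32 %a %b
//   %e0  = OpCompositeExtract %i32 %mul 0
//   %e1  = OpCompositeExtract %i32 %mul 1
//   %s1  = OpIAddS %i32 %e0 %e1
//   %e2  = OpCompositeExtract %i32 %mul 2
//   %res = OpIAddS %i32 %s1 %e2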
1696 
1697 /// Transform saturate(x) to clamp(x, 0.0f, 1.0f) as SPIRV
1698 /// does not have a saturate builtin.
1699 bool SPIRVInstructionSelector::selectSaturate(Register ResVReg,
1700                                               const SPIRVType *ResType,
1701                                               MachineInstr &I) const {
1702   assert(I.getNumOperands() == 3);
1703   assert(I.getOperand(2).isReg());
1704   MachineBasicBlock &BB = *I.getParent();
1705   Register VZero = buildZerosValF(ResType, I);
1706   Register VOne = buildOnesValF(ResType, I);
1707 
1708   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
1709       .addDef(ResVReg)
1710       .addUse(GR.getSPIRVTypeID(ResType))
1711       .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
1712       .addImm(GL::FClamp)
1713       .addUse(I.getOperand(2).getReg())
1714       .addUse(VZero)
1715       .addUse(VOne)
1716       .constrainAllUses(TII, TRI, RBI);
1717 }
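// The result is a single extended-instruction call (illustrative IDs):
//   %res = OpExtInst %ty %glsl_ext FClamp %x %zeros %ones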
1718 
1719 bool SPIRVInstructionSelector::selectSign(Register ResVReg,
1720                                           const SPIRVType *ResType,
1721                                           MachineInstr &I) const {
1722   assert(I.getNumOperands() == 3);
1723   assert(I.getOperand(2).isReg());
1724   MachineBasicBlock &BB = *I.getParent();
1725   Register InputRegister = I.getOperand(2).getReg();
1726   SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);
1727   auto &DL = I.getDebugLoc();
1728 
1729   if (!InputType)
1730     report_fatal_error("Input Type could not be determined.");
1731 
1732   bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
1733 
1734   unsigned SignBitWidth = GR.getScalarOrVectorBitWidth(InputType);
1735   unsigned ResBitWidth = GR.getScalarOrVectorBitWidth(ResType);
1736 
1737   bool NeedsConversion = IsFloatTy || SignBitWidth != ResBitWidth;
1738 
1739   auto SignOpcode = IsFloatTy ? GL::FSign : GL::SSign;
1740   Register SignReg = NeedsConversion
1741                          ? MRI->createVirtualRegister(&SPIRV::IDRegClass)
1742                          : ResVReg;
1743 
1744   bool Result =
1745       BuildMI(BB, I, DL, TII.get(SPIRV::OpExtInst))
1746           .addDef(SignReg)
1747           .addUse(GR.getSPIRVTypeID(InputType))
1748           .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
1749           .addImm(SignOpcode)
1750           .addUse(InputRegister)
1751           .constrainAllUses(TII, TRI, RBI);
1752 
1753   if (NeedsConversion) {
1754     auto ConvertOpcode = IsFloatTy ? SPIRV::OpConvertFToS : SPIRV::OpSConvert;
1755     Result &= BuildMI(*I.getParent(), I, DL, TII.get(ConvertOpcode))
1756                   .addDef(ResVReg)
1757                   .addUse(GR.getSPIRVTypeID(ResType))
1758                   .addUse(SignReg)
1759                   .constrainAllUses(TII, TRI, RBI);
1760   }
1761 
1762   return Result;
1763 }
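// E.g. sign(double %x) with an i32 result is selected as FSign in the input
// type followed by a conversion (illustrative IDs):
//   %s   = OpExtInst %f64 %glsl_ext FSign %x
//   %res = OpConvertFToS %i32 %s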
1764 
1765 bool SPIRVInstructionSelector::selectWaveReadLaneAt(Register ResVReg,
1766                                                     const SPIRVType *ResType,
1767                                                     MachineInstr &I) const {
1768   assert(I.getNumOperands() == 4);
1769   assert(I.getOperand(2).isReg());
1770   assert(I.getOperand(3).isReg());
1771   MachineBasicBlock &BB = *I.getParent();
1772 
1773   // IntTy is used to define the execution scope, set to 3 (Scope::Subgroup)
1774   // to denote a cross-lane interaction equivalent to a SPIR-V subgroup.
1775   SPIRVType *IntTy = GR.getOrCreateSPIRVIntegerType(32, I, TII);
1776   return BuildMI(BB, I, I.getDebugLoc(),
1777                  TII.get(SPIRV::OpGroupNonUniformShuffle))
1778       .addDef(ResVReg)
1779       .addUse(GR.getSPIRVTypeID(ResType))
1780       .addUse(GR.getOrCreateConstInt(3, I, IntTy, TII))
1781       .addUse(I.getOperand(2).getReg())
1782       .addUse(I.getOperand(3).getReg()).constrainAllUses(TII, TRI, RBI);
1783 }
1784 
1785 bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
1786                                                 const SPIRVType *ResType,
1787                                                 MachineInstr &I) const {
1788   MachineBasicBlock &BB = *I.getParent();
1789   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
1790       .addDef(ResVReg)
1791       .addUse(GR.getSPIRVTypeID(ResType))
1792       .addUse(I.getOperand(1).getReg())
1793       .constrainAllUses(TII, TRI, RBI);
1794 }
1795 
1796 bool SPIRVInstructionSelector::selectFreeze(Register ResVReg,
1797                                             const SPIRVType *ResType,
1798                                             MachineInstr &I) const {
1799   // There is no way to implement `freeze` correctly without support on the
1800   // SPIR-V standard side, but we may at least handle the simple (static) case
1801   // when the presence of an undef/poison value is obvious. The main benefit of
1802   // even incomplete `freeze` support is preventing the translation from
1803   // crashing due to missing support in the legalization and selection steps.
1804   if (!I.getOperand(0).isReg() || !I.getOperand(1).isReg())
1805     return false;
1806   Register OpReg = I.getOperand(1).getReg();
1807   if (MachineInstr *Def = MRI->getVRegDef(OpReg)) {
1808     Register Reg;
1809     switch (Def->getOpcode()) {
1810     case SPIRV::ASSIGN_TYPE:
1811       if (MachineInstr *AssignToDef =
1812               MRI->getVRegDef(Def->getOperand(1).getReg())) {
1813         if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1814           Reg = Def->getOperand(2).getReg();
1815       }
1816       break;
1817     case SPIRV::OpUndef:
1818       Reg = Def->getOperand(1).getReg();
1819       break;
1820     }
1821     unsigned DestOpCode;
1822     if (Reg.isValid()) {
1823       DestOpCode = SPIRV::OpConstantNull;
1824     } else {
1825       DestOpCode = TargetOpcode::COPY;
1826       Reg = OpReg;
1827     }
1828     return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(DestOpCode))
1829         .addDef(I.getOperand(0).getReg())
1830         .addUse(Reg)
1831         .constrainAllUses(TII, TRI, RBI);
1832   }
1833   return false;
1834 }
1835 
1836 static unsigned getArrayComponentCount(MachineRegisterInfo *MRI,
1837                                        const SPIRVType *ResType) {
1838   Register OpReg = ResType->getOperand(2).getReg();
1839   SPIRVType *OpDef = MRI->getVRegDef(OpReg);
1840   if (!OpDef)
1841     return 0;
1842   if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
1843       OpDef->getOperand(1).isReg()) {
1844     if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
1845       OpDef = RefDef;
1846   }
1847   unsigned N = OpDef->getOpcode() == TargetOpcode::G_CONSTANT
1848                    ? OpDef->getOperand(1).getCImm()->getValue().getZExtValue()
1849                    : 0;
1850   return N;
1851 }
1852 
1853 // Return true if the instruction defines a constant register (possibly a constant composite).
1854 static bool isConstReg(MachineRegisterInfo *MRI, SPIRVType *OpDef,
1855                        SmallPtrSet<SPIRVType *, 4> &Visited) {
1856   if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
1857       OpDef->getOperand(1).isReg()) {
1858     if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
1859       OpDef = RefDef;
1860   }
1861 
1862   if (Visited.contains(OpDef))
1863     return true;
1864   Visited.insert(OpDef);
1865 
1866   unsigned Opcode = OpDef->getOpcode();
1867   switch (Opcode) {
1868   case TargetOpcode::G_CONSTANT:
1869   case TargetOpcode::G_FCONSTANT:
1870     return true;
1871   case TargetOpcode::G_INTRINSIC:
1872   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
1873   case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
1874     return cast<GIntrinsic>(*OpDef).getIntrinsicID() ==
1875            Intrinsic::spv_const_composite;
1876   case TargetOpcode::G_BUILD_VECTOR:
1877   case TargetOpcode::G_SPLAT_VECTOR: {
1878     for (unsigned i = OpDef->getNumExplicitDefs(); i < OpDef->getNumOperands();
1879          i++) {
1880       SPIRVType *OpNestedDef =
1881           OpDef->getOperand(i).isReg()
1882               ? MRI->getVRegDef(OpDef->getOperand(i).getReg())
1883               : nullptr;
1884       if (OpNestedDef && !isConstReg(MRI, OpNestedDef, Visited))
1885         return false;
1886     }
1887     return true;
1888   }
1889   }
1890   return false;
1891 }
1892 
1893 // Return true if the virtual register represents a constant
1894 static bool isConstReg(MachineRegisterInfo *MRI, Register OpReg) {
1895   SmallPtrSet<SPIRVType *, 4> Visited;
1896   if (SPIRVType *OpDef = MRI->getVRegDef(OpReg))
1897     return isConstReg(MRI, OpDef, Visited);
1898   return false;
1899 }
1900 
1901 bool SPIRVInstructionSelector::selectBuildVector(Register ResVReg,
1902                                                  const SPIRVType *ResType,
1903                                                  MachineInstr &I) const {
1904   unsigned N = 0;
1905   if (ResType->getOpcode() == SPIRV::OpTypeVector)
1906     N = GR.getScalarOrVectorComponentCount(ResType);
1907   else if (ResType->getOpcode() == SPIRV::OpTypeArray)
1908     N = getArrayComponentCount(MRI, ResType);
1909   else
1910     report_fatal_error("Cannot select G_BUILD_VECTOR with a non-vector result");
1911   if (I.getNumExplicitOperands() - I.getNumExplicitDefs() != N)
1912     report_fatal_error("G_BUILD_VECTOR and the result type are inconsistent");
1913 
1914   // check if we may construct a constant vector
1915   bool IsConst = true;
1916   for (unsigned i = I.getNumExplicitDefs();
1917        i < I.getNumExplicitOperands() && IsConst; ++i)
1918     if (!isConstReg(MRI, I.getOperand(i).getReg()))
1919       IsConst = false;
1920 
1921   if (!IsConst && N < 2)
1922     report_fatal_error(
1923         "There must be at least two constituent operands in a vector");
1924 
1925   auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
1926                      TII.get(IsConst ? SPIRV::OpConstantComposite
1927                                      : SPIRV::OpCompositeConstruct))
1928                  .addDef(ResVReg)
1929                  .addUse(GR.getSPIRVTypeID(ResType));
1930   for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
1931     MIB.addUse(I.getOperand(i).getReg());
1932   return MIB.constrainAllUses(TII, TRI, RBI);
1933 }
1934 
1935 bool SPIRVInstructionSelector::selectSplatVector(Register ResVReg,
1936                                                  const SPIRVType *ResType,
1937                                                  MachineInstr &I) const {
1938   unsigned N = 0;
1939   if (ResType->getOpcode() == SPIRV::OpTypeVector)
1940     N = GR.getScalarOrVectorComponentCount(ResType);
1941   else if (ResType->getOpcode() == SPIRV::OpTypeArray)
1942     N = getArrayComponentCount(MRI, ResType);
1943   else
1944     report_fatal_error("Cannot select G_SPLAT_VECTOR with a non-vector result");
1945 
1946   unsigned OpIdx = I.getNumExplicitDefs();
1947   if (!I.getOperand(OpIdx).isReg())
1948     report_fatal_error("Unexpected argument in G_SPLAT_VECTOR");
1949 
1950   // check if we may construct a constant vector
1951   Register OpReg = I.getOperand(OpIdx).getReg();
1952   bool IsConst = isConstReg(MRI, OpReg);
1953 
1954   if (!IsConst && N < 2)
1955     report_fatal_error(
1956         "There must be at least two constituent operands in a vector");
1957 
1958   auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
1959                      TII.get(IsConst ? SPIRV::OpConstantComposite
1960                                      : SPIRV::OpCompositeConstruct))
1961                  .addDef(ResVReg)
1962                  .addUse(GR.getSPIRVTypeID(ResType));
1963   for (unsigned i = 0; i < N; ++i)
1964     MIB.addUse(OpReg);
1965   return MIB.constrainAllUses(TII, TRI, RBI);
1966 }
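// E.g. splatting a constant scalar %c into a 4-element vector yields
// (illustrative IDs):
//   %res = OpConstantComposite %v4i32 %c %c %c %c
// A non-constant scalar is splatted with OpCompositeConstruct instead.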
1967 
1968 bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
1969                                          const SPIRVType *ResType,
1970                                          unsigned CmpOpc,
1971                                          MachineInstr &I) const {
1972   Register Cmp0 = I.getOperand(2).getReg();
1973   Register Cmp1 = I.getOperand(3).getReg();
1974   assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
1975              GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
1976          "CMP operands should have the same type");
1977   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
1978       .addDef(ResVReg)
1979       .addUse(GR.getSPIRVTypeID(ResType))
1980       .addUse(Cmp0)
1981       .addUse(Cmp1)
1982       .constrainAllUses(TII, TRI, RBI);
1983 }
1984 
1985 bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
1986                                           const SPIRVType *ResType,
1987                                           MachineInstr &I) const {
1988   auto Pred = I.getOperand(1).getPredicate();
1989   unsigned CmpOpc;
1990 
1991   Register CmpOperand = I.getOperand(2).getReg();
1992   if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
1993     CmpOpc = getPtrCmpOpcode(Pred);
1994   else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
1995     CmpOpc = getBoolCmpOpcode(Pred);
1996   else
1997     CmpOpc = getICmpOpcode(Pred);
1998   return selectCmp(ResVReg, ResType, CmpOpc, I);
1999 }
2000 
2001 void SPIRVInstructionSelector::renderFImm64(MachineInstrBuilder &MIB,
2002                                             const MachineInstr &I,
2003                                             int OpIdx) const {
2004   assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
2005          "Expected G_FCONSTANT");
2006   const ConstantFP *FPImm = I.getOperand(1).getFPImm();
2007   addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
2008 }
2009 
2010 void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
2011                                            const MachineInstr &I,
2012                                            int OpIdx) const {
2013   assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
2014          "Expected G_CONSTANT");
2015   addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
2016 }
2017 
2018 Register
2019 SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
2020                                            const SPIRVType *ResType) const {
2021   Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
2022   const SPIRVType *SpvI32Ty =
2023       ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
2024   // Find a constant in the duplicate tracker (DT) or build a new one.
2025   auto ConstInt = ConstantInt::get(LLVMTy, Val);
2026   Register NewReg = GR.find(ConstInt, GR.CurMF);
2027   if (!NewReg.isValid()) {
2028     NewReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
2029     GR.add(ConstInt, GR.CurMF, NewReg);
2030     MachineInstr *MI;
2031     MachineBasicBlock &BB = *I.getParent();
2032     if (Val == 0) {
2033       MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
2034                .addDef(NewReg)
2035                .addUse(GR.getSPIRVTypeID(SpvI32Ty));
2036     } else {
2037       MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
2038                .addDef(NewReg)
2039                .addUse(GR.getSPIRVTypeID(SpvI32Ty))
2040                .addImm(APInt(32, Val).getZExtValue());
2041     }
2042     constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
2043   }
2044   return NewReg;
2045 }
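// Note that the constant is deduplicated through GR.find/GR.add, so repeated
// requests for the same 32-bit value reuse one register; see e.g. the
//   ScopeReg = buildI32Constant(Scope, I);
// call in selectAtomicCmpXchg above.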
2046 
2047 bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
2048                                           const SPIRVType *ResType,
2049                                           MachineInstr &I) const {
2050   unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
2051   return selectCmp(ResVReg, ResType, CmpOp, I);
2052 }
2053 
2054 Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
2055                                                  MachineInstr &I) const {
2056   // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
2057   bool ZeroAsNull = STI.isOpenCLEnv();
2058   if (ResType->getOpcode() == SPIRV::OpTypeVector)
2059     return GR.getOrCreateConstVector(0UL, I, ResType, TII, ZeroAsNull);
2060   return GR.getOrCreateConstInt(0, I, ResType, TII, ZeroAsNull);
2061 }
2062 
2063 Register SPIRVInstructionSelector::buildZerosValF(const SPIRVType *ResType,
2064                                                   MachineInstr &I) const {
2065   // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
2066   bool ZeroAsNull = STI.isOpenCLEnv();
2067   APFloat VZero = getZeroFP(GR.getTypeForSPIRVType(ResType));
2068   if (ResType->getOpcode() == SPIRV::OpTypeVector)
2069     return GR.getOrCreateConstVector(VZero, I, ResType, TII, ZeroAsNull);
2070   return GR.getOrCreateConstFP(VZero, I, ResType, TII, ZeroAsNull);
2071 }
2072 
2073 Register SPIRVInstructionSelector::buildOnesValF(const SPIRVType *ResType,
2074                                                  MachineInstr &I) const {
2075   // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
2076   bool ZeroAsNull = STI.isOpenCLEnv();
2077   APFloat VOne = getOneFP(GR.getTypeForSPIRVType(ResType));
2078   if (ResType->getOpcode() == SPIRV::OpTypeVector)
2079     return GR.getOrCreateConstVector(VOne, I, ResType, TII, ZeroAsNull);
2080   return GR.getOrCreateConstFP(VOne, I, ResType, TII, ZeroAsNull);
2081 }
2082 
2083 Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
2084                                                 const SPIRVType *ResType,
2085                                                 MachineInstr &I) const {
2086   unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
2087   APInt One =
2088       AllOnes ? APInt::getAllOnes(BitWidth) : APInt::getOneBitSet(BitWidth, 0);
2089   if (ResType->getOpcode() == SPIRV::OpTypeVector)
2090     return GR.getOrCreateConstVector(One.getZExtValue(), I, ResType, TII);
2091   return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII);
2092 }
2093 
2094 bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
2095                                             const SPIRVType *ResType,
2096                                             MachineInstr &I,
2097                                             bool IsSigned) const {
2098   // To extend a bool, we need to use OpSelect between constants.
2099   Register ZeroReg = buildZerosVal(ResType, I);
2100   Register OneReg = buildOnesVal(IsSigned, ResType, I);
2101   bool IsScalarBool =
2102       GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
2103   unsigned Opcode =
2104       IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
2105   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
2106       .addDef(ResVReg)
2107       .addUse(GR.getSPIRVTypeID(ResType))
2108       .addUse(I.getOperand(1).getReg())
2109       .addUse(OneReg)
2110       .addUse(ZeroReg)
2111       .constrainAllUses(TII, TRI, RBI);
2112 }
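// E.g. a sign-extension of a scalar bool to i32 becomes an OpSelect between
// the two constants (illustrative IDs; a zero-extension picks 1 over all-ones):
//   %res = OpSelect %i32 %cond %all_ones %zero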
2113 
2114 bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
2115                                           const SPIRVType *ResType,
2116                                           MachineInstr &I, bool IsSigned,
2117                                           unsigned Opcode) const {
2118   Register SrcReg = I.getOperand(1).getReg();
2119   // We can convert a bool value directly to a float type without OpConvert*ToF;
2120   // however, the translator generates OpSelect+OpConvert*ToF, so we do the same.
2121   if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
2122     unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
2123     SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
2124     if (ResType->getOpcode() == SPIRV::OpTypeVector) {
2125       const unsigned NumElts = ResType->getOperand(2).getImm();
2126       TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
2127     }
2128     SrcReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2129     selectSelect(SrcReg, TmpType, I, false);
2130   }
2131   return selectUnOpWithSrc(ResVReg, ResType, I, SrcReg, Opcode);
2132 }
2133 
2134 bool SPIRVInstructionSelector::selectExt(Register ResVReg,
2135                                          const SPIRVType *ResType,
2136                                          MachineInstr &I, bool IsSigned) const {
2137   Register SrcReg = I.getOperand(1).getReg();
2138   if (GR.isScalarOrVectorOfType(SrcReg, SPIRV::OpTypeBool))
2139     return selectSelect(ResVReg, ResType, I, IsSigned);
2140 
2141   SPIRVType *SrcType = GR.getSPIRVTypeForVReg(SrcReg);
2142   if (SrcType == ResType) {
2143     const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(ResVReg);
2144     const TargetRegisterClass *SrcRC = MRI->getRegClassOrNull(SrcReg);
2145     if (DstRC != SrcRC && SrcRC)
2146       MRI->setRegClass(ResVReg, SrcRC);
2147     return BuildMI(*I.getParent(), I, I.getDebugLoc(),
2148                    TII.get(TargetOpcode::COPY))
2149         .addDef(ResVReg)
2150         .addUse(SrcReg)
2151         .constrainAllUses(TII, TRI, RBI);
2152   }
2153 
2154   unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
2155   return selectUnOp(ResVReg, ResType, I, Opcode);
2156 }
2157 
2158 bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
2159                                                Register ResVReg,
2160                                                MachineInstr &I,
2161                                                const SPIRVType *IntTy,
2162                                                const SPIRVType *BoolTy) const {
2163   // To truncate to a bool, we AND with 1 via OpBitwiseAnd and compare the result with zero via OpINotEqual.
2164   Register BitIntReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2165   bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
2166   unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
2167   Register Zero = buildZerosVal(IntTy, I);
2168   Register One = buildOnesVal(false, IntTy, I);
2169   MachineBasicBlock &BB = *I.getParent();
2170   BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
2171       .addDef(BitIntReg)
2172       .addUse(GR.getSPIRVTypeID(IntTy))
2173       .addUse(IntReg)
2174       .addUse(One)
2175       .constrainAllUses(TII, TRI, RBI);
2176   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
2177       .addDef(ResVReg)
2178       .addUse(GR.getSPIRVTypeID(BoolTy))
2179       .addUse(BitIntReg)
2180       .addUse(Zero)
2181       .constrainAllUses(TII, TRI, RBI);
2182 }
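// Sketch for truncating i32 %x to bool (illustrative IDs):
//   %bit = OpBitwiseAnd %i32 %x %one      ; keep only the lowest bit
//   %res = OpINotEqual %bool %bit %zero   ; test it against zero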
2183 
2184 bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
2185                                            const SPIRVType *ResType,
2186                                            MachineInstr &I) const {
2187   Register IntReg = I.getOperand(1).getReg();
2188   const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
2189   if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool))
2190     return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType);
2191   if (ArgType == ResType) {
2192     const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(ResVReg);
2193     const TargetRegisterClass *SrcRC = MRI->getRegClassOrNull(IntReg);
2194     if (DstRC != SrcRC && SrcRC)
2195       MRI->setRegClass(ResVReg, SrcRC);
2196     return BuildMI(*I.getParent(), I, I.getDebugLoc(),
2197                    TII.get(TargetOpcode::COPY))
2198         .addDef(ResVReg)
2199         .addUse(IntReg)
2200         .constrainAllUses(TII, TRI, RBI);
2201   }
2202   bool IsSigned = GR.isScalarOrVectorSigned(ResType);
2203   unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
2204   return selectUnOp(ResVReg, ResType, I, Opcode);
2205 }
2206 
2207 bool SPIRVInstructionSelector::selectConst(Register ResVReg,
2208                                            const SPIRVType *ResType,
2209                                            const APInt &Imm,
2210                                            MachineInstr &I) const {
2211   unsigned TyOpcode = ResType->getOpcode();
2212   assert(TyOpcode != SPIRV::OpTypePointer || Imm.isZero());
2213   MachineBasicBlock &BB = *I.getParent();
2214   if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
2215       Imm.isZero())
2216     return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
2217         .addDef(ResVReg)
2218         .addUse(GR.getSPIRVTypeID(ResType))
2219         .constrainAllUses(TII, TRI, RBI);
2220   if (TyOpcode == SPIRV::OpTypeInt) {
2221     assert(Imm.getBitWidth() <= 64 && "Unsupported integer width!");
2222     Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII);
2223     if (Reg == ResVReg)
2224       return true;
2225     return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
2226         .addDef(ResVReg)
2227         .addUse(Reg)
2228         .constrainAllUses(TII, TRI, RBI);
2229   }
2230   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
2231                  .addDef(ResVReg)
2232                  .addUse(GR.getSPIRVTypeID(ResType));
2233   // <=32-bit integers should be caught by the sdag pattern.
2234   assert(Imm.getBitWidth() > 32);
2235   addNumImm(Imm, MIB);
2236   return MIB.constrainAllUses(TII, TRI, RBI);
2237 }
2238 
2239 bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
2240                                              const SPIRVType *ResType,
2241                                              MachineInstr &I) const {
2242   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
2243       .addDef(ResVReg)
2244       .addUse(GR.getSPIRVTypeID(ResType))
2245       .constrainAllUses(TII, TRI, RBI);
2246 }
2247 
2248 static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
2249   assert(MO.isReg());
2250   const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
2251   if (TypeInst->getOpcode() == SPIRV::ASSIGN_TYPE) {
2252     assert(TypeInst->getOperand(1).isReg());
2253     MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
2254     return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
2255   }
2256   return TypeInst->getOpcode() == SPIRV::OpConstantI;
2257 }
2258 
2259 static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
2260   const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
2261   if (TypeInst->getOpcode() == SPIRV::OpConstantI)
2262     return TypeInst->getOperand(2).getImm();
2263   MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
2264   assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT);
2265   return ImmInst->getOperand(1).getCImm()->getZExtValue();
2266 }
2267 
2268 bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
2269                                                const SPIRVType *ResType,
2270                                                MachineInstr &I) const {
2271   MachineBasicBlock &BB = *I.getParent();
2272   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
2273                  .addDef(ResVReg)
2274                  .addUse(GR.getSPIRVTypeID(ResType))
2275                  // object to insert
2276                  .addUse(I.getOperand(3).getReg())
2277                  // composite to insert into
2278                  .addUse(I.getOperand(2).getReg());
2279   for (unsigned i = 4; i < I.getNumOperands(); i++)
2280     MIB.addImm(foldImm(I.getOperand(i), MRI));
2281   return MIB.constrainAllUses(TII, TRI, RBI);
2282 }
2283 
2284 bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
2285                                                 const SPIRVType *ResType,
2286                                                 MachineInstr &I) const {
2287   MachineBasicBlock &BB = *I.getParent();
2288   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
2289                  .addDef(ResVReg)
2290                  .addUse(GR.getSPIRVTypeID(ResType))
2291                  .addUse(I.getOperand(2).getReg());
2292   for (unsigned i = 3; i < I.getNumOperands(); i++)
2293     MIB.addImm(foldImm(I.getOperand(i), MRI));
2294   return MIB.constrainAllUses(TII, TRI, RBI);
2295 }
2296 
2297 bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
2298                                                const SPIRVType *ResType,
2299                                                MachineInstr &I) const {
2300   if (isImm(I.getOperand(4), MRI))
2301     return selectInsertVal(ResVReg, ResType, I);
2302   MachineBasicBlock &BB = *I.getParent();
2303   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
2304       .addDef(ResVReg)
2305       .addUse(GR.getSPIRVTypeID(ResType))
2306       .addUse(I.getOperand(2).getReg())
2307       .addUse(I.getOperand(3).getReg())
2308       .addUse(I.getOperand(4).getReg())
2309       .constrainAllUses(TII, TRI, RBI);
2310 }
2311 
2312 bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
2313                                                 const SPIRVType *ResType,
2314                                                 MachineInstr &I) const {
2315   if (isImm(I.getOperand(3), MRI))
2316     return selectExtractVal(ResVReg, ResType, I);
2317   MachineBasicBlock &BB = *I.getParent();
2318   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
2319       .addDef(ResVReg)
2320       .addUse(GR.getSPIRVTypeID(ResType))
2321       .addUse(I.getOperand(2).getReg())
2322       .addUse(I.getOperand(3).getReg())
2323       .constrainAllUses(TII, TRI, RBI);
2324 }
2325 
2326 bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
2327                                          const SPIRVType *ResType,
2328                                          MachineInstr &I) const {
2329   const bool IsGEPInBounds = I.getOperand(2).getImm();
2330 
2331   // OpAccessChain could be used for OpenCL, but the SPIRV-LLVM Translator only
2332   // relies on PtrAccessChain, so we'll try not to deviate. For Vulkan however,
2333   // we have to use Op[InBounds]AccessChain.
2334   const unsigned Opcode = STI.isVulkanEnv()
2335                               ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
2336                                                : SPIRV::OpAccessChain)
2337                               : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain
2338                                                : SPIRV::OpPtrAccessChain);
2339 
2340   auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
2341                  .addDef(ResVReg)
2342                  .addUse(GR.getSPIRVTypeID(ResType))
2343                  // Object to get a pointer to.
2344                  .addUse(I.getOperand(3).getReg());
2345   // Adding indices.
2346   const unsigned StartingIndex =
2347       (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
2348           ? 5
2349           : 4;
2350   for (unsigned i = StartingIndex; i < I.getNumExplicitOperands(); ++i)
2351     Res.addUse(I.getOperand(i).getReg());
2352   return Res.constrainAllUses(TII, TRI, RBI);
2353 }
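// E.g. with the OpenCL environment an in-bounds GEP becomes (illustrative
// IDs):
//   %res = OpInBoundsPtrAccessChain %ptr_ty %base %element %idx0 ...
// The PtrAccessChain forms take the extra leading Element operand that the
// Op[InBounds]AccessChain forms lack, hence the different StartingIndex.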
2354 
2355 // Wrap a value into OpSpecConstantOp if it is not already usable as a constant operand.
2356 bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
2357     MachineInstr &I, SmallVector<Register> &CompositeArgs) const {
2358   bool Result = true;
2359   unsigned Lim = I.getNumExplicitOperands();
2360   for (unsigned i = I.getNumExplicitDefs() + 1; i < Lim; ++i) {
2361     Register OpReg = I.getOperand(i).getReg();
2362     SPIRVType *OpDefine = MRI->getVRegDef(OpReg);
2363     SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
2364     SmallPtrSet<SPIRVType *, 4> Visited;
2365     if (!OpDefine || !OpType || isConstReg(MRI, OpDefine, Visited) ||
2366         OpDefine->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST ||
2367         GR.isAggregateType(OpType)) {
2368       // The case of G_ADDRSPACE_CAST inside spv_const_composite() is processed
2369       // by selectAddrSpaceCast()
2370       CompositeArgs.push_back(OpReg);
2371       continue;
2372     }
2373     MachineFunction *MF = I.getMF();
2374     Register WrapReg = GR.find(OpDefine, MF);
2375     if (WrapReg.isValid()) {
2376       CompositeArgs.push_back(WrapReg);
2377       continue;
2378     }
2379     // Create a new register for the wrapper
2380     WrapReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2381     GR.add(OpDefine, MF, WrapReg);
2382     CompositeArgs.push_back(WrapReg);
2383     // Decorate the wrapper register and generate a new instruction
2384     MRI->setType(WrapReg, LLT::pointer(0, 64));
2385     GR.assignSPIRVTypeToVReg(OpType, WrapReg, *MF);
2386     MachineBasicBlock &BB = *I.getParent();
2387     Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
2388                  .addDef(WrapReg)
2389                  .addUse(GR.getSPIRVTypeID(OpType))
2390                  .addImm(static_cast<uint32_t>(SPIRV::Opcode::Bitcast))
2391                  .addUse(OpReg)
2392                  .constrainAllUses(TII, TRI, RBI);
2393     if (!Result)
2394       break;
2395   }
2396   return Result;
2397 }
2398 
2399 bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
2400                                                const SPIRVType *ResType,
2401                                                MachineInstr &I) const {
2402   MachineBasicBlock &BB = *I.getParent();
2403   Intrinsic::ID IID = cast<GIntrinsic>(I).getIntrinsicID();
2404   switch (IID) {
2405   case Intrinsic::spv_load:
2406     return selectLoad(ResVReg, ResType, I);
2407   case Intrinsic::spv_store:
2408     return selectStore(I);
2409   case Intrinsic::spv_extractv:
2410     return selectExtractVal(ResVReg, ResType, I);
2411   case Intrinsic::spv_insertv:
2412     return selectInsertVal(ResVReg, ResType, I);
2413   case Intrinsic::spv_extractelt:
2414     return selectExtractElt(ResVReg, ResType, I);
2415   case Intrinsic::spv_insertelt:
2416     return selectInsertElt(ResVReg, ResType, I);
2417   case Intrinsic::spv_gep:
2418     return selectGEP(ResVReg, ResType, I);
2419   case Intrinsic::spv_unref_global:
2420   case Intrinsic::spv_init_global: {
2421     MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
2422     MachineInstr *Init = I.getNumExplicitOperands() > 2
2423                              ? MRI->getVRegDef(I.getOperand(2).getReg())
2424                              : nullptr;
2425     assert(MI);
2426     return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
2427   }
2428   case Intrinsic::spv_undef: {
2429     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
2430                    .addDef(ResVReg)
2431                    .addUse(GR.getSPIRVTypeID(ResType));
2432     return MIB.constrainAllUses(TII, TRI, RBI);
2433   }
2434   case Intrinsic::spv_const_composite: {
2435     // If no values are attached, the composite is null constant.
2436     bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
2437     // Select a proper instruction.
2438     unsigned Opcode = SPIRV::OpConstantNull;
2439     SmallVector<Register> CompositeArgs;
2440     if (!IsNull) {
2441       Opcode = SPIRV::OpConstantComposite;
2442       if (!wrapIntoSpecConstantOp(I, CompositeArgs))
2443         return false;
2444     }
2445     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
2446                    .addDef(ResVReg)
2447                    .addUse(GR.getSPIRVTypeID(ResType));
2448     // Skip the type MD node that we already used when generating assign.type for this.
2449     if (!IsNull) {
2450       for (Register OpReg : CompositeArgs)
2451         MIB.addUse(OpReg);
2452     }
2453     return MIB.constrainAllUses(TII, TRI, RBI);
2454   }
2455   case Intrinsic::spv_assign_name: {
2456     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
2457     MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
2458     for (unsigned i = I.getNumExplicitDefs() + 2;
2459          i < I.getNumExplicitOperands(); ++i) {
2460       MIB.addImm(I.getOperand(i).getImm());
2461     }
2462     return MIB.constrainAllUses(TII, TRI, RBI);
2463   }
2464   case Intrinsic::spv_switch: {
2465     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
2466     for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
2467       if (I.getOperand(i).isReg())
2468         MIB.addReg(I.getOperand(i).getReg());
2469       else if (I.getOperand(i).isCImm())
2470         addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
2471       else if (I.getOperand(i).isMBB())
2472         MIB.addMBB(I.getOperand(i).getMBB());
2473       else
2474         llvm_unreachable("Unexpected OpSwitch operand");
2475     }
2476     return MIB.constrainAllUses(TII, TRI, RBI);
2477   }
2478   case Intrinsic::spv_loop_merge:
2479   case Intrinsic::spv_selection_merge: {
2480     const auto Opcode = IID == Intrinsic::spv_selection_merge
2481                             ? SPIRV::OpSelectionMerge
2482                             : SPIRV::OpLoopMerge;
2483     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode));
2484     for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
2485       assert(I.getOperand(i).isMBB());
2486       MIB.addMBB(I.getOperand(i).getMBB());
2487     }
2488     MIB.addImm(SPIRV::SelectionControl::None);
2489     return MIB.constrainAllUses(TII, TRI, RBI);
2490   }
2491   case Intrinsic::spv_cmpxchg:
2492     return selectAtomicCmpXchg(ResVReg, ResType, I);
2493   case Intrinsic::spv_unreachable:
2494     BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable));
2495     break;
2496   case Intrinsic::spv_alloca:
2497     return selectFrameIndex(ResVReg, ResType, I);
2498   case Intrinsic::spv_alloca_array:
2499     return selectAllocaArray(ResVReg, ResType, I);
2500   case Intrinsic::spv_assume:
2501     if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
2502       BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
2503           .addUse(I.getOperand(1).getReg());
2504     break;
2505   case Intrinsic::spv_expect:
2506     if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
2507       BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
2508           .addDef(ResVReg)
2509           .addUse(GR.getSPIRVTypeID(ResType))
2510           .addUse(I.getOperand(2).getReg())
2511           .addUse(I.getOperand(3).getReg());
2512     break;
2513   case Intrinsic::arithmetic_fence:
2514     if (STI.canUseExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence))
2515       BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpArithmeticFenceEXT))
2516           .addDef(ResVReg)
2517           .addUse(GR.getSPIRVTypeID(ResType))
2518           .addUse(I.getOperand(2).getReg());
2519     else
2520       BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
2521           .addUse(I.getOperand(2).getReg());
2522     break;
2523   case Intrinsic::spv_thread_id:
2524     return selectSpvThreadId(ResVReg, ResType, I);
2525   case Intrinsic::spv_fdot:
2526     return selectFloatDot(ResVReg, ResType, I);
2527   case Intrinsic::spv_udot:
2528   case Intrinsic::spv_sdot:
2529     return selectIntegerDot(ResVReg, ResType, I);
2530   case Intrinsic::spv_all:
2531     return selectAll(ResVReg, ResType, I);
2532   case Intrinsic::spv_any:
2533     return selectAny(ResVReg, ResType, I);
2534   case Intrinsic::spv_cross:
2535     return selectExtInst(ResVReg, ResType, I, CL::cross, GL::Cross);
2536   case Intrinsic::spv_lerp:
2537     return selectExtInst(ResVReg, ResType, I, CL::mix, GL::FMix);
2538   case Intrinsic::spv_length:
2539     return selectExtInst(ResVReg, ResType, I, CL::length, GL::Length);
2540   case Intrinsic::spv_degrees:
2541     return selectExtInst(ResVReg, ResType, I, CL::degrees, GL::Degrees);
2542   case Intrinsic::spv_frac:
2543     return selectExtInst(ResVReg, ResType, I, CL::fract, GL::Fract);
2544   case Intrinsic::spv_normalize:
2545     return selectExtInst(ResVReg, ResType, I, CL::normalize, GL::Normalize);
2546   case Intrinsic::spv_rsqrt:
2547     return selectExtInst(ResVReg, ResType, I, CL::rsqrt, GL::InverseSqrt);
2548   case Intrinsic::spv_sign:
2549     return selectSign(ResVReg, ResType, I);
2550   case Intrinsic::spv_group_memory_barrier_with_group_sync: {
2551     Register MemSemReg =
2552         buildI32Constant(SPIRV::MemorySemantics::SequentiallyConsistent, I);
2553     Register ScopeReg = buildI32Constant(SPIRV::Scope::Workgroup, I);
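         // OpControlBarrier <execution scope> <memory scope> <semantics>; both
         // scopes here are Workgroup, with SequentiallyConsistent semantics.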
2554     MachineBasicBlock &BB = *I.getParent();
2555     return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpControlBarrier))
2556         .addUse(ScopeReg)
2557         .addUse(ScopeReg)
2558         .addUse(MemSemReg)
2559         .constrainAllUses(TII, TRI, RBI);
2560   } break;
2561   case Intrinsic::spv_lifetime_start:
2562   case Intrinsic::spv_lifetime_end: {
2563     unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
2564                                                        : SPIRV::OpLifetimeStop;
2565     int64_t Size = I.getOperand(I.getNumExplicitDefs() + 1).getImm();
2566     Register PtrReg = I.getOperand(I.getNumExplicitDefs() + 2).getReg();
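         // LLVM encodes an unknown lifetime size as -1; lower it to 0, the
         // SPIR-V encoding for that case.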
2567     if (Size == -1)
2568       Size = 0;
2569     BuildMI(BB, I, I.getDebugLoc(), TII.get(Op)).addUse(PtrReg).addImm(Size);
2570   } break;
2571   case Intrinsic::spv_saturate:
2572     return selectSaturate(ResVReg, ResType, I);
2573   case Intrinsic::spv_wave_is_first_lane: {
2574     SPIRVType *IntTy = GR.getOrCreateSPIRVIntegerType(32, I, TII);
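         // The scope operand: 3 is SPIRV::Scope::Subgroup, i.e. the current wave.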
2575     return BuildMI(BB, I, I.getDebugLoc(),
2576                    TII.get(SPIRV::OpGroupNonUniformElect))
2577         .addDef(ResVReg)
2578         .addUse(GR.getSPIRVTypeID(ResType))
2579         .addUse(GR.getOrCreateConstInt(3, I, IntTy, TII));
2580   }
2581   case Intrinsic::spv_wave_readlane:
2582     return selectWaveReadLaneAt(ResVReg, ResType, I);
2583   case Intrinsic::spv_step:
2584     return selectExtInst(ResVReg, ResType, I, CL::step, GL::Step);
2585   case Intrinsic::spv_radians:
2586     return selectExtInst(ResVReg, ResType, I, CL::radians, GL::Radians);
2587   // Discard intrinsics that we do not expect to represent code after lowering,
2588   // or intrinsics that are not implemented but should not crash when found in
2589   // a customer's LLVM IR input.
2590   case Intrinsic::instrprof_increment:
2591   case Intrinsic::instrprof_increment_step:
2592   case Intrinsic::instrprof_value_profile:
2593     break;
2594   // Discard internal intrinsics.
2595   case Intrinsic::spv_value_md:
2596     break;
2597   case Intrinsic::spv_handle_fromBinding: {
2598     selectHandleFromBinding(ResVReg, ResType, I);
2599     return true;
2600   }
2601   default: {
2602     std::string DiagMsg;
2603     raw_string_ostream OS(DiagMsg);
2604     I.print(OS);
2605     DiagMsg = "Intrinsic selection not implemented: " + DiagMsg;
2606     report_fatal_error(DiagMsg.c_str(), false);
2607   }
2608   }
2609   return true;
2610 }
2611 
2612 void SPIRVInstructionSelector::selectHandleFromBinding(Register &ResVReg,
2613                                                        const SPIRVType *ResType,
2614                                                        MachineInstr &I) const {
2615 
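       // Intrinsic operands after the result: descriptor set, binding, and array
       // size, each expected to be a constant immediate folded via foldImm.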
2616   uint32_t Set = foldImm(I.getOperand(2), MRI);
2617   uint32_t Binding = foldImm(I.getOperand(3), MRI);
2618   uint32_t ArraySize = foldImm(I.getOperand(4), MRI);
2619 
2620   MachineIRBuilder MIRBuilder(I);
2621   Register VarReg =
2622       buildPointerToResource(ResType, Set, Binding, ArraySize, MIRBuilder);
2623 
2624   // TODO: For now we assume the resource is an image, which needs to be
2625   // loaded to get the handle. That will not be true for storage buffers.
2626   BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
2627       .addDef(ResVReg)
2628       .addUse(GR.getSPIRVTypeID(ResType))
2629       .addUse(VarReg);
2630 }
2631 
2632 Register SPIRVInstructionSelector::buildPointerToResource(
2633     const SPIRVType *ResType, uint32_t Set, uint32_t Binding,
2634     uint32_t ArraySize, MachineIRBuilder MIRBuilder) const {
2635   assert(ArraySize == 1 && "Resource arrays are not implemented yet.");
2636   return GR.getOrCreateGlobalVariableWithBinding(ResType, Set, Binding,
2637                                                  MIRBuilder);
2638 }
2639 
2640 bool SPIRVInstructionSelector::selectAllocaArray(Register ResVReg,
2641                                                  const SPIRVType *ResType,
2642                                                  MachineInstr &I) const {
2643   // The allocation instruction had an allocation-size operand other than 1,
2644   // so it is lowered to a variable-length array.
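       // E.g. "%p = alloca i32, i32 %n" reaches here and is lowered to
       // OpVariableLengthArrayINTEL (SPV_INTEL_variable_length_array).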
2645   MachineBasicBlock &BB = *I.getParent();
2646   return BuildMI(BB, I, I.getDebugLoc(),
2647                  TII.get(SPIRV::OpVariableLengthArrayINTEL))
2648       .addDef(ResVReg)
2649       .addUse(GR.getSPIRVTypeID(ResType))
2650       .addUse(I.getOperand(2).getReg())
2651       .constrainAllUses(TII, TRI, RBI);
2652 }
2653 
2654 bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
2655                                                 const SPIRVType *ResType,
2656                                                 MachineInstr &I) const {
2657   // Change order of instructions if needed: all OpVariable instructions in a
2658   // function must be the first instructions in the first block
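       // (i.e. the new OpVariable is emitted right after the function header
       // rather than at the position of I).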
2659   MachineFunction *MF = I.getParent()->getParent();
2660   MachineBasicBlock *MBB = &MF->front();
2661   auto It = MBB->SkipPHIsAndLabels(MBB->begin()), E = MBB->end();
2662   bool IsHeader = false;
2663   unsigned Opcode;
2664   for (; It != E && It != I; ++It) {
2665     Opcode = It->getOpcode();
2666     if (Opcode == SPIRV::OpFunction || Opcode == SPIRV::OpFunctionParameter) {
2667       IsHeader = true;
2668     } else if (IsHeader &&
2669                !(Opcode == SPIRV::ASSIGN_TYPE || Opcode == SPIRV::OpLabel)) {
2670       ++It;
2671       break;
2672     }
2673   }
2674   return BuildMI(*MBB, It, It->getDebugLoc(), TII.get(SPIRV::OpVariable))
2675       .addDef(ResVReg)
2676       .addUse(GR.getSPIRVTypeID(ResType))
2677       .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
2678       .constrainAllUses(TII, TRI, RBI);
2679 }
2680 
2681 bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
2682   // InstructionSelector walks backwards through the instructions. We can use
2683   // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
2684   // first, so we can generate an OpBranchConditional here. If there is no
2685   // G_BRCOND, we just use OpBranch for a regular unconditional branch.
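       // A sketch of the lowering (block names are illustrative):
       //   G_BRCOND %cond, %bb.then
       //   G_BR %bb.else
       // becomes:
       //   OpBranchConditional %cond %bb.then %bb.else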
2686   const MachineInstr *PrevI = I.getPrevNode();
2687   MachineBasicBlock &MBB = *I.getParent();
2688   if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
2689     return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
2690         .addUse(PrevI->getOperand(0).getReg())
2691         .addMBB(PrevI->getOperand(1).getMBB())
2692         .addMBB(I.getOperand(0).getMBB())
2693         .constrainAllUses(TII, TRI, RBI);
2694   }
2695   return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
2696       .addMBB(I.getOperand(0).getMBB())
2697       .constrainAllUses(TII, TRI, RBI);
2698 }
2699 
2700 bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
2701   // InstructionSelector walks backwards through the instructions. For an
2702   // explicit conditional branch with no fallthrough, we use both a G_BR and a
2703   // G_BRCOND to create an OpBranchConditional. We should hit G_BR first, and
2704   // generate the OpBranchConditional in selectBranch above.
2705   //
2706   // If an OpBranchConditional has been generated, we simply return, as the work
2707   // is already done. If there is no OpBranchConditional, LLVM must be relying on
2708   // implicit fallthrough to the next basic block, so we need to create an
2709   // OpBranchConditional with an explicit "false" argument pointing to the next
2710   // basic block that LLVM would fall through to.
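       // E.g. a lone "G_BRCOND %cond, %bb.then" that falls through to %bb.next
       // becomes "OpBranchConditional %cond %bb.then %bb.next".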
2711   const MachineInstr *NextI = I.getNextNode();
2712   // Check if this has already been successfully selected.
2713   if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
2714     return true;
2715   // Must be relying on implicit block fallthrough, so generate an
2716   // OpBranchConditional with the "next" basic block as the "false" target.
2717   MachineBasicBlock &MBB = *I.getParent();
2718   unsigned NextMBBNum = MBB.getNextNode()->getNumber();
2719   MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
2720   return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
2721       .addUse(I.getOperand(0).getReg())
2722       .addMBB(I.getOperand(1).getMBB())
2723       .addMBB(NextMBB)
2724       .constrainAllUses(TII, TRI, RBI);
2725 }
2726 
2727 bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
2728                                          const SPIRVType *ResType,
2729                                          MachineInstr &I) const {
2730   auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
2731                  .addDef(ResVReg)
2732                  .addUse(GR.getSPIRVTypeID(ResType));
2733   const unsigned NumOps = I.getNumOperands();
2734   for (unsigned i = 1; i < NumOps; i += 2) {
2735     MIB.addUse(I.getOperand(i + 0).getReg());
2736     MIB.addMBB(I.getOperand(i + 1).getMBB());
2737   }
2738   return MIB.constrainAllUses(TII, TRI, RBI);
2739 }
2740 
2741 bool SPIRVInstructionSelector::selectGlobalValue(
2742     Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
2743   // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
2744   MachineIRBuilder MIRBuilder(I);
2745   const GlobalValue *GV = I.getOperand(1).getGlobal();
2746   Type *GVType = toTypedPointer(GR.getDeducedGlobalValueType(GV));
2747   SPIRVType *PointerBaseType;
2748   if (GVType->isArrayTy()) {
2749     SPIRVType *ArrayElementType =
2750         GR.getOrCreateSPIRVType(GVType->getArrayElementType(), MIRBuilder,
2751                                 SPIRV::AccessQualifier::ReadWrite, false);
2752     PointerBaseType = GR.getOrCreateSPIRVArrayType(
2753         ArrayElementType, GVType->getArrayNumElements(), I, TII);
2754   } else {
2755     PointerBaseType = GR.getOrCreateSPIRVType(
2756         GVType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
2757   }
2758   SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
2759       PointerBaseType, I, TII,
2760       addressSpaceToStorageClass(GV->getAddressSpace(), STI));
2761 
2762   std::string GlobalIdent;
2763   if (!GV->hasName()) {
2764     unsigned &ID = UnnamedGlobalIDs[GV];
2765     if (ID == 0)
2766       ID = UnnamedGlobalIDs.size();
2767     GlobalIdent = "__unnamed_" + Twine(ID).str();
2768   } else {
2769     GlobalIdent = GV->getGlobalIdentifier();
2770   }
2771 
2772   // Behaviour of functions as operands depends on availability of the
2773   // corresponding extension (SPV_INTEL_function_pointers):
2774   // - If there is an extension to operate with functions as operands:
2775   // We create a proper constant operand and evaluate the correct type for a
2776   // function pointer.
2777   // - Without the required extension:
2778   // We have functions as operands in tests with blocks of instructions, e.g.
2779   // in transcoding/global_block.ll. These operands are not used and should be
2780   // substituted by zero constants. Their type is always expected to be
2781   // OpTypePointer Function %uchar.
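       // A sketch of both forms (names are illustrative):
       //   %fp = OpConstantFunctionPointerINTEL %fnptr_ty %fn   ; with the extension
       //   %fp = OpConstantNull %ptr_ty                         ; without it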
2782   if (isa<Function>(GV)) {
2783     const Constant *ConstVal = GV;
2784     MachineBasicBlock &BB = *I.getParent();
2785     Register NewReg = GR.find(ConstVal, GR.CurMF);
2786     if (!NewReg.isValid()) {
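           // Note: this inner NewReg intentionally shadows the outer one; the
           // constant is materialized directly into ResVReg and recorded in GR.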
2787       Register NewReg = ResVReg;
2788       GR.add(ConstVal, GR.CurMF, NewReg);
2789       const Function *GVFun =
2790           STI.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)
2791               ? dyn_cast<Function>(GV)
2792               : nullptr;
2793       if (GVFun) {
2794         // References to a function via function pointers generate virtual
2795         // registers without a definition. We will resolve it later, during
2796         // module analysis stage.
2797         MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2798         Register FuncVReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
2799         MRI->setRegClass(FuncVReg, &SPIRV::iIDRegClass);
2800         MachineInstrBuilder MB =
2801             BuildMI(BB, I, I.getDebugLoc(),
2802                     TII.get(SPIRV::OpConstantFunctionPointerINTEL))
2803                 .addDef(NewReg)
2804                 .addUse(GR.getSPIRVTypeID(ResType))
2805                 .addUse(FuncVReg);
2806         // Map the function pointer to the Function it references.
2807         GR.recordFunctionPointer(&MB.getInstr()->getOperand(2), GVFun);
2808         return MB.constrainAllUses(TII, TRI, RBI);
2809       }
2810       return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
2811           .addDef(NewReg)
2812           .addUse(GR.getSPIRVTypeID(ResType))
2813           .constrainAllUses(TII, TRI, RBI);
2814     }
2815     assert(NewReg != ResVReg);
2816     return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
2817         .addDef(ResVReg)
2818         .addUse(NewReg)
2819         .constrainAllUses(TII, TRI, RBI);
2820   }
2821   auto GlobalVar = cast<GlobalVariable>(GV);
2822   assert(GlobalVar->getName() != "llvm.global.annotations");
2823 
2824   bool HasInit = GlobalVar->hasInitializer() &&
2825                  !isa<UndefValue>(GlobalVar->getInitializer());
2826   // Skip the empty declaration for GVs with initializers until we get the
2827   // declaration with the initializer passed in.
2828   if (HasInit && !Init)
2829     return true;
2830 
2831   unsigned AddrSpace = GV->getAddressSpace();
2832   SPIRV::StorageClass::StorageClass Storage =
2833       addressSpaceToStorageClass(AddrSpace, STI);
2834   bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage &&
2835                   Storage != SPIRV::StorageClass::Function;
2836   SPIRV::LinkageType::LinkageType LnkType =
2837       (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
2838           ? SPIRV::LinkageType::Import
2839           : (GV->getLinkage() == GlobalValue::LinkOnceODRLinkage &&
2840                      STI.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr)
2841                  ? SPIRV::LinkageType::LinkOnceODR
2842                  : SPIRV::LinkageType::Export);
2843 
2844   Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
2845                                         Storage, Init, GlobalVar->isConstant(),
2846                                         HasLnkTy, LnkType, MIRBuilder, true);
2847   return Reg.isValid();
2848 }
2849 
2850 bool SPIRVInstructionSelector::selectLog10(Register ResVReg,
2851                                            const SPIRVType *ResType,
2852                                            MachineInstr &I) const {
2853   if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
2854     return selectExtInst(ResVReg, ResType, I, CL::log10);
2855   }
2856 
2857   // There is no log10 instruction in the GLSL Extended Instruction set, so it
2858   // is implemented as:
2859   // log10(x) = log2(x) * (1 / log2(10))
2860   //          = log2(x) * 0.30103
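       // (1 / log2(10) = log10(2) ~= 0.30103; the multiply below is emitted as
       // OpVectorTimesScalar for vector results and OpFMulS for scalars.)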
2861 
2862   MachineIRBuilder MIRBuilder(I);
2863   MachineBasicBlock &BB = *I.getParent();
2864 
2865   // Build log2(x).
2866   Register VarReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
2867   bool Result =
2868       BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
2869           .addDef(VarReg)
2870           .addUse(GR.getSPIRVTypeID(ResType))
2871           .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
2872           .addImm(GL::Log2)
2873           .add(I.getOperand(1))
2874           .constrainAllUses(TII, TRI, RBI);
2875 
2876   // Build 0.30103.
2877   assert(ResType->getOpcode() == SPIRV::OpTypeVector ||
2878          ResType->getOpcode() == SPIRV::OpTypeFloat);
2879   // TODO: Add matrix implementation once supported by the HLSL frontend.
2880   const SPIRVType *SpirvScalarType =
2881       ResType->getOpcode() == SPIRV::OpTypeVector
2882           ? GR.getSPIRVTypeForVReg(ResType->getOperand(1).getReg())
2883           : ResType;
2884   Register ScaleReg =
2885       GR.buildConstantFP(APFloat(0.30103f), MIRBuilder, SpirvScalarType);
2886 
2887   // Multiply log2(x) by 0.30103 to get log10(x) result.
2888   auto Opcode = ResType->getOpcode() == SPIRV::OpTypeVector
2889                     ? SPIRV::OpVectorTimesScalar
2890                     : SPIRV::OpFMulS;
2891   Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
2892                 .addDef(ResVReg)
2893                 .addUse(GR.getSPIRVTypeID(ResType))
2894                 .addUse(VarReg)
2895                 .addUse(ScaleReg)
2896                 .constrainAllUses(TII, TRI, RBI);
2897 
2898   return Result;
2899 }
2900 
2901 bool SPIRVInstructionSelector::selectSpvThreadId(Register ResVReg,
2902                                                  const SPIRVType *ResType,
2903                                                  MachineInstr &I) const {
2904   // DX intrinsic: @llvm.dx.thread.id(i32)
2905   // ID  Name      Description
2906   // 93  ThreadId  reads the thread ID
2907 
2908   MachineIRBuilder MIRBuilder(I);
2909   const SPIRVType *U32Type = GR.getOrCreateSPIRVIntegerType(32, MIRBuilder);
2910   const SPIRVType *Vec3Ty =
2911       GR.getOrCreateSPIRVVectorType(U32Type, 3, MIRBuilder);
2912   const SPIRVType *PtrType = GR.getOrCreateSPIRVPointerType(
2913       Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);
2914 
2915   // Create new register for GlobalInvocationID builtin variable.
2916   Register NewRegister =
2917       MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::iIDRegClass);
2918   MIRBuilder.getMRI()->setType(NewRegister, LLT::pointer(0, 64));
2919   GR.assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());
2920 
2921   // Build GlobalInvocationID global variable with the necessary decorations.
2922   Register Variable = GR.buildGlobalVariable(
2923       NewRegister, PtrType,
2924       getLinkStringForBuiltIn(SPIRV::BuiltIn::GlobalInvocationId), nullptr,
2925       SPIRV::StorageClass::Input, nullptr, true, true,
2926       SPIRV::LinkageType::Import, MIRBuilder, false);
2927 
2928   // Create new register for loading value.
2929   MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2930   Register LoadedRegister = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2931   MIRBuilder.getMRI()->setType(LoadedRegister, LLT::pointer(0, 64));
2932   GR.assignSPIRVTypeToVReg(Vec3Ty, LoadedRegister, MIRBuilder.getMF());
2933 
2934   // Load v3uint value from the global variable.
2935   BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
2936       .addDef(LoadedRegister)
2937       .addUse(GR.getSPIRVTypeID(Vec3Ty))
2938       .addUse(Variable);
2939 
2940   // Get Thread ID index. Expecting operand is a constant immediate value,
2941   // wrapped in a type assignment.
2942   assert(I.getOperand(2).isReg());
2943   const uint32_t ThreadId = foldImm(I.getOperand(2), MRI);
2944 
2945   // Extract the thread ID from the loaded vector value.
2946   MachineBasicBlock &BB = *I.getParent();
2947   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
2948                  .addDef(ResVReg)
2949                  .addUse(GR.getSPIRVTypeID(ResType))
2950                  .addUse(LoadedRegister)
2951                  .addImm(ThreadId);
2952   return MIB.constrainAllUses(TII, TRI, RBI);
2953 }
2954 
2955 namespace llvm {
2956 InstructionSelector *
2957 createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
2958                                const SPIRVSubtarget &Subtarget,
2959                                const RegisterBankInfo &RBI) {
2960   return new SPIRVInstructionSelector(TM, Subtarget, RBI);
2961 }
2962 } // namespace llvm
2963