//===- SPIRVInstructionSelector.cpp ------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the targeting of the InstructionSelector class for
// SPIRV.
// TODO: This should be generated by TableGen.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/SPIRVBaseInfo.h"
#include "MCTargetDesc/SPIRVMCTargetDesc.h"
#include "SPIRV.h"
#include "SPIRVGlobalRegistry.h"
#include "SPIRVInstrInfo.h"
#include "SPIRVRegisterBankInfo.h"
#include "SPIRVRegisterInfo.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "spirv-isel"

using namespace llvm;
namespace CL = SPIRV::OpenCLExtInst;
namespace GL = SPIRV::GLSLExtInst;

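// An ordered list of (instruction set, extended-instruction opcode)
// candidates; selectExtInst() emits the first entry whose instruction set the
// subtarget supports. For example, a G_FMINNUM may offer
// {{OpenCL_std, CL::fmin}, {GLSL_std_450, GL::NMin}}.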
using ExtInstList =
    std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class SPIRVInstructionSelector : public InstructionSelector {
  const SPIRVSubtarget &STI;
  const SPIRVInstrInfo &TII;
  const SPIRVRegisterInfo &TRI;
  const RegisterBankInfo &RBI;
  SPIRVGlobalRegistry &GR;
  MachineRegisterInfo *MRI;
  MachineFunction *HasVRegsReset = nullptr;

  /// We need to keep track of the number we give to anonymous global values to
  /// generate the same name every time it is needed.
  mutable DenseMap<const GlobalValue *, unsigned> UnnamedGlobalIDs;

public:
  SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                           const SPIRVSubtarget &ST,
                           const RegisterBankInfo &RBI);
  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override;
  // Common selection code. Instruction-specific selection occurs in spvSelect.
  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

private:
  void resetVRegsType(MachineFunction &MF);

  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // All instruction-specific selection that didn't happen in select(). It is
  // basically a large switch/case statement delegating to all the other
  // select* methods.
  bool spvSelect(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectGlobalValue(Register ResVReg, MachineInstr &I,
                         const MachineInstr *Init = nullptr) const;

  bool selectUnOpWithSrc(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I, Register SrcReg,
                         unsigned Opcode) const;
  bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  unsigned Opcode) const;

  bool selectBitcast(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;

  bool selectLoad(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectStore(MachineInstr &I) const;

  bool selectStackSave(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectStackRestore(MachineInstr &I) const;

  bool selectMemOperation(Register ResVReg, MachineInstr &I) const;

  bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I, unsigned NewOpcode,
                       unsigned NegateOpcode = 0) const;

  bool selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectFence(MachineInstr &I) const;

  bool selectAddrSpaceCast(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectAnyOrAll(Register ResVReg, const SPIRVType *ResType,
                      MachineInstr &I, unsigned OpType) const;

  bool selectAll(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectAny(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectBitreverse(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectBuildVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;
  bool selectSplatVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectCmp(Register ResVReg, const SPIRVType *ResType,
                 unsigned comparisonOpcode, MachineInstr &I) const;
  bool selectCross(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;
  bool selectICmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectFCmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  bool selectSign(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  bool selectFloatDot(Register ResVReg, const SPIRVType *ResType,
                      MachineInstr &I) const;

  bool selectOverflowArith(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I, unsigned Opcode) const;

  bool selectIntegerDot(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  void renderImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                   int OpIdx) const;
  void renderFImm64(MachineInstrBuilder &MIB, const MachineInstr &I,
                    int OpIdx) const;

  bool selectConst(Register ResVReg, const SPIRVType *ResType, const APInt &Imm,
                   MachineInstr &I) const;

  bool selectSelect(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                    bool IsSigned) const;
  bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  bool IsSigned, unsigned Opcode) const;
  bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                 bool IsSigned) const;

  bool selectTrunc(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectIntToBool(Register IntReg, Register ResVReg, MachineInstr &I,
                       const SPIRVType *intTy, const SPIRVType *boolTy) const;

  bool selectOpUndef(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;
  bool selectFreeze(Register ResVReg, const SPIRVType *ResType,
                    MachineInstr &I) const;
  bool selectIntrinsic(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractVal(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertVal(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractElt(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertElt(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectGEP(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectFrameIndex(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectAllocaArray(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectBranch(MachineInstr &I) const;
  bool selectBranchCond(MachineInstr &I) const;

  bool selectPhi(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst,
                     GL::GLSLExtInst GLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, const ExtInstList &ExtInsts) const;

  bool selectLog10(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectSaturate(Register ResVReg, const SPIRVType *ResType,
                      MachineInstr &I) const;

  bool selectSpvThreadId(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectWaveReadLaneAt(Register ResVReg, const SPIRVType *ResType,
                            MachineInstr &I) const;

  bool selectUnmergeValues(MachineInstr &I) const;

  void selectHandleFromBinding(Register &ResVReg, const SPIRVType *ResType,
                               MachineInstr &I) const;

  // Utilities
  Register buildI32Constant(uint32_t Val, MachineInstr &I,
                            const SPIRVType *ResType = nullptr) const;

  Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildZerosValF(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildOnesVal(bool AllOnes, const SPIRVType *ResType,
                        MachineInstr &I) const;
  Register buildOnesValF(const SPIRVType *ResType, MachineInstr &I) const;

  bool wrapIntoSpecConstantOp(MachineInstr &I,
                              SmallVector<Register> &CompositeArgs) const;

  Register getUcharPtrTypeReg(MachineInstr &I,
                              SPIRV::StorageClass::StorageClass SC) const;
  MachineInstrBuilder buildSpecConstantOp(MachineInstr &I, Register Dest,
                                          Register Src, Register DestType,
                                          uint32_t Opcode) const;
  MachineInstrBuilder buildConstGenericPtr(MachineInstr &I, Register SrcPtr,
                                           SPIRVType *SrcPtrTy) const;
  Register buildPointerToResource(const SPIRVType *ResType, uint32_t Set,
                                  uint32_t Binding, uint32_t ArraySize,
                                  Register IndexReg, bool IsNonUniform,
                                  MachineIRBuilder MIRBuilder) const;
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                                                   const SPIRVSubtarget &ST,
                                                   const RegisterBankInfo &RBI)
    : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                       CodeGenCoverage *CoverageInfo,
                                       ProfileSummaryInfo *PSI,
                                       BlockFrequencyInfo *BFI) {
  MRI = &MF.getRegInfo();
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}

// Ensure that register classes correspond to pattern matching rules.
void SPIRVInstructionSelector::resetVRegsType(MachineFunction &MF) {
  if (HasVRegsReset == &MF)
    return;
  HasVRegsReset = &MF;

  MachineRegisterInfo &MRI = MF.getRegInfo();
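  // Normalize the LLTs of all virtual registers: every SPIR-V value is an ID,
  // so a single canonical 64-bit form per kind is enough for the imported
  // tblgen patterns to match.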
  for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
    Register Reg = Register::index2VirtReg(I);
    LLT RegType = MRI.getType(Reg);
    if (RegType.isScalar())
      MRI.setType(Reg, LLT::scalar(64));
    else if (RegType.isPointer())
      MRI.setType(Reg, LLT::pointer(0, 64));
    else if (RegType.isVector())
      MRI.setType(Reg, LLT::fixed_vector(2, LLT::scalar(64)));
  }
  for (const auto &MBB : MF) {
    for (const auto &MI : MBB) {
      if (MI.getOpcode() != SPIRV::ASSIGN_TYPE)
        continue;
      Register DstReg = MI.getOperand(0).getReg();
      LLT DstType = MRI.getType(DstReg);
      Register SrcReg = MI.getOperand(1).getReg();
      LLT SrcType = MRI.getType(SrcReg);
      if (DstType != SrcType)
        MRI.setType(DstReg, MRI.getType(SrcReg));

      const TargetRegisterClass *DstRC = MRI.getRegClassOrNull(DstReg);
      const TargetRegisterClass *SrcRC = MRI.getRegClassOrNull(SrcReg);
      if (DstRC != SrcRC && SrcRC)
        MRI.setRegClass(DstReg, SrcRC);
    }
  }
}

static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI);

// Defined in SPIRVLegalizerInfo.cpp.
extern bool isTypeFoldingSupported(unsigned Opcode);

bool SPIRVInstructionSelector::select(MachineInstr &I) {
  resetVRegsType(*I.getParent()->getParent());

  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  const unsigned Opcode = I.getOpcode();
  // If it's not a GMIR instruction, we've selected it already.
  if (!isPreISelGenericOpcode(Opcode)) {
    if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed any more.
      Register DstReg = I.getOperand(0).getReg();
      Register SrcReg = I.getOperand(1).getReg();
      auto *Def = MRI->getVRegDef(SrcReg);
      if (isTypeFoldingSupported(Def->getOpcode())) {
        bool Res = selectImpl(I, *CoverageInfo);
        LLVM_DEBUG({
          if (!Res && Def->getOpcode() != TargetOpcode::G_CONSTANT) {
            dbgs() << "Unexpected pattern in ASSIGN_TYPE.\nInstruction: ";
            I.print(dbgs());
          }
        });
        assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
        if (Res)
          return Res;
      }
      MRI->setRegClass(SrcReg, MRI->getRegClass(DstReg));
      MRI->replaceRegWith(SrcReg, DstReg);
      I.removeFromParent();
      return true;
    } else if (I.getNumDefs() == 1) {
      // Make all vregs 64 bits (for SPIR-V IDs).
      MRI->setType(I.getOperand(0).getReg(), LLT::scalar(64));
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
    return false;
  }

  // Common code for getting the return register and type, and for removing
  // the selected instruction from its parent, occurs here. Instr-specific
  // selection happens in spvSelect().
  bool HasDefs = I.getNumDefs() > 0;
  Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
  assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  if (spvSelect(ResVReg, ResType, I)) {
    if (HasDefs) // Make all vregs 64 bits (for SPIR-V IDs).
      for (unsigned i = 0; i < I.getNumDefs(); ++i)
        MRI->setType(I.getOperand(i).getReg(), LLT::scalar(64));
    I.removeFromParent();
    return true;
  }
  return false;
}

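// Returns true if this opcode should be handed to the tblgen-erated
// selectImpl() instead of the manual switch in spvSelect() below; G_CONSTANT
// is excluded because it is always selected manually via selectConst().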
static bool mayApplyGenericSelection(unsigned Opcode) {
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return false;
  case TargetOpcode::G_SADDO:
  case TargetOpcode::G_SSUBO:
    return true;
  }
  return isTypeFoldingSupported(Opcode);
}

bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const unsigned Opcode = I.getOpcode();
  if (mayApplyGenericSelection(Opcode))
    return selectImpl(I, *CoverageInfo);
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);
  case TargetOpcode::G_FREEZE:
    return selectFreeze(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectBuildVector(ResVReg, ResType, I);
  case TargetOpcode::G_SPLAT_VECTOR:
    return selectSplatVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMSET:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
  case TargetOpcode::G_SMIN:
    return selectExtInst(ResVReg, ResType, I, CL::s_min, GL::SMin);
  case TargetOpcode::G_UMIN:
    return selectExtInst(ResVReg, ResType, I, CL::u_min, GL::UMin);

  case TargetOpcode::G_SMAX:
    return selectExtInst(ResVReg, ResType, I, CL::s_max, GL::SMax);
  case TargetOpcode::G_UMAX:
    return selectExtInst(ResVReg, ResType, I, CL::u_max, GL::UMax);

  case TargetOpcode::G_FMA:
    return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma);

  case TargetOpcode::G_FPOW:
    return selectExtInst(ResVReg, ResType, I, CL::pow, GL::Pow);
  case TargetOpcode::G_FPOWI:
    return selectExtInst(ResVReg, ResType, I, CL::pown);

  case TargetOpcode::G_FEXP:
    return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
  case TargetOpcode::G_FEXP2:
    return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);

  case TargetOpcode::G_FLOG:
    return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
  case TargetOpcode::G_FLOG2:
    return selectExtInst(ResVReg, ResType, I, CL::log2, GL::Log2);
  case TargetOpcode::G_FLOG10:
    return selectLog10(ResVReg, ResType, I);

  case TargetOpcode::G_FABS:
    return selectExtInst(ResVReg, ResType, I, CL::fabs, GL::FAbs);
  case TargetOpcode::G_ABS:
    return selectExtInst(ResVReg, ResType, I, CL::s_abs, GL::SAbs);

  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmin, GL::NMin);
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMAXIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmax, GL::NMax);

  case TargetOpcode::G_FCOPYSIGN:
    return selectExtInst(ResVReg, ResType, I, CL::copysign);

  case TargetOpcode::G_FCEIL:
    return selectExtInst(ResVReg, ResType, I, CL::ceil, GL::Ceil);
  case TargetOpcode::G_FFLOOR:
    return selectExtInst(ResVReg, ResType, I, CL::floor, GL::Floor);

  case TargetOpcode::G_FCOS:
    return selectExtInst(ResVReg, ResType, I, CL::cos, GL::Cos);
  case TargetOpcode::G_FSIN:
    return selectExtInst(ResVReg, ResType, I, CL::sin, GL::Sin);
  case TargetOpcode::G_FTAN:
    return selectExtInst(ResVReg, ResType, I, CL::tan, GL::Tan);
  case TargetOpcode::G_FACOS:
    return selectExtInst(ResVReg, ResType, I, CL::acos, GL::Acos);
  case TargetOpcode::G_FASIN:
    return selectExtInst(ResVReg, ResType, I, CL::asin, GL::Asin);
  case TargetOpcode::G_FATAN:
    return selectExtInst(ResVReg, ResType, I, CL::atan, GL::Atan);
  case TargetOpcode::G_FATAN2:
    return selectExtInst(ResVReg, ResType, I, CL::atan2, GL::Atan2);
  case TargetOpcode::G_FCOSH:
    return selectExtInst(ResVReg, ResType, I, CL::cosh, GL::Cosh);
  case TargetOpcode::G_FSINH:
    return selectExtInst(ResVReg, ResType, I, CL::sinh, GL::Sinh);
  case TargetOpcode::G_FTANH:
    return selectExtInst(ResVReg, ResType, I, CL::tanh, GL::Tanh);

  case TargetOpcode::G_FSQRT:
    return selectExtInst(ResVReg, ResType, I, CL::sqrt, GL::Sqrt);

  case TargetOpcode::G_CTTZ:
  case TargetOpcode::G_CTTZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::ctz);
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTLZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::clz);

  case TargetOpcode::G_INTRINSIC_ROUND:
    return selectExtInst(ResVReg, ResType, I, CL::round, GL::Round);
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return selectExtInst(ResVReg, ResType, I, CL::trunc, GL::Trunc);
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FNEARBYINT:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);

  case TargetOpcode::G_SMULH:
    return selectExtInst(ResVReg, ResType, I, CL::s_mul_hi);
  case TargetOpcode::G_UMULH:
    return selectExtInst(ResVReg, ResType, I, CL::u_mul_hi);

  case TargetOpcode::G_SADDSAT:
    return selectExtInst(ResVReg, ResType, I, CL::s_add_sat);
  case TargetOpcode::G_UADDSAT:
    return selectExtInst(ResVReg, ResType, I, CL::u_add_sat);
  case TargetOpcode::G_SSUBSAT:
    return selectExtInst(ResVReg, ResType, I, CL::s_sub_sat);
  case TargetOpcode::G_USUBSAT:
    return selectExtInst(ResVReg, ResType, I, CL::u_sub_sat);

  case TargetOpcode::G_UADDO:
    return selectOverflowArith(ResVReg, ResType, I,
                               ResType->getOpcode() == SPIRV::OpTypeVector
                                   ? SPIRV::OpIAddCarryV
                                   : SPIRV::OpIAddCarryS);
  case TargetOpcode::G_USUBO:
    return selectOverflowArith(ResVReg, ResType, I,
                               ResType->getOpcode() == SPIRV::OpTypeVector
                                   ? SPIRV::OpISubBorrowV
                                   : SPIRV::OpISubBorrowS);
  case TargetOpcode::G_UMULO:
    return selectOverflowArith(ResVReg, ResType, I, SPIRV::OpUMulExtended);
  case TargetOpcode::G_SMULO:
    return selectOverflowArith(ResVReg, ResType, I, SPIRV::OpSMulExtended);

  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectBitcast(ResVReg, ResType, I);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);
  case TargetOpcode::G_PTR_ADD: {
    // Currently, we get G_PTR_ADD only applied to global variables.
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    Register GV = I.getOperand(1).getReg();
    MachineRegisterInfo::def_instr_iterator II = MRI->def_instr_begin(GV);
    (void)II;
    assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
            (*II).getOpcode() == TargetOpcode::COPY ||
            (*II).getOpcode() == SPIRV::OpVariable) &&
           isImm(I.getOperand(2), MRI));
    // It may be the initialization of a global variable.
    bool IsGVInit = false;
    for (MachineRegisterInfo::use_instr_iterator
             UseIt = MRI->use_instr_begin(I.getOperand(0).getReg()),
             UseEnd = MRI->use_instr_end();
         UseIt != UseEnd; UseIt = std::next(UseIt)) {
      if ((*UseIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
          (*UseIt).getOpcode() == SPIRV::OpVariable) {
        IsGVInit = true;
        break;
      }
    }
    MachineBasicBlock &BB = *I.getParent();
    if (!IsGVInit) {
      SPIRVType *GVType = GR.getSPIRVTypeForVReg(GV);
      SPIRVType *GVPointeeType = GR.getPointeeType(GVType);
      SPIRVType *ResPointeeType = GR.getPointeeType(ResType);
      if (GVPointeeType && ResPointeeType && GVPointeeType != ResPointeeType) {
        // Build a new virtual register that is associated with the required
        // data type.
        Register NewVReg = MRI->createGenericVirtualRegister(MRI->getType(GV));
        MRI->setRegClass(NewVReg, MRI->getRegClass(GV));
        // Having a correctly typed base, we are ready to build the actually
        // required GEP. It may not be a constant though, because all operands
        // of OpSpecConstantOp must originate from other constant
        // instructions, and only the AccessChain-named opcodes accept a
        // global OpVariable instruction. We can't use an AccessChain opcode
        // because of the type mismatch between the result and base types.
        if (!GR.isBitcastCompatible(ResType, GVType))
          report_fatal_error(
              "incompatible result and operand types in a bitcast");
        Register ResTypeReg = GR.getSPIRVTypeID(ResType);
        MachineInstrBuilder MIB =
            BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitcast))
                .addDef(NewVReg)
                .addUse(ResTypeReg)
                .addUse(GV);
        return MIB.constrainAllUses(TII, TRI, RBI) &&
               BuildMI(BB, I, I.getDebugLoc(),
                       TII.get(STI.isVulkanEnv()
                                   ? SPIRV::OpInBoundsAccessChain
                                   : SPIRV::OpInBoundsPtrAccessChain))
                   .addDef(ResVReg)
                   .addUse(ResTypeReg)
                   .addUse(NewVReg)
                   .addUse(I.getOperand(2).getReg())
                   .constrainAllUses(TII, TRI, RBI);
      } else {
        return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(
                static_cast<uint32_t>(SPIRV::Opcode::InBoundsPtrAccessChain))
            .addUse(GV)
            .addUse(I.getOperand(2).getReg())
            .constrainAllUses(TII, TRI, RBI);
      }
    }
    // It's possible to translate G_PTR_ADD into OpSpecConstantOp, e.g., to
    // initialize a global variable with a constant expression (see the test
    // case opencl/basic/progvar_prog_scope_init.ll) or for other uses that
    // require a constant expression.
    Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addImm(static_cast<uint32_t>(
                       SPIRV::Opcode::InBoundsPtrAccessChain))
                   .addUse(GV)
                   .addUse(Idx)
                   .addUse(I.getOperand(2).getReg());
    return MIB.constrainAllUses(TII, TRI, RBI);
  }

  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_ATOMICRMW_FADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT);
  case TargetOpcode::G_ATOMICRMW_FSUB:
    // Translate G_ATOMICRMW_FSUB to OpAtomicFAddEXT with negative value operand
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT,
                           SPIRV::OpFNegate);
  case TargetOpcode::G_ATOMICRMW_FMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMinEXT);
  case TargetOpcode::G_ATOMICRMW_FMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMaxEXT);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  case TargetOpcode::G_STACKSAVE:
    return selectStackSave(ResVReg, ResType, I);
  case TargetOpcode::G_STACKRESTORE:
    return selectStackRestore(I);

  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I);

  // Discard generic opcodes for intrinsics that we do not expect to represent
  // actual code after lowering, or that are not implemented but should not
  // crash when found in a user's LLVM IR input.
  case TargetOpcode::G_TRAP:
  case TargetOpcode::G_DEBUGTRAP:
  case TargetOpcode::G_UBSANTRAP:
  case TargetOpcode::DBG_LABEL:
    return true;

  default:
    return false;
  }
}

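// The two convenience overloads below forward to the list-based form of
// selectExtInst(): OpenCL-only builtins supply a single OpenCL_std candidate,
// while builtins with both OpenCL and GLSL lowerings supply both sets.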
bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst) const {
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst,
                                             GL::GLSLExtInst GLInst) const {
  ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
                          {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
  return selectExtInst(ResVReg, ResType, I, ExtInsts);
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             const ExtInstList &Insts) const {
  for (const auto &Ex : Insts) {
    SPIRV::InstructionSet::InstructionSet Set = Ex.first;
    uint32_t Opcode = Ex.second;
    if (STI.canUseExtInstSet(Set)) {
      MachineBasicBlock &BB = *I.getParent();
      auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
                     .addDef(ResVReg)
                     .addUse(GR.getSPIRVTypeID(ResType))
                     .addImm(static_cast<uint32_t>(Set))
                     .addImm(Opcode);
      const unsigned NumOps = I.getNumOperands();
      unsigned Index = 1;
      if (Index < NumOps &&
          I.getOperand(Index).getType() ==
              MachineOperand::MachineOperandType::MO_IntrinsicID)
        Index = 2;
      for (; Index < NumOps; ++Index)
        MIB.add(I.getOperand(Index));
      return MIB.constrainAllUses(TII, TRI, RBI);
    }
  }
  return false;
}

bool SPIRVInstructionSelector::selectUnOpWithSrc(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I,
                                                 Register SrcReg,
                                                 unsigned Opcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I,
                                          unsigned Opcode) const {
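  // Note: when the source operand is defined by G_GLOBAL_VALUE and Opcode is
  // a pointer/integer conversion, the conversion is emitted below as an
  // OpSpecConstantOp so that it remains a constant expression.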
  if (STI.isOpenCLEnv() && I.getOperand(1).isReg()) {
    Register SrcReg = I.getOperand(1).getReg();
    bool IsGV = false;
    for (MachineRegisterInfo::def_instr_iterator DefIt =
             MRI->def_instr_begin(SrcReg);
         DefIt != MRI->def_instr_end(); DefIt = std::next(DefIt)) {
      if ((*DefIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
        IsGV = true;
        break;
      }
    }
    if (IsGV) {
      uint32_t SpecOpcode = 0;
      switch (Opcode) {
      case SPIRV::OpConvertPtrToU:
        SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertPtrToU);
        break;
      case SPIRV::OpConvertUToPtr:
        SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertUToPtr);
        break;
      }
      if (SpecOpcode)
        return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                       TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(SpecOpcode)
            .addUse(SrcReg)
            .constrainAllUses(TII, TRI, RBI);
    }
  }
  return selectUnOpWithSrc(ResVReg, ResType, I, I.getOperand(1).getReg(),
                           Opcode);
}

bool SPIRVInstructionSelector::selectBitcast(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  Register OpReg = I.getOperand(1).getReg();
  SPIRVType *OpType = OpReg.isValid() ? GR.getSPIRVTypeForVReg(OpReg) : nullptr;
  if (!GR.isBitcastCompatible(ResType, OpType))
    report_fatal_error("incompatible result and operand types in a bitcast");
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}

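// Translate MachineMemOperand attributes into the SPIR-V MemoryOperand
// bitmask (Volatile, Nontemporal, Aligned) that trails OpLoad, OpStore, and
// OpCopyMemorySized.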
static void addMemoryOperands(MachineMemOperand *MemOp,
                              MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    MIB.addImm(SpvMemOp);
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
      MIB.addImm(MemOp->getAlign().value());
  }
}

static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (Flags & MachineMemOperand::Flags::MOVolatile)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (Flags & MachineMemOperand::Flags::MONonTemporal)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
    MIB.addImm(SpvMemOp);
}

bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
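  // For the intrinsic form of the load the intrinsic ID occupies an extra
  // operand slot, so the pointer operand is shifted by one.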
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(Ptr);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
                 .addUse(Ptr)
                 .addUse(StoreVal);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackSave(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stacksave intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSaveMemoryINTEL))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stackrestore intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  if (!I.getOperand(0).isReg())
    return false;
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpRestoreMemoryINTEL))
      .addUse(I.getOperand(0).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Register SrcReg = I.getOperand(1).getReg();
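  // G_MEMSET has no direct SPIR-V counterpart: materialize the fill pattern
  // as a constant UniformConstant array (e.g., memset(p, 3, 4) yields a
  // 4-element i8 array of 3s) and lower the memset to an OpCopyMemorySized
  // from that array.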
  if (I.getOpcode() == TargetOpcode::G_MEMSET) {
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI);
    unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI);
    SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num, I, TII);
    Register Const = GR.getOrCreateConstIntArray(Val, Num, I, ArrTy, TII);
    SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
        ArrTy, I, TII, SPIRV::StorageClass::UniformConstant);
    // TODO: check if we have such GV, add init, use buildGlobalVariable.
    Function &CurFunction = GR.CurMF->getFunction();
    Type *LLVMArrTy =
        ArrayType::get(IntegerType::get(CurFunction.getContext(), 8), Num);
    // Module takes ownership of the global var.
    GlobalVariable *GV = new GlobalVariable(*CurFunction.getParent(), LLVMArrTy,
                                            true, GlobalValue::InternalLinkage,
                                            Constant::getNullValue(LLVMArrTy));
    Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
    GR.add(GV, GR.CurMF, VarReg);

    buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {});
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
        .addDef(VarReg)
        .addUse(GR.getSPIRVTypeID(VarTy))
        .addImm(SPIRV::StorageClass::UniformConstant)
        .addUse(Const)
        .constrainAllUses(TII, TRI, RBI);
    SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
        ValTy, I, TII, SPIRV::StorageClass::UniformConstant);
    SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
    selectUnOpWithSrc(SrcReg, SourceTy, I, VarReg, SPIRV::OpBitcast);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addUse(I.getOperand(0).getReg())
                 .addUse(SrcReg)
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
    addMemoryOperands(*I.memoperands_begin(), MIB);
  bool Result = MIB.constrainAllUses(TII, TRI, RBI);
  if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg())
    BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
        .addUse(MIB->getOperand(0).getReg());
  return Result;
}

bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode,
                                               unsigned NegateOpcode) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope = static_cast<uint32_t>(getMemScope(
      GR.CurMF->getFunction().getContext(), MemOp->getSyncScopeID()));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(1).getReg();
  // TODO: Changed as it's implemented in the translator. See test/atomicrmw.ll
  // auto ScSem =
  // getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr));
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);

  bool Result = false;
  Register ValueReg = I.getOperand(2).getReg();
  if (NegateOpcode != 0) {
    // A translation with a negated value operand was requested.
    Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
    Result |= selectUnOpWithSrc(TmpReg, ResType, I, ValueReg, NegateOpcode);
    ValueReg = TmpReg;
  }

  Result |= BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(Ptr)
                .addUse(ScopeReg)
                .addUse(MemSemReg)
                .addUse(ValueReg)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}

bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
  unsigned ArgI = I.getNumOperands() - 1;
  Register SrcReg =
      I.getOperand(ArgI).isReg() ? I.getOperand(ArgI).getReg() : Register(0);
  SPIRVType *DefType =
      SrcReg.isValid() ? GR.getSPIRVTypeForVReg(SrcReg) : nullptr;
  if (!DefType || DefType->getOpcode() != SPIRV::OpTypeVector)
    report_fatal_error(
        "cannot select G_UNMERGE_VALUES with a non-vector argument");

  SPIRVType *ScalarType =
      GR.getSPIRVTypeForVReg(DefType->getOperand(1).getReg());
  MachineBasicBlock &BB = *I.getParent();
  bool Res = false;
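  // Extract each scalar element of the source vector with OpCompositeExtract,
  // one extract per G_UNMERGE_VALUES definition.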
  for (unsigned i = 0; i < I.getNumDefs(); ++i) {
    Register ResVReg = I.getOperand(i).getReg();
    SPIRVType *ResType = GR.getSPIRVTypeForVReg(ResVReg);
    if (!ResType) {
      // There were no "assign type" actions; fix this now.
      ResType = ScalarType;
      MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
      MRI->setType(ResVReg, LLT::scalar(GR.getScalarOrVectorBitWidth(ResType)));
      GR.assignSPIRVTypeToVReg(ResType, ResVReg, *GR.CurMF);
    }
    auto MIB =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(SrcReg)
            .addImm(static_cast<int64_t>(i));
    Res |= MIB.constrainAllUses(TII, TRI, RBI);
  }
  return Res;
}

bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
  AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem, I);
  SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
  uint32_t Scope = static_cast<uint32_t>(
      getMemScope(GR.CurMF->getFunction().getContext(), Ord));
  Register ScopeReg = buildI32Constant(Scope, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectOverflowArith(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I,
                                                   unsigned Opcode) const {
  Type *ResTy = nullptr;
  StringRef ResName;
  if (!GR.findValueAttrs(&I, ResTy, ResName))
    report_fatal_error(
        "Not enough info to select the arithmetic with overflow instruction");
  if (!ResTy || !ResTy->isStructTy())
    report_fatal_error("Expect struct type result for the arithmetic "
                       "with overflow instruction");
  // "Result Type must be from OpTypeStruct. The struct must have two members,
  // and the two members must be the same type."
  Type *ResElemTy = cast<StructType>(ResTy)->getElementType(0);
  ResTy = StructType::create(SmallVector<Type *, 2>{ResElemTy, ResElemTy});
  // Build SPIR-V types and constant(s) if needed.
  MachineIRBuilder MIRBuilder(I);
  SPIRVType *StructType = GR.getOrCreateSPIRVType(
      ResTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
  assert(I.getNumDefs() > 1 && "Not enough operands");
  SPIRVType *BoolType = GR.getOrCreateSPIRVBoolType(I, TII);
  unsigned N = GR.getScalarOrVectorComponentCount(ResType);
  if (N > 1)
    BoolType = GR.getOrCreateSPIRVVectorType(BoolType, N, I, TII);
  Register BoolTypeReg = GR.getSPIRVTypeID(BoolType);
  Register ZeroReg = buildZerosVal(ResType, I);
  // A new virtual register to store the result struct.
  Register StructVReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
  MRI->setRegClass(StructVReg, &SPIRV::IDRegClass);
  // Build the result name if needed.
  if (ResName.size() > 0)
    buildOpName(StructVReg, ResName, MIRBuilder);
  // Build the arithmetic with overflow instruction.
  MachineBasicBlock &BB = *I.getParent();
  auto MIB =
      BuildMI(BB, MIRBuilder.getInsertPt(), I.getDebugLoc(), TII.get(Opcode))
          .addDef(StructVReg)
          .addUse(GR.getSPIRVTypeID(StructType));
  for (unsigned i = I.getNumDefs(); i < I.getNumOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  bool Status = MIB.constrainAllUses(TII, TRI, RBI);
  // Build instructions to extract fields of the instruction's result.
  // A new virtual register to store the higher part of the result struct.
  Register HigherVReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
  MRI->setRegClass(HigherVReg, &SPIRV::iIDRegClass);
  for (unsigned i = 0; i < I.getNumDefs(); ++i) {
    auto MIB =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
            .addDef(i == 1 ? HigherVReg : I.getOperand(i).getReg())
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(StructVReg)
            .addImm(i);
    Status &= MIB.constrainAllUses(TII, TRI, RBI);
  }
  // Build boolean value from the higher part.
  Status &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
                .addDef(I.getOperand(1).getReg())
                .addUse(BoolTypeReg)
                .addUse(HigherVReg)
                .addUse(ZeroReg)
                .constrainAllUses(TII, TRI, RBI);
  return Status;
}

bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  Register ScopeReg;
  Register MemSemEqReg;
  Register MemSemNeqReg;
  Register Ptr = I.getOperand(2).getReg();
  if (!isa<GIntrinsic>(I)) {
    assert(I.hasOneMemOperand());
    const MachineMemOperand *MemOp = *I.memoperands_begin();
    unsigned Scope = static_cast<uint32_t>(getMemScope(
        GR.CurMF->getFunction().getContext(), MemOp->getSyncScopeID()));
    ScopeReg = buildI32Constant(Scope, I);

    unsigned ScSem = static_cast<uint32_t>(
        getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
    AtomicOrdering AO = MemOp->getSuccessOrdering();
    unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
    MemSemEqReg = buildI32Constant(MemSemEq, I);
    AtomicOrdering FO = MemOp->getFailureOrdering();
    unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
    MemSemNeqReg =
        MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
  } else {
    ScopeReg = I.getOperand(5).getReg();
    MemSemEqReg = I.getOperand(6).getReg();
    MemSemNeqReg = I.getOperand(7).getReg();
  }

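  // G_ATOMIC_CMPXCHG produces a {value, success} pair here, while SPIR-V's
  // OpAtomicCompareExchange returns only the loaded value; rebuild the pair
  // with an OpIEqual check and two OpCompositeInserts.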
1201   Register Cmp = I.getOperand(3).getReg();
1202   Register Val = I.getOperand(4).getReg();
1203   SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
1204   Register ACmpRes = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1205   const DebugLoc &DL = I.getDebugLoc();
1206   bool Result =
1207       BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
1208           .addDef(ACmpRes)
1209           .addUse(GR.getSPIRVTypeID(SpvValTy))
1210           .addUse(Ptr)
1211           .addUse(ScopeReg)
1212           .addUse(MemSemEqReg)
1213           .addUse(MemSemNeqReg)
1214           .addUse(Val)
1215           .addUse(Cmp)
1216           .constrainAllUses(TII, TRI, RBI);
1217   Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1218   SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
1219   Result &= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
1220                 .addDef(CmpSuccReg)
1221                 .addUse(GR.getSPIRVTypeID(BoolTy))
1222                 .addUse(ACmpRes)
1223                 .addUse(Cmp)
1224                 .constrainAllUses(TII, TRI, RBI);
1225   Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1226   Result &= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
1227                 .addDef(TmpReg)
1228                 .addUse(GR.getSPIRVTypeID(ResType))
1229                 .addUse(ACmpRes)
1230                 .addUse(GR.getOrCreateUndef(I, ResType, TII))
1231                 .addImm(0)
1232                 .constrainAllUses(TII, TRI, RBI);
1233   Result &= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
1234                 .addDef(ResVReg)
1235                 .addUse(GR.getSPIRVTypeID(ResType))
1236                 .addUse(CmpSuccReg)
1237                 .addUse(TmpReg)
1238                 .addImm(1)
1239                 .constrainAllUses(TII, TRI, RBI);
1240   return Result;
1241 }
1242 
1243 static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
1244   switch (SC) {
1245   case SPIRV::StorageClass::Workgroup:
1246   case SPIRV::StorageClass::CrossWorkgroup:
1247   case SPIRV::StorageClass::Function:
1248     return true;
1249   default:
1250     return false;
1251   }
1252 }
1253 
1254 static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC) {
1255   switch (SC) {
1256   case SPIRV::StorageClass::DeviceOnlyINTEL:
1257   case SPIRV::StorageClass::HostOnlyINTEL:
1258     return true;
1259   default:
1260     return false;
1261   }
1262 }
1263 
1264 // Returns true if ResVReg is referred to only from global vars and OpNames.
1265 static bool isASCastInGVar(MachineRegisterInfo *MRI, Register ResVReg) {
1266   bool IsGRef = false;
1267   bool IsAllowedRefs =
1268       std::all_of(MRI->use_instr_begin(ResVReg), MRI->use_instr_end(),
1269                   [&IsGRef](auto const &It) {
1270                     unsigned Opcode = It.getOpcode();
1271                     if (Opcode == SPIRV::OpConstantComposite ||
1272                         Opcode == SPIRV::OpVariable ||
1273                         isSpvIntrinsic(It, Intrinsic::spv_init_global))
1274                       return IsGRef = true;
1275                     return Opcode == SPIRV::OpName;
1276                   });
1277   return IsAllowedRefs && IsGRef;
1278 }
1279 
1280 Register SPIRVInstructionSelector::getUcharPtrTypeReg(
1281     MachineInstr &I, SPIRV::StorageClass::StorageClass SC) const {
1282   return GR.getSPIRVTypeID(GR.getOrCreateSPIRVPointerType(
1283       GR.getOrCreateSPIRVIntegerType(8, I, TII), I, TII, SC));
1284 }
1285 
1286 MachineInstrBuilder
1287 SPIRVInstructionSelector::buildSpecConstantOp(MachineInstr &I, Register Dest,
1288                                               Register Src, Register DestType,
1289                                               uint32_t Opcode) const {
1290   return BuildMI(*I.getParent(), I, I.getDebugLoc(),
1291                  TII.get(SPIRV::OpSpecConstantOp))
1292       .addDef(Dest)
1293       .addUse(DestType)
1294       .addImm(Opcode)
1295       .addUse(Src);
1296 }
1297 
1298 MachineInstrBuilder
1299 SPIRVInstructionSelector::buildConstGenericPtr(MachineInstr &I, Register SrcPtr,
1300                                                SPIRVType *SrcPtrTy) const {
1301   SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
1302       GR.getPointeeType(SrcPtrTy), I, TII, SPIRV::StorageClass::Generic);
1303   Register Tmp = MRI->createVirtualRegister(&SPIRV::pIDRegClass);
1304   MRI->setType(Tmp, LLT::pointer(storageClassToAddressSpace(
1305                                      SPIRV::StorageClass::Generic),
1306                                  GR.getPointerSize()));
1307   MachineFunction *MF = I.getParent()->getParent();
1308   GR.assignSPIRVTypeToVReg(GenericPtrTy, Tmp, *MF);
1309   MachineInstrBuilder MIB = buildSpecConstantOp(
1310       I, Tmp, SrcPtr, GR.getSPIRVTypeID(GenericPtrTy),
1311       static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric));
1312   GR.add(MIB.getInstr(), MF, Tmp);
1313   return MIB;
1314 }
1315 
1316 // In SPIR-V, address space casts are only allowed to and from the Generic
1317 // storage class, and only Workgroup, CrossWorkgroup, or Function pointers may
1318 // be cast to and from Generic pointers. As such, we can convert e.g. from
1319 // Workgroup to Function by going through a Generic pointer as an intermediary.
1320 // All other combinations can only be done by a bitcast and are probably unsafe.
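     // For example, a Workgroup-to-Function cast lowers to the following pair of
     // instructions (a pseudo-SPIR-V sketch; the %ids are illustrative only):
     //   %gen = OpPtrCastToGeneric %ptr_Generic_ty  %src  ; Workgroup -> Generic
     //   %dst = OpGenericCastToPtr %ptr_Function_ty %gen  ; Generic -> Function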
1321 bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
1322                                                    const SPIRVType *ResType,
1323                                                    MachineInstr &I) const {
1324   MachineBasicBlock &BB = *I.getParent();
1325   const DebugLoc &DL = I.getDebugLoc();
1326 
1327   Register SrcPtr = I.getOperand(1).getReg();
1328   SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
1329 
1330   // Don't generate a cast for a null value that may be represented as OpTypeInt.
1331   if (SrcPtrTy->getOpcode() != SPIRV::OpTypePointer ||
1332       ResType->getOpcode() != SPIRV::OpTypePointer)
1333     return BuildMI(BB, I, DL, TII.get(TargetOpcode::COPY))
1334         .addDef(ResVReg)
1335         .addUse(SrcPtr)
1336         .constrainAllUses(TII, TRI, RBI);
1337 
1338   SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtrTy);
1339   SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResType);
1340 
1341   if (isASCastInGVar(MRI, ResVReg)) {
1342     // AddrSpaceCast uses within OpVariable and OpConstantComposite instructions
1343     // are expressed as OpSpecConstantOp operations carrying the cast opcode.
1344     // TODO: maybe insert a check whether the Kernel capability was declared and
1345     // so PtrCastToGeneric/GenericCastToPtr are available.
1346     unsigned SpecOpcode =
1347         DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC)
1348             ? static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric)
1349             : (SrcSC == SPIRV::StorageClass::Generic &&
1350                        isGenericCastablePtr(DstSC)
1351                    ? static_cast<uint32_t>(SPIRV::Opcode::GenericCastToPtr)
1352                    : 0);
1353     // TODO: OpConstantComposite expects i8*, so we are forced to drop the
1354     // correct ResType and use a generic i8* instead. Maybe this should be
1355     // addressed in the emit-intrinsic step to infer a correct
1356     // OpConstantComposite type.
1357     if (SpecOpcode) {
1358       return buildSpecConstantOp(I, ResVReg, SrcPtr,
1359                                  getUcharPtrTypeReg(I, DstSC), SpecOpcode)
1360           .constrainAllUses(TII, TRI, RBI);
1361     } else if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
1362       MachineInstrBuilder MIB = buildConstGenericPtr(I, SrcPtr, SrcPtrTy);
1363       return MIB.constrainAllUses(TII, TRI, RBI) &&
1364              buildSpecConstantOp(
1365                  I, ResVReg, MIB->getOperand(0).getReg(),
1366                  getUcharPtrTypeReg(I, DstSC),
1367                  static_cast<uint32_t>(SPIRV::Opcode::GenericCastToPtr))
1368                  .constrainAllUses(TII, TRI, RBI);
1369     }
1370   }
1371 
1372   // Don't generate a cast between identical storage classes.
1373   if (SrcSC == DstSC)
1374     return BuildMI(BB, I, DL, TII.get(TargetOpcode::COPY))
1375         .addDef(ResVReg)
1376         .addUse(SrcPtr)
1377         .constrainAllUses(TII, TRI, RBI);
1378 
1379   // Casting from an eligible pointer to Generic.
1380   if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
1381     return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
1382   // Casting from Generic to an eligible pointer.
1383   if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
1384     return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
1385   // Casting between 2 eligible pointers using Generic as an intermediary.
1386   if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
1387     Register Tmp = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1388     SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
1389         GR.getPointeeType(SrcPtrTy), I, TII, SPIRV::StorageClass::Generic);
1390     bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
1391                        .addDef(Tmp)
1392                        .addUse(GR.getSPIRVTypeID(GenericPtrTy))
1393                        .addUse(SrcPtr)
1394                        .constrainAllUses(TII, TRI, RBI);
1395     return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
1396                           .addDef(ResVReg)
1397                           .addUse(GR.getSPIRVTypeID(ResType))
1398                           .addUse(Tmp)
1399                           .constrainAllUses(TII, TRI, RBI);
1400   }
1401 
1402   // Check if instructions from the SPV_INTEL_usm_storage_classes extension may
1403   // be applied.
1404   if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::CrossWorkgroup)
1405     return selectUnOp(ResVReg, ResType, I,
1406                       SPIRV::OpPtrCastToCrossWorkgroupINTEL);
1407   if (SrcSC == SPIRV::StorageClass::CrossWorkgroup && isUSMStorageClass(DstSC))
1408     return selectUnOp(ResVReg, ResType, I,
1409                       SPIRV::OpCrossWorkgroupCastToPtrINTEL);
1410   if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::Generic)
1411     return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
1412   if (SrcSC == SPIRV::StorageClass::Generic && isUSMStorageClass(DstSC))
1413     return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
1414 
1415   // A bitcast between pointers requires that their address spaces match.
1416   return false;
1417 }
1418 
1419 static unsigned getFCmpOpcode(unsigned PredNum) {
1420   auto Pred = static_cast<CmpInst::Predicate>(PredNum);
1421   switch (Pred) {
1422   case CmpInst::FCMP_OEQ:
1423     return SPIRV::OpFOrdEqual;
1424   case CmpInst::FCMP_OGE:
1425     return SPIRV::OpFOrdGreaterThanEqual;
1426   case CmpInst::FCMP_OGT:
1427     return SPIRV::OpFOrdGreaterThan;
1428   case CmpInst::FCMP_OLE:
1429     return SPIRV::OpFOrdLessThanEqual;
1430   case CmpInst::FCMP_OLT:
1431     return SPIRV::OpFOrdLessThan;
1432   case CmpInst::FCMP_ONE:
1433     return SPIRV::OpFOrdNotEqual;
1434   case CmpInst::FCMP_ORD:
1435     return SPIRV::OpOrdered;
1436   case CmpInst::FCMP_UEQ:
1437     return SPIRV::OpFUnordEqual;
1438   case CmpInst::FCMP_UGE:
1439     return SPIRV::OpFUnordGreaterThanEqual;
1440   case CmpInst::FCMP_UGT:
1441     return SPIRV::OpFUnordGreaterThan;
1442   case CmpInst::FCMP_ULE:
1443     return SPIRV::OpFUnordLessThanEqual;
1444   case CmpInst::FCMP_ULT:
1445     return SPIRV::OpFUnordLessThan;
1446   case CmpInst::FCMP_UNE:
1447     return SPIRV::OpFUnordNotEqual;
1448   case CmpInst::FCMP_UNO:
1449     return SPIRV::OpUnordered;
1450   default:
1451     llvm_unreachable("Unknown predicate type for FCmp");
1452   }
1453 }
1454 
1455 static unsigned getICmpOpcode(unsigned PredNum) {
1456   auto Pred = static_cast<CmpInst::Predicate>(PredNum);
1457   switch (Pred) {
1458   case CmpInst::ICMP_EQ:
1459     return SPIRV::OpIEqual;
1460   case CmpInst::ICMP_NE:
1461     return SPIRV::OpINotEqual;
1462   case CmpInst::ICMP_SGE:
1463     return SPIRV::OpSGreaterThanEqual;
1464   case CmpInst::ICMP_SGT:
1465     return SPIRV::OpSGreaterThan;
1466   case CmpInst::ICMP_SLE:
1467     return SPIRV::OpSLessThanEqual;
1468   case CmpInst::ICMP_SLT:
1469     return SPIRV::OpSLessThan;
1470   case CmpInst::ICMP_UGE:
1471     return SPIRV::OpUGreaterThanEqual;
1472   case CmpInst::ICMP_UGT:
1473     return SPIRV::OpUGreaterThan;
1474   case CmpInst::ICMP_ULE:
1475     return SPIRV::OpULessThanEqual;
1476   case CmpInst::ICMP_ULT:
1477     return SPIRV::OpULessThan;
1478   default:
1479     llvm_unreachable("Unknown predicate type for ICmp");
1480   }
1481 }
1482 
1483 static unsigned getPtrCmpOpcode(unsigned Pred) {
1484   switch (static_cast<CmpInst::Predicate>(Pred)) {
1485   case CmpInst::ICMP_EQ:
1486     return SPIRV::OpPtrEqual;
1487   case CmpInst::ICMP_NE:
1488     return SPIRV::OpPtrNotEqual;
1489   default:
1490     llvm_unreachable("Unknown predicate type for pointer comparison");
1491   }
1492 }
1493 
1494 // Return the logical operation, or abort if none exists.
1495 static unsigned getBoolCmpOpcode(unsigned PredNum) {
1496   auto Pred = static_cast<CmpInst::Predicate>(PredNum);
1497   switch (Pred) {
1498   case CmpInst::ICMP_EQ:
1499     return SPIRV::OpLogicalEqual;
1500   case CmpInst::ICMP_NE:
1501     return SPIRV::OpLogicalNotEqual;
1502   default:
1503     llvm_unreachable("Unknown predicate type for Bool comparison");
1504   }
1505 }
1506 
1507 static APFloat getZeroFP(const Type *LLVMFloatTy) {
1508   if (!LLVMFloatTy)
1509     return APFloat::getZero(APFloat::IEEEsingle());
1510   switch (LLVMFloatTy->getScalarType()->getTypeID()) {
1511   case Type::HalfTyID:
1512     return APFloat::getZero(APFloat::IEEEhalf());
1513   default:
1514   case Type::FloatTyID:
1515     return APFloat::getZero(APFloat::IEEEsingle());
1516   case Type::DoubleTyID:
1517     return APFloat::getZero(APFloat::IEEEdouble());
1518   }
1519 }
1520 
1521 static APFloat getOneFP(const Type *LLVMFloatTy) {
1522   if (!LLVMFloatTy)
1523     return APFloat::getOne(APFloat::IEEEsingle());
1524   switch (LLVMFloatTy->getScalarType()->getTypeID()) {
1525   case Type::HalfTyID:
1526     return APFloat::getOne(APFloat::IEEEhalf());
1527   default:
1528   case Type::FloatTyID:
1529     return APFloat::getOne(APFloat::IEEEsingle());
1530   case Type::DoubleTyID:
1531     return APFloat::getOne(APFloat::IEEEdouble());
1532   }
1533 }
1534 
1535 bool SPIRVInstructionSelector::selectAnyOrAll(Register ResVReg,
1536                                               const SPIRVType *ResType,
1537                                               MachineInstr &I,
1538                                               unsigned OpAnyOrAll) const {
1539   assert(I.getNumOperands() == 3);
1540   assert(I.getOperand(2).isReg());
1541   MachineBasicBlock &BB = *I.getParent();
1542   Register InputRegister = I.getOperand(2).getReg();
1543   SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);
1544 
1545   if (!InputType)
1546     report_fatal_error("Input Type could not be determined.");
1547 
1548   bool IsBoolTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeBool);
1549   bool IsVectorTy = InputType->getOpcode() == SPIRV::OpTypeVector;
1550   if (IsBoolTy && !IsVectorTy) {
1551     assert(ResVReg == I.getOperand(0).getReg());
1552     return BuildMI(*I.getParent(), I, I.getDebugLoc(),
1553                    TII.get(TargetOpcode::COPY))
1554         .addDef(ResVReg)
1555         .addUse(InputRegister)
1556         .constrainAllUses(TII, TRI, RBI);
1557   }
1558 
1559   bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
1560   unsigned SpirvNotEqualId =
1561       IsFloatTy ? SPIRV::OpFOrdNotEqual : SPIRV::OpINotEqual;
1562   SPIRVType *SpvBoolScalarTy = GR.getOrCreateSPIRVBoolType(I, TII);
1563   SPIRVType *SpvBoolTy = SpvBoolScalarTy;
1564   Register NotEqualReg = ResVReg;
1565 
1566   if (IsVectorTy) {
1567     NotEqualReg = IsBoolTy ? InputRegister
1568                            : MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1569     const unsigned NumElts = InputType->getOperand(2).getImm();
1570     SpvBoolTy = GR.getOrCreateSPIRVVectorType(SpvBoolTy, NumElts, I, TII);
1571   }
1572 
1573   if (!IsBoolTy) {
1574     Register ConstZeroReg =
1575         IsFloatTy ? buildZerosValF(InputType, I) : buildZerosVal(InputType, I);
1576 
1577     BuildMI(BB, I, I.getDebugLoc(), TII.get(SpirvNotEqualId))
1578         .addDef(NotEqualReg)
1579         .addUse(GR.getSPIRVTypeID(SpvBoolTy))
1580         .addUse(InputRegister)
1581         .addUse(ConstZeroReg)
1582         .constrainAllUses(TII, TRI, RBI);
1583   }
1584 
1585   if (!IsVectorTy)
1586     return true;
1587 
1588   return BuildMI(BB, I, I.getDebugLoc(), TII.get(OpAnyOrAll))
1589       .addDef(ResVReg)
1590       .addUse(GR.getSPIRVTypeID(SpvBoolScalarTy))
1591       .addUse(NotEqualReg)
1592       .constrainAllUses(TII, TRI, RBI);
1593 }
1594 
1595 bool SPIRVInstructionSelector::selectAll(Register ResVReg,
1596                                          const SPIRVType *ResType,
1597                                          MachineInstr &I) const {
1598   return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAll);
1599 }
1600 
1601 bool SPIRVInstructionSelector::selectAny(Register ResVReg,
1602                                          const SPIRVType *ResType,
1603                                          MachineInstr &I) const {
1604   return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAny);
1605 }
1606 
1607 // Select the OpDot instruction for the given floating-point dot product.
1608 bool SPIRVInstructionSelector::selectFloatDot(Register ResVReg,
1609                                               const SPIRVType *ResType,
1610                                               MachineInstr &I) const {
1611   assert(I.getNumOperands() == 4);
1612   assert(I.getOperand(2).isReg());
1613   assert(I.getOperand(3).isReg());
1614 
1615   [[maybe_unused]] SPIRVType *VecType =
1616       GR.getSPIRVTypeForVReg(I.getOperand(2).getReg());
1617 
1618   assert(VecType->getOpcode() == SPIRV::OpTypeVector &&
1619          GR.getScalarOrVectorComponentCount(VecType) > 1 &&
1620          "dot product requires a vector of at least 2 components");
1621 
1622   [[maybe_unused]] SPIRVType *EltType =
1623       GR.getSPIRVTypeForVReg(VecType->getOperand(1).getReg());
1624 
1625   assert(EltType->getOpcode() == SPIRV::OpTypeFloat);
1626 
1627   MachineBasicBlock &BB = *I.getParent();
1628   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpDot))
1629       .addDef(ResVReg)
1630       .addUse(GR.getSPIRVTypeID(ResType))
1631       .addUse(I.getOperand(2).getReg())
1632       .addUse(I.getOperand(3).getReg())
1633       .constrainAllUses(TII, TRI, RBI);
1634 }
1635 
1636 // SPIR-V before version 1.6 has no integer dot-product instruction, so we
1637 // expand it by multiplying the vectors elementwise and summing the products.
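     // For example, a 3-component integer dot product expands roughly to the
     // following (pseudo-SPIR-V; the %ids are illustrative only):
     //   %mul = OpIMulV %vec_ty %a %b
     //   %e0  = OpCompositeExtract %int_ty %mul 0
     //   %e1  = OpCompositeExtract %int_ty %mul 1
     //   %s0  = OpIAddS %int_ty %e0 %e1
     //   %e2  = OpCompositeExtract %int_ty %mul 2
     //   %res = OpIAddS %int_ty %s0 %e2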
1638 bool SPIRVInstructionSelector::selectIntegerDot(Register ResVReg,
1639                                                 const SPIRVType *ResType,
1640                                                 MachineInstr &I) const {
1641   assert(I.getNumOperands() == 4);
1642   assert(I.getOperand(2).isReg());
1643   assert(I.getOperand(3).isReg());
1644   MachineBasicBlock &BB = *I.getParent();
1645 
1646   // Multiply the vectors, then sum the results
1647   Register Vec0 = I.getOperand(2).getReg();
1648   Register Vec1 = I.getOperand(3).getReg();
1649   Register TmpVec = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1650   SPIRVType *VecType = GR.getSPIRVTypeForVReg(Vec0);
1651 
1652   bool Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIMulV))
1653                     .addDef(TmpVec)
1654                     .addUse(GR.getSPIRVTypeID(VecType))
1655                     .addUse(Vec0)
1656                     .addUse(Vec1)
1657                     .constrainAllUses(TII, TRI, RBI);
1658 
1659   assert(VecType->getOpcode() == SPIRV::OpTypeVector &&
1660          GR.getScalarOrVectorComponentCount(VecType) > 1 &&
1661          "dot product requires a vector of at least 2 components");
1662 
1663   Register Res = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1664   Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
1665                 .addDef(Res)
1666                 .addUse(GR.getSPIRVTypeID(ResType))
1667                 .addUse(TmpVec)
1668                 .addImm(0)
1669                 .constrainAllUses(TII, TRI, RBI);
1670 
1671   for (unsigned i = 1; i < GR.getScalarOrVectorComponentCount(VecType); i++) {
1672     Register Elt = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1673 
1674     Result &=
1675         BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
1676             .addDef(Elt)
1677             .addUse(GR.getSPIRVTypeID(ResType))
1678             .addUse(TmpVec)
1679             .addImm(i)
1680             .constrainAllUses(TII, TRI, RBI);
1681 
1682     Register Sum = i < GR.getScalarOrVectorComponentCount(VecType) - 1
1683                        ? MRI->createVirtualRegister(&SPIRV::IDRegClass)
1684                        : ResVReg;
1685 
1686     Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIAddS))
1687                   .addDef(Sum)
1688                   .addUse(GR.getSPIRVTypeID(ResType))
1689                   .addUse(Res)
1690                   .addUse(Elt)
1691                   .constrainAllUses(TII, TRI, RBI);
1692     Res = Sum;
1693   }
1694 
1695   return Result;
1696 }
1697 
1698 /// Transform saturate(x) to clamp(x, 0.0f, 1.0f) as SPIRV
1699 /// does not have a saturate builtin.
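     /// The selected form is roughly (pseudo-SPIR-V; the %ids are illustrative
     /// only):
     ///   %res = OpExtInst %ty %glsl_std_450 FClamp %x %zero %one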
1700 bool SPIRVInstructionSelector::selectSaturate(Register ResVReg,
1701                                               const SPIRVType *ResType,
1702                                               MachineInstr &I) const {
1703   assert(I.getNumOperands() == 3);
1704   assert(I.getOperand(2).isReg());
1705   MachineBasicBlock &BB = *I.getParent();
1706   Register VZero = buildZerosValF(ResType, I);
1707   Register VOne = buildOnesValF(ResType, I);
1708 
1709   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
1710       .addDef(ResVReg)
1711       .addUse(GR.getSPIRVTypeID(ResType))
1712       .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
1713       .addImm(GL::FClamp)
1714       .addUse(I.getOperand(2).getReg())
1715       .addUse(VZero)
1716       .addUse(VOne)
1717       .constrainAllUses(TII, TRI, RBI);
1718 }
1719 
1720 bool SPIRVInstructionSelector::selectSign(Register ResVReg,
1721                                           const SPIRVType *ResType,
1722                                           MachineInstr &I) const {
1723   assert(I.getNumOperands() == 3);
1724   assert(I.getOperand(2).isReg());
1725   MachineBasicBlock &BB = *I.getParent();
1726   Register InputRegister = I.getOperand(2).getReg();
1727   SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);
1728   auto &DL = I.getDebugLoc();
1729 
1730   if (!InputType)
1731     report_fatal_error("Input Type could not be determined.");
1732 
1733   bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
1734 
1735   unsigned SignBitWidth = GR.getScalarOrVectorBitWidth(InputType);
1736   unsigned ResBitWidth = GR.getScalarOrVectorBitWidth(ResType);
1737 
1738   bool NeedsConversion = IsFloatTy || SignBitWidth != ResBitWidth;
1739 
1740   auto SignOpcode = IsFloatTy ? GL::FSign : GL::SSign;
1741   Register SignReg = NeedsConversion
1742                          ? MRI->createVirtualRegister(&SPIRV::IDRegClass)
1743                          : ResVReg;
1744 
1745   bool Result =
1746       BuildMI(BB, I, DL, TII.get(SPIRV::OpExtInst))
1747           .addDef(SignReg)
1748           .addUse(GR.getSPIRVTypeID(InputType))
1749           .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
1750           .addImm(SignOpcode)
1751           .addUse(InputRegister)
1752           .constrainAllUses(TII, TRI, RBI);
1753 
1754   if (NeedsConversion) {
1755     auto ConvertOpcode = IsFloatTy ? SPIRV::OpConvertFToS : SPIRV::OpSConvert;
1756     Result &= BuildMI(*I.getParent(), I, DL, TII.get(ConvertOpcode))
1757                   .addDef(ResVReg)
1758                   .addUse(GR.getSPIRVTypeID(ResType))
1759                   .addUse(SignReg)
1760                   .constrainAllUses(TII, TRI, RBI);
1761   }
1762 
1763   return Result;
1764 }
1765 
1766 bool SPIRVInstructionSelector::selectWaveReadLaneAt(Register ResVReg,
1767                                                     const SPIRVType *ResType,
1768                                                     MachineInstr &I) const {
1769   assert(I.getNumOperands() == 4);
1770   assert(I.getOperand(2).isReg());
1771   assert(I.getOperand(3).isReg());
1772   MachineBasicBlock &BB = *I.getParent();
1773 
1774   // IntTy defines the execution scope; the constant 3 is the SPIR-V
1775   // Scope::Subgroup value, denoting a cross-lane interaction within a subgroup.
1776   SPIRVType *IntTy = GR.getOrCreateSPIRVIntegerType(32, I, TII);
1777   return BuildMI(BB, I, I.getDebugLoc(),
1778                  TII.get(SPIRV::OpGroupNonUniformShuffle))
1779       .addDef(ResVReg)
1780       .addUse(GR.getSPIRVTypeID(ResType))
1781       .addUse(GR.getOrCreateConstInt(3, I, IntTy, TII))
1782       .addUse(I.getOperand(2).getReg())
1783       .addUse(I.getOperand(3).getReg())
           .constrainAllUses(TII, TRI, RBI);
1784 }
1785 
1786 bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
1787                                                 const SPIRVType *ResType,
1788                                                 MachineInstr &I) const {
1789   MachineBasicBlock &BB = *I.getParent();
1790   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
1791       .addDef(ResVReg)
1792       .addUse(GR.getSPIRVTypeID(ResType))
1793       .addUse(I.getOperand(1).getReg())
1794       .constrainAllUses(TII, TRI, RBI);
1795 }
1796 
1797 bool SPIRVInstructionSelector::selectFreeze(Register ResVReg,
1798                                             const SPIRVType *ResType,
1799                                             MachineInstr &I) const {
1800   // There is no way to implement `freeze` correctly without support on the
1801   // SPIR-V standard side, but we may at least address a simple (static) case
1802   // when the presence of an undef/poison value is obvious. The main benefit of
1803   // even this incomplete `freeze` support is that it keeps translation from
1804   // crashing due to missing support in the legalization and selection steps.
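       // A sketch of the two handled cases (illustrative, not exhaustive):
       //   %y = freeze %undef_or_poison -> %y = OpConstantNull %ty
       //   %y = freeze %defined_x       -> %y = COPY %defined_x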
1805   if (!I.getOperand(0).isReg() || !I.getOperand(1).isReg())
1806     return false;
1807   Register OpReg = I.getOperand(1).getReg();
1808   if (MachineInstr *Def = MRI->getVRegDef(OpReg)) {
1809     Register Reg;
1810     switch (Def->getOpcode()) {
1811     case SPIRV::ASSIGN_TYPE:
1812       if (MachineInstr *AssignToDef =
1813               MRI->getVRegDef(Def->getOperand(1).getReg())) {
1814         if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1815           Reg = Def->getOperand(2).getReg();
1816       }
1817       break;
1818     case SPIRV::OpUndef:
1819       Reg = Def->getOperand(1).getReg();
1820       break;
1821     }
1822     unsigned DestOpCode;
1823     if (Reg.isValid()) {
1824       DestOpCode = SPIRV::OpConstantNull;
1825     } else {
1826       DestOpCode = TargetOpcode::COPY;
1827       Reg = OpReg;
1828     }
1829     return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(DestOpCode))
1830         .addDef(I.getOperand(0).getReg())
1831         .addUse(Reg)
1832         .constrainAllUses(TII, TRI, RBI);
1833   }
1834   return false;
1835 }
1836 
1837 static unsigned getArrayComponentCount(MachineRegisterInfo *MRI,
1838                                        const SPIRVType *ResType) {
1839   Register OpReg = ResType->getOperand(2).getReg();
1840   SPIRVType *OpDef = MRI->getVRegDef(OpReg);
1841   if (!OpDef)
1842     return 0;
1843   if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
1844       OpDef->getOperand(1).isReg()) {
1845     if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
1846       OpDef = RefDef;
1847   }
1848   unsigned N = OpDef->getOpcode() == TargetOpcode::G_CONSTANT
1849                    ? OpDef->getOperand(1).getCImm()->getValue().getZExtValue()
1850                    : 0;
1851   return N;
1852 }
1853 
1854 // Return true if the defining instruction represents a constant value.
1855 static bool isConstReg(MachineRegisterInfo *MRI, SPIRVType *OpDef,
1856                        SmallPtrSet<SPIRVType *, 4> &Visited) {
1857   if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
1858       OpDef->getOperand(1).isReg()) {
1859     if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
1860       OpDef = RefDef;
1861   }
1862 
1863   if (Visited.contains(OpDef))
1864     return true;
1865   Visited.insert(OpDef);
1866 
1867   unsigned Opcode = OpDef->getOpcode();
1868   switch (Opcode) {
1869   case TargetOpcode::G_CONSTANT:
1870   case TargetOpcode::G_FCONSTANT:
1871     return true;
1872   case TargetOpcode::G_INTRINSIC:
1873   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
1874   case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
1875     return cast<GIntrinsic>(*OpDef).getIntrinsicID() ==
1876            Intrinsic::spv_const_composite;
1877   case TargetOpcode::G_BUILD_VECTOR:
1878   case TargetOpcode::G_SPLAT_VECTOR: {
1879     for (unsigned i = OpDef->getNumExplicitDefs(); i < OpDef->getNumOperands();
1880          i++) {
1881       SPIRVType *OpNestedDef =
1882           OpDef->getOperand(i).isReg()
1883               ? MRI->getVRegDef(OpDef->getOperand(i).getReg())
1884               : nullptr;
1885       if (OpNestedDef && !isConstReg(MRI, OpNestedDef, Visited))
1886         return false;
1887     }
1888     return true;
1889   }
1890   }
1891   return false;
1892 }
1893 
1894 // Return true if the virtual register represents a constant
1895 static bool isConstReg(MachineRegisterInfo *MRI, Register OpReg) {
1896   SmallPtrSet<SPIRVType *, 4> Visited;
1897   if (SPIRVType *OpDef = MRI->getVRegDef(OpReg))
1898     return isConstReg(MRI, OpDef, Visited);
1899   return false;
1900 }
1901 
1902 bool SPIRVInstructionSelector::selectBuildVector(Register ResVReg,
1903                                                  const SPIRVType *ResType,
1904                                                  MachineInstr &I) const {
1905   unsigned N = 0;
1906   if (ResType->getOpcode() == SPIRV::OpTypeVector)
1907     N = GR.getScalarOrVectorComponentCount(ResType);
1908   else if (ResType->getOpcode() == SPIRV::OpTypeArray)
1909     N = getArrayComponentCount(MRI, ResType);
1910   else
1911     report_fatal_error("Cannot select G_BUILD_VECTOR with a non-vector result");
1912   if (I.getNumExplicitOperands() - I.getNumExplicitDefs() != N)
1913     report_fatal_error("G_BUILD_VECTOR and the result type are inconsistent");
1914 
1915   // Check whether we may construct a constant vector.
1916   bool IsConst = true;
1917   for (unsigned i = I.getNumExplicitDefs();
1918        i < I.getNumExplicitOperands() && IsConst; ++i)
1919     if (!isConstReg(MRI, I.getOperand(i).getReg()))
1920       IsConst = false;
1921 
1922   if (!IsConst && N < 2)
1923     report_fatal_error(
1924         "There must be at least two constituent operands in a vector");
1925 
1926   auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
1927                      TII.get(IsConst ? SPIRV::OpConstantComposite
1928                                      : SPIRV::OpCompositeConstruct))
1929                  .addDef(ResVReg)
1930                  .addUse(GR.getSPIRVTypeID(ResType));
1931   for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
1932     MIB.addUse(I.getOperand(i).getReg());
1933   return MIB.constrainAllUses(TII, TRI, RBI);
1934 }
1935 
1936 bool SPIRVInstructionSelector::selectSplatVector(Register ResVReg,
1937                                                  const SPIRVType *ResType,
1938                                                  MachineInstr &I) const {
1939   unsigned N = 0;
1940   if (ResType->getOpcode() == SPIRV::OpTypeVector)
1941     N = GR.getScalarOrVectorComponentCount(ResType);
1942   else if (ResType->getOpcode() == SPIRV::OpTypeArray)
1943     N = getArrayComponentCount(MRI, ResType);
1944   else
1945     report_fatal_error("Cannot select G_SPLAT_VECTOR with a non-vector result");
1946 
1947   unsigned OpIdx = I.getNumExplicitDefs();
1948   if (!I.getOperand(OpIdx).isReg())
1949     report_fatal_error("Unexpected argument in G_SPLAT_VECTOR");
1950 
1951   // Check whether we may construct a constant vector.
1952   Register OpReg = I.getOperand(OpIdx).getReg();
1953   bool IsConst = isConstReg(MRI, OpReg);
1954 
1955   if (!IsConst && N < 2)
1956     report_fatal_error(
1957         "There must be at least two constituent operands in a vector");
1958 
1959   auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
1960                      TII.get(IsConst ? SPIRV::OpConstantComposite
1961                                      : SPIRV::OpCompositeConstruct))
1962                  .addDef(ResVReg)
1963                  .addUse(GR.getSPIRVTypeID(ResType));
1964   for (unsigned i = 0; i < N; ++i)
1965     MIB.addUse(OpReg);
1966   return MIB.constrainAllUses(TII, TRI, RBI);
1967 }
1968 
1969 bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
1970                                          const SPIRVType *ResType,
1971                                          unsigned CmpOpc,
1972                                          MachineInstr &I) const {
1973   Register Cmp0 = I.getOperand(2).getReg();
1974   Register Cmp1 = I.getOperand(3).getReg();
1975   assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
1976              GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
1977          "CMP operands should have the same type");
1978   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
1979       .addDef(ResVReg)
1980       .addUse(GR.getSPIRVTypeID(ResType))
1981       .addUse(Cmp0)
1982       .addUse(Cmp1)
1983       .constrainAllUses(TII, TRI, RBI);
1984 }
1985 
1986 bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
1987                                           const SPIRVType *ResType,
1988                                           MachineInstr &I) const {
1989   auto Pred = I.getOperand(1).getPredicate();
1990   unsigned CmpOpc;
1991 
1992   Register CmpOperand = I.getOperand(2).getReg();
1993   if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
1994     CmpOpc = getPtrCmpOpcode(Pred);
1995   else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
1996     CmpOpc = getBoolCmpOpcode(Pred);
1997   else
1998     CmpOpc = getICmpOpcode(Pred);
1999   return selectCmp(ResVReg, ResType, CmpOpc, I);
2000 }
2001 
2002 void SPIRVInstructionSelector::renderFImm64(MachineInstrBuilder &MIB,
2003                                             const MachineInstr &I,
2004                                             int OpIdx) const {
2005   assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
2006          "Expected G_FCONSTANT");
2007   const ConstantFP *FPImm = I.getOperand(1).getFPImm();
2008   addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
2009 }
2010 
2011 void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
2012                                            const MachineInstr &I,
2013                                            int OpIdx) const {
2014   assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
2015          "Expected G_CONSTANT");
2016   addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
2017 }
2018 
2019 Register
2020 SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
2021                                            const SPIRVType *ResType) const {
2022   Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
2023   const SPIRVType *SpvI32Ty =
2024       ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
2025   // Find a constant in DT or build a new one.
2026   auto ConstInt = ConstantInt::get(LLVMTy, Val);
2027   Register NewReg = GR.find(ConstInt, GR.CurMF);
2028   if (!NewReg.isValid()) {
2029     NewReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
2030     GR.add(ConstInt, GR.CurMF, NewReg);
2031     MachineInstr *MI;
2032     MachineBasicBlock &BB = *I.getParent();
2033     if (Val == 0) {
2034       MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
2035                .addDef(NewReg)
2036                .addUse(GR.getSPIRVTypeID(SpvI32Ty));
2037     } else {
2038       MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
2039                .addDef(NewReg)
2040                .addUse(GR.getSPIRVTypeID(SpvI32Ty))
2041                .addImm(APInt(32, Val).getZExtValue());
2042     }
2043     constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
2044   }
2045   return NewReg;
2046 }
2047 
2048 bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
2049                                           const SPIRVType *ResType,
2050                                           MachineInstr &I) const {
2051   unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
2052   return selectCmp(ResVReg, ResType, CmpOp, I);
2053 }
2054 
2055 Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
2056                                                  MachineInstr &I) const {
2057   // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
2058   bool ZeroAsNull = STI.isOpenCLEnv();
2059   if (ResType->getOpcode() == SPIRV::OpTypeVector)
2060     return GR.getOrCreateConstVector(0UL, I, ResType, TII, ZeroAsNull);
2061   return GR.getOrCreateConstInt(0, I, ResType, TII, ZeroAsNull);
2062 }
2063 
2064 Register SPIRVInstructionSelector::buildZerosValF(const SPIRVType *ResType,
2065                                                   MachineInstr &I) const {
2066   // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
2067   bool ZeroAsNull = STI.isOpenCLEnv();
2068   APFloat VZero = getZeroFP(GR.getTypeForSPIRVType(ResType));
2069   if (ResType->getOpcode() == SPIRV::OpTypeVector)
2070     return GR.getOrCreateConstVector(VZero, I, ResType, TII, ZeroAsNull);
2071   return GR.getOrCreateConstFP(VZero, I, ResType, TII, ZeroAsNull);
2072 }
2073 
2074 Register SPIRVInstructionSelector::buildOnesValF(const SPIRVType *ResType,
2075                                                  MachineInstr &I) const {
2076   // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
2077   bool ZeroAsNull = STI.isOpenCLEnv();
2078   APFloat VOne = getOneFP(GR.getTypeForSPIRVType(ResType));
2079   if (ResType->getOpcode() == SPIRV::OpTypeVector)
2080     return GR.getOrCreateConstVector(VOne, I, ResType, TII, ZeroAsNull);
2081   return GR.getOrCreateConstFP(VOne, I, ResType, TII, ZeroAsNull);
2082 }
2083 
2084 Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
2085                                                 const SPIRVType *ResType,
2086                                                 MachineInstr &I) const {
2087   unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
2088   APInt One =
2089       AllOnes ? APInt::getAllOnes(BitWidth) : APInt::getOneBitSet(BitWidth, 0);
2090   if (ResType->getOpcode() == SPIRV::OpTypeVector)
2091     return GR.getOrCreateConstVector(One.getZExtValue(), I, ResType, TII);
2092   return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII);
2093 }
2094 
2095 bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
2096                                             const SPIRVType *ResType,
2097                                             MachineInstr &I,
2098                                             bool IsSigned) const {
2099   // To extend a bool, we need to use OpSelect between constants.
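       // E.g. `sext i1 %b to i32` becomes (pseudo-SPIR-V; %ids are illustrative):
       //   %res = OpSelect %i32_ty %b %all_ones %zero
       // while `zext` selects %one instead of %all_ones.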
2100   Register ZeroReg = buildZerosVal(ResType, I);
2101   Register OneReg = buildOnesVal(IsSigned, ResType, I);
2102   bool IsScalarBool =
2103       GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
2104   unsigned Opcode =
2105       IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
2106   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
2107       .addDef(ResVReg)
2108       .addUse(GR.getSPIRVTypeID(ResType))
2109       .addUse(I.getOperand(1).getReg())
2110       .addUse(OneReg)
2111       .addUse(ZeroReg)
2112       .constrainAllUses(TII, TRI, RBI);
2113 }
2114 
2115 bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
2116                                           const SPIRVType *ResType,
2117                                           MachineInstr &I, bool IsSigned,
2118                                           unsigned Opcode) const {
2119   Register SrcReg = I.getOperand(1).getReg();
2120   // A bool could be converted to float directly, without OpConvert*ToF, but
2121   // the translator emits OpSelect+OpConvert*ToF, so we do the same.
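       // Sketch for `uitofp i1 %b to float` (pseudo-SPIR-V; %ids are illustrative):
       //   %sel = OpSelect %i32_ty %b %one %zero
       //   %res = OpConvertUToF %float_ty %sel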
2122   if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
2123     unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
2124     SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
2125     if (ResType->getOpcode() == SPIRV::OpTypeVector) {
2126       const unsigned NumElts = ResType->getOperand(2).getImm();
2127       TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
2128     }
2129     SrcReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2130     selectSelect(SrcReg, TmpType, I, false);
2131   }
2132   return selectUnOpWithSrc(ResVReg, ResType, I, SrcReg, Opcode);
2133 }
2134 
2135 bool SPIRVInstructionSelector::selectExt(Register ResVReg,
2136                                          const SPIRVType *ResType,
2137                                          MachineInstr &I, bool IsSigned) const {
2138   Register SrcReg = I.getOperand(1).getReg();
2139   if (GR.isScalarOrVectorOfType(SrcReg, SPIRV::OpTypeBool))
2140     return selectSelect(ResVReg, ResType, I, IsSigned);
2141 
2142   SPIRVType *SrcType = GR.getSPIRVTypeForVReg(SrcReg);
2143   if (SrcType == ResType) {
2144     const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(ResVReg);
2145     const TargetRegisterClass *SrcRC = MRI->getRegClassOrNull(SrcReg);
2146     if (DstRC != SrcRC && SrcRC)
2147       MRI->setRegClass(ResVReg, SrcRC);
2148     return BuildMI(*I.getParent(), I, I.getDebugLoc(),
2149                    TII.get(TargetOpcode::COPY))
2150         .addDef(ResVReg)
2151         .addUse(SrcReg)
2152         .constrainAllUses(TII, TRI, RBI);
2153   }
2154 
2155   unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
2156   return selectUnOp(ResVReg, ResType, I, Opcode);
2157 }
2158 
2159 bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
2160                                                Register ResVReg,
2161                                                MachineInstr &I,
2162                                                const SPIRVType *IntTy,
2163                                                const SPIRVType *BoolTy) const {
2164   // To truncate to a bool, we use OpBitwiseAnd 1 and OpINotEqual to zero.
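       // That is (pseudo-SPIR-V; %ids are illustrative only):
       //   %bit = OpBitwiseAndS %int_ty %x %one
       //   %res = OpINotEqual %bool_ty %bit %zero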
2165   Register BitIntReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2166   bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
2167   unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
2168   Register Zero = buildZerosVal(IntTy, I);
2169   Register One = buildOnesVal(false, IntTy, I);
2170   MachineBasicBlock &BB = *I.getParent();
2171   BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
2172       .addDef(BitIntReg)
2173       .addUse(GR.getSPIRVTypeID(IntTy))
2174       .addUse(IntReg)
2175       .addUse(One)
2176       .constrainAllUses(TII, TRI, RBI);
2177   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
2178       .addDef(ResVReg)
2179       .addUse(GR.getSPIRVTypeID(BoolTy))
2180       .addUse(BitIntReg)
2181       .addUse(Zero)
2182       .constrainAllUses(TII, TRI, RBI);
2183 }
2184 
2185 bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
2186                                            const SPIRVType *ResType,
2187                                            MachineInstr &I) const {
2188   Register IntReg = I.getOperand(1).getReg();
2189   const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
2190   if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool))
2191     return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType);
2192   if (ArgType == ResType) {
2193     const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(ResVReg);
2194     const TargetRegisterClass *SrcRC = MRI->getRegClassOrNull(IntReg);
2195     if (DstRC != SrcRC && SrcRC)
2196       MRI->setRegClass(ResVReg, SrcRC);
2197     return BuildMI(*I.getParent(), I, I.getDebugLoc(),
2198                    TII.get(TargetOpcode::COPY))
2199         .addDef(ResVReg)
2200         .addUse(IntReg)
2201         .constrainAllUses(TII, TRI, RBI);
2202   }
2203   bool IsSigned = GR.isScalarOrVectorSigned(ResType);
2204   unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
2205   return selectUnOp(ResVReg, ResType, I, Opcode);
2206 }
2207 
2208 bool SPIRVInstructionSelector::selectConst(Register ResVReg,
2209                                            const SPIRVType *ResType,
2210                                            const APInt &Imm,
2211                                            MachineInstr &I) const {
2212   unsigned TyOpcode = ResType->getOpcode();
2213   assert(TyOpcode != SPIRV::OpTypePointer || Imm.isZero());
2214   MachineBasicBlock &BB = *I.getParent();
2215   if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
2216       Imm.isZero())
2217     return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
2218         .addDef(ResVReg)
2219         .addUse(GR.getSPIRVTypeID(ResType))
2220         .constrainAllUses(TII, TRI, RBI);
2221   if (TyOpcode == SPIRV::OpTypeInt) {
2222     assert(Imm.getBitWidth() <= 64 && "Unsupported integer width!");
2223     Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII);
2224     if (Reg == ResVReg)
2225       return true;
2226     return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
2227         .addDef(ResVReg)
2228         .addUse(Reg)
2229         .constrainAllUses(TII, TRI, RBI);
2230   }
2231   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
2232                  .addDef(ResVReg)
2233                  .addUse(GR.getSPIRVTypeID(ResType));
2234   // <= 32-bit integers should be caught by the tblgen-erated patterns.
2235   assert(Imm.getBitWidth() > 32);
2236   addNumImm(Imm, MIB);
2237   return MIB.constrainAllUses(TII, TRI, RBI);
2238 }
2239 
2240 bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
2241                                              const SPIRVType *ResType,
2242                                              MachineInstr &I) const {
2243   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
2244       .addDef(ResVReg)
2245       .addUse(GR.getSPIRVTypeID(ResType))
2246       .constrainAllUses(TII, TRI, RBI);
2247 }
2248 
2249 static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
2250   assert(MO.isReg());
2251   const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
2252   if (TypeInst->getOpcode() == SPIRV::ASSIGN_TYPE) {
2253     assert(TypeInst->getOperand(1).isReg());
2254     MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
2255     return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
2256   }
2257   return TypeInst->getOpcode() == SPIRV::OpConstantI;
2258 }
2259 
2260 static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
2261   const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
2262   if (TypeInst->getOpcode() == SPIRV::OpConstantI)
2263     return TypeInst->getOperand(2).getImm();
2264   MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
2265   assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT);
2266   return ImmInst->getOperand(1).getCImm()->getZExtValue();
2267 }
2268 
2269 bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
2270                                                const SPIRVType *ResType,
2271                                                MachineInstr &I) const {
2272   MachineBasicBlock &BB = *I.getParent();
2273   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
2274                  .addDef(ResVReg)
2275                  .addUse(GR.getSPIRVTypeID(ResType))
2276                  // object to insert
2277                  .addUse(I.getOperand(3).getReg())
2278                  // composite to insert into
2279                  .addUse(I.getOperand(2).getReg());
2280   for (unsigned i = 4; i < I.getNumOperands(); i++)
2281     MIB.addImm(foldImm(I.getOperand(i), MRI));
2282   return MIB.constrainAllUses(TII, TRI, RBI);
2283 }
2284 
2285 bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
2286                                                 const SPIRVType *ResType,
2287                                                 MachineInstr &I) const {
2288   MachineBasicBlock &BB = *I.getParent();
2289   auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
2290                  .addDef(ResVReg)
2291                  .addUse(GR.getSPIRVTypeID(ResType))
2292                  .addUse(I.getOperand(2).getReg());
2293   for (unsigned i = 3; i < I.getNumOperands(); i++)
2294     MIB.addImm(foldImm(I.getOperand(i), MRI));
2295   return MIB.constrainAllUses(TII, TRI, RBI);
2296 }
2297 
2298 bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
2299                                                const SPIRVType *ResType,
2300                                                MachineInstr &I) const {
2301   if (isImm(I.getOperand(4), MRI))
2302     return selectInsertVal(ResVReg, ResType, I);
2303   MachineBasicBlock &BB = *I.getParent();
2304   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
2305       .addDef(ResVReg)
2306       .addUse(GR.getSPIRVTypeID(ResType))
2307       .addUse(I.getOperand(2).getReg())
2308       .addUse(I.getOperand(3).getReg())
2309       .addUse(I.getOperand(4).getReg())
2310       .constrainAllUses(TII, TRI, RBI);
2311 }
2312 
2313 bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
2314                                                 const SPIRVType *ResType,
2315                                                 MachineInstr &I) const {
2316   if (isImm(I.getOperand(3), MRI))
2317     return selectExtractVal(ResVReg, ResType, I);
2318   MachineBasicBlock &BB = *I.getParent();
2319   return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
2320       .addDef(ResVReg)
2321       .addUse(GR.getSPIRVTypeID(ResType))
2322       .addUse(I.getOperand(2).getReg())
2323       .addUse(I.getOperand(3).getReg())
2324       .constrainAllUses(TII, TRI, RBI);
2325 }
2326 
2327 bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
2328                                          const SPIRVType *ResType,
2329                                          MachineInstr &I) const {
2330   const bool IsGEPInBounds = I.getOperand(2).getImm();
2331 
2332   // OpAccessChain could be used for OpenCL, but the SPIRV-LLVM Translator only
2333   // relies on PtrAccessChain, so we'll try not to deviate. For Vulkan however,
2334   // we have to use Op[InBounds]AccessChain.
2335   const unsigned Opcode = STI.isVulkanEnv()
2336                               ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
2337                                                : SPIRV::OpAccessChain)
2338                               : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain
2339                                                : SPIRV::OpPtrAccessChain);
2340 
2341   auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
2342                  .addDef(ResVReg)
2343                  .addUse(GR.getSPIRVTypeID(ResType))
2344                  // Object to get a pointer to.
2345                  .addUse(I.getOperand(3).getReg());
2346   // Adding indices.
2347   const unsigned StartingIndex =
2348       (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
2349           ? 5
2350           : 4;
2351   for (unsigned i = StartingIndex; i < I.getNumExplicitOperands(); ++i)
2352     Res.addUse(I.getOperand(i).getReg());
2353   return Res.constrainAllUses(TII, TRI, RBI);
2354 }
2355 
2356 // Maybe wrap a value into OpSpecConstantOp
2357 bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
2358     MachineInstr &I, SmallVector<Register> &CompositeArgs) const {
2359   bool Result = true;
2360   unsigned Lim = I.getNumExplicitOperands();
2361   for (unsigned i = I.getNumExplicitDefs() + 1; i < Lim; ++i) {
2362     Register OpReg = I.getOperand(i).getReg();
2363     SPIRVType *OpDefine = MRI->getVRegDef(OpReg);
2364     SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
2365     SmallPtrSet<SPIRVType *, 4> Visited;
2366     if (!OpDefine || !OpType || isConstReg(MRI, OpDefine, Visited) ||
2367         OpDefine->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST ||
2368         GR.isAggregateType(OpType)) {
2369       // The case of G_ADDRSPACE_CAST inside spv_const_composite() is processed
2370       // by selectAddrSpaceCast()
2371       CompositeArgs.push_back(OpReg);
2372       continue;
2373     }
2374     MachineFunction *MF = I.getMF();
2375     Register WrapReg = GR.find(OpDefine, MF);
2376     if (WrapReg.isValid()) {
2377       CompositeArgs.push_back(WrapReg);
2378       continue;
2379     }
2380     // Create a new register for the wrapper
2381     WrapReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2382     GR.add(OpDefine, MF, WrapReg);
2383     CompositeArgs.push_back(WrapReg);
2384     // Decorate the wrapper register and generate a new instruction
2385     MRI->setType(WrapReg, LLT::pointer(0, 64));
2386     GR.assignSPIRVTypeToVReg(OpType, WrapReg, *MF);
2387     MachineBasicBlock &BB = *I.getParent();
2388     Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
2389                  .addDef(WrapReg)
2390                  .addUse(GR.getSPIRVTypeID(OpType))
2391                  .addImm(static_cast<uint32_t>(SPIRV::Opcode::Bitcast))
2392                  .addUse(OpReg)
2393                  .constrainAllUses(TII, TRI, RBI);
2394     if (!Result)
2395       break;
2396   }
2397   return Result;
2398 }
2399 
2400 bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
2401                                                const SPIRVType *ResType,
2402                                                MachineInstr &I) const {
2403   MachineBasicBlock &BB = *I.getParent();
2404   Intrinsic::ID IID = cast<GIntrinsic>(I).getIntrinsicID();
2405   switch (IID) {
2406   case Intrinsic::spv_load:
2407     return selectLoad(ResVReg, ResType, I);
2408   case Intrinsic::spv_store:
2409     return selectStore(I);
2410   case Intrinsic::spv_extractv:
2411     return selectExtractVal(ResVReg, ResType, I);
2412   case Intrinsic::spv_insertv:
2413     return selectInsertVal(ResVReg, ResType, I);
2414   case Intrinsic::spv_extractelt:
2415     return selectExtractElt(ResVReg, ResType, I);
2416   case Intrinsic::spv_insertelt:
2417     return selectInsertElt(ResVReg, ResType, I);
2418   case Intrinsic::spv_gep:
2419     return selectGEP(ResVReg, ResType, I);
2420   case Intrinsic::spv_unref_global:
2421   case Intrinsic::spv_init_global: {
2422     MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
2423     MachineInstr *Init = I.getNumExplicitOperands() > 2
2424                              ? MRI->getVRegDef(I.getOperand(2).getReg())
2425                              : nullptr;
2426     assert(MI);
2427     return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
2428   }
2429   case Intrinsic::spv_undef: {
2430     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
2431                    .addDef(ResVReg)
2432                    .addUse(GR.getSPIRVTypeID(ResType));
2433     return MIB.constrainAllUses(TII, TRI, RBI);
2434   }
2435   case Intrinsic::spv_const_composite: {
2436     // If no values are attached, the composite is a null constant.
2437     bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
2438     // Select a proper instruction.
2439     unsigned Opcode = SPIRV::OpConstantNull;
2440     SmallVector<Register> CompositeArgs;
2441     if (!IsNull) {
2442       Opcode = SPIRV::OpConstantComposite;
2443       if (!wrapIntoSpecConstantOp(I, CompositeArgs))
2444         return false;
2445     }
2446     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
2447                    .addDef(ResVReg)
2448                    .addUse(GR.getSPIRVTypeID(ResType));
2449     // Skip the type MD node that we already used when generating assign.type.
2450     if (!IsNull) {
2451       for (Register OpReg : CompositeArgs)
2452         MIB.addUse(OpReg);
2453     }
2454     return MIB.constrainAllUses(TII, TRI, RBI);
2455   }
  case Intrinsic::spv_assign_name: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
    MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
    for (unsigned i = I.getNumExplicitDefs() + 2;
         i < I.getNumExplicitOperands(); ++i) {
      MIB.addImm(I.getOperand(i).getImm());
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
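  // For example, a two-case switch lowers to something like
  //   OpSwitch %selector %default 1 %bb.case1 2 %bb.case2
  // with (literal, label) operand pairs following the default target.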
  case Intrinsic::spv_switch: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
    for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
      if (I.getOperand(i).isReg())
        MIB.addReg(I.getOperand(i).getReg());
      else if (I.getOperand(i).isCImm())
        addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
      else if (I.getOperand(i).isMBB())
        MIB.addMBB(I.getOperand(i).getMBB());
      else
        llvm_unreachable("Unexpected OpSwitch operand");
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_loop_merge:
  case Intrinsic::spv_selection_merge: {
    const auto Opcode = IID == Intrinsic::spv_selection_merge
                            ? SPIRV::OpSelectionMerge
                            : SPIRV::OpLoopMerge;
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode));
    for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
      assert(I.getOperand(i).isMBB());
      MIB.addMBB(I.getOperand(i).getMBB());
    }
    MIB.addImm(SPIRV::SelectionControl::None);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_cmpxchg:
    return selectAtomicCmpXchg(ResVReg, ResType, I);
  case Intrinsic::spv_unreachable:
    BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable));
    break;
  case Intrinsic::spv_alloca:
    return selectFrameIndex(ResVReg, ResType, I);
  case Intrinsic::spv_alloca_array:
    return selectAllocaArray(ResVReg, ResType, I);
  case Intrinsic::spv_assume:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
          .addUse(I.getOperand(1).getReg());
    break;
  case Intrinsic::spv_expect:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
          .addDef(ResVReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addUse(I.getOperand(2).getReg())
          .addUse(I.getOperand(3).getReg());
    break;
  case Intrinsic::arithmetic_fence:
    if (STI.canUseExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence))
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpArithmeticFenceEXT))
          .addDef(ResVReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addUse(I.getOperand(2).getReg());
    else
      BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
          .addUse(I.getOperand(2).getReg());
    break;
  case Intrinsic::spv_thread_id:
    return selectSpvThreadId(ResVReg, ResType, I);
  case Intrinsic::spv_fdot:
    return selectFloatDot(ResVReg, ResType, I);
  case Intrinsic::spv_udot:
  case Intrinsic::spv_sdot:
    return selectIntegerDot(ResVReg, ResType, I);
  case Intrinsic::spv_all:
    return selectAll(ResVReg, ResType, I);
  case Intrinsic::spv_any:
    return selectAny(ResVReg, ResType, I);
  case Intrinsic::spv_cross:
    return selectExtInst(ResVReg, ResType, I, CL::cross, GL::Cross);
  case Intrinsic::spv_lerp:
    return selectExtInst(ResVReg, ResType, I, CL::mix, GL::FMix);
  case Intrinsic::spv_length:
    return selectExtInst(ResVReg, ResType, I, CL::length, GL::Length);
  case Intrinsic::spv_degrees:
    return selectExtInst(ResVReg, ResType, I, CL::degrees, GL::Degrees);
  case Intrinsic::spv_frac:
    return selectExtInst(ResVReg, ResType, I, CL::fract, GL::Fract);
  case Intrinsic::spv_normalize:
    return selectExtInst(ResVReg, ResType, I, CL::normalize, GL::Normalize);
  case Intrinsic::spv_rsqrt:
    return selectExtInst(ResVReg, ResType, I, CL::rsqrt, GL::InverseSqrt);
  case Intrinsic::spv_sign:
    return selectSign(ResVReg, ResType, I);
  case Intrinsic::spv_group_memory_barrier_with_group_sync: {
    Register MemSemReg =
        buildI32Constant(SPIRV::MemorySemantics::SequentiallyConsistent, I);
    Register ScopeReg = buildI32Constant(SPIRV::Scope::Workgroup, I);
    MachineBasicBlock &BB = *I.getParent();
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpControlBarrier))
        .addUse(ScopeReg)
        .addUse(ScopeReg)
        .addUse(MemSemReg)
        .constrainAllUses(TII, TRI, RBI);
  } break;
  case Intrinsic::spv_lifetime_start:
  case Intrinsic::spv_lifetime_end: {
    unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
                                                       : SPIRV::OpLifetimeStop;
    int64_t Size = I.getOperand(I.getNumExplicitDefs() + 1).getImm();
    Register PtrReg = I.getOperand(I.getNumExplicitDefs() + 2).getReg();
    if (Size == -1)
      Size = 0;
    BuildMI(BB, I, I.getDebugLoc(), TII.get(Op)).addUse(PtrReg).addImm(Size);
  } break;
  case Intrinsic::spv_saturate:
    return selectSaturate(ResVReg, ResType, I);
  case Intrinsic::spv_wave_is_first_lane: {
    SPIRVType *IntTy = GR.getOrCreateSPIRVIntegerType(32, I, TII);
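    // The scope operand below is the constant 3, i.e. the SPIR-V Subgroup
    // scope.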
    return BuildMI(BB, I, I.getDebugLoc(),
                   TII.get(SPIRV::OpGroupNonUniformElect))
        .addDef(ResVReg)
        .addUse(GR.getSPIRVTypeID(ResType))
        .addUse(GR.getOrCreateConstInt(3, I, IntTy, TII));
  }
  case Intrinsic::spv_wave_readlane:
    return selectWaveReadLaneAt(ResVReg, ResType, I);
  case Intrinsic::spv_step:
    return selectExtInst(ResVReg, ResType, I, CL::step, GL::Step);
  case Intrinsic::spv_radians:
    return selectExtInst(ResVReg, ResType, I, CL::radians, GL::Radians);
  // Discard intrinsics which we do not expect to actually represent code after
  // lowering or intrinsics which are not implemented but should not crash when
  // found in a customer's LLVM IR input.
  case Intrinsic::instrprof_increment:
  case Intrinsic::instrprof_increment_step:
  case Intrinsic::instrprof_value_profile:
    break;
  // Discard internal intrinsics.
  case Intrinsic::spv_value_md:
    break;
  case Intrinsic::spv_handle_fromBinding: {
    selectHandleFromBinding(ResVReg, ResType, I);
    return true;
  }
  default: {
    std::string DiagMsg;
    raw_string_ostream OS(DiagMsg);
    I.print(OS);
    DiagMsg = "Intrinsic selection not implemented: " + DiagMsg;
    report_fatal_error(DiagMsg.c_str(), false);
  }
  }
  return true;
}

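// Select a resource handle produced by @llvm.spv.handle.fromBinding. The
// intrinsic operands carry the descriptor set, the binding, the array size,
// the index into the array, and a non-uniform flag. The emitted code is
// roughly the following (a sketch; names are illustrative):
//   %var = OpVariable %_ptr_UniformConstant_T UniformConstant ; set/binding
//   %res = OpLoad %T %var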
void SPIRVInstructionSelector::selectHandleFromBinding(Register &ResVReg,
                                                       const SPIRVType *ResType,
                                                       MachineInstr &I) const {
  uint32_t Set = foldImm(I.getOperand(2), MRI);
  uint32_t Binding = foldImm(I.getOperand(3), MRI);
  uint32_t ArraySize = foldImm(I.getOperand(4), MRI);
  Register IndexReg = I.getOperand(5).getReg();
  bool IsNonUniform = ArraySize > 1 && foldImm(I.getOperand(6), MRI);

  MachineIRBuilder MIRBuilder(I);
  Register VarReg = buildPointerToResource(ResType, Set, Binding, ArraySize,
                                           IndexReg, IsNonUniform, MIRBuilder);

  if (IsNonUniform)
    buildOpDecorate(ResVReg, I, TII, SPIRV::Decoration::NonUniformEXT, {});

  // TODO: For now we assume the resource is an image, which needs to be
  // loaded to get the handle. That will not be true for storage buffers.
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(VarReg);
}

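// Return a pointer to the global variable that backs the (set, binding)
// resource. For an array of resources an access chain selects one element,
// roughly as follows (a sketch; names are illustrative):
//   %arr = OpVariable %_ptr_UniformConstant_arr_T UniformConstant
//   %ptr = OpAccessChain %_ptr_UniformConstant_T %arr %index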
Register SPIRVInstructionSelector::buildPointerToResource(
    const SPIRVType *ResType, uint32_t Set, uint32_t Binding,
    uint32_t ArraySize, Register IndexReg, bool IsNonUniform,
    MachineIRBuilder MIRBuilder) const {
  if (ArraySize == 1)
    return GR.getOrCreateGlobalVariableWithBinding(ResType, Set, Binding,
                                                   MIRBuilder);

  const SPIRVType *VarType = GR.getOrCreateSPIRVArrayType(
      ResType, ArraySize, *MIRBuilder.getInsertPt(), TII);
  Register VarReg = GR.getOrCreateGlobalVariableWithBinding(
      VarType, Set, Binding, MIRBuilder);

  SPIRVType *ResPointerType = GR.getOrCreateSPIRVPointerType(
      ResType, MIRBuilder, SPIRV::StorageClass::UniformConstant);

  Register AcReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  if (IsNonUniform) {
    // It is unclear which value needs to be marked as non-uniform, so both
    // the index and the access chain are decorated as non-uniform.
    buildOpDecorate(IndexReg, MIRBuilder, SPIRV::Decoration::NonUniformEXT, {});
    buildOpDecorate(AcReg, MIRBuilder, SPIRV::Decoration::NonUniformEXT, {});
  }

  MIRBuilder.buildInstr(SPIRV::OpAccessChain)
      .addDef(AcReg)
      .addUse(GR.getSPIRVTypeID(ResPointerType))
      .addUse(VarReg)
      .addUse(IndexReg);

  return AcReg;
}

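// Select a variable-length alloca via the SPV_INTEL_variable_length_array
// extension, e.g. (a sketch; the type name is illustrative):
//   %p = OpVariableLengthArrayINTEL %_ptr_Function_T %num_elements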
bool SPIRVInstructionSelector::selectAllocaArray(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // There was an allocation size parameter to the allocation instruction
  // that is not 1.
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(),
                 TII.get(SPIRV::OpVariableLengthArrayINTEL))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

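// For example, a frame index materialized deep inside a function still
// becomes
//   %x = OpVariable %_ptr_Function_T Function
// emitted just after the OpFunction/OpFunctionParameter header of the entry
// block (a sketch; the pointer type name is illustrative).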
bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  // Change the order of instructions if needed: all OpVariable instructions
  // in a function must be the first instructions in the first block.
  MachineFunction *MF = I.getParent()->getParent();
  MachineBasicBlock *MBB = &MF->front();
  auto It = MBB->SkipPHIsAndLabels(MBB->begin()), E = MBB->end();
  bool IsHeader = false;
  unsigned Opcode;
  for (; It != E && It != I; ++It) {
    Opcode = It->getOpcode();
    if (Opcode == SPIRV::OpFunction || Opcode == SPIRV::OpFunctionParameter) {
      IsHeader = true;
    } else if (IsHeader &&
               !(Opcode == SPIRV::ASSIGN_TYPE || Opcode == SPIRV::OpLabel)) {
      ++It;
      break;
    }
  }
  return BuildMI(*MBB, It, It->getDebugLoc(), TII.get(SPIRV::OpVariable))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. We can use
  // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
  // first, so we can generate an OpBranchConditional here. If there is no
  // G_BRCOND, we just use OpBranch for a regular unconditional branch.
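  // For example, the generic pair
  //   G_BRCOND %cond, %bb.true
  //   G_BR %bb.false
  // becomes a single
  //   OpBranchConditional %cond %bb.true %bb.false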
  const MachineInstr *PrevI = I.getPrevNode();
  MachineBasicBlock &MBB = *I.getParent();
  if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
    return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
        .addUse(PrevI->getOperand(0).getReg())
        .addMBB(PrevI->getOperand(1).getMBB())
        .addMBB(I.getOperand(0).getMBB())
        .constrainAllUses(TII, TRI, RBI);
  }
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
      .addMBB(I.getOperand(0).getMBB())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. For an
  // explicit conditional branch with no fallthrough, we use both a G_BR and a
  // G_BRCOND to create an OpBranchConditional. We should hit G_BR first, and
  // generate the OpBranchConditional in selectBranch above.
  //
  // If an OpBranchConditional has been generated, we simply return, as the
  // work is already done. If there is no OpBranchConditional, LLVM must be
  // relying on implicit fallthrough to the next basic block, so we need to
  // create an OpBranchConditional with an explicit "false" argument pointing
  // to the next basic block that LLVM would fall through to.
  const MachineInstr *NextI = I.getNextNode();
  // Check if this has already been successfully selected.
  if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
    return true;
  // Must be relying on implicit block fallthrough, so generate an
  // OpBranchConditional with the "next" basic block as the "false" target.
  MachineBasicBlock &MBB = *I.getParent();
  unsigned NextMBBNum = MBB.getNextNode()->getNumber();
  MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
      .addUse(I.getOperand(0).getReg())
      .addMBB(I.getOperand(1).getMBB())
      .addMBB(NextMBB)
      .constrainAllUses(TII, TRI, RBI);
}

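// G_PHI operands come in (value, predecessor) pairs, which map one-to-one
// onto OpPhi's (Variable, Parent) pairs, e.g. (a sketch):
//   %r = OpPhi %T %v1 %bb.1 %v2 %bb.2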
bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  const unsigned NumOps = I.getNumOperands();
  for (unsigned i = 1; i < NumOps; i += 2) {
    MIB.addUse(I.getOperand(i + 0).getReg());
    MIB.addMBB(I.getOperand(i + 1).getMBB());
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

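// Materialize a global value. Functions used as operands are handled
// specially (see the comment block inside); any other global becomes an
// OpVariable whose storage class is derived from its address space.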
bool SPIRVInstructionSelector::selectGlobalValue(
    Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
  // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
  MachineIRBuilder MIRBuilder(I);
  const GlobalValue *GV = I.getOperand(1).getGlobal();
  Type *GVType = toTypedPointer(GR.getDeducedGlobalValueType(GV));
  SPIRVType *PointerBaseType;
  if (GVType->isArrayTy()) {
    SPIRVType *ArrayElementType =
        GR.getOrCreateSPIRVType(GVType->getArrayElementType(), MIRBuilder,
                                SPIRV::AccessQualifier::ReadWrite, false);
    PointerBaseType = GR.getOrCreateSPIRVArrayType(
        ArrayElementType, GVType->getArrayNumElements(), I, TII);
  } else {
    PointerBaseType = GR.getOrCreateSPIRVType(
        GVType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
  }
  SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
      PointerBaseType, I, TII,
      addressSpaceToStorageClass(GV->getAddressSpace(), STI));

  std::string GlobalIdent;
  if (!GV->hasName()) {
    unsigned &ID = UnnamedGlobalIDs[GV];
    if (ID == 0)
      ID = UnnamedGlobalIDs.size();
    GlobalIdent = "__unnamed_" + Twine(ID).str();
  } else {
    GlobalIdent = GV->getGlobalIdentifier();
  }

  // The behaviour of functions as operands depends on the availability of the
  // corresponding extension (SPV_INTEL_function_pointers):
  // - If there is an extension to operate with functions as operands:
  // We create a proper constant operand and evaluate a correct type for a
  // function pointer.
  // - Without the required extension:
  // We have functions as operands in tests with blocks of instructions, e.g.
  // in transcoding/global_block.ll. These operands are not used and should be
  // substituted by zero constants. Their type is expected to be always
  // OpTypePointer Function %uchar.
  if (isa<Function>(GV)) {
    const Constant *ConstVal = GV;
    MachineBasicBlock &BB = *I.getParent();
    Register NewReg = GR.find(ConstVal, GR.CurMF);
    if (!NewReg.isValid()) {
      Register NewReg = ResVReg;
      GR.add(ConstVal, GR.CurMF, NewReg);
      const Function *GVFun =
          STI.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)
              ? dyn_cast<Function>(GV)
              : nullptr;
      if (GVFun) {
        // References to a function via function pointers generate virtual
        // registers without a definition. We will resolve them later, during
        // the module analysis stage.
        MachineRegisterInfo *MRI = MIRBuilder.getMRI();
        Register FuncVReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
        MRI->setRegClass(FuncVReg, &SPIRV::iIDRegClass);
        MachineInstrBuilder MB =
            BuildMI(BB, I, I.getDebugLoc(),
                    TII.get(SPIRV::OpConstantFunctionPointerINTEL))
                .addDef(NewReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(FuncVReg);
        // Map the function pointer to the Function it references.
        GR.recordFunctionPointer(&MB.getInstr()->getOperand(2), GVFun);
        return MB.constrainAllUses(TII, TRI, RBI);
      }
      return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
          .addDef(NewReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .constrainAllUses(TII, TRI, RBI);
    }
    assert(NewReg != ResVReg);
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(NewReg)
        .constrainAllUses(TII, TRI, RBI);
  }
  auto GlobalVar = cast<GlobalVariable>(GV);
  assert(GlobalVar->getName() != "llvm.global.annotations");

  bool HasInit = GlobalVar->hasInitializer() &&
                 !isa<UndefValue>(GlobalVar->getInitializer());
  // Skip the empty declaration for GVs with initializers until we get the
  // declaration with the initializer passed in.
  if (HasInit && !Init)
    return true;

  unsigned AddrSpace = GV->getAddressSpace();
  SPIRV::StorageClass::StorageClass Storage =
      addressSpaceToStorageClass(AddrSpace, STI);
  bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage &&
                  Storage != SPIRV::StorageClass::Function;
  SPIRV::LinkageType::LinkageType LnkType =
      (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
          ? SPIRV::LinkageType::Import
          : (GV->getLinkage() == GlobalValue::LinkOnceODRLinkage &&
                     STI.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr)
                 ? SPIRV::LinkageType::LinkOnceODR
                 : SPIRV::LinkageType::Export);

  Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
                                        Storage, Init, GlobalVar->isConstant(),
                                        HasLnkTy, LnkType, MIRBuilder, true);
  return Reg.isValid();
}

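// With the GLSL instruction set, log10 is emitted as roughly (a sketch;
// names are illustrative):
//   %l2 = OpExtInst %T %glsl Log2 %x
//   %r  = OpFMul %T %l2 %c_0_30103   ; OpVectorTimesScalar for vectors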
bool SPIRVInstructionSelector::selectLog10(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
    return selectExtInst(ResVReg, ResType, I, CL::log10);
  }

  // There is no log10 instruction in the GLSL Extended Instruction set, so it
  // is implemented as:
  // log10(x) = log2(x) * (1 / log2(10))
  //          = log2(x) * 0.30103

  MachineIRBuilder MIRBuilder(I);
  MachineBasicBlock &BB = *I.getParent();

  // Build log2(x).
  Register VarReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
  bool Result =
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
          .addDef(VarReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
          .addImm(GL::Log2)
          .add(I.getOperand(1))
          .constrainAllUses(TII, TRI, RBI);

  // Build 0.30103.
  assert(ResType->getOpcode() == SPIRV::OpTypeVector ||
         ResType->getOpcode() == SPIRV::OpTypeFloat);
  // TODO: Add matrix implementation once supported by the HLSL frontend.
  const SPIRVType *SpirvScalarType =
      ResType->getOpcode() == SPIRV::OpTypeVector
          ? GR.getSPIRVTypeForVReg(ResType->getOperand(1).getReg())
          : ResType;
  Register ScaleReg =
      GR.buildConstantFP(APFloat(0.30103f), MIRBuilder, SpirvScalarType);

  // Multiply log2(x) by 0.30103 to get the log10(x) result.
  auto Opcode = ResType->getOpcode() == SPIRV::OpTypeVector
                    ? SPIRV::OpVectorTimesScalar
                    : SPIRV::OpFMulS;
  Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(VarReg)
                .addUse(ScaleReg)
                .constrainAllUses(TII, TRI, RBI);

  return Result;
}

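// The lowering reads the GlobalInvocationId builtin, roughly as follows (a
// sketch; names are illustrative):
//   %gid = OpVariable %_ptr_Input_v3uint Input ; BuiltIn GlobalInvocationId
//   %vec = OpLoad %v3uint %gid
//   %res = OpCompositeExtract %uint %vec <component>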
bool SPIRVInstructionSelector::selectSpvThreadId(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // DX intrinsic: @llvm.dx.thread.id(i32)
  // ID  Name      Description
  // 93  ThreadId  reads the thread ID

  MachineIRBuilder MIRBuilder(I);
  const SPIRVType *U32Type = GR.getOrCreateSPIRVIntegerType(32, MIRBuilder);
  const SPIRVType *Vec3Ty =
      GR.getOrCreateSPIRVVectorType(U32Type, 3, MIRBuilder);
  const SPIRVType *PtrType = GR.getOrCreateSPIRVPointerType(
      Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);

  // Create a new register for the GlobalInvocationID builtin variable.
  Register NewRegister =
      MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::iIDRegClass);
  MIRBuilder.getMRI()->setType(NewRegister, LLT::pointer(0, 64));
  GR.assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());

  // Build the GlobalInvocationID global variable with the necessary
  // decorations.
  Register Variable = GR.buildGlobalVariable(
      NewRegister, PtrType,
      getLinkStringForBuiltIn(SPIRV::BuiltIn::GlobalInvocationId), nullptr,
      SPIRV::StorageClass::Input, nullptr, true, true,
      SPIRV::LinkageType::Import, MIRBuilder, false);

  // Create a new register for the loaded value.
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register LoadedRegister = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  MIRBuilder.getMRI()->setType(LoadedRegister, LLT::pointer(0, 64));
  GR.assignSPIRVTypeToVReg(Vec3Ty, LoadedRegister, MIRBuilder.getMF());

  // Load the v3uint value from the global variable.
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
      .addDef(LoadedRegister)
      .addUse(GR.getSPIRVTypeID(Vec3Ty))
      .addUse(Variable);

  // Get the Thread ID index. The operand is expected to be a constant
  // immediate value, wrapped in a type assignment.
  assert(I.getOperand(2).isReg());
  const uint32_t ThreadId = foldImm(I.getOperand(2), MRI);

  // Extract the thread ID from the loaded vector value.
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(LoadedRegister)
                 .addImm(ThreadId);
  return MIB.constrainAllUses(TII, TRI, RBI);
}

namespace llvm {
InstructionSelector *
createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                               const SPIRVSubtarget &Subtarget,
                               const RegisterBankInfo &RBI) {
  return new SPIRVInstructionSelector(TM, Subtarget, RBI);
}
} // namespace llvm