//===- SPIRVBuiltins.cpp - SPIR-V Built-in Functions ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering builtin function calls and types using their
// demangled names and TableGen records.
//
//===----------------------------------------------------------------------===//

#include "SPIRVBuiltins.h"
#include "SPIRV.h"
#include "SPIRVSubtarget.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include <regex>
#include <string>
#include <tuple>

#define DEBUG_TYPE "spirv-builtins"

namespace llvm {
namespace SPIRV {
#define GET_BuiltinGroup_DECL
#include "SPIRVGenTables.inc"

struct DemangledBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  BuiltinGroup Group;
  uint8_t MinNumArgs;
  uint8_t MaxNumArgs;
};

#define GET_DemangledBuiltins_DECL
#define GET_DemangledBuiltins_IMPL
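// Note: each GET_*_DECL/GET_*_IMPL pair selects the matching section of
// SPIRVGenTables.inc (included again at the end of this namespace), providing
// the TableGen-generated tables and lookup helpers (e.g. lookupBuiltin,
// lookupGroupBuiltin) used throughout this file.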

struct IncomingCall {
  const std::string BuiltinName;
  const DemangledBuiltin *Builtin;

  const Register ReturnRegister;
  const SPIRVType *ReturnType;
  const SmallVectorImpl<Register> &Arguments;

  IncomingCall(const std::string BuiltinName, const DemangledBuiltin *Builtin,
               const Register ReturnRegister, const SPIRVType *ReturnType,
               const SmallVectorImpl<Register> &Arguments)
      : BuiltinName(BuiltinName), Builtin(Builtin),
        ReturnRegister(ReturnRegister), ReturnType(ReturnType),
        Arguments(Arguments) {}

  bool isSpirvOp() const { return BuiltinName.rfind("__spirv_", 0) == 0; }
};

struct NativeBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Opcode;
};

#define GET_NativeBuiltins_DECL
#define GET_NativeBuiltins_IMPL

struct GroupBuiltin {
  StringRef Name;
  uint32_t Opcode;
  uint32_t GroupOperation;
  bool IsElect;
  bool IsAllOrAny;
  bool IsAllEqual;
  bool IsBallot;
  bool IsInverseBallot;
  bool IsBallotBitExtract;
  bool IsBallotFindBit;
  bool IsLogical;
  bool NoGroupOperation;
  bool HasBoolArg;
};

#define GET_GroupBuiltins_DECL
#define GET_GroupBuiltins_IMPL

struct IntelSubgroupsBuiltin {
  StringRef Name;
  uint32_t Opcode;
  bool IsBlock;
  bool IsWrite;
  bool IsMedia;
};

#define GET_IntelSubgroupsBuiltins_DECL
#define GET_IntelSubgroupsBuiltins_IMPL

struct AtomicFloatingBuiltin {
  StringRef Name;
  uint32_t Opcode;
};

#define GET_AtomicFloatingBuiltins_DECL
#define GET_AtomicFloatingBuiltins_IMPL

struct GroupUniformBuiltin {
  StringRef Name;
  uint32_t Opcode;
  bool IsLogical;
};

#define GET_GroupUniformBuiltins_DECL
#define GET_GroupUniformBuiltins_IMPL

struct GetBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  BuiltIn::BuiltIn Value;
};

using namespace BuiltIn;
#define GET_GetBuiltins_DECL
#define GET_GetBuiltins_IMPL

struct ImageQueryBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Component;
};

#define GET_ImageQueryBuiltins_DECL
#define GET_ImageQueryBuiltins_IMPL

struct ConvertBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  bool IsDestinationSigned;
  bool IsSaturated;
  bool IsRounded;
  bool IsBfloat16;
  FPRoundingMode::FPRoundingMode RoundingMode;
};

struct VectorLoadStoreBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Number;
  uint32_t ElementCount;
  bool IsRounded;
  FPRoundingMode::FPRoundingMode RoundingMode;
};

using namespace FPRoundingMode;
#define GET_ConvertBuiltins_DECL
#define GET_ConvertBuiltins_IMPL

using namespace InstructionSet;
#define GET_VectorLoadStoreBuiltins_DECL
#define GET_VectorLoadStoreBuiltins_IMPL

#define GET_CLMemoryScope_DECL
#define GET_CLSamplerAddressingMode_DECL
#define GET_CLMemoryFenceFlags_DECL
#define GET_ExtendedBuiltins_DECL
#include "SPIRVGenTables.inc"
} // namespace SPIRV

//===----------------------------------------------------------------------===//
// Misc functions for looking up builtins and verifying requirements using
// TableGen records
//===----------------------------------------------------------------------===//

namespace SPIRV {
/// Parses the name part of the demangled builtin call.
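/// For example (illustrative): "__spirv_ocl_fmax(float, float)" yields
/// "fmax", while "__spirv_FConvert_Rhalf_rtz(double)" yields
/// "__spirv_FConvert" and reports the "rtz" postfix via \p DecorationId.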
std::string lookupBuiltinNameHelper(StringRef DemangledCall,
                                    FPDecorationId *DecorationId) {
  const static std::string PassPrefix = "(anonymous namespace)::";
  std::string BuiltinName;
  // The Itanium demangler result may have an "(anonymous namespace)::" prefix.
  if (DemangledCall.starts_with(PassPrefix.c_str()))
    BuiltinName = DemangledCall.substr(PassPrefix.length());
  else
    BuiltinName = DemangledCall;
  // Extract the builtin function name and types of arguments from the call
  // skeleton.
  BuiltinName = BuiltinName.substr(0, BuiltinName.find('('));

  // Account for possible "__spirv_ocl_" prefix in SPIR-V friendly LLVM IR.
  if (BuiltinName.rfind("__spirv_ocl_", 0) == 0)
    BuiltinName = BuiltinName.substr(12);

  // Check if the extracted name contains type information between angle
  // brackets. If so, the builtin is an instantiated template and the
  // template arguments and the return type must be removed.
  std::size_t Pos1 = BuiltinName.rfind('<');
  if (Pos1 != std::string::npos && BuiltinName.back() == '>') {
    std::size_t Pos2 = BuiltinName.rfind(' ', Pos1);
    if (Pos2 == std::string::npos)
      Pos2 = 0;
    else
      ++Pos2;
    BuiltinName = BuiltinName.substr(Pos2, Pos1 - Pos2);
    BuiltinName = BuiltinName.substr(BuiltinName.find_last_of(' ') + 1);
  }

  // Check if the extracted name begins with:
  // - "__spirv_ImageSampleExplicitLod"
  // - "__spirv_ImageRead"
  // - "__spirv_ImageQuerySizeLod"
  // - "__spirv_UDotKHR"
  // - "__spirv_SDotKHR"
  // - "__spirv_SUDotKHR"
  // - "__spirv_SDotAccSatKHR"
  // - "__spirv_UDotAccSatKHR"
  // - "__spirv_SUDotAccSatKHR"
  // - "__spirv_ReadClockKHR"
  // - "__spirv_SubgroupBlockReadINTEL"
  // - "__spirv_SubgroupImageBlockReadINTEL"
  // - "__spirv_SubgroupImageMediaBlockReadINTEL"
  // - "__spirv_SubgroupImageMediaBlockWriteINTEL"
  // - "__spirv_Convert"
  // - "__spirv_UConvert"
  // - "__spirv_SConvert"
  // - "__spirv_FConvert"
  // - "__spirv_SatConvert"
  // and contains return type information at the end "_R<type>".
  // If so, extract the plain builtin name without the type information.
  static const std::regex SpvWithR(
      "(__spirv_(ImageSampleExplicitLod|ImageRead|ImageQuerySizeLod|UDotKHR|"
      "SDotKHR|SUDotKHR|SDotAccSatKHR|UDotAccSatKHR|SUDotAccSatKHR|"
      "ReadClockKHR|SubgroupBlockReadINTEL|SubgroupImageBlockReadINTEL|"
      "SubgroupImageMediaBlockReadINTEL|SubgroupImageMediaBlockWriteINTEL|"
      "Convert|"
      "UConvert|SConvert|FConvert|SatConvert).*)_R[^_]*_?(\\w+)?.*");
  std::smatch Match;
  if (std::regex_match(BuiltinName, Match, SpvWithR) && Match.size() > 1) {
    std::ssub_match SubMatch;
    if (DecorationId && Match.size() > 3) {
      SubMatch = Match[3];
      *DecorationId = demangledPostfixToDecorationId(SubMatch.str());
    }
    SubMatch = Match[1];
    BuiltinName = SubMatch.str();
  }

  return BuiltinName;
}
} // namespace SPIRV

/// Looks up the demangled builtin call in the SPIRVBuiltins.td records using
/// the provided \p DemangledCall and specified \p Set.
///
/// The lookup follows the following algorithm, returning the first successful
/// match:
/// 1. Search with the plain demangled name (expecting a 1:1 match).
/// 2. Search with the prefix before or suffix after the demangled name
///    signifying the type of the first argument.
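///    For example (see the cases below), an unsigned first argument maps
///    OpenCL "abs" to the "u_abs" record and "group_reduce_max" to
///    "group_reduce_maxu".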
///
/// \returns Wrapper around the demangled call and found builtin definition.
static std::unique_ptr<const SPIRV::IncomingCall>
lookupBuiltin(StringRef DemangledCall,
              SPIRV::InstructionSet::InstructionSet Set,
              Register ReturnRegister, const SPIRVType *ReturnType,
              const SmallVectorImpl<Register> &Arguments) {
  std::string BuiltinName = SPIRV::lookupBuiltinNameHelper(DemangledCall);

  SmallVector<StringRef, 10> BuiltinArgumentTypes;
  StringRef BuiltinArgs =
      DemangledCall.slice(DemangledCall.find('(') + 1, DemangledCall.find(')'));
  BuiltinArgs.split(BuiltinArgumentTypes, ',', -1, false);

  // Look up the builtin in the defined set. Start with the plain demangled
  // name, expecting a 1:1 match in the defined builtin set.
  const SPIRV::DemangledBuiltin *Builtin;
  if ((Builtin = SPIRV::lookupBuiltin(BuiltinName, Set)))
    return std::make_unique<SPIRV::IncomingCall>(
        BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);

  // If the initial look up was unsuccessful and the demangled call takes at
  // least 1 argument, add a prefix or suffix signifying the type of the first
  // argument and repeat the search.
  if (BuiltinArgumentTypes.size() >= 1) {
    char FirstArgumentType = BuiltinArgumentTypes[0][0];
    // Prefix to be added to the builtin's name for lookup.
    // For example, OpenCL "abs" taking an unsigned value has a prefix "u_".
    std::string Prefix;

    switch (FirstArgumentType) {
    // Unsigned:
    case 'u':
      if (Set == SPIRV::InstructionSet::OpenCL_std)
        Prefix = "u_";
      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "u";
      break;
    // Signed:
    case 'c':
    case 's':
    case 'i':
    case 'l':
      if (Set == SPIRV::InstructionSet::OpenCL_std)
        Prefix = "s_";
      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "s";
      break;
    // Floating-point:
    case 'f':
    case 'd':
    case 'h':
      if (Set == SPIRV::InstructionSet::OpenCL_std ||
          Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "f";
      break;
    }

    // If argument-type name prefix was added, look up the builtin again.
    if (!Prefix.empty() &&
        (Builtin = SPIRV::lookupBuiltin(Prefix + BuiltinName, Set)))
      return std::make_unique<SPIRV::IncomingCall>(
          BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);

    // If lookup with a prefix failed, find a suffix to be added to the
    // builtin's name for lookup. For example, OpenCL "group_reduce_max" taking
    // an unsigned value has a suffix "u".
    std::string Suffix;

    switch (FirstArgumentType) {
    // Unsigned:
    case 'u':
      Suffix = "u";
      break;
    // Signed:
    case 'c':
    case 's':
    case 'i':
    case 'l':
      Suffix = "s";
      break;
    // Floating-point:
    case 'f':
    case 'd':
    case 'h':
      Suffix = "f";
      break;
    }

    // If argument-type name suffix was added, look up the builtin again.
    if (!Suffix.empty() &&
        (Builtin = SPIRV::lookupBuiltin(BuiltinName + Suffix, Set)))
      return std::make_unique<SPIRV::IncomingCall>(
          BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
  }

  // No builtin with such name was found in the set.
  return nullptr;
}

static MachineInstr *getBlockStructInstr(Register ParamReg,
                                         MachineRegisterInfo *MRI) {
  // We expect the following sequence of instructions:
  //   %0:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.alloca)
  //   or       = G_GLOBAL_VALUE @block_literal_global
  //   %1:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.bitcast), %0
  //   %2:_(p4) = G_ADDRSPACE_CAST %1:_(pN)
  MachineInstr *MI = MRI->getUniqueVRegDef(ParamReg);
  assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST &&
         MI->getOperand(1).isReg());
  Register BitcastReg = MI->getOperand(1).getReg();
  MachineInstr *BitcastMI = MRI->getUniqueVRegDef(BitcastReg);
  assert(isSpvIntrinsic(*BitcastMI, Intrinsic::spv_bitcast) &&
         BitcastMI->getOperand(2).isReg());
  Register ValueReg = BitcastMI->getOperand(2).getReg();
  MachineInstr *ValueMI = MRI->getUniqueVRegDef(ValueReg);
  return ValueMI;
}

// Return an integer constant corresponding to the given register and
// defined in spv_track_constant.
// TODO: maybe unify with prelegalizer pass.
static unsigned getConstFromIntrinsic(Register Reg, MachineRegisterInfo *MRI) {
  MachineInstr *DefMI = MRI->getUniqueVRegDef(Reg);
  assert(isSpvIntrinsic(*DefMI, Intrinsic::spv_track_constant) &&
         DefMI->getOperand(2).isReg());
  MachineInstr *DefMI2 = MRI->getUniqueVRegDef(DefMI->getOperand(2).getReg());
  assert(DefMI2->getOpcode() == TargetOpcode::G_CONSTANT &&
         DefMI2->getOperand(1).isCImm());
  return DefMI2->getOperand(1).getCImm()->getValue().getZExtValue();
}

// Return type of the instruction result from spv_assign_type intrinsic.
// TODO: maybe unify with prelegalizer pass.
static const Type *getMachineInstrType(MachineInstr *MI) {
  MachineInstr *NextMI = MI->getNextNode();
  if (!NextMI)
    return nullptr;
  if (isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_name))
    if ((NextMI = NextMI->getNextNode()) == nullptr)
      return nullptr;
  Register ValueReg = MI->getOperand(0).getReg();
  if ((!isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_type) &&
       !isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_ptr_type)) ||
      NextMI->getOperand(1).getReg() != ValueReg)
    return nullptr;
  Type *Ty = getMDOperandAsType(NextMI->getOperand(2).getMetadata(), 0);
  assert(Ty && "Type is expected");
  return Ty;
}

static const Type *getBlockStructType(Register ParamReg,
                                      MachineRegisterInfo *MRI) {
  // In principle, this information should be passed to us from Clang via
  // an elementtype attribute. However, said attribute requires that
  // the function call be an intrinsic, which it is not. Instead, we rely on
  // being able to trace this to the declaration of a variable: OpenCL C
  // specification section 6.12.5 should guarantee that we can do this.
  MachineInstr *MI = getBlockStructInstr(ParamReg, MRI);
  if (MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE)
    return MI->getOperand(1).getGlobal()->getType();
  assert(isSpvIntrinsic(*MI, Intrinsic::spv_alloca) &&
         "Blocks in OpenCL C must be traceable to allocation site");
  return getMachineInstrType(MI);
}

//===----------------------------------------------------------------------===//
// Helper functions for building misc instructions
//===----------------------------------------------------------------------===//

/// Helper function building either a resulting scalar or vector bool register
/// depending on the expected \p ResultType.
///
/// \returns Tuple of the resulting register and its type.
static std::tuple<Register, SPIRVType *>
buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType,
                  SPIRVGlobalRegistry *GR) {
  LLT Type;
  SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder);

  if (ResultType->getOpcode() == SPIRV::OpTypeVector) {
    unsigned VectorElements = ResultType->getOperand(2).getImm();
    BoolType =
        GR->getOrCreateSPIRVVectorType(BoolType, VectorElements, MIRBuilder);
    const FixedVectorType *LLVMVectorType =
        cast<FixedVectorType>(GR->getTypeForSPIRVType(BoolType));
    Type = LLT::vector(LLVMVectorType->getElementCount(), 1);
  } else {
    Type = LLT::scalar(1);
  }

  Register ResultRegister =
      MIRBuilder.getMRI()->createGenericVirtualRegister(Type);
  MIRBuilder.getMRI()->setRegClass(ResultRegister, GR->getRegClass(ResultType));
  GR->assignSPIRVTypeToVReg(BoolType, ResultRegister, MIRBuilder.getMF());
  return std::make_tuple(ResultRegister, BoolType);
}

/// Helper function for building either a vector or scalar select instruction
/// depending on the expected \p ReturnType.
static bool buildSelectInst(MachineIRBuilder &MIRBuilder,
                            Register ReturnRegister, Register SourceRegister,
                            const SPIRVType *ReturnType,
                            SPIRVGlobalRegistry *GR) {
  Register TrueConst, FalseConst;

  if (ReturnType->getOpcode() == SPIRV::OpTypeVector) {
    unsigned Bits = GR->getScalarOrVectorBitWidth(ReturnType);
    uint64_t AllOnes = APInt::getAllOnes(Bits).getZExtValue();
    TrueConst = GR->getOrCreateConsIntVector(AllOnes, MIRBuilder, ReturnType);
    FalseConst = GR->getOrCreateConsIntVector(0, MIRBuilder, ReturnType);
  } else {
    TrueConst = GR->buildConstantInt(1, MIRBuilder, ReturnType);
    FalseConst = GR->buildConstantInt(0, MIRBuilder, ReturnType);
  }

  return MIRBuilder.buildSelect(ReturnRegister, SourceRegister, TrueConst,
                                FalseConst);
}

/// Helper function for building a load instruction loading into the
/// \p DestinationReg.
static Register buildLoadInst(SPIRVType *BaseType, Register PtrRegister,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR, LLT LowLevelType,
                              Register DestinationReg = Register(0)) {
  if (!DestinationReg.isValid())
    DestinationReg = createVirtualRegister(BaseType, GR, MIRBuilder);
  // TODO: consider using correct address space and alignment (p0 is canonical
  // type for selection though).
  MachinePointerInfo PtrInfo = MachinePointerInfo();
  MIRBuilder.buildLoad(DestinationReg, PtrRegister, PtrInfo, Align());
  return DestinationReg;
}

/// Helper function for building a load instruction for loading a builtin global
/// variable of \p BuiltinValue value.
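/// For example (illustrative), with \p BuiltinValue == GlobalInvocationId this
/// emits an Input-storage-class OpVariable decorated with that builtin,
/// followed by an OpLoad of its value.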
static Register buildBuiltinVariableLoad(
    MachineIRBuilder &MIRBuilder, SPIRVType *VariableType,
    SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType,
    Register Reg = Register(0), bool isConst = true, bool hasLinkageTy = true) {
  Register NewRegister =
      MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::pIDRegClass);
  MIRBuilder.getMRI()->setType(
      NewRegister,
      LLT::pointer(storageClassToAddressSpace(SPIRV::StorageClass::Function),
                   GR->getPointerSize()));
  SPIRVType *PtrType = GR->getOrCreateSPIRVPointerType(
      VariableType, MIRBuilder, SPIRV::StorageClass::Input);
  GR->assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());

  // Set up the global OpVariable with the necessary builtin decorations.
  Register Variable = GR->buildGlobalVariable(
      NewRegister, PtrType, getLinkStringForBuiltIn(BuiltinValue), nullptr,
      SPIRV::StorageClass::Input, nullptr, /* isConst= */ isConst,
      /* HasLinkageTy */ hasLinkageTy, SPIRV::LinkageType::Import, MIRBuilder,
      false);

  // Load the value from the global variable.
  Register LoadedRegister =
      buildLoadInst(VariableType, Variable, MIRBuilder, GR, LLType, Reg);
  MIRBuilder.getMRI()->setType(LoadedRegister, LLType);
  return LoadedRegister;
}

/// Helper external function for inserting an ASSIGN_TYPE instruction between
/// \p Reg and its definition, set the new register as a destination of the
/// definition, assign SPIRVType to both registers. If SpirvTy is provided, use
/// it as SPIRVType in ASSIGN_TYPE, otherwise create it from \p Ty. Defined in
/// SPIRVPreLegalizer.cpp.
extern Register insertAssignInstr(Register Reg, Type *Ty, SPIRVType *SpirvTy,
                                  SPIRVGlobalRegistry *GR,
                                  MachineIRBuilder &MIB,
                                  MachineRegisterInfo &MRI);

// TODO: Move to TableGen.
static SPIRV::MemorySemantics::MemorySemantics
getSPIRVMemSemantics(std::memory_order MemOrder) {
  switch (MemOrder) {
  case std::memory_order_relaxed:
    return SPIRV::MemorySemantics::None;
  case std::memory_order_acquire:
    return SPIRV::MemorySemantics::Acquire;
  case std::memory_order_release:
    return SPIRV::MemorySemantics::Release;
  case std::memory_order_acq_rel:
    return SPIRV::MemorySemantics::AcquireRelease;
  case std::memory_order_seq_cst:
    return SPIRV::MemorySemantics::SequentiallyConsistent;
  default:
    report_fatal_error("Unknown CL memory order");
  }
}

static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope) {
  switch (ClScope) {
  case SPIRV::CLMemoryScope::memory_scope_work_item:
    return SPIRV::Scope::Invocation;
  case SPIRV::CLMemoryScope::memory_scope_work_group:
    return SPIRV::Scope::Workgroup;
  case SPIRV::CLMemoryScope::memory_scope_device:
    return SPIRV::Scope::Device;
  case SPIRV::CLMemoryScope::memory_scope_all_svm_devices:
    return SPIRV::Scope::CrossDevice;
  case SPIRV::CLMemoryScope::memory_scope_sub_group:
    return SPIRV::Scope::Subgroup;
  }
  report_fatal_error("Unknown CL memory scope");
}

static Register buildConstantIntReg32(uint64_t Val,
                                      MachineIRBuilder &MIRBuilder,
                                      SPIRVGlobalRegistry *GR) {
  return GR->buildConstantInt(Val, MIRBuilder,
                              GR->getOrCreateSPIRVIntegerType(32, MIRBuilder));
}

static Register buildScopeReg(Register CLScopeRegister,
                              SPIRV::Scope::Scope Scope,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR,
                              MachineRegisterInfo *MRI) {
  if (CLScopeRegister.isValid()) {
    auto CLScope =
        static_cast<SPIRV::CLMemoryScope>(getIConstVal(CLScopeRegister, MRI));
    Scope = getSPIRVScope(CLScope);

    if (CLScope == static_cast<unsigned>(Scope)) {
      MRI->setRegClass(CLScopeRegister, &SPIRV::iIDRegClass);
      return CLScopeRegister;
    }
  }
  return buildConstantIntReg32(Scope, MIRBuilder, GR);
}

static void setRegClassIfNull(Register Reg, MachineRegisterInfo *MRI,
                              SPIRVGlobalRegistry *GR) {
  if (MRI->getRegClassOrNull(Reg))
    return;
  SPIRVType *SpvType = GR->getSPIRVTypeForVReg(Reg);
  MRI->setRegClass(Reg,
                   SpvType ? GR->getRegClass(SpvType) : &SPIRV::iIDRegClass);
}

static Register buildMemSemanticsReg(Register SemanticsRegister,
                                     Register PtrRegister, unsigned &Semantics,
                                     MachineIRBuilder &MIRBuilder,
                                     SPIRVGlobalRegistry *GR) {
  if (SemanticsRegister.isValid()) {
    MachineRegisterInfo *MRI = MIRBuilder.getMRI();
    std::memory_order Order =
        static_cast<std::memory_order>(getIConstVal(SemanticsRegister, MRI));
    Semantics =
        getSPIRVMemSemantics(Order) |
        getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
    if (static_cast<unsigned>(Order) == Semantics) {
      MRI->setRegClass(SemanticsRegister, &SPIRV::iIDRegClass);
      return SemanticsRegister;
    }
  }
  return buildConstantIntReg32(Semantics, MIRBuilder, GR);
}

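/// Helper for lowering "__spirv_" wrapper calls: the call arguments are
/// forwarded to \p Opcode as-is (with trailing \p ImmArgs emitted as
/// immediates), and the result definition plus \p TypeReg are added only when
/// \p TypeReg is valid.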
static bool buildOpFromWrapper(MachineIRBuilder &MIRBuilder, unsigned Opcode,
                               const SPIRV::IncomingCall *Call,
                               Register TypeReg,
                               ArrayRef<uint32_t> ImmArgs = {}) {
  auto MIB = MIRBuilder.buildInstr(Opcode);
  if (TypeReg.isValid())
    MIB.addDef(Call->ReturnRegister).addUse(TypeReg);
  unsigned Sz = Call->Arguments.size() - ImmArgs.size();
  for (unsigned i = 0; i < Sz; ++i)
    MIB.addUse(Call->Arguments[i]);
  for (uint32_t ImmArg : ImmArgs)
    MIB.addImm(ImmArg);
  return true;
}

/// Helper function for translating atomic init to OpStore.
static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpStore, Call, Register(0));

  assert(Call->Arguments.size() == 2 &&
         "Need 2 arguments for atomic init translation");
  MIRBuilder.buildInstr(SPIRV::OpStore)
      .addUse(Call->Arguments[0])
      .addUse(Call->Arguments[1]);
  return true;
}

/// Helper function for building an atomic load instruction.
static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicLoad, Call, TypeReg);

  Register PtrRegister = Call->Arguments[0];
  // TODO: if true insert call to __translate_ocl_memory_scope before
  // OpAtomicLoad and the function implementation. We can use Translator's
  // output for transcoding/atomic_explicit_arguments.cl as an example.
  Register ScopeRegister =
      Call->Arguments.size() > 1
          ? Call->Arguments[1]
          : buildConstantIntReg32(SPIRV::Scope::Device, MIRBuilder, GR);
  Register MemSemanticsReg;
  if (Call->Arguments.size() > 2) {
    // TODO: Insert call to __translate_ocl_memory_order before OpAtomicLoad.
    MemSemanticsReg = Call->Arguments[2];
  } else {
    int Semantics =
        SPIRV::MemorySemantics::SequentiallyConsistent |
        getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
    MemSemanticsReg = buildConstantIntReg32(Semantics, MIRBuilder, GR);
  }

  MIRBuilder.buildInstr(SPIRV::OpAtomicLoad)
      .addDef(Call->ReturnRegister)
      .addUse(TypeReg)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg);
  return true;
}

/// Helper function for building an atomic store instruction.
static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call,
                                 MachineIRBuilder &MIRBuilder,
                                 SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicStore, Call,
                              Register(0));

  Register ScopeRegister =
      buildConstantIntReg32(SPIRV::Scope::Device, MIRBuilder, GR);
  Register PtrRegister = Call->Arguments[0];
  int Semantics =
      SPIRV::MemorySemantics::SequentiallyConsistent |
      getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
  Register MemSemanticsReg = buildConstantIntReg32(Semantics, MIRBuilder, GR);
  MIRBuilder.buildInstr(SPIRV::OpAtomicStore)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg)
      .addUse(Call->Arguments[1]);
  return true;
}

/// Helper function for building an atomic compare-exchange instruction.
static bool buildAtomicCompareExchangeInst(
    const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin,
    unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  bool IsCmpxchg = Call->Builtin->Name.contains("cmpxchg");
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  Register ObjectPtr = Call->Arguments[0];   // Pointer (volatile A *object).
  Register ExpectedArg = Call->Arguments[1]; // Comparator (C *expected).
  Register Desired = Call->Arguments[2];     // Value (C desired).
  SPIRVType *SpvDesiredTy = GR->getSPIRVTypeForVReg(Desired);
  LLT DesiredLLT = MRI->getType(Desired);

  assert(GR->getSPIRVTypeForVReg(ObjectPtr)->getOpcode() ==
         SPIRV::OpTypePointer);
  unsigned ExpectedType = GR->getSPIRVTypeForVReg(ExpectedArg)->getOpcode();
  (void)ExpectedType;
  assert(IsCmpxchg ? ExpectedType == SPIRV::OpTypeInt
                   : ExpectedType == SPIRV::OpTypePointer);
  assert(GR->isScalarOfType(Desired, SPIRV::OpTypeInt));

  SPIRVType *SpvObjectPtrTy = GR->getSPIRVTypeForVReg(ObjectPtr);
  assert(SpvObjectPtrTy->getOperand(2).isReg() && "SPIRV type is expected");
  auto StorageClass = static_cast<SPIRV::StorageClass::StorageClass>(
      SpvObjectPtrTy->getOperand(1).getImm());
  auto MemSemStorage = getMemSemanticsForStorageClass(StorageClass);

  Register MemSemEqualReg;
  Register MemSemUnequalReg;
  uint64_t MemSemEqual =
      IsCmpxchg
          ? SPIRV::MemorySemantics::None
          : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
  uint64_t MemSemUnequal =
      IsCmpxchg
          ? SPIRV::MemorySemantics::None
          : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
  if (Call->Arguments.size() >= 4) {
    assert(Call->Arguments.size() >= 5 &&
           "Need 5+ args for explicit atomic cmpxchg");
    auto MemOrdEq =
        static_cast<std::memory_order>(getIConstVal(Call->Arguments[3], MRI));
    auto MemOrdNeq =
        static_cast<std::memory_order>(getIConstVal(Call->Arguments[4], MRI));
    MemSemEqual = getSPIRVMemSemantics(MemOrdEq) | MemSemStorage;
    MemSemUnequal = getSPIRVMemSemantics(MemOrdNeq) | MemSemStorage;
    if (static_cast<unsigned>(MemOrdEq) == MemSemEqual)
      MemSemEqualReg = Call->Arguments[3];
    if (static_cast<unsigned>(MemOrdNeq) == MemSemUnequal)
      MemSemUnequalReg = Call->Arguments[4];
  }
  if (!MemSemEqualReg.isValid())
    MemSemEqualReg = buildConstantIntReg32(MemSemEqual, MIRBuilder, GR);
  if (!MemSemUnequalReg.isValid())
    MemSemUnequalReg = buildConstantIntReg32(MemSemUnequal, MIRBuilder, GR);

  Register ScopeReg;
  auto Scope = IsCmpxchg ? SPIRV::Scope::Workgroup : SPIRV::Scope::Device;
  if (Call->Arguments.size() >= 6) {
    assert(Call->Arguments.size() == 6 &&
           "Extra args for explicit atomic cmpxchg");
    auto ClScope = static_cast<SPIRV::CLMemoryScope>(
        getIConstVal(Call->Arguments[5], MRI));
    Scope = getSPIRVScope(ClScope);
    if (ClScope == static_cast<unsigned>(Scope))
      ScopeReg = Call->Arguments[5];
  }
  if (!ScopeReg.isValid())
    ScopeReg = buildConstantIntReg32(Scope, MIRBuilder, GR);

  Register Expected = IsCmpxchg
                          ? ExpectedArg
                          : buildLoadInst(SpvDesiredTy, ExpectedArg, MIRBuilder,
                                          GR, LLT::scalar(64));
  MRI->setType(Expected, DesiredLLT);
  Register Tmp = !IsCmpxchg ? MRI->createGenericVirtualRegister(DesiredLLT)
                            : Call->ReturnRegister;
  if (!MRI->getRegClassOrNull(Tmp))
    MRI->setRegClass(Tmp, GR->getRegClass(SpvDesiredTy));
  GR->assignSPIRVTypeToVReg(SpvDesiredTy, Tmp, MIRBuilder.getMF());

  SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
  MIRBuilder.buildInstr(Opcode)
      .addDef(Tmp)
      .addUse(GR->getSPIRVTypeID(IntTy))
      .addUse(ObjectPtr)
      .addUse(ScopeReg)
      .addUse(MemSemEqualReg)
      .addUse(MemSemUnequalReg)
      .addUse(Desired)
      .addUse(Expected);
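  // For the OpenCL C atomic_compare_exchange_* forms, mirror the C semantics:
  // write the original value back through "expected" and return whether the
  // exchange succeeded.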
  if (!IsCmpxchg) {
    MIRBuilder.buildInstr(SPIRV::OpStore).addUse(ExpectedArg).addUse(Tmp);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Call->ReturnRegister, Tmp, Expected);
  }
  return true;
}

/// Helper function for building atomic instructions.
static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                               MachineIRBuilder &MIRBuilder,
                               SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register ScopeRegister =
      Call->Arguments.size() >= 4 ? Call->Arguments[3] : Register();

  assert(Call->Arguments.size() <= 4 &&
         "Too many args for explicit atomic RMW");
  ScopeRegister = buildScopeReg(ScopeRegister, SPIRV::Scope::Workgroup,
                                MIRBuilder, GR, MRI);

  Register PtrRegister = Call->Arguments[0];
  unsigned Semantics = SPIRV::MemorySemantics::None;
  Register MemSemanticsReg =
      Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
  MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
                                         Semantics, MIRBuilder, GR);
  Register ValueReg = Call->Arguments[1];
  Register ValueTypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  // Support cl_ext_float_atomics.
  if (Call->ReturnType->getOpcode() == SPIRV::OpTypeFloat) {
    if (Opcode == SPIRV::OpAtomicIAdd) {
      Opcode = SPIRV::OpAtomicFAddEXT;
    } else if (Opcode == SPIRV::OpAtomicISub) {
      // Translate OpAtomicISub applied to a floating type argument to
      // OpAtomicFAddEXT with the negated value operand.
      Opcode = SPIRV::OpAtomicFAddEXT;
      Register NegValueReg =
          MRI->createGenericVirtualRegister(MRI->getType(ValueReg));
      MRI->setRegClass(NegValueReg, GR->getRegClass(Call->ReturnType));
      GR->assignSPIRVTypeToVReg(Call->ReturnType, NegValueReg,
                                MIRBuilder.getMF());
      MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
          .addDef(NegValueReg)
          .addUse(ValueReg);
      insertAssignInstr(NegValueReg, nullptr, Call->ReturnType, GR, MIRBuilder,
                        MIRBuilder.getMF().getRegInfo());
      ValueReg = NegValueReg;
    }
  }
  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(ValueTypeReg)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg)
      .addUse(ValueReg);
  return true;
}

/// Helper function for building an atomic floating-type instruction.
static bool buildAtomicFloatingRMWInst(const SPIRV::IncomingCall *Call,
                                       unsigned Opcode,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  assert(Call->Arguments.size() == 4 &&
         "Wrong number of arguments for atomic floating-type builtin");
  Register PtrReg = Call->Arguments[0];
  Register ScopeReg = Call->Arguments[1];
  Register MemSemanticsReg = Call->Arguments[2];
  Register ValueReg = Call->Arguments[3];
  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(PtrReg)
      .addUse(ScopeReg)
      .addUse(MemSemanticsReg)
      .addUse(ValueReg);
  return true;
}

/// Helper function for building atomic flag instructions (e.g.
/// OpAtomicFlagTestAndSet).
static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call,
                                unsigned Opcode, MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  bool IsSet = Opcode == SPIRV::OpAtomicFlagTestAndSet;
  Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              IsSet ? TypeReg : Register(0));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register PtrRegister = Call->Arguments[0];
  unsigned Semantics = SPIRV::MemorySemantics::SequentiallyConsistent;
  Register MemSemanticsReg =
      Call->Arguments.size() >= 2 ? Call->Arguments[1] : Register();
  MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
                                         Semantics, MIRBuilder, GR);

  assert((Opcode != SPIRV::OpAtomicFlagClear ||
          (Semantics != SPIRV::MemorySemantics::Acquire &&
           Semantics != SPIRV::MemorySemantics::AcquireRelease)) &&
         "Invalid memory order argument!");

  Register ScopeRegister =
      Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
  ScopeRegister =
      buildScopeReg(ScopeRegister, SPIRV::Scope::Device, MIRBuilder, GR, MRI);

  auto MIB = MIRBuilder.buildInstr(Opcode);
  if (IsSet)
    MIB.addDef(Call->ReturnRegister).addUse(TypeReg);

  MIB.addUse(PtrRegister).addUse(ScopeRegister).addUse(MemSemanticsReg);
  return true;
}

/// Helper function for building barriers, i.e., memory/control ordering
/// operations.
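/// For example (illustrative), OpenCL "barrier(CLK_LOCAL_MEM_FENCE)" lowers to
/// OpControlBarrier with Workgroup execution and memory scopes and
/// (WorkgroupMemory | SequentiallyConsistent) memory semantics.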
static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                             MachineIRBuilder &MIRBuilder,
                             SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const auto *ST =
      static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
  if ((Opcode == SPIRV::OpControlBarrierArriveINTEL ||
       Opcode == SPIRV::OpControlBarrierWaitINTEL) &&
      !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_split_barrier)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_split_barrier";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  unsigned MemFlags = getIConstVal(Call->Arguments[0], MRI);
  unsigned MemSemantics = SPIRV::MemorySemantics::None;

  if (MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::WorkgroupMemory;

  if (MemFlags & SPIRV::CLK_GLOBAL_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::CrossWorkgroupMemory;

  if (MemFlags & SPIRV::CLK_IMAGE_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::ImageMemory;

  if (Opcode == SPIRV::OpMemoryBarrier)
    MemSemantics = getSPIRVMemSemantics(static_cast<std::memory_order>(
                       getIConstVal(Call->Arguments[1], MRI))) |
                   MemSemantics;
  else if (Opcode == SPIRV::OpControlBarrierArriveINTEL)
    MemSemantics |= SPIRV::MemorySemantics::Release;
  else if (Opcode == SPIRV::OpControlBarrierWaitINTEL)
    MemSemantics |= SPIRV::MemorySemantics::Acquire;
  else
    MemSemantics |= SPIRV::MemorySemantics::SequentiallyConsistent;

  Register MemSemanticsReg =
      MemFlags == MemSemantics
          ? Call->Arguments[0]
          : buildConstantIntReg32(MemSemantics, MIRBuilder, GR);
  Register ScopeReg;
  SPIRV::Scope::Scope Scope = SPIRV::Scope::Workgroup;
  SPIRV::Scope::Scope MemScope = Scope;
  if (Call->Arguments.size() >= 2) {
    assert(
        ((Opcode != SPIRV::OpMemoryBarrier && Call->Arguments.size() == 2) ||
         (Opcode == SPIRV::OpMemoryBarrier && Call->Arguments.size() == 3)) &&
        "Extra args for explicitly scoped barrier");
    Register ScopeArg = (Opcode == SPIRV::OpMemoryBarrier) ? Call->Arguments[2]
                                                           : Call->Arguments[1];
    SPIRV::CLMemoryScope CLScope =
        static_cast<SPIRV::CLMemoryScope>(getIConstVal(ScopeArg, MRI));
    MemScope = getSPIRVScope(CLScope);
    if (!(MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE) ||
        (Opcode == SPIRV::OpMemoryBarrier))
      Scope = MemScope;
    if (CLScope == static_cast<unsigned>(Scope))
      ScopeReg = Call->Arguments[1];
  }

  if (!ScopeReg.isValid())
    ScopeReg = buildConstantIntReg32(Scope, MIRBuilder, GR);

  auto MIB = MIRBuilder.buildInstr(Opcode).addUse(ScopeReg);
  if (Opcode != SPIRV::OpMemoryBarrier)
    MIB.addUse(buildConstantIntReg32(MemScope, MIRBuilder, GR));
  MIB.addUse(MemSemanticsReg);
  return true;
}

static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim) {
  switch (dim) {
  case SPIRV::Dim::DIM_1D:
  case SPIRV::Dim::DIM_Buffer:
    return 1;
  case SPIRV::Dim::DIM_2D:
  case SPIRV::Dim::DIM_Cube:
  case SPIRV::Dim::DIM_Rect:
    return 2;
  case SPIRV::Dim::DIM_3D:
    return 3;
  default:
    report_fatal_error("Cannot get num components for given Dim");
  }
}

/// Helper function for obtaining the number of size components.
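/// For example (illustrative), a 2D arrayed image has 2 + 1 = 3 size
/// components.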
static unsigned getNumSizeComponents(SPIRVType *imgType) {
  assert(imgType->getOpcode() == SPIRV::OpTypeImage);
  auto dim = static_cast<SPIRV::Dim::Dim>(imgType->getOperand(2).getImm());
  unsigned numComps = getNumComponentsForDim(dim);
  bool arrayed = imgType->getOperand(4).getImm() == 1;
  return arrayed ? numComps + 1 : numComps;
}

//===----------------------------------------------------------------------===//
// Implementation functions for each builtin group
//===----------------------------------------------------------------------===//

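/// Lowers a builtin backed by an extended instruction set. For example
/// (illustrative), OpenCL "fmax" becomes an OpExtInst selecting "fmax" from
/// the OpenCL_std set, with the call arguments appended as operands.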
static bool generateExtInst(const SPIRV::IncomingCall *Call,
                            MachineIRBuilder &MIRBuilder,
                            SPIRVGlobalRegistry *GR) {
  // Lookup the extended instruction number in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  uint32_t Number =
      SPIRV::lookupExtendedBuiltin(Builtin->Name, Builtin->Set)->Number;

  // Build extended instruction.
  auto MIB =
      MIRBuilder.buildInstr(SPIRV::OpExtInst)
          .addDef(Call->ReturnRegister)
          .addUse(GR->getSPIRVTypeID(Call->ReturnType))
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
          .addImm(Number);

  for (auto Argument : Call->Arguments)
    MIB.addUse(Argument);
  return true;
}

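/// Lowers a relational builtin such as OpenCL "isequal" (illustrative): the
/// comparison is built into a bool (or bool-vector) register, and OpSelect
/// then expands it to the OpenCL C encoding of the result (scalar 1/0,
/// vector all-ones/0).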
static bool generateRelationalInst(const SPIRV::IncomingCall *Call,
                                   MachineIRBuilder &MIRBuilder,
                                   SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  Register CompareRegister;
  SPIRVType *RelationType;
  std::tie(CompareRegister, RelationType) =
      buildBoolRegister(MIRBuilder, Call->ReturnType, GR);

  // Build relational instruction.
  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(CompareRegister)
                 .addUse(GR->getSPIRVTypeID(RelationType));

  for (auto Argument : Call->Arguments)
    MIB.addUse(Argument);

  // Build select instruction.
  return buildSelectInst(MIRBuilder, Call->ReturnRegister, CompareRegister,
                         Call->ReturnType, GR);
}

static bool generateGroupInst(const SPIRV::IncomingCall *Call,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const SPIRV::GroupBuiltin *GroupBuiltin =
      SPIRV::lookupGroupBuiltin(Builtin->Name);

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  if (Call->isSpirvOp()) {
    if (GroupBuiltin->NoGroupOperation)
      return buildOpFromWrapper(MIRBuilder, GroupBuiltin->Opcode, Call,
                                GR->getSPIRVTypeID(Call->ReturnType));

    // Group Operation is a literal
    Register GroupOpReg = Call->Arguments[1];
    const MachineInstr *MI = getDefInstrMaybeConstant(GroupOpReg, MRI);
    if (!MI || MI->getOpcode() != TargetOpcode::G_CONSTANT)
      report_fatal_error(
          "Group Operation parameter must be an integer constant");
    uint64_t GrpOp = MI->getOperand(1).getCImm()->getValue().getZExtValue();
    Register ScopeReg = Call->Arguments[0];
    auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
                   .addDef(Call->ReturnRegister)
                   .addUse(GR->getSPIRVTypeID(Call->ReturnType))
                   .addUse(ScopeReg)
                   .addImm(GrpOp);
    for (unsigned i = 2; i < Call->Arguments.size(); ++i)
      MIB.addUse(Call->Arguments[i]);
    return true;
  }

  Register Arg0;
  if (GroupBuiltin->HasBoolArg) {
    SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder);
    Register BoolReg = Call->Arguments[0];
    SPIRVType *BoolRegType = GR->getSPIRVTypeForVReg(BoolReg);
    if (!BoolRegType)
      report_fatal_error("Can't find a register's type definition");
    MachineInstr *ArgInstruction = getDefInstrMaybeConstant(BoolReg, MRI);
    if (ArgInstruction->getOpcode() == TargetOpcode::G_CONSTANT) {
      if (BoolRegType->getOpcode() != SPIRV::OpTypeBool)
        Arg0 = GR->buildConstantInt(getIConstVal(BoolReg, MRI), MIRBuilder,
                                    BoolType);
    } else {
      if (BoolRegType->getOpcode() == SPIRV::OpTypeInt) {
        Arg0 = MRI->createGenericVirtualRegister(LLT::scalar(1));
        MRI->setRegClass(Arg0, &SPIRV::iIDRegClass);
        GR->assignSPIRVTypeToVReg(BoolType, Arg0, MIRBuilder.getMF());
        MIRBuilder.buildICmp(CmpInst::ICMP_NE, Arg0, BoolReg,
                             GR->buildConstantInt(0, MIRBuilder, BoolRegType));
        insertAssignInstr(Arg0, nullptr, BoolType, GR, MIRBuilder,
                          MIRBuilder.getMF().getRegInfo());
      } else if (BoolRegType->getOpcode() != SPIRV::OpTypeBool) {
        report_fatal_error("Expect a boolean argument");
      }
      // if BoolReg is a boolean register, we don't need to do anything
    }
  }

  Register GroupResultRegister = Call->ReturnRegister;
  SPIRVType *GroupResultType = Call->ReturnType;

  // TODO: maybe we need to check whether the result type is already boolean
  // and in this case do not insert select instruction.
  const bool HasBoolReturnTy =
      GroupBuiltin->IsElect || GroupBuiltin->IsAllOrAny ||
      GroupBuiltin->IsAllEqual || GroupBuiltin->IsLogical ||
      GroupBuiltin->IsInverseBallot || GroupBuiltin->IsBallotBitExtract;

  if (HasBoolReturnTy)
    std::tie(GroupResultRegister, GroupResultType) =
        buildBoolRegister(MIRBuilder, Call->ReturnType, GR);

  auto Scope = Builtin->Name.starts_with("sub_group") ? SPIRV::Scope::Subgroup
                                                      : SPIRV::Scope::Workgroup;
  Register ScopeRegister = buildConstantIntReg32(Scope, MIRBuilder, GR);

  Register VecReg;
  if (GroupBuiltin->Opcode == SPIRV::OpGroupBroadcast &&
      Call->Arguments.size() > 2) {
    // For OpGroupBroadcast "LocalId must be an integer datatype. It must be a
    // scalar, a vector with 2 components, or a vector with 3 components.",
    // meaning that we must create a vector from the function arguments if
    // it's a work_group_broadcast(val, local_id_x, local_id_y) or
    // work_group_broadcast(val, local_id_x, local_id_y, local_id_z) call.
    Register ElemReg = Call->Arguments[1];
    SPIRVType *ElemType = GR->getSPIRVTypeForVReg(ElemReg);
    if (!ElemType || ElemType->getOpcode() != SPIRV::OpTypeInt)
      report_fatal_error("Expect an integer <LocalId> argument");
    unsigned VecLen = Call->Arguments.size() - 1;
    VecReg = MRI->createGenericVirtualRegister(
        LLT::fixed_vector(VecLen, MRI->getType(ElemReg)));
    MRI->setRegClass(VecReg, &SPIRV::vIDRegClass);
    SPIRVType *VecType =
        GR->getOrCreateSPIRVVectorType(ElemType, VecLen, MIRBuilder);
    GR->assignSPIRVTypeToVReg(VecType, VecReg, MIRBuilder.getMF());
    auto MIB =
        MIRBuilder.buildInstr(TargetOpcode::G_BUILD_VECTOR).addDef(VecReg);
    for (unsigned i = 1; i < Call->Arguments.size(); i++) {
      MIB.addUse(Call->Arguments[i]);
      setRegClassIfNull(Call->Arguments[i], MRI, GR);
    }
    insertAssignInstr(VecReg, nullptr, VecType, GR, MIRBuilder,
                      MIRBuilder.getMF().getRegInfo());
  }

  // Build work/sub group instruction.
  auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
                 .addDef(GroupResultRegister)
                 .addUse(GR->getSPIRVTypeID(GroupResultType))
                 .addUse(ScopeRegister);

  if (!GroupBuiltin->NoGroupOperation)
    MIB.addImm(GroupBuiltin->GroupOperation);
  if (Call->Arguments.size() > 0) {
    MIB.addUse(Arg0.isValid() ? Arg0 : Call->Arguments[0]);
    setRegClassIfNull(Call->Arguments[0], MRI, GR);
    if (VecReg.isValid())
      MIB.addUse(VecReg);
    else
      for (unsigned i = 1; i < Call->Arguments.size(); i++)
        MIB.addUse(Call->Arguments[i]);
  }

  // Build select instruction.
  if (HasBoolReturnTy)
    buildSelectInst(MIRBuilder, Call->ReturnRegister, GroupResultRegister,
                    Call->ReturnType, GR);
  return true;
}

static bool generateIntelSubgroupsInst(const SPIRV::IncomingCall *Call,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  const SPIRV::IntelSubgroupsBuiltin *IntelSubgroups =
      SPIRV::lookupIntelSubgroupsBuiltin(Builtin->Name);

  if (IntelSubgroups->IsMedia &&
      !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_media_block_io)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_media_block_io";
    report_fatal_error(DiagMsg.c_str(), false);
  } else if (!IntelSubgroups->IsMedia &&
             !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_subgroups";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  uint32_t OpCode = IntelSubgroups->Opcode;
  if (Call->isSpirvOp()) {
    bool IsSet = OpCode != SPIRV::OpSubgroupBlockWriteINTEL &&
                 OpCode != SPIRV::OpSubgroupImageBlockWriteINTEL &&
                 OpCode != SPIRV::OpSubgroupImageMediaBlockWriteINTEL;
    return buildOpFromWrapper(MIRBuilder, OpCode, Call,
                              IsSet ? GR->getSPIRVTypeID(Call->ReturnType)
                                    : Register(0));
  }

  if (IntelSubgroups->IsBlock) {
    // The minimal number of arguments set in TableGen records is 1.
1245     if (SPIRVType *Arg0Type = GR->getSPIRVTypeForVReg(Call->Arguments[0])) {
1246       if (Arg0Type->getOpcode() == SPIRV::OpTypeImage) {
1247         // TODO: add required validation from the specification:
1248         // "'Image' must be an object whose type is OpTypeImage with a 'Sampled'
1249         // operand of 0 or 2. If the 'Sampled' operand is 2, then some
1250         // dimensions require a capability."
1251         switch (OpCode) {
1252         case SPIRV::OpSubgroupBlockReadINTEL:
1253           OpCode = SPIRV::OpSubgroupImageBlockReadINTEL;
1254           break;
1255         case SPIRV::OpSubgroupBlockWriteINTEL:
1256           OpCode = SPIRV::OpSubgroupImageBlockWriteINTEL;
1257           break;
1258         }
1259       }
1260     }
1261   }
1262 
1263   // TODO: opaque pointers types should be eventually resolved in such a way
1264   // that validation of block read is enabled with respect to the following
1265   // specification requirement:
1266   // "'Result Type' may be a scalar or vector type, and its component type must
1267   // be equal to the type pointed to by 'Ptr'."
1268   // For example, function parameter type should not be default i8 pointer, but
1269   // depend on the result type of the instruction where it is used as a pointer
1270   // argument of OpSubgroupBlockReadINTEL
1271 
1272   // Build Intel subgroups instruction
1273   MachineInstrBuilder MIB =
1274       IntelSubgroups->IsWrite
1275           ? MIRBuilder.buildInstr(OpCode)
1276           : MIRBuilder.buildInstr(OpCode)
1277                 .addDef(Call->ReturnRegister)
1278                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1279   for (size_t i = 0; i < Call->Arguments.size(); ++i)
1280     MIB.addUse(Call->Arguments[i]);
1281   return true;
1282 }
1283 
1284 static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call,
1285                                      MachineIRBuilder &MIRBuilder,
1286                                      SPIRVGlobalRegistry *GR) {
1287   const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1288   MachineFunction &MF = MIRBuilder.getMF();
1289   const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
1290   if (!ST->canUseExtension(
1291           SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
1292     std::string DiagMsg = std::string(Builtin->Name) +
1293                           ": the builtin requires the following SPIR-V "
1294                           "extension: SPV_KHR_uniform_group_instructions";
1295     report_fatal_error(DiagMsg.c_str(), false);
1296   }
1297   const SPIRV::GroupUniformBuiltin *GroupUniform =
1298       SPIRV::lookupGroupUniformBuiltin(Builtin->Name);
1299   MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1300 
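  // The builtin call is expected to have the shape
  //   <ret> name(Scope, GroupOperation, Value)
  // where the group operation must be an integer constant (verified below).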
1301   Register GroupResultReg = Call->ReturnRegister;
1302   Register ScopeReg = Call->Arguments[0];
1303   Register ValueReg = Call->Arguments[2];
1304 
1305   // Group Operation
1306   Register ConstGroupOpReg = Call->Arguments[1];
1307   const MachineInstr *Const = getDefInstrMaybeConstant(ConstGroupOpReg, MRI);
1308   if (!Const || Const->getOpcode() != TargetOpcode::G_CONSTANT)
1309     report_fatal_error(
1310         "expect a constant group operation for a uniform group instruction",
1311         false);
1312   const MachineOperand &ConstOperand = Const->getOperand(1);
1313   if (!ConstOperand.isCImm())
1314     report_fatal_error("uniform group instructions: group operation must be an "
1315                        "integer constant",
1316                        false);
1317 
1318   auto MIB = MIRBuilder.buildInstr(GroupUniform->Opcode)
1319                  .addDef(GroupResultReg)
1320                  .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1321                  .addUse(ScopeReg);
1322   addNumImm(ConstOperand.getCImm()->getValue(), MIB);
1323   MIB.addUse(ValueReg);
1324 
1325   return true;
1326 }
1327 
1328 static bool generateKernelClockInst(const SPIRV::IncomingCall *Call,
1329                                     MachineIRBuilder &MIRBuilder,
1330                                     SPIRVGlobalRegistry *GR) {
1331   const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1332   MachineFunction &MF = MIRBuilder.getMF();
1333   const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
1334   if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock)) {
1335     std::string DiagMsg = std::string(Builtin->Name) +
1336                           ": the builtin requires the following SPIR-V "
1337                           "extension: SPV_KHR_shader_clock";
1338     report_fatal_error(DiagMsg.c_str(), false);
1339   }
1340 
1341   Register ResultReg = Call->ReturnRegister;
1342 
1343   // Deduce the `Scope` operand from the builtin function name.
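  // E.g. a name ending in "device" selects Device scope, "work_group" selects
  // Workgroup, and "sub_group" selects Subgroup.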
1344   SPIRV::Scope::Scope ScopeArg =
1345       StringSwitch<SPIRV::Scope::Scope>(Builtin->Name)
1346           .EndsWith("device", SPIRV::Scope::Scope::Device)
1347           .EndsWith("work_group", SPIRV::Scope::Scope::Workgroup)
1348           .EndsWith("sub_group", SPIRV::Scope::Scope::Subgroup);
1349   Register ScopeReg = buildConstantIntReg32(ScopeArg, MIRBuilder, GR);
1350 
1351   MIRBuilder.buildInstr(SPIRV::OpReadClockKHR)
1352       .addDef(ResultReg)
1353       .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1354       .addUse(ScopeReg);
1355 
1356   return true;
1357 }
1358 
1359 // These queries ask for a single size_t result for a given dimension index,
1360 // e.g. size_t get_global_id(uint dimindex). In SPIR-V, the builtins
1361 // corresponding to these values are all vec3 types, so we need to extract the
1362 // correct index or return defaultVal (0 or 1 depending on the query). We also
1363 // handle extending or truncating in case size_t does not match the expected
1364 // result type's bitwidth.
1365 //
1366 // For a constant index >= 3 we generate:
1367 //  %res = OpConstant %SizeT 0
1368 //
1369 // For other indices we generate:
1370 //  %g = OpVariable %ptr_V3_SizeT Input
1371 //  OpDecorate %g BuiltIn XXX
1372 //  OpDecorate %g LinkageAttributes "__spirv_BuiltInXXX"
1373 //  OpDecorate %g Constant
1374 //  %loadedVec = OpLoad %V3_SizeT %g
1375 //
1376 //  Then, if the index is constant < 3, we generate:
1377 //    %res = OpCompositeExtract %SizeT %loadedVec idx
1378 //  If the index is dynamic, we generate:
1379 //    %tmp = OpVectorExtractDynamic %SizeT %loadedVec %idx
1380 //    %cmp = OpULessThan %bool %idx %const_3
1381 //    %res = OpSelect %SizeT %cmp %tmp %const_0
1382 //
1383 //  If the bitwidth of %res does not match the expected return type, we add an
1384 //  extend or truncate.
1385 static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call,
1386                               MachineIRBuilder &MIRBuilder,
1387                               SPIRVGlobalRegistry *GR,
1388                               SPIRV::BuiltIn::BuiltIn BuiltinValue,
1389                               uint64_t DefaultValue) {
1390   Register IndexRegister = Call->Arguments[0];
1391   const unsigned ResultWidth = Call->ReturnType->getOperand(1).getImm();
1392   const unsigned PointerSize = GR->getPointerSize();
1393   const SPIRVType *PointerSizeType =
1394       GR->getOrCreateSPIRVIntegerType(PointerSize, MIRBuilder);
1395   MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1396   auto IndexInstruction = getDefInstrMaybeConstant(IndexRegister, MRI);
1397 
1398   // Set up the final register to truncate or extend at the end.
1399   Register ToTruncate = Call->ReturnRegister;
1400 
1401   // If the index is constant, we can statically determine if it is in range.
1402   bool IsConstantIndex =
1403       IndexInstruction->getOpcode() == TargetOpcode::G_CONSTANT;
1404 
1405   // If it's out of range (valid dimension indices are 0..2), we can just
1406   // return the constant default value (0 or 1 depending on the query function).
1407   if (IsConstantIndex && getIConstVal(IndexRegister, MRI) >= 3) {
1408     Register DefaultReg = Call->ReturnRegister;
1409     if (PointerSize != ResultWidth) {
1410       DefaultReg = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1411       MRI->setRegClass(DefaultReg, &SPIRV::iIDRegClass);
1412       GR->assignSPIRVTypeToVReg(PointerSizeType, DefaultReg,
1413                                 MIRBuilder.getMF());
1414       ToTruncate = DefaultReg;
1415     }
1416     auto NewRegister =
1417         GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType);
1418     MIRBuilder.buildCopy(DefaultReg, NewRegister);
1419   } else { // If it could be in range, we need to load from the given builtin.
1420     auto Vec3Ty =
1421         GR->getOrCreateSPIRVVectorType(PointerSizeType, 3, MIRBuilder);
1422     Register LoadedVector =
1423         buildBuiltinVariableLoad(MIRBuilder, Vec3Ty, GR, BuiltinValue,
1424                                  LLT::fixed_vector(3, PointerSize));
1425     // Set up the vreg to extract the result to (possibly a new temporary one).
1426     Register Extracted = Call->ReturnRegister;
1427     if (!IsConstantIndex || PointerSize != ResultWidth) {
1428       Extracted = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1429       MRI->setRegClass(Extracted, &SPIRV::iIDRegClass);
1430       GR->assignSPIRVTypeToVReg(PointerSizeType, Extracted, MIRBuilder.getMF());
1431     }
1432     // Use Intrinsic::spv_extractelt so dynamic vs static extraction is
1433     // handled later: extr = spv_extractelt LoadedVector, IndexRegister.
1434     MachineInstrBuilder ExtractInst = MIRBuilder.buildIntrinsic(
1435         Intrinsic::spv_extractelt, ArrayRef<Register>{Extracted}, true, false);
1436     ExtractInst.addUse(LoadedVector).addUse(IndexRegister);
1437 
1438     // If the index is dynamic, we need to check if it's < 3 and use a select.
1439     if (!IsConstantIndex) {
1440       insertAssignInstr(Extracted, nullptr, PointerSizeType, GR, MIRBuilder,
1441                         *MRI);
1442 
1443       auto IndexType = GR->getSPIRVTypeForVReg(IndexRegister);
1444       auto BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder);
1445 
1446       Register CompareRegister =
1447           MRI->createGenericVirtualRegister(LLT::scalar(1));
1448       MRI->setRegClass(CompareRegister, &SPIRV::iIDRegClass);
1449       GR->assignSPIRVTypeToVReg(BoolType, CompareRegister, MIRBuilder.getMF());
1450 
1451       // Use G_ICMP to check if idxVReg < 3.
1452       MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CompareRegister, IndexRegister,
1453                            GR->buildConstantInt(3, MIRBuilder, IndexType));
1454 
1455       // Get constant for the default value (0 or 1 depending on which
1456       // function).
1457       Register DefaultRegister =
1458           GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType);
1459 
1460       // Get a register for the selection result (possibly a new temporary one).
1461       Register SelectionResult = Call->ReturnRegister;
1462       if (PointerSize != ResultWidth) {
1463         SelectionResult =
1464             MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1465         MRI->setRegClass(SelectionResult, &SPIRV::iIDRegClass);
1466         GR->assignSPIRVTypeToVReg(PointerSizeType, SelectionResult,
1467                                   MIRBuilder.getMF());
1468       }
1469       // Create the final G_SELECT to return the extracted value or the default.
1470       MIRBuilder.buildSelect(SelectionResult, CompareRegister, Extracted,
1471                              DefaultRegister);
1472       ToTruncate = SelectionResult;
1473     } else {
1474       ToTruncate = Extracted;
1475     }
1476   }
1477   // Alter the result's bitwidth if it does not match the extracted SizeT value.
1478   if (PointerSize != ResultWidth)
1479     MIRBuilder.buildZExtOrTrunc(Call->ReturnRegister, ToTruncate);
1480   return true;
1481 }
1482 
1483 static bool generateBuiltinVar(const SPIRV::IncomingCall *Call,
1484                                MachineIRBuilder &MIRBuilder,
1485                                SPIRVGlobalRegistry *GR) {
1486   // Lookup the builtin variable record.
1487   const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1488   SPIRV::BuiltIn::BuiltIn Value =
1489       SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1490 
1491   if (Value == SPIRV::BuiltIn::GlobalInvocationId)
1492     return genWorkgroupQuery(Call, MIRBuilder, GR, Value, 0);
1493 
1494   // Build a load instruction for the builtin variable.
1495   unsigned BitWidth = GR->getScalarOrVectorBitWidth(Call->ReturnType);
1496   LLT LLType;
1497   if (Call->ReturnType->getOpcode() == SPIRV::OpTypeVector)
1498     LLType =
1499         LLT::fixed_vector(Call->ReturnType->getOperand(2).getImm(), BitWidth);
1500   else
1501     LLType = LLT::scalar(BitWidth);
1502 
1503   return buildBuiltinVariableLoad(MIRBuilder, Call->ReturnType, GR, Value,
1504                                   LLType, Call->ReturnRegister);
1505 }
1506 
1507 static bool generateAtomicInst(const SPIRV::IncomingCall *Call,
1508                                MachineIRBuilder &MIRBuilder,
1509                                SPIRVGlobalRegistry *GR) {
1510   // Lookup the instruction opcode in the TableGen records.
1511   const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1512   unsigned Opcode =
1513       SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1514 
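  // E.g. (illustrative) "atomic_load" maps to OpAtomicLoad and
  // "atomic_fetch_add" to OpAtomicIAdd via the TableGen NativeBuiltins table.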
1515   switch (Opcode) {
1516   case SPIRV::OpStore:
1517     return buildAtomicInitInst(Call, MIRBuilder);
1518   case SPIRV::OpAtomicLoad:
1519     return buildAtomicLoadInst(Call, MIRBuilder, GR);
1520   case SPIRV::OpAtomicStore:
1521     return buildAtomicStoreInst(Call, MIRBuilder, GR);
1522   case SPIRV::OpAtomicCompareExchange:
1523   case SPIRV::OpAtomicCompareExchangeWeak:
1524     return buildAtomicCompareExchangeInst(Call, Builtin, Opcode, MIRBuilder,
1525                                           GR);
1526   case SPIRV::OpAtomicIAdd:
1527   case SPIRV::OpAtomicISub:
1528   case SPIRV::OpAtomicOr:
1529   case SPIRV::OpAtomicXor:
1530   case SPIRV::OpAtomicAnd:
1531   case SPIRV::OpAtomicExchange:
1532     return buildAtomicRMWInst(Call, Opcode, MIRBuilder, GR);
1533   case SPIRV::OpMemoryBarrier:
1534     return buildBarrierInst(Call, SPIRV::OpMemoryBarrier, MIRBuilder, GR);
1535   case SPIRV::OpAtomicFlagTestAndSet:
1536   case SPIRV::OpAtomicFlagClear:
1537     return buildAtomicFlagInst(Call, Opcode, MIRBuilder, GR);
1538   default:
1539     if (Call->isSpirvOp())
1540       return buildOpFromWrapper(MIRBuilder, Opcode, Call,
1541                                 GR->getSPIRVTypeID(Call->ReturnType));
1542     return false;
1543   }
1544 }
1545 
1546 static bool generateAtomicFloatingInst(const SPIRV::IncomingCall *Call,
1547                                        MachineIRBuilder &MIRBuilder,
1548                                        SPIRVGlobalRegistry *GR) {
1549   // Lookup the instruction opcode in the TableGen records.
1550   const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1551   unsigned Opcode = SPIRV::lookupAtomicFloatingBuiltin(Builtin->Name)->Opcode;
1552 
1553   switch (Opcode) {
1554   case SPIRV::OpAtomicFAddEXT:
1555   case SPIRV::OpAtomicFMinEXT:
1556   case SPIRV::OpAtomicFMaxEXT:
1557     return buildAtomicFloatingRMWInst(Call, Opcode, MIRBuilder, GR);
1558   default:
1559     return false;
1560   }
1561 }
1562 
1563 static bool generateBarrierInst(const SPIRV::IncomingCall *Call,
1564                                 MachineIRBuilder &MIRBuilder,
1565                                 SPIRVGlobalRegistry *GR) {
1566   // Lookup the instruction opcode in the TableGen records.
1567   const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1568   unsigned Opcode =
1569       SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1570 
1571   return buildBarrierInst(Call, Opcode, MIRBuilder, GR);
1572 }
1573 
1574 static bool generateCastToPtrInst(const SPIRV::IncomingCall *Call,
1575                                   MachineIRBuilder &MIRBuilder) {
1576   MIRBuilder.buildInstr(TargetOpcode::G_ADDRSPACE_CAST)
1577       .addDef(Call->ReturnRegister)
1578       .addUse(Call->Arguments[0]);
1579   return true;
1580 }
1581 
1582 static bool generateDotOrFMulInst(const SPIRV::IncomingCall *Call,
1583                                   MachineIRBuilder &MIRBuilder,
1584                                   SPIRVGlobalRegistry *GR) {
1585   if (Call->isSpirvOp())
1586     return buildOpFromWrapper(MIRBuilder, SPIRV::OpDot, Call,
1587                               GR->getSPIRVTypeID(Call->ReturnType));
1588   unsigned Opcode = GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode();
1589   bool IsVec = Opcode == SPIRV::OpTypeVector;
1590   // Use OpDot for vector arguments and OpFMulS for scalar arguments.
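  // E.g. (illustrative) dot(float4, float4) selects OpDot, while dot on two
  // scalars lowers to a plain floating-point multiply.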
1591   MIRBuilder.buildInstr(IsVec ? SPIRV::OpDot : SPIRV::OpFMulS)
1592       .addDef(Call->ReturnRegister)
1593       .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1594       .addUse(Call->Arguments[0])
1595       .addUse(Call->Arguments[1]);
1596   return true;
1597 }
1598 
1599 static bool generateWaveInst(const SPIRV::IncomingCall *Call,
1600                              MachineIRBuilder &MIRBuilder,
1601                              SPIRVGlobalRegistry *GR) {
1602   const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1603   SPIRV::BuiltIn::BuiltIn Value =
1604       SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1605 
1606   // For now, we only support a single Wave intrinsic with a single return type.
1607   assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt);
1608   LLT LLType = LLT::scalar(GR->getScalarOrVectorBitWidth(Call->ReturnType));
1609 
1610   return buildBuiltinVariableLoad(
1611       MIRBuilder, Call->ReturnType, GR, Value, LLType, Call->ReturnRegister,
1612       /* isConst= */ false, /* hasLinkageTy= */ false);
1613 }
1614 
1615 // We expect a builtin
1616 //     Name(ptr sret([RetType]) %result, Type %operand1, Type %operand2)
1617 // where %result is a pointer to where the result of the builtin execution
1618 // is to be stored, and generate the following instructions:
1619 //     Res = Opcode RetType Operand1 Operand2
1620 //     OpStore RetVariable Res
1621 static bool generateICarryBorrowInst(const SPIRV::IncomingCall *Call,
1622                                      MachineIRBuilder &MIRBuilder,
1623                                      SPIRVGlobalRegistry *GR) {
1624   const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1625   unsigned Opcode =
1626       SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1627 
1628   Register SRetReg = Call->Arguments[0];
1629   SPIRVType *PtrRetType = GR->getSPIRVTypeForVReg(SRetReg);
1630   SPIRVType *RetType = GR->getPointeeType(PtrRetType);
1631   if (!RetType)
1632     report_fatal_error("The first parameter must be a pointer");
1633   if (RetType->getOpcode() != SPIRV::OpTypeStruct)
1634     report_fatal_error("Expected struct type result for the arithmetic with "
1635                        "overflow builtins");
1636 
1637   SPIRVType *OpType1 = GR->getSPIRVTypeForVReg(Call->Arguments[1]);
1638   SPIRVType *OpType2 = GR->getSPIRVTypeForVReg(Call->Arguments[2]);
1639   if (!OpType1 || !OpType2 || OpType1 != OpType2)
1640     report_fatal_error("Operands must have the same type");
1641   if (OpType1->getOpcode() == SPIRV::OpTypeVector)
1642     switch (Opcode) {
1643     case SPIRV::OpIAddCarryS:
1644       Opcode = SPIRV::OpIAddCarryV;
1645       break;
1646     case SPIRV::OpISubBorrowS:
1647       Opcode = SPIRV::OpISubBorrowV;
1648       break;
1649     }
1650 
1651   MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1652   Register ResReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1653   if (const TargetRegisterClass *DstRC =
1654           MRI->getRegClassOrNull(Call->Arguments[1])) {
1655     MRI->setRegClass(ResReg, DstRC);
1656     MRI->setType(ResReg, MRI->getType(Call->Arguments[1]));
1657   } else {
1658     MRI->setType(ResReg, LLT::scalar(64));
1659   }
1660   GR->assignSPIRVTypeToVReg(RetType, ResReg, MIRBuilder.getMF());
1661   MIRBuilder.buildInstr(Opcode)
1662       .addDef(ResReg)
1663       .addUse(GR->getSPIRVTypeID(RetType))
1664       .addUse(Call->Arguments[1])
1665       .addUse(Call->Arguments[2]);
1666   MIRBuilder.buildInstr(SPIRV::OpStore).addUse(SRetReg).addUse(ResReg);
1667   return true;
1668 }
1669 
1670 static bool generateGetQueryInst(const SPIRV::IncomingCall *Call,
1671                                  MachineIRBuilder &MIRBuilder,
1672                                  SPIRVGlobalRegistry *GR) {
1673   // Lookup the builtin record.
1674   SPIRV::BuiltIn::BuiltIn Value =
1675       SPIRV::lookupGetBuiltin(Call->Builtin->Name, Call->Builtin->Set)->Value;
1676   uint64_t IsDefault = (Value == SPIRV::BuiltIn::GlobalSize ||
1677                         Value == SPIRV::BuiltIn::WorkgroupSize ||
1678                         Value == SPIRV::BuiltIn::EnqueuedWorkgroupSize);
1679   return genWorkgroupQuery(Call, MIRBuilder, GR, Value, IsDefault ? 1 : 0);
1680 }
1681 
1682 static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call,
1683                                        MachineIRBuilder &MIRBuilder,
1684                                        SPIRVGlobalRegistry *GR) {
1685   // Lookup the image size query component number in the TableGen records.
1686   const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1687   uint32_t Component =
1688       SPIRV::lookupImageQueryBuiltin(Builtin->Name, Builtin->Set)->Component;
1689   // The query result may be either a vector or a scalar. If the return type
1690   // is not a vector, expect only a single size component. Otherwise get the
1691   // number of expected components.
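  // E.g. (illustrative) a size query on a 2D image produces two components;
  // when the caller expects a scalar or a differently sized vector, the
  // result is adjusted below via OpCompositeExtract or OpVectorShuffle.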
1692   SPIRVType *RetTy = Call->ReturnType;
1693   unsigned NumExpectedRetComponents = RetTy->getOpcode() == SPIRV::OpTypeVector
1694                                           ? RetTy->getOperand(2).getImm()
1695                                           : 1;
1696   // Get the actual number of query result/size components.
1697   SPIRVType *ImgType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
1698   unsigned NumActualRetComponents = getNumSizeComponents(ImgType);
1699   Register QueryResult = Call->ReturnRegister;
1700   SPIRVType *QueryResultType = Call->ReturnType;
1701   if (NumExpectedRetComponents != NumActualRetComponents) {
1702     QueryResult = MIRBuilder.getMRI()->createGenericVirtualRegister(
1703         LLT::fixed_vector(NumActualRetComponents, 32));
1704     MIRBuilder.getMRI()->setRegClass(QueryResult, &SPIRV::vIDRegClass);
1705     SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
1706     QueryResultType = GR->getOrCreateSPIRVVectorType(
1707         IntTy, NumActualRetComponents, MIRBuilder);
1708     GR->assignSPIRVTypeToVReg(QueryResultType, QueryResult, MIRBuilder.getMF());
1709   }
1710   bool IsDimBuf = ImgType->getOperand(2).getImm() == SPIRV::Dim::DIM_Buffer;
1711   unsigned Opcode =
1712       IsDimBuf ? SPIRV::OpImageQuerySize : SPIRV::OpImageQuerySizeLod;
1713   auto MIB = MIRBuilder.buildInstr(Opcode)
1714                  .addDef(QueryResult)
1715                  .addUse(GR->getSPIRVTypeID(QueryResultType))
1716                  .addUse(Call->Arguments[0]);
1717   if (!IsDimBuf)
1718     MIB.addUse(buildConstantIntReg32(0, MIRBuilder, GR)); // Lod id.
1719   if (NumExpectedRetComponents == NumActualRetComponents)
1720     return true;
1721   if (NumExpectedRetComponents == 1) {
1722     // Only 1 component is expected; build an OpCompositeExtract instruction.
1723     unsigned ExtractedComposite =
1724         Component == 3 ? NumActualRetComponents - 1 : Component;
1725     assert(ExtractedComposite < NumActualRetComponents &&
1726            "Invalid composite index!");
1727     Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
1728     SPIRVType *NewType = nullptr;
1729     if (QueryResultType->getOpcode() == SPIRV::OpTypeVector) {
1730       Register NewTypeReg = QueryResultType->getOperand(1).getReg();
1731       if (TypeReg != NewTypeReg &&
1732           (NewType = GR->getSPIRVTypeForVReg(NewTypeReg)) != nullptr)
1733         TypeReg = NewTypeReg;
1734     }
1735     MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
1736         .addDef(Call->ReturnRegister)
1737         .addUse(TypeReg)
1738         .addUse(QueryResult)
1739         .addImm(ExtractedComposite);
1740     if (NewType != nullptr)
1741       insertAssignInstr(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
1742                         MIRBuilder.getMF().getRegInfo());
1743   } else {
1744     // More than 1 component is expected; fill a new vector.
1745     auto MIB = MIRBuilder.buildInstr(SPIRV::OpVectorShuffle)
1746                    .addDef(Call->ReturnRegister)
1747                    .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1748                    .addUse(QueryResult)
1749                    .addUse(QueryResult);
1750     for (unsigned i = 0; i < NumExpectedRetComponents; ++i)
1751       MIB.addImm(i < NumActualRetComponents ? i : 0xffffffff);
1752   }
1753   return true;
1754 }
1755 
1756 static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call,
1757                                        MachineIRBuilder &MIRBuilder,
1758                                        SPIRVGlobalRegistry *GR) {
1759   assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt &&
1760          "Image samples query result must be of int type!");
1761 
1762   // Lookup the instruction opcode in the TableGen records.
1763   const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1764   unsigned Opcode =
1765       SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1766 
1767   Register Image = Call->Arguments[0];
1768   SPIRV::Dim::Dim ImageDimensionality = static_cast<SPIRV::Dim::Dim>(
1769       GR->getSPIRVTypeForVReg(Image)->getOperand(2).getImm());
1770   (void)ImageDimensionality;
1771 
1772   switch (Opcode) {
1773   case SPIRV::OpImageQuerySamples:
1774     assert(ImageDimensionality == SPIRV::Dim::DIM_2D &&
1775            "Image must be of 2D dimensionality");
1776     break;
1777   case SPIRV::OpImageQueryLevels:
1778     assert((ImageDimensionality == SPIRV::Dim::DIM_1D ||
1779             ImageDimensionality == SPIRV::Dim::DIM_2D ||
1780             ImageDimensionality == SPIRV::Dim::DIM_3D ||
1781             ImageDimensionality == SPIRV::Dim::DIM_Cube) &&
1782            "Image must be of 1D/2D/3D/Cube dimensionality");
1783     break;
1784   }
1785 
1786   MIRBuilder.buildInstr(Opcode)
1787       .addDef(Call->ReturnRegister)
1788       .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1789       .addUse(Image);
1790   return true;
1791 }
1792 
1793 // TODO: Move to TableGen.
1794 static SPIRV::SamplerAddressingMode::SamplerAddressingMode
1795 getSamplerAddressingModeFromBitmask(unsigned Bitmask) {
1796   switch (Bitmask & SPIRV::CLK_ADDRESS_MODE_MASK) {
1797   case SPIRV::CLK_ADDRESS_CLAMP:
1798     return SPIRV::SamplerAddressingMode::Clamp;
1799   case SPIRV::CLK_ADDRESS_CLAMP_TO_EDGE:
1800     return SPIRV::SamplerAddressingMode::ClampToEdge;
1801   case SPIRV::CLK_ADDRESS_REPEAT:
1802     return SPIRV::SamplerAddressingMode::Repeat;
1803   case SPIRV::CLK_ADDRESS_MIRRORED_REPEAT:
1804     return SPIRV::SamplerAddressingMode::RepeatMirrored;
1805   case SPIRV::CLK_ADDRESS_NONE:
1806     return SPIRV::SamplerAddressingMode::None;
1807   default:
1808     report_fatal_error("Unknown CL address mode");
1809   }
1810 }
1811 
1812 static unsigned getSamplerParamFromBitmask(unsigned Bitmask) {
1813   return (Bitmask & SPIRV::CLK_NORMALIZED_COORDS_TRUE) ? 1 : 0;
1814 }
1815 
1816 static SPIRV::SamplerFilterMode::SamplerFilterMode
1817 getSamplerFilterModeFromBitmask(unsigned Bitmask) {
1818   if (Bitmask & SPIRV::CLK_FILTER_LINEAR)
1819     return SPIRV::SamplerFilterMode::Linear;
1820   if (Bitmask & SPIRV::CLK_FILTER_NEAREST)
1821     return SPIRV::SamplerFilterMode::Nearest;
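  // Default to Nearest when no filter mode bit is set in the bitmask.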
1822   return SPIRV::SamplerFilterMode::Nearest;
1823 }
1824 
1825 static bool generateReadImageInst(const StringRef DemangledCall,
1826                                   const SPIRV::IncomingCall *Call,
1827                                   MachineIRBuilder &MIRBuilder,
1828                                   SPIRVGlobalRegistry *GR) {
1829   Register Image = Call->Arguments[0];
1830   MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1831   bool HasOclSampler = DemangledCall.contains_insensitive("ocl_sampler");
1832   bool HasMsaa = DemangledCall.contains_insensitive("msaa");
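  // Three forms are handled below: a sampled read (OpSampledImage +
  // OpImageSampleExplicitLod), an MSAA read (OpImageRead with a Sample image
  // operand), and a plain OpImageRead.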
1833   if (HasOclSampler) {
1834     Register Sampler = Call->Arguments[1];
1835 
1836     if (!GR->isScalarOfType(Sampler, SPIRV::OpTypeSampler) &&
1837         getDefInstrMaybeConstant(Sampler, MRI)->getOperand(1).isCImm()) {
1838       uint64_t SamplerMask = getIConstVal(Sampler, MRI);
1839       Sampler = GR->buildConstantSampler(
1840           Register(), getSamplerAddressingModeFromBitmask(SamplerMask),
1841           getSamplerParamFromBitmask(SamplerMask),
1842           getSamplerFilterModeFromBitmask(SamplerMask), MIRBuilder,
1843           GR->getSPIRVTypeForVReg(Sampler));
1844     }
1845     SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
1846     SPIRVType *SampledImageType =
1847         GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
1848     Register SampledImage = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1849 
1850     MIRBuilder.buildInstr(SPIRV::OpSampledImage)
1851         .addDef(SampledImage)
1852         .addUse(GR->getSPIRVTypeID(SampledImageType))
1853         .addUse(Image)
1854         .addUse(Sampler);
1855 
1856     Register Lod = GR->buildConstantFP(APFloat::getZero(APFloat::IEEEsingle()),
1857                                        MIRBuilder);
1858 
1859     if (Call->ReturnType->getOpcode() != SPIRV::OpTypeVector) {
1860       SPIRVType *TempType =
1861           GR->getOrCreateSPIRVVectorType(Call->ReturnType, 4, MIRBuilder);
1862       Register TempRegister =
1863           MRI->createGenericVirtualRegister(GR->getRegType(TempType));
1864       MRI->setRegClass(TempRegister, GR->getRegClass(TempType));
1865       GR->assignSPIRVTypeToVReg(TempType, TempRegister, MIRBuilder.getMF());
1866       MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
1867           .addDef(TempRegister)
1868           .addUse(GR->getSPIRVTypeID(TempType))
1869           .addUse(SampledImage)
1870           .addUse(Call->Arguments[2]) // Coordinate.
1871           .addImm(SPIRV::ImageOperand::Lod)
1872           .addUse(Lod);
1873       MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
1874           .addDef(Call->ReturnRegister)
1875           .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1876           .addUse(TempRegister)
1877           .addImm(0);
1878     } else {
1879       MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
1880           .addDef(Call->ReturnRegister)
1881           .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1882           .addUse(SampledImage)
1883           .addUse(Call->Arguments[2]) // Coordinate.
1884           .addImm(SPIRV::ImageOperand::Lod)
1885           .addUse(Lod);
1886     }
1887   } else if (HasMsaa) {
1888     MIRBuilder.buildInstr(SPIRV::OpImageRead)
1889         .addDef(Call->ReturnRegister)
1890         .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1891         .addUse(Image)
1892         .addUse(Call->Arguments[1]) // Coordinate.
1893         .addImm(SPIRV::ImageOperand::Sample)
1894         .addUse(Call->Arguments[2]);
1895   } else {
1896     MIRBuilder.buildInstr(SPIRV::OpImageRead)
1897         .addDef(Call->ReturnRegister)
1898         .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1899         .addUse(Image)
1900         .addUse(Call->Arguments[1]); // Coordinate.
1901   }
1902   return true;
1903 }
1904 
1905 static bool generateWriteImageInst(const SPIRV::IncomingCall *Call,
1906                                    MachineIRBuilder &MIRBuilder,
1907                                    SPIRVGlobalRegistry *GR) {
1908   MIRBuilder.buildInstr(SPIRV::OpImageWrite)
1909       .addUse(Call->Arguments[0])  // Image.
1910       .addUse(Call->Arguments[1])  // Coordinate.
1911       .addUse(Call->Arguments[2]); // Texel.
1912   return true;
1913 }
1914 
1915 static bool generateSampleImageInst(const StringRef DemangledCall,
1916                                     const SPIRV::IncomingCall *Call,
1917                                     MachineIRBuilder &MIRBuilder,
1918                                     SPIRVGlobalRegistry *GR) {
1919   MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1920   if (Call->Builtin->Name.contains_insensitive(
1921           "__translate_sampler_initializer")) {
1922     // Build sampler literal.
1923     uint64_t Bitmask = getIConstVal(Call->Arguments[0], MRI);
1924     Register Sampler = GR->buildConstantSampler(
1925         Call->ReturnRegister, getSamplerAddressingModeFromBitmask(Bitmask),
1926         getSamplerParamFromBitmask(Bitmask),
1927         getSamplerFilterModeFromBitmask(Bitmask), MIRBuilder, Call->ReturnType);
1928     return Sampler.isValid();
1929   } else if (Call->Builtin->Name.contains_insensitive("__spirv_SampledImage")) {
1930     // Create OpSampledImage.
1931     Register Image = Call->Arguments[0];
1932     SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
1933     SPIRVType *SampledImageType =
1934         GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
1935     Register SampledImage =
1936         Call->ReturnRegister.isValid()
1937             ? Call->ReturnRegister
1938             : MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1939     MIRBuilder.buildInstr(SPIRV::OpSampledImage)
1940         .addDef(SampledImage)
1941         .addUse(GR->getSPIRVTypeID(SampledImageType))
1942         .addUse(Image)
1943         .addUse(Call->Arguments[1]); // Sampler.
1944     return true;
1945   } else if (Call->Builtin->Name.contains_insensitive(
1946                  "__spirv_ImageSampleExplicitLod")) {
1947     // Sample an image using an explicit level of detail.
1948     std::string ReturnType = DemangledCall.str();
1949     if (DemangledCall.contains("_R")) {
1950       ReturnType = ReturnType.substr(ReturnType.find("_R") + 2);
1951       ReturnType = ReturnType.substr(0, ReturnType.find('('));
1952     }
1953     SPIRVType *Type =
1954         Call->ReturnType
1955             ? Call->ReturnType
1956             : GR->getOrCreateSPIRVTypeByName(ReturnType, MIRBuilder);
1957     if (!Type) {
1958       std::string DiagMsg =
1959           "Unable to recognize SPIRV type name: " + ReturnType;
1960       report_fatal_error(DiagMsg.c_str());
1961     }
1962     MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
1963         .addDef(Call->ReturnRegister)
1964         .addUse(GR->getSPIRVTypeID(Type))
1965         .addUse(Call->Arguments[0]) // Image.
1966         .addUse(Call->Arguments[1]) // Coordinate.
1967         .addImm(SPIRV::ImageOperand::Lod)
1968         .addUse(Call->Arguments[3]);
1969     return true;
1970   }
1971   return false;
1972 }
1973 
1974 static bool generateSelectInst(const SPIRV::IncomingCall *Call,
1975                                MachineIRBuilder &MIRBuilder) {
1976   MIRBuilder.buildSelect(Call->ReturnRegister, Call->Arguments[0],
1977                          Call->Arguments[1], Call->Arguments[2]);
1978   return true;
1979 }
1980 
1981 static bool generateConstructInst(const SPIRV::IncomingCall *Call,
1982                                   MachineIRBuilder &MIRBuilder,
1983                                   SPIRVGlobalRegistry *GR) {
1984   return buildOpFromWrapper(MIRBuilder, SPIRV::OpCompositeConstruct, Call,
1985                             GR->getSPIRVTypeID(Call->ReturnType));
1986 }
1987 
1988 static bool generateCoopMatrInst(const SPIRV::IncomingCall *Call,
1989                                  MachineIRBuilder &MIRBuilder,
1990                                  SPIRVGlobalRegistry *GR) {
1991   const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1992   unsigned Opcode =
1993       SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1994   bool IsSet = Opcode != SPIRV::OpCooperativeMatrixStoreKHR &&
1995                Opcode != SPIRV::OpCooperativeMatrixStoreCheckedINTEL &&
1996                Opcode != SPIRV::OpCooperativeMatrixPrefetchINTEL;
1997   unsigned ArgSz = Call->Arguments.size();
1998   unsigned LiteralIdx = 0;
1999   switch (Opcode) {
2000   // The memory operand is optional and is a literal.
2001   case SPIRV::OpCooperativeMatrixLoadKHR:
2002     LiteralIdx = ArgSz > 3 ? 3 : 0;
2003     break;
2004   case SPIRV::OpCooperativeMatrixStoreKHR:
2005     LiteralIdx = ArgSz > 4 ? 4 : 0;
2006     break;
2007   case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
2008     LiteralIdx = ArgSz > 7 ? 7 : 0;
2009     break;
2010   case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
2011     LiteralIdx = ArgSz > 8 ? 8 : 0;
2012     break;
2013   // The Cooperative Matrix Operands operand is optional and is a literal.
2014   case SPIRV::OpCooperativeMatrixMulAddKHR:
2015     LiteralIdx = ArgSz > 3 ? 3 : 0;
2016     break;
2017   }
2018 
2019   SmallVector<uint32_t, 1> ImmArgs;
2020   MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2021   if (Opcode == SPIRV::OpCooperativeMatrixPrefetchINTEL) {
2022     const uint32_t CacheLevel = getConstFromIntrinsic(Call->Arguments[3], MRI);
2023     auto MIB = MIRBuilder.buildInstr(SPIRV::OpCooperativeMatrixPrefetchINTEL)
2024                    .addUse(Call->Arguments[0])  // pointer
2025                    .addUse(Call->Arguments[1])  // rows
2026                    .addUse(Call->Arguments[2])  // columns
2027                    .addImm(CacheLevel)          // cache level
2028                    .addUse(Call->Arguments[4]); // memory layout
2029     if (ArgSz > 5)
2030       MIB.addUse(Call->Arguments[5]); // stride
2031     if (ArgSz > 6) {
2032       const uint32_t MemOp = getConstFromIntrinsic(Call->Arguments[6], MRI);
2033       MIB.addImm(MemOp); // memory operand
2034     }
2035     return true;
2036   }
2037   if (LiteralIdx > 0)
2038     ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[LiteralIdx], MRI));
2039   Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2040   if (Opcode == SPIRV::OpCooperativeMatrixLengthKHR) {
2041     SPIRVType *CoopMatrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
2042     if (!CoopMatrType)
2043       report_fatal_error("Can't find a register's type definition");
2044     MIRBuilder.buildInstr(Opcode)
2045         .addDef(Call->ReturnRegister)
2046         .addUse(TypeReg)
2047         .addUse(CoopMatrType->getOperand(0).getReg());
2048     return true;
2049   }
2050   return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2051                             IsSet ? TypeReg : Register(0), ImmArgs);
2052 }
2053 
2054 static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call,
2055                                      MachineIRBuilder &MIRBuilder,
2056                                      SPIRVGlobalRegistry *GR) {
2057   // Lookup the instruction opcode in the TableGen records.
2058   const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2059   unsigned Opcode =
2060       SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2061   const MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2062 
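  // E.g. (illustrative) a wrapper call __spirv_SpecConstant(i32 42, i32 0)
  // yields an OpSpecConstant whose result is decorated with SpecId 42.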
2063   switch (Opcode) {
2064   case SPIRV::OpSpecConstant: {
2065     // Build the SpecID decoration.
2066     unsigned SpecId =
2067         static_cast<unsigned>(getIConstVal(Call->Arguments[0], MRI));
2068     buildOpDecorate(Call->ReturnRegister, MIRBuilder, SPIRV::Decoration::SpecId,
2069                     {SpecId});
2070     // Determine the constant MI.
2071     Register ConstRegister = Call->Arguments[1];
2072     const MachineInstr *Const = getDefInstrMaybeConstant(ConstRegister, MRI);
2073     assert(Const &&
2074            (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
2075             Const->getOpcode() == TargetOpcode::G_FCONSTANT) &&
2076            "Argument should be either an int or floating-point constant");
2077     // Determine the opcode and build the OpSpecConstant MI.
2078     const MachineOperand &ConstOperand = Const->getOperand(1);
2079     if (Call->ReturnType->getOpcode() == SPIRV::OpTypeBool) {
2080       assert(ConstOperand.isCImm() && "Int constant operand is expected");
2081       Opcode = ConstOperand.getCImm()->getValue().getZExtValue()
2082                    ? SPIRV::OpSpecConstantTrue
2083                    : SPIRV::OpSpecConstantFalse;
2084     }
2085     auto MIB = MIRBuilder.buildInstr(Opcode)
2086                    .addDef(Call->ReturnRegister)
2087                    .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2088 
2089     if (Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) {
2090       if (Const->getOpcode() == TargetOpcode::G_CONSTANT)
2091         addNumImm(ConstOperand.getCImm()->getValue(), MIB);
2092       else
2093         addNumImm(ConstOperand.getFPImm()->getValueAPF().bitcastToAPInt(), MIB);
2094     }
2095     return true;
2096   }
2097   case SPIRV::OpSpecConstantComposite: {
2098     auto MIB = MIRBuilder.buildInstr(Opcode)
2099                    .addDef(Call->ReturnRegister)
2100                    .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2101     for (unsigned i = 0; i < Call->Arguments.size(); i++)
2102       MIB.addUse(Call->Arguments[i]);
2103     return true;
2104   }
2105   default:
2106     return false;
2107   }
2108 }
2109 
2110 static bool buildNDRange(const SPIRV::IncomingCall *Call,
2111                          MachineIRBuilder &MIRBuilder,
2112                          SPIRVGlobalRegistry *GR) {
2113   MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2114   SPIRVType *PtrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
2115   assert(PtrType->getOpcode() == SPIRV::OpTypePointer &&
2116          PtrType->getOperand(2).isReg());
2117   Register TypeReg = PtrType->getOperand(2).getReg();
2118   SPIRVType *StructType = GR->getSPIRVTypeForVReg(TypeReg);
2119   MachineFunction &MF = MIRBuilder.getMF();
2120   Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2121   GR->assignSPIRVTypeToVReg(StructType, TmpReg, MF);
2122   // Skip the first arg; it is the destination pointer. OpBuildNDRange takes
2123   // three other arguments, so pass a zero constant for any that are absent.
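  // E.g. OpenCL's ndrange_1D can be called as (global), (global, local), or
  // (offset, global, local), which is why the argument positions vary below.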
2124   unsigned NumArgs = Call->Arguments.size();
2125   assert(NumArgs >= 2);
2126   Register GlobalWorkSize = Call->Arguments[NumArgs < 4 ? 1 : 2];
2127   Register LocalWorkSize =
2128       NumArgs == 2 ? Register(0) : Call->Arguments[NumArgs < 4 ? 2 : 3];
2129   Register GlobalWorkOffset = NumArgs <= 3 ? Register(0) : Call->Arguments[1];
2130   if (NumArgs < 4) {
2131     Register Const;
2132     SPIRVType *SpvTy = GR->getSPIRVTypeForVReg(GlobalWorkSize);
2133     if (SpvTy->getOpcode() == SPIRV::OpTypePointer) {
2134       MachineInstr *DefInstr = MRI->getUniqueVRegDef(GlobalWorkSize);
2135       assert(DefInstr && isSpvIntrinsic(*DefInstr, Intrinsic::spv_gep) &&
2136              DefInstr->getOperand(3).isReg());
2137       Register GWSPtr = DefInstr->getOperand(3).getReg();
2138       // TODO: Maybe simplify generation of the type of the fields.
2139       unsigned Size = Call->Builtin->Name == "ndrange_3D" ? 3 : 2;
2140       unsigned BitWidth = GR->getPointerSize() == 64 ? 64 : 32;
2141       Type *BaseTy = IntegerType::get(MF.getFunction().getContext(), BitWidth);
2142       Type *FieldTy = ArrayType::get(BaseTy, Size);
2143       SPIRVType *SpvFieldTy = GR->getOrCreateSPIRVType(FieldTy, MIRBuilder);
2144       GlobalWorkSize = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2145       GR->assignSPIRVTypeToVReg(SpvFieldTy, GlobalWorkSize, MF);
2146       MIRBuilder.buildInstr(SPIRV::OpLoad)
2147           .addDef(GlobalWorkSize)
2148           .addUse(GR->getSPIRVTypeID(SpvFieldTy))
2149           .addUse(GWSPtr);
2150       const SPIRVSubtarget &ST =
2151           cast<SPIRVSubtarget>(MIRBuilder.getMF().getSubtarget());
2152       Const = GR->getOrCreateConstIntArray(0, Size, *MIRBuilder.getInsertPt(),
2153                                            SpvFieldTy, *ST.getInstrInfo());
2154     } else {
2155       Const = GR->buildConstantInt(0, MIRBuilder, SpvTy);
2156     }
2157     if (!LocalWorkSize.isValid())
2158       LocalWorkSize = Const;
2159     if (!GlobalWorkOffset.isValid())
2160       GlobalWorkOffset = Const;
2161   }
2162   assert(LocalWorkSize.isValid() && GlobalWorkOffset.isValid());
2163   MIRBuilder.buildInstr(SPIRV::OpBuildNDRange)
2164       .addDef(TmpReg)
2165       .addUse(TypeReg)
2166       .addUse(GlobalWorkSize)
2167       .addUse(LocalWorkSize)
2168       .addUse(GlobalWorkOffset);
2169   return MIRBuilder.buildInstr(SPIRV::OpStore)
2170       .addUse(Call->Arguments[0])
2171       .addUse(TmpReg);
2172 }
2173 
2174 // TODO: maybe move to the global registry (SPIRVGlobalRegistry).
2175 static SPIRVType *
2176 getOrCreateSPIRVDeviceEventPointer(MachineIRBuilder &MIRBuilder,
2177                                    SPIRVGlobalRegistry *GR) {
2178   LLVMContext &Context = MIRBuilder.getMF().getFunction().getContext();
2179   unsigned SC1 = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
2180   Type *PtrType = PointerType::get(Context, SC1);
2181   return GR->getOrCreateSPIRVType(PtrType, MIRBuilder);
2182 }
2183 
2184 static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call,
2185                                MachineIRBuilder &MIRBuilder,
2186                                SPIRVGlobalRegistry *GR) {
2187   MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2188   const DataLayout &DL = MIRBuilder.getDataLayout();
2189   bool IsSpirvOp = Call->isSpirvOp();
2190   bool HasEvents = Call->Builtin->Name.contains("events") || IsSpirvOp;
2191   const SPIRVType *Int32Ty = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
2192 
2193   // Generate the vararg operands before building OpEnqueueKernel.
2194   // Local sizes arguments: Sizes of block invoke arguments. Clang generates
2195   // local size operands as an array, so we need to unpack them.
2196   SmallVector<Register, 16> LocalSizes;
2197   if (Call->Builtin->Name.contains("_varargs") || IsSpirvOp) {
2198     const unsigned LocalSizeArrayIdx = HasEvents ? 9 : 6;
2199     Register GepReg = Call->Arguments[LocalSizeArrayIdx];
2200     MachineInstr *GepMI = MRI->getUniqueVRegDef(GepReg);
2201     assert(isSpvIntrinsic(*GepMI, Intrinsic::spv_gep) &&
2202            GepMI->getOperand(3).isReg());
2203     Register ArrayReg = GepMI->getOperand(3).getReg();
2204     MachineInstr *ArrayMI = MRI->getUniqueVRegDef(ArrayReg);
2205     const Type *LocalSizeTy = getMachineInstrType(ArrayMI);
2206     assert(LocalSizeTy && "Local size type is expected");
2207     const uint64_t LocalSizeNum =
2208         cast<ArrayType>(LocalSizeTy)->getNumElements();
2209     unsigned SC = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
2210     const LLT LLType = LLT::pointer(SC, GR->getPointerSize());
2211     const SPIRVType *PointerSizeTy = GR->getOrCreateSPIRVPointerType(
2212         Int32Ty, MIRBuilder, SPIRV::StorageClass::Function);
2213     for (unsigned I = 0; I < LocalSizeNum; ++I) {
2214       Register Reg = MRI->createVirtualRegister(&SPIRV::pIDRegClass);
2215       MRI->setType(Reg, LLType);
2216       GR->assignSPIRVTypeToVReg(PointerSizeTy, Reg, MIRBuilder.getMF());
2217       auto GEPInst = MIRBuilder.buildIntrinsic(
2218           Intrinsic::spv_gep, ArrayRef<Register>{Reg}, true, false);
2219       GEPInst
2220           .addImm(GepMI->getOperand(2).getImm())            // Inbounds flag.
2221           .addUse(ArrayMI->getOperand(0).getReg())          // Alloca.
2222           .addUse(buildConstantIntReg32(0, MIRBuilder, GR)) // Indices.
2223           .addUse(buildConstantIntReg32(I, MIRBuilder, GR));
2224       LocalSizes.push_back(Reg);
2225     }
2226   }
2227 
2228   // The SPIR-V OpEnqueueKernel instruction has 10+ arguments.
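  // Its fixed operands are: Queue, Flags, ND Range, Num Events, Wait Events,
  // Ret Event, Invoke, Param, Param Size, Param Align, optionally followed by
  // Local Size operands.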
2229   auto MIB = MIRBuilder.buildInstr(SPIRV::OpEnqueueKernel)
2230                  .addDef(Call->ReturnRegister)
2231                  .addUse(GR->getSPIRVTypeID(Int32Ty));
2232 
2233   // Copy all arguments before the block invoke function pointer.
2234   const unsigned BlockFIdx = HasEvents ? 6 : 3;
2235   for (unsigned i = 0; i < BlockFIdx; i++)
2236     MIB.addUse(Call->Arguments[i]);
2237 
2238   // If there are no event arguments in the original call, add dummy ones.
2239   if (!HasEvents) {
2240     MIB.addUse(buildConstantIntReg32(0, MIRBuilder, GR)); // Dummy num events.
2241     Register NullPtr = GR->getOrCreateConstNullPtr(
2242         MIRBuilder, getOrCreateSPIRVDeviceEventPointer(MIRBuilder, GR));
2243     MIB.addUse(NullPtr); // Dummy wait events.
2244     MIB.addUse(NullPtr); // Dummy ret event.
2245   }
2246 
2247   MachineInstr *BlockMI = getBlockStructInstr(Call->Arguments[BlockFIdx], MRI);
2248   assert(BlockMI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
2249   // Invoke: Pointer to invoke function.
2250   MIB.addGlobalAddress(BlockMI->getOperand(1).getGlobal());
2251 
2252   Register BlockLiteralReg = Call->Arguments[BlockFIdx + 1];
2253   // Param: Pointer to block literal.
2254   MIB.addUse(BlockLiteralReg);
2255 
2256   Type *PType = const_cast<Type *>(getBlockStructType(BlockLiteralReg, MRI));
2257   // TODO: these numbers should be obtained from block literal structure.
2258   // Param Size: Size of block literal structure.
2259   MIB.addUse(buildConstantIntReg32(DL.getTypeStoreSize(PType), MIRBuilder, GR));
2260   // Param Alignment: Alignment of the block literal structure.
2261   MIB.addUse(buildConstantIntReg32(DL.getPrefTypeAlign(PType).value(),
2262                                    MIRBuilder, GR));
2263 
2264   for (unsigned i = 0; i < LocalSizes.size(); i++)
2265     MIB.addUse(LocalSizes[i]);
2266   return true;
2267 }
2268 
2269 static bool generateEnqueueInst(const SPIRV::IncomingCall *Call,
2270                                 MachineIRBuilder &MIRBuilder,
2271                                 SPIRVGlobalRegistry *GR) {
2272   // Lookup the instruction opcode in the TableGen records.
2273   const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2274   unsigned Opcode =
2275       SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2276 
2277   switch (Opcode) {
2278   case SPIRV::OpRetainEvent:
2279   case SPIRV::OpReleaseEvent:
2280     return MIRBuilder.buildInstr(Opcode).addUse(Call->Arguments[0]);
2281   case SPIRV::OpCreateUserEvent:
2282   case SPIRV::OpGetDefaultQueue:
2283     return MIRBuilder.buildInstr(Opcode)
2284         .addDef(Call->ReturnRegister)
2285         .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2286   case SPIRV::OpIsValidEvent:
2287     return MIRBuilder.buildInstr(Opcode)
2288         .addDef(Call->ReturnRegister)
2289         .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2290         .addUse(Call->Arguments[0]);
2291   case SPIRV::OpSetUserEventStatus:
2292     return MIRBuilder.buildInstr(Opcode)
2293         .addUse(Call->Arguments[0])
2294         .addUse(Call->Arguments[1]);
2295   case SPIRV::OpCaptureEventProfilingInfo:
2296     return MIRBuilder.buildInstr(Opcode)
2297         .addUse(Call->Arguments[0])
2298         .addUse(Call->Arguments[1])
2299         .addUse(Call->Arguments[2]);
2300   case SPIRV::OpBuildNDRange:
2301     return buildNDRange(Call, MIRBuilder, GR);
2302   case SPIRV::OpEnqueueKernel:
2303     return buildEnqueueKernel(Call, MIRBuilder, GR);
2304   default:
2305     return false;
2306   }
2307 }
2308 
2309 static bool generateAsyncCopy(const SPIRV::IncomingCall *Call,
2310                               MachineIRBuilder &MIRBuilder,
2311                               SPIRVGlobalRegistry *GR) {
2312   // Lookup the instruction opcode in the TableGen records.
2313   const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2314   unsigned Opcode =
2315       SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2316 
2317   bool IsSet = Opcode == SPIRV::OpGroupAsyncCopy;
2318   Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2319   if (Call->isSpirvOp())
2320     return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2321                               IsSet ? TypeReg : Register(0));
2322 
2323   auto Scope = buildConstantIntReg32(SPIRV::Scope::Workgroup, MIRBuilder, GR);
2324 
2325   switch (Opcode) {
2326   case SPIRV::OpGroupAsyncCopy: {
2327     SPIRVType *NewType =
2328         Call->ReturnType->getOpcode() == SPIRV::OpTypeEvent
2329             ? nullptr
2330             : GR->getOrCreateSPIRVTypeByName("spirv.Event", MIRBuilder);
2331     Register TypeReg = GR->getSPIRVTypeID(NewType ? NewType : Call->ReturnType);
2332     unsigned NumArgs = Call->Arguments.size();
2333     Register EventReg = Call->Arguments[NumArgs - 1];
2334     bool Res = MIRBuilder.buildInstr(Opcode)
2335                    .addDef(Call->ReturnRegister)
2336                    .addUse(TypeReg)
2337                    .addUse(Scope)
2338                    .addUse(Call->Arguments[0])
2339                    .addUse(Call->Arguments[1])
2340                    .addUse(Call->Arguments[2])
2341                    .addUse(Call->Arguments.size() > 4
2342                                ? Call->Arguments[3]
2343                                : buildConstantIntReg32(1, MIRBuilder, GR))
2344                    .addUse(EventReg);
2345     if (NewType != nullptr)
2346       insertAssignInstr(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
2347                         MIRBuilder.getMF().getRegInfo());
2348     return Res;
2349   }
2350   case SPIRV::OpGroupWaitEvents:
2351     return MIRBuilder.buildInstr(Opcode)
2352         .addUse(Scope)
2353         .addUse(Call->Arguments[0])
2354         .addUse(Call->Arguments[1]);
2355   default:
2356     return false;
2357   }
2358 }
2359 
2360 static bool generateConvertInst(const StringRef DemangledCall,
2361                                 const SPIRV::IncomingCall *Call,
2362                                 MachineIRBuilder &MIRBuilder,
2363                                 SPIRVGlobalRegistry *GR) {
2364   // Lookup the conversion builtin in the TableGen records.
2365   const SPIRV::ConvertBuiltin *Builtin =
2366       SPIRV::lookupConvertBuiltin(Call->Builtin->Name, Call->Builtin->Set);
2367 
2368   if (!Builtin && Call->isSpirvOp()) {
2369     const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2370     unsigned Opcode =
2371         SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2372     return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2373                               GR->getSPIRVTypeID(Call->ReturnType));
2374   }
2375 
2376   if (Builtin->IsSaturated)
2377     buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2378                     SPIRV::Decoration::SaturatedConversion, {});
2379   if (Builtin->IsRounded)
2380     buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2381                     SPIRV::Decoration::FPRoundingMode,
2382                     {(unsigned)Builtin->RoundingMode});
2383 
2384   std::string NeedExtMsg;              // no errors if empty
2385   bool IsRightComponentsNumber = true; // arg and result component counts match
2386   unsigned Opcode = SPIRV::OpNop;
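  // Opcode selection below, in summary:
  //   int   -> int   : OpUConvert / OpSConvert (OpSatConvert* when saturated)
  //   int   -> float : OpConvertSToF / OpConvertUToF (BF16 INTEL variant)
  //   float -> int   : OpConvertFToS / OpConvertFToU (BF16 INTEL variant)
  //   float -> float : OpFConvert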
2387   if (GR->isScalarOrVectorOfType(Call->Arguments[0], SPIRV::OpTypeInt)) {
2388     // Int -> ...
2389     if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2390       // Int -> Int
2391       if (Builtin->IsSaturated)
2392         Opcode = Builtin->IsDestinationSigned ? SPIRV::OpSatConvertUToS
2393                                               : SPIRV::OpSatConvertSToU;
2394       else
2395         Opcode = Builtin->IsDestinationSigned ? SPIRV::OpUConvert
2396                                               : SPIRV::OpSConvert;
2397     } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2398                                           SPIRV::OpTypeFloat)) {
2399       // Int -> Float
2400       if (Builtin->IsBfloat16) {
2401         const auto *ST = static_cast<const SPIRVSubtarget *>(
2402             &MIRBuilder.getMF().getSubtarget());
2403         if (!ST->canUseExtension(
2404                 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2405           NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
2406         IsRightComponentsNumber =
2407             GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2408             GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2409         Opcode = SPIRV::OpConvertBF16ToFINTEL;
2410       } else {
2411         bool IsSourceSigned =
2412             DemangledCall[DemangledCall.find_first_of('(') + 1] != 'u';
2413         Opcode = IsSourceSigned ? SPIRV::OpConvertSToF : SPIRV::OpConvertUToF;
2414       }
2415     }
2416   } else if (GR->isScalarOrVectorOfType(Call->Arguments[0],
2417                                         SPIRV::OpTypeFloat)) {
2418     // Float -> ...
2419     if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2420       // Float -> Int
2421       if (Builtin->IsBfloat16) {
2422         const auto *ST = static_cast<const SPIRVSubtarget *>(
2423             &MIRBuilder.getMF().getSubtarget());
2424         if (!ST->canUseExtension(
2425                 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2426           NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
2427         IsRightComponentsNumber =
2428             GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2429             GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2430         Opcode = SPIRV::OpConvertFToBF16INTEL;
2431       } else {
2432         Opcode = Builtin->IsDestinationSigned ? SPIRV::OpConvertFToS
2433                                               : SPIRV::OpConvertFToU;
2434       }
2435     } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2436                                           SPIRV::OpTypeFloat)) {
2437       // Float -> Float
2438       Opcode = SPIRV::OpFConvert;
2439     }
2440   }
2441 
2442   if (!NeedExtMsg.empty()) {
2443     std::string DiagMsg = std::string(Builtin->Name) +
2444                           ": the builtin requires the following SPIR-V "
2445                           "extension: " +
2446                           NeedExtMsg;
2447     report_fatal_error(DiagMsg.c_str(), false);
2448   }
2449   if (!IsRightComponentsNumber) {
2450     std::string DiagMsg =
2451         std::string(Builtin->Name) +
2452         ": result and argument must have the same number of components";
2453     report_fatal_error(DiagMsg.c_str(), false);
2454   }
2455   assert(Opcode != SPIRV::OpNop &&
2456          "Conversion between the types not implemented!");
2457 
2458   MIRBuilder.buildInstr(Opcode)
2459       .addDef(Call->ReturnRegister)
2460       .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2461       .addUse(Call->Arguments[0]);
2462   return true;
2463 }
2464 
2465 static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call,
2466                                         MachineIRBuilder &MIRBuilder,
2467                                         SPIRVGlobalRegistry *GR) {
2468   // Lookup the vector load/store builtin in the TableGen records.
2469   const SPIRV::VectorLoadStoreBuiltin *Builtin =
2470       SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2471                                           Call->Builtin->Set);
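       // The resulting instruction has the form:
       //   %ret = OpExtInst %retTy OpenCL_std <number> <args...>
       // optionally followed by an element-count literal for "vloadn"-style
       // builtins and by a rounding-mode literal for rounded stores.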
2472   // Build extended instruction.
2473   auto MIB =
2474       MIRBuilder.buildInstr(SPIRV::OpExtInst)
2475           .addDef(Call->ReturnRegister)
2476           .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2477           .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
2478           .addImm(Builtin->Number);
2479   for (auto Argument : Call->Arguments)
2480     MIB.addUse(Argument);
2481   if (Builtin->Name.contains("load") && Builtin->ElementCount > 1)
2482     MIB.addImm(Builtin->ElementCount);
2483 
2484   // The rounding mode must be passed as the last argument in the MI for
2485   // builtins like "vstorea_halfn_r".
2486   if (Builtin->IsRounded)
2487     MIB.addImm(static_cast<uint32_t>(Builtin->RoundingMode));
2488   return true;
2489 }
2490 
2491 static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call,
2492                                   MachineIRBuilder &MIRBuilder,
2493                                   SPIRVGlobalRegistry *GR) {
2494   // Lookup the instruction opcode in the TableGen records.
2495   const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2496   unsigned Opcode =
2497       SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2498   bool IsLoad = Opcode == SPIRV::OpLoad;
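       // Expected argument layout:
       //   load:  (pointer [, memory-operand mask [, alignment]])
       //   store: (pointer, value [, memory-operand mask [, alignment]])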
2499   // Build the instruction.
2500   auto MIB = MIRBuilder.buildInstr(Opcode);
2501   if (IsLoad) {
2502     MIB.addDef(Call->ReturnRegister);
2503     MIB.addUse(GR->getSPIRVTypeID(Call->ReturnType));
2504   }
2505   // Add the pointer to the value being loaded or stored.
2506   MIB.addUse(Call->Arguments[0]);
2507   MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2508   // Add a value to store.
2509   if (!IsLoad)
2510     MIB.addUse(Call->Arguments[1]);
2511   // Add the optional memory-operand mask and alignment literals, if present.
2512   unsigned NumArgs = Call->Arguments.size();
2513   if ((IsLoad && NumArgs >= 2) || NumArgs >= 3)
2514     MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 1 : 2], MRI));
2515   if ((IsLoad && NumArgs >= 3) || NumArgs >= 4)
2516     MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 2 : 3], MRI));
2517   return true;
2518 }
2519 
2520 namespace SPIRV {
2521 // Try to find the attributes of a builtin function by its demangled name and
2522 // return a tuple <builtin group, opcode, ext instruction number>, or the
2523 // special tuple value <-1, 0, 0> if the builtin function is not found.
2524 // Not all builtin functions are supported, only those with a ready-to-use
2525 // opcode or instruction number defined in TableGen.
2526 // TODO: consider a major rework of mapping demangled calls to builtin
2527 // functions to unify the search and decrease the number of individual cases.
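     // For example, a demangled call recognized as a native builtin yields
     // <group, opcode, 0>, while one recognized as an extended instruction
     // yields <group, 0, instruction number>.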
2528 std::tuple<int, unsigned, unsigned>
2529 mapBuiltinToOpcode(const StringRef DemangledCall,
2530                    SPIRV::InstructionSet::InstructionSet Set) {
2531   Register Reg;
2532   SmallVector<Register> Args;
2533   std::unique_ptr<const IncomingCall> Call =
2534       lookupBuiltin(DemangledCall, Set, Reg, nullptr, Args);
2535   if (!Call)
2536     return std::make_tuple(-1, 0, 0);
2537 
2538   switch (Call->Builtin->Group) {
2539   case SPIRV::Relational:
2540   case SPIRV::Atomic:
2541   case SPIRV::Barrier:
2542   case SPIRV::CastToPtr:
2543   case SPIRV::ImageMiscQuery:
2544   case SPIRV::SpecConstant:
2545   case SPIRV::Enqueue:
2546   case SPIRV::AsyncCopy:
2547   case SPIRV::LoadStore:
2548   case SPIRV::CoopMatr:
2549     if (const auto *R =
2550             SPIRV::lookupNativeBuiltin(Call->Builtin->Name, Call->Builtin->Set))
2551       return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2552     break;
2553   case SPIRV::Extended:
2554     if (const auto *R = SPIRV::lookupExtendedBuiltin(Call->Builtin->Name,
2555                                                      Call->Builtin->Set))
2556       return std::make_tuple(Call->Builtin->Group, 0, R->Number);
2557     break;
2558   case SPIRV::VectorLoadStore:
2559     if (const auto *R = SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2560                                                             Call->Builtin->Set))
2561       return std::make_tuple(SPIRV::Extended, 0, R->Number);
2562     break;
2563   case SPIRV::Group:
2564     if (const auto *R = SPIRV::lookupGroupBuiltin(Call->Builtin->Name))
2565       return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2566     break;
2567   case SPIRV::AtomicFloating:
2568     if (const auto *R = SPIRV::lookupAtomicFloatingBuiltin(Call->Builtin->Name))
2569       return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2570     break;
2571   case SPIRV::IntelSubgroups:
2572     if (const auto *R = SPIRV::lookupIntelSubgroupsBuiltin(Call->Builtin->Name))
2573       return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2574     break;
2575   case SPIRV::GroupUniform:
2576     if (const auto *R = SPIRV::lookupGroupUniformBuiltin(Call->Builtin->Name))
2577       return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2578     break;
2579   case SPIRV::WriteImage:
2580     return std::make_tuple(Call->Builtin->Group, SPIRV::OpImageWrite, 0);
2581   case SPIRV::Select:
2582     return std::make_tuple(Call->Builtin->Group, TargetOpcode::G_SELECT, 0);
2583   case SPIRV::Construct:
2584     return std::make_tuple(Call->Builtin->Group, SPIRV::OpCompositeConstruct,
2585                            0);
2586   case SPIRV::KernelClock:
2587     return std::make_tuple(Call->Builtin->Group, SPIRV::OpReadClockKHR, 0);
2588   default:
2589     return std::make_tuple(-1, 0, 0);
2590   }
2591   return std::make_tuple(-1, 0, 0);
2592 }
2593 
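     // Returns std::nullopt if the demangled call does not match any builtin
     // record; otherwise returns whether emitting the matched builtin
     // succeeded.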
2594 std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
2595                                  SPIRV::InstructionSet::InstructionSet Set,
2596                                  MachineIRBuilder &MIRBuilder,
2597                                  const Register OrigRet, const Type *OrigRetTy,
2598                                  const SmallVectorImpl<Register> &Args,
2599                                  SPIRVGlobalRegistry *GR) {
2600   LLVM_DEBUG(dbgs() << "Lowering builtin call: " << DemangledCall << "\n");
2601 
2602   // Lookup the builtin in the TableGen records.
2603   SPIRVType *SpvType = GR->getSPIRVTypeForVReg(OrigRet);
2604   assert(SpvType && "Inconsistent return register: expected valid type info");
2605   std::unique_ptr<const IncomingCall> Call =
2606       lookupBuiltin(DemangledCall, Set, OrigRet, SpvType, Args);
2607 
2608   if (!Call) {
2609     LLVM_DEBUG(dbgs() << "Builtin record was not found!\n");
2610     return std::nullopt;
2611   }
2612 
2613   // TODO: check if the provided args meet the builtin requirements.
2614   assert(Args.size() >= Call->Builtin->MinNumArgs &&
2615          "Too few arguments to generate the builtin");
2616   if (Call->Builtin->MaxNumArgs && Args.size() > Call->Builtin->MaxNumArgs)
2617     LLVM_DEBUG(dbgs() << "More arguments provided than the builtin accepts!\n");
2618 
2619   // Match the builtin with implementation based on the grouping.
2620   // Match the builtin with an implementation based on the grouping.
2621   case SPIRV::Extended:
2622     return generateExtInst(Call.get(), MIRBuilder, GR);
2623   case SPIRV::Relational:
2624     return generateRelationalInst(Call.get(), MIRBuilder, GR);
2625   case SPIRV::Group:
2626     return generateGroupInst(Call.get(), MIRBuilder, GR);
2627   case SPIRV::Variable:
2628     return generateBuiltinVar(Call.get(), MIRBuilder, GR);
2629   case SPIRV::Atomic:
2630     return generateAtomicInst(Call.get(), MIRBuilder, GR);
2631   case SPIRV::AtomicFloating:
2632     return generateAtomicFloatingInst(Call.get(), MIRBuilder, GR);
2633   case SPIRV::Barrier:
2634     return generateBarrierInst(Call.get(), MIRBuilder, GR);
2635   case SPIRV::CastToPtr:
2636     return generateCastToPtrInst(Call.get(), MIRBuilder);
2637   case SPIRV::Dot:
2638     return generateDotOrFMulInst(Call.get(), MIRBuilder, GR);
2639   case SPIRV::Wave:
2640     return generateWaveInst(Call.get(), MIRBuilder, GR);
2641   case SPIRV::ICarryBorrow:
2642     return generateICarryBorrowInst(Call.get(), MIRBuilder, GR);
2643   case SPIRV::GetQuery:
2644     return generateGetQueryInst(Call.get(), MIRBuilder, GR);
2645   case SPIRV::ImageSizeQuery:
2646     return generateImageSizeQueryInst(Call.get(), MIRBuilder, GR);
2647   case SPIRV::ImageMiscQuery:
2648     return generateImageMiscQueryInst(Call.get(), MIRBuilder, GR);
2649   case SPIRV::ReadImage:
2650     return generateReadImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
2651   case SPIRV::WriteImage:
2652     return generateWriteImageInst(Call.get(), MIRBuilder, GR);
2653   case SPIRV::SampleImage:
2654     return generateSampleImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
2655   case SPIRV::Select:
2656     return generateSelectInst(Call.get(), MIRBuilder);
2657   case SPIRV::Construct:
2658     return generateConstructInst(Call.get(), MIRBuilder, GR);
2659   case SPIRV::SpecConstant:
2660     return generateSpecConstantInst(Call.get(), MIRBuilder, GR);
2661   case SPIRV::Enqueue:
2662     return generateEnqueueInst(Call.get(), MIRBuilder, GR);
2663   case SPIRV::AsyncCopy:
2664     return generateAsyncCopy(Call.get(), MIRBuilder, GR);
2665   case SPIRV::Convert:
2666     return generateConvertInst(DemangledCall, Call.get(), MIRBuilder, GR);
2667   case SPIRV::VectorLoadStore:
2668     return generateVectorLoadStoreInst(Call.get(), MIRBuilder, GR);
2669   case SPIRV::LoadStore:
2670     return generateLoadStoreInst(Call.get(), MIRBuilder, GR);
2671   case SPIRV::IntelSubgroups:
2672     return generateIntelSubgroupsInst(Call.get(), MIRBuilder, GR);
2673   case SPIRV::GroupUniform:
2674     return generateGroupUniformInst(Call.get(), MIRBuilder, GR);
2675   case SPIRV::KernelClock:
2676     return generateKernelClockInst(Call.get(), MIRBuilder, GR);
2677   case SPIRV::CoopMatr:
2678     return generateCoopMatrInst(Call.get(), MIRBuilder, GR);
2679   }
2680   return false;
2681 }
2682 
2683 Type *parseBuiltinCallArgumentType(StringRef TypeStr, LLVMContext &Ctx) {
2684   // Parse strings representing OpenCL builtin types.
2685   if (hasBuiltinTypePrefix(TypeStr)) {
2686     // OpenCL builtin types in demangled call strings have the following format:
2687     // e.g. ocl_image2d_ro
2688     [[maybe_unused]] bool IsOCLBuiltinType = TypeStr.consume_front("ocl_");
2689     assert(IsOCLBuiltinType && "Invalid OpenCL builtin prefix");
2690 
2691     // Check if this is a pointer to a builtin type and not just a pointer
2692     // representing a builtin type. If it is a pointer to a builtin type,
2693     // additional handling is required in the method calling
2694     // parseBuiltinCallArgumentBaseType(...), as this function only retrieves
2695     // the base type.
2696     if (TypeStr.ends_with("*"))
2697       TypeStr = TypeStr.slice(0, TypeStr.find_first_of(" *"));
2698 
2699     return parseBuiltinTypeNameToTargetExtType("opencl." + TypeStr.str() + "_t",
2700                                                Ctx);
2701   }
2702 
2703   // Parse the type name in either "typeN" or "type vector[N]" format, where
2704   // N is the number of elements of the vector.
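     // e.g. "float4" and "float vector[4]" both denote a vector of four
     // floats; a trailing '*' is dropped below, since only the base type is
     // returned.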
2705   Type *BaseType;
2706   unsigned VecElts = 0;
2707 
2708   BaseType = parseBasicTypeName(TypeStr, Ctx);
2709   if (!BaseType)
2710     // Unable to recognize the SPIR-V type name.
2711     return nullptr;
2712 
2713   // Handle "typeN*" or "type vector[N]*".
2714   TypeStr.consume_back("*");
2715 
2716   if (TypeStr.consume_front(" vector["))
2717     TypeStr = TypeStr.substr(0, TypeStr.find(']'));
2718 
2719   TypeStr.getAsInteger(10, VecElts);
2720   if (VecElts > 0)
2721     BaseType = VectorType::get(
2722         BaseType->isVoidTy() ? Type::getInt8Ty(Ctx) : BaseType, VecElts, false);
2723 
2724   return BaseType;
2725 }
2726 
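     // Split the argument-type list of a demangled call string into individual
     // type strings, e.g. "fract(float, float*)" yields {"float", " float*"}.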
2727 bool parseBuiltinTypeStr(SmallVector<StringRef, 10> &BuiltinArgsTypeStrs,
2728                          const StringRef DemangledCall, LLVMContext &Ctx) {
2729   auto Pos1 = DemangledCall.find('(');
2730   if (Pos1 == StringRef::npos)
2731     return false;
2732   auto Pos2 = DemangledCall.find(')');
2733   if (Pos2 == StringRef::npos || Pos1 > Pos2)
2734     return false;
2735   DemangledCall.slice(Pos1 + 1, Pos2)
2736       .split(BuiltinArgsTypeStrs, ',', -1, false);
2737   return true;
2738 }
2739 
2740 Type *parseBuiltinCallArgumentBaseType(const StringRef DemangledCall,
2741                                        unsigned ArgIdx, LLVMContext &Ctx) {
2742   SmallVector<StringRef, 10> BuiltinArgsTypeStrs;
2743   parseBuiltinTypeStr(BuiltinArgsTypeStrs, DemangledCall, Ctx);
2744   if (ArgIdx >= BuiltinArgsTypeStrs.size())
2745     return nullptr;
2746   StringRef TypeStr = BuiltinArgsTypeStrs[ArgIdx].trim();
2747   return parseBuiltinCallArgumentType(TypeStr, Ctx);
2748 }
2749 
2750 struct BuiltinType {
2751   StringRef Name;
2752   uint32_t Opcode;
2753 };
2754 
2755 #define GET_BuiltinTypes_DECL
2756 #define GET_BuiltinTypes_IMPL
2757 
2758 struct OpenCLType {
2759   StringRef Name;
2760   StringRef SpirvTypeLiteral;
2761 };
2762 
2763 #define GET_OpenCLTypes_DECL
2764 #define GET_OpenCLTypes_IMPL
2765 
2766 #include "SPIRVGenTables.inc"
2767 } // namespace SPIRV
2768 
2769 //===----------------------------------------------------------------------===//
2770 // Misc functions for parsing builtin types.
2771 //===----------------------------------------------------------------------===//
2772 
2773 static Type *parseTypeString(const StringRef Name, LLVMContext &Context) {
2774   if (Name.starts_with("void"))
2775     return Type::getVoidTy(Context);
2776   else if (Name.starts_with("int") || Name.starts_with("uint"))
2777     return Type::getInt32Ty(Context);
2778   else if (Name.starts_with("float"))
2779     return Type::getFloatTy(Context);
2780   else if (Name.starts_with("half"))
2781     return Type::getHalfTy(Context);
2782   report_fatal_error("Unable to recognize type!");
2783 }
2784 
2785 //===----------------------------------------------------------------------===//
2786 // Implementation functions for builtin types.
2787 //===----------------------------------------------------------------------===//
2788 
2789 static SPIRVType *getNonParameterizedType(const TargetExtType *ExtensionType,
2790                                           const SPIRV::BuiltinType *TypeRecord,
2791                                           MachineIRBuilder &MIRBuilder,
2792                                           SPIRVGlobalRegistry *GR) {
2793   unsigned Opcode = TypeRecord->Opcode;
2794   // Create or get an existing type from GlobalRegistry.
2795   return GR->getOrCreateOpTypeByOpcode(ExtensionType, MIRBuilder, Opcode);
2796 }
2797 
2798 static SPIRVType *getSamplerType(MachineIRBuilder &MIRBuilder,
2799                                  SPIRVGlobalRegistry *GR) {
2800   // Create or get an existing type from GlobalRegistry.
2801   return GR->getOrCreateOpTypeSampler(MIRBuilder);
2802 }
2803 
2804 static SPIRVType *getPipeType(const TargetExtType *ExtensionType,
2805                               MachineIRBuilder &MIRBuilder,
2806                               SPIRVGlobalRegistry *GR) {
2807   assert(ExtensionType->getNumIntParameters() == 1 &&
2808          "Invalid number of parameters for SPIR-V pipe builtin!");
2809   // Create or get an existing type from GlobalRegistry.
2810   return GR->getOrCreateOpTypePipe(MIRBuilder,
2811                                    SPIRV::AccessQualifier::AccessQualifier(
2812                                        ExtensionType->getIntParameter(0)));
2813 }
2814 
2815 static SPIRVType *getCoopMatrType(const TargetExtType *ExtensionType,
2816                                   MachineIRBuilder &MIRBuilder,
2817                                   SPIRVGlobalRegistry *GR) {
2818   assert(ExtensionType->getNumIntParameters() == 4 &&
2819          "Invalid number of parameters for SPIR-V coop matrices builtin!");
2820   assert(ExtensionType->getNumTypeParameters() == 1 &&
2821          "SPIR-V coop matrices builtin type must have a type parameter!");
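       // The four int parameters are forwarded below in the operand order of
       // OpTypeCooperativeMatrixKHR: Scope, Rows, Columns and Use.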
2822   const SPIRVType *ElemType =
2823       GR->getOrCreateSPIRVType(ExtensionType->getTypeParameter(0), MIRBuilder);
2824   // Create or get an existing type from GlobalRegistry.
2825   return GR->getOrCreateOpTypeCoopMatr(
2826       MIRBuilder, ExtensionType, ElemType, ExtensionType->getIntParameter(0),
2827       ExtensionType->getIntParameter(1), ExtensionType->getIntParameter(2),
2828       ExtensionType->getIntParameter(3));
2829 }
2830 
2831 static SPIRVType *
2832 getImageType(const TargetExtType *ExtensionType,
2833              const SPIRV::AccessQualifier::AccessQualifier Qualifier,
2834              MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
2835   assert(ExtensionType->getNumTypeParameters() == 1 &&
2836          "SPIR-V image builtin type must have a sampled type parameter!");
2837   const SPIRVType *SampledType =
2838       GR->getOrCreateSPIRVType(ExtensionType->getTypeParameter(0), MIRBuilder);
2839   assert((ExtensionType->getNumIntParameters() == 7 ||
2840           ExtensionType->getNumIntParameters() == 6) &&
2841          "Invalid number of parameters for SPIR-V image builtin!");
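       // The int parameters follow the OpTypeImage operand order: Dim, Depth,
       // Arrayed, MS, Sampled, Format and, if present, an AccessQualifier.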
2842 
2843   SPIRV::AccessQualifier::AccessQualifier accessQualifier =
2844       SPIRV::AccessQualifier::None;
2845   if (ExtensionType->getNumIntParameters() == 7) {
2846     accessQualifier = Qualifier == SPIRV::AccessQualifier::WriteOnly
2847                           ? SPIRV::AccessQualifier::WriteOnly
2848                           : SPIRV::AccessQualifier::AccessQualifier(
2849                                 ExtensionType->getIntParameter(6));
2850   }
2851 
2852   // Create or get an existing type from GlobalRegistry.
2853   return GR->getOrCreateOpTypeImage(
2854       MIRBuilder, SampledType,
2855       SPIRV::Dim::Dim(ExtensionType->getIntParameter(0)),
2856       ExtensionType->getIntParameter(1), ExtensionType->getIntParameter(2),
2857       ExtensionType->getIntParameter(3), ExtensionType->getIntParameter(4),
2858       SPIRV::ImageFormat::ImageFormat(ExtensionType->getIntParameter(5)),
2859       accessQualifier);
2860 }
2861 
2862 static SPIRVType *getSampledImageType(const TargetExtType *OpaqueType,
2863                                       MachineIRBuilder &MIRBuilder,
2864                                       SPIRVGlobalRegistry *GR) {
2865   SPIRVType *OpaqueImageType = getImageType(
2866       OpaqueType, SPIRV::AccessQualifier::ReadOnly, MIRBuilder, GR);
2867   // Create or get an existing type from GlobalRegistry.
2868   return GR->getOrCreateOpTypeSampledImage(OpaqueImageType, MIRBuilder);
2869 }
2870 
2871 namespace SPIRV {
2872 TargetExtType *parseBuiltinTypeNameToTargetExtType(std::string TypeName,
2873                                                    LLVMContext &Context) {
2874   StringRef NameWithParameters = TypeName;
2875 
2876   // Pointers-to-opaque-structs representing OpenCL types are first translated
2877   // to equivalent SPIR-V types. OpenCL builtin type names should have the
2878   // following format: e.g. %opencl.event_t
2879   if (NameWithParameters.starts_with("opencl.")) {
2880     const SPIRV::OpenCLType *OCLTypeRecord =
2881         SPIRV::lookupOpenCLType(NameWithParameters);
2882     if (!OCLTypeRecord)
2883       report_fatal_error("Missing TableGen record for OpenCL type: " +
2884                          NameWithParameters);
2885     NameWithParameters = OCLTypeRecord->SpirvTypeLiteral;
2886     // Continue with the SPIR-V builtin type...
2887   }
2888 
2889   // Names of the opaque structs representing SPIR-V builtins without
2890   // parameters should have the following format: e.g. %spirv.Event
2891   assert(NameWithParameters.starts_with("spirv.") &&
2892          "Unknown builtin opaque type!");
2893 
2894   // Parameterized SPIR-V builtin names follow this format:
2895   // e.g. %spirv.Image._void_1_0_0_0_0_0_0, %spirv.Pipe._0
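       // The parsed result is a TargetExtType, printed in IR roughly as
       // target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0).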
2896   if (!NameWithParameters.contains('_'))
2897     return TargetExtType::get(Context, NameWithParameters);
2898 
2899   SmallVector<StringRef> Parameters;
2900   unsigned BaseNameLength = NameWithParameters.find('_') - 1;
2901   SplitString(NameWithParameters.substr(BaseNameLength + 1), Parameters, "_");
2902 
2903   SmallVector<Type *, 1> TypeParameters;
2904   bool HasTypeParameter = !isDigit(Parameters[0][0]);
2905   if (HasTypeParameter)
2906     TypeParameters.push_back(parseTypeString(Parameters[0], Context));
2907   SmallVector<unsigned> IntParameters;
2908   for (unsigned i = HasTypeParameter ? 1 : 0; i < Parameters.size(); i++) {
2909     unsigned IntParameter = 0;
2910     bool ValidLiteral = !Parameters[i].getAsInteger(10, IntParameter);
2911     (void)ValidLiteral;
2912     assert(ValidLiteral &&
2913            "Invalid format of SPIR-V builtin parameter literal!");
2914     IntParameters.push_back(IntParameter);
2915   }
2916   return TargetExtType::get(Context,
2917                             NameWithParameters.substr(0, BaseNameLength),
2918                             TypeParameters, IntParameters);
2919 }
2920 
2921 SPIRVType *lowerBuiltinType(const Type *OpaqueType,
2922                             SPIRV::AccessQualifier::AccessQualifier AccessQual,
2923                             MachineIRBuilder &MIRBuilder,
2924                             SPIRVGlobalRegistry *GR) {
2925   // In LLVM IR, SPIR-V and OpenCL builtin types are represented as either
2926   // target(...) target extension types or pointers-to-opaque-structs. The
2927   // approach relying on structs is deprecated and works only in the non-opaque
2928   // pointer mode (-opaque-pointers=0).
2929   // In order to maintain compatibility with LLVM IR generated by older versions
2930   // of Clang and LLVM/SPIR-V Translator, the pointers-to-opaque-structs are
2931   // "translated" to target extension types. This translation is temporary and
2932   // will be removed in a future release of LLVM.
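       // For instance, a pointer to an %opencl.sampler_t struct and a
       // target("spirv.Sampler") extension type are expected to lower to the
       // same SPIRVType.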
2933   const TargetExtType *BuiltinType = dyn_cast<TargetExtType>(OpaqueType);
2934   if (!BuiltinType)
2935     BuiltinType = parseBuiltinTypeNameToTargetExtType(
2936         OpaqueType->getStructName().str(), MIRBuilder.getContext());
2937 
2938   unsigned NumStartingVRegs = MIRBuilder.getMRI()->getNumVirtRegs();
2939 
2940   const StringRef Name = BuiltinType->getName();
2941   LLVM_DEBUG(dbgs() << "Lowering builtin type: " << Name << "\n");
2942 
2943   // Lookup the demangled builtin type in the TableGen records.
2944   const SPIRV::BuiltinType *TypeRecord = SPIRV::lookupBuiltinType(Name);
2945   if (!TypeRecord)
2946     report_fatal_error("Missing TableGen record for builtin type: " + Name);
2947 
2948   // "Lower" the BuiltinType into TargetType. The following get<...>Type methods
2949   // use the implementation details from TableGen records or TargetExtType
2950   // parameters to either create a new OpType<...> machine instruction or get an
2951   // existing equivalent SPIRVType from GlobalRegistry.
2952   SPIRVType *TargetType;
2953   switch (TypeRecord->Opcode) {
2954   case SPIRV::OpTypeImage:
2955     TargetType = getImageType(BuiltinType, AccessQual, MIRBuilder, GR);
2956     break;
2957   case SPIRV::OpTypePipe:
2958     TargetType = getPipeType(BuiltinType, MIRBuilder, GR);
2959     break;
2960   case SPIRV::OpTypeDeviceEvent:
2961     TargetType = GR->getOrCreateOpTypeDeviceEvent(MIRBuilder);
2962     break;
2963   case SPIRV::OpTypeSampler:
2964     TargetType = getSamplerType(MIRBuilder, GR);
2965     break;
2966   case SPIRV::OpTypeSampledImage:
2967     TargetType = getSampledImageType(BuiltinType, MIRBuilder, GR);
2968     break;
2969   case SPIRV::OpTypeCooperativeMatrixKHR:
2970     TargetType = getCoopMatrType(BuiltinType, MIRBuilder, GR);
2971     break;
2972   default:
2973     TargetType =
2974         getNonParameterizedType(BuiltinType, TypeRecord, MIRBuilder, GR);
2975     break;
2976   }
2977 
2978   // Emit OpName instruction if a new OpType<...> instruction was added
2979   // Emit an OpName instruction if a new OpType<...> instruction was added
2980   if (NumStartingVRegs < MIRBuilder.getMRI()->getNumVirtRegs())
2981     buildOpName(GR->getSPIRVTypeID(TargetType), Name, MIRBuilder);
2982 
2983   return TargetType;
2984 }
2985 } // namespace SPIRV
2986 } // namespace llvm
2987