Lines Matching defs:SPIRV
15 #include "SPIRV.h"
28 namespace SPIRV {
167 } // namespace SPIRV
174 namespace SPIRV {
249 } // namespace SPIRV
261 static std::unique_ptr<const SPIRV::IncomingCall>
263 SPIRV::InstructionSet::InstructionSet Set,
266 std::string BuiltinName = SPIRV::lookupBuiltinNameHelper(DemangledCall);
275 const SPIRV::DemangledBuiltin *Builtin;
276 if ((Builtin = SPIRV::lookupBuiltin(BuiltinName, Set)))
277 return std::make_unique<SPIRV::IncomingCall>(
292 if (Set == SPIRV::InstructionSet::OpenCL_std)
294 else if (Set == SPIRV::InstructionSet::GLSL_std_450)
302 if (Set == SPIRV::InstructionSet::OpenCL_std)
304 else if (Set == SPIRV::InstructionSet::GLSL_std_450)
311 if (Set == SPIRV::InstructionSet::OpenCL_std ||
312 Set == SPIRV::InstructionSet::GLSL_std_450)
319 (Builtin = SPIRV::lookupBuiltin(Prefix + BuiltinName, Set)))
320 return std::make_unique<SPIRV::IncomingCall>(
350 (Builtin = SPIRV::lookupBuiltin(BuiltinName + Suffix, Set)))
351 return std::make_unique<SPIRV::IncomingCall>(
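The matches at 261-351 outline the demangled-builtin lookup: try the name produced by lookupBuiltinNameHelper directly (276), then retry with instruction-set-specific prefixes (319), then with suffixes (350). Below is a toy stand-in for that retry strategy, with a hand-written table in place of the TableGen-generated lookupBuiltin; the table contents and helper names are illustrative only.

#include <map>
#include <optional>
#include <string>
#include <vector>

struct DemangledBuiltin {
  std::string Name; // the real record also carries set, group, opcode...
};

// Toy lookup table; the backend's table is generated by TableGen.
static const std::map<std::string, DemangledBuiltin> Table = {
    {"convert_char", {"convert_char"}},
};

static std::optional<DemangledBuiltin> lookupBuiltin(const std::string &Name) {
  auto It = Table.find(Name);
  if (It == Table.end())
    return std::nullopt;
  return It->second;
}

// Direct hit first, then prefixed forms, then suffixed forms.
std::optional<DemangledBuiltin>
lookupWithRetries(const std::string &BuiltinName,
                  const std::vector<std::string> &Prefixes,
                  const std::vector<std::string> &Suffixes) {
  if (auto B = lookupBuiltin(BuiltinName)) // direct hit (line 276)
    return B;
  for (const auto &P : Prefixes) // prefix retry (line 319)
    if (auto B = lookupBuiltin(P + BuiltinName))
      return B;
  for (const auto &S : Suffixes) // suffix retry (line 350)
    if (auto B = lookupBuiltin(BuiltinName + S))
      return B;
  return std::nullopt;
}
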
439 if (ResultType->getOpcode() == SPIRV::OpTypeVector) {
465 if (ReturnType->getOpcode() == SPIRV::OpTypeVector) {
498 SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType,
501 MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::pIDRegClass);
504 LLT::pointer(storageClassToAddressSpace(SPIRV::StorageClass::Function),
507 VariableType, MIRBuilder, SPIRV::StorageClass::Input);
513 SPIRV::StorageClass::Input, nullptr, /* isConst= */ isConst,
514 /* HasLinkageTy */ hasLinkageTy, SPIRV::LinkageType::Import, MIRBuilder,
535 static SPIRV::MemorySemantics::MemorySemantics
539 return SPIRV::MemorySemantics::None;
541 return SPIRV::MemorySemantics::Acquire;
543 return SPIRV::MemorySemantics::Release;
545 return SPIRV::MemorySemantics::AcquireRelease;
547 return SPIRV::MemorySemantics::SequentiallyConsistent;
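Only the return values of getSPIRVMemSemantics appear above (539-547); the switch is presumably keyed on the std::memory_order value the OpenCL atomic builtin encodes. A standalone sketch under that assumption, using the MemorySemantics bit values from the SPIR-V specification:

#include <atomic>
#include <cstdint>

// SPIR-V MemorySemantics bits (values from the SPIR-V spec).
constexpr uint32_t SemNone = 0x0;
constexpr uint32_t SemAcquire = 0x2;
constexpr uint32_t SemRelease = 0x4;
constexpr uint32_t SemAcquireRelease = 0x8;
constexpr uint32_t SemSequentiallyConsistent = 0x10;

uint32_t getSPIRVMemSemantics(std::memory_order MemOrder) {
  switch (MemOrder) {
  case std::memory_order_relaxed:
    return SemNone;
  case std::memory_order_acquire:
    return SemAcquire;
  case std::memory_order_release:
    return SemRelease;
  case std::memory_order_acq_rel:
    return SemAcquireRelease;
  case std::memory_order_seq_cst:
    return SemSequentiallyConsistent;
  default: // memory_order_consume and unexpected values: be conservative
    return SemSequentiallyConsistent;
  }
}
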
553 static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope) {
555 case SPIRV::CLMemoryScope::memory_scope_work_item:
556 return SPIRV::Scope::Invocation;
557 case SPIRV::CLMemoryScope::memory_scope_work_group:
558 return SPIRV::Scope::Workgroup;
559 case SPIRV::CLMemoryScope::memory_scope_device:
560 return SPIRV::Scope::Device;
561 case SPIRV::CLMemoryScope::memory_scope_all_svm_devices:
562 return SPIRV::Scope::CrossDevice;
563 case SPIRV::CLMemoryScope::memory_scope_sub_group:
564 return SPIRV::Scope::Subgroup;
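The full body of getSPIRVScope is visible at 553-564. The same mapping as a self-contained function; the two enums below are simplified stand-ins for the TableGen-generated SPIRV::CLMemoryScope and SPIRV::Scope declarations.

enum class CLMemoryScope {
  memory_scope_work_item,
  memory_scope_work_group,
  memory_scope_device,
  memory_scope_all_svm_devices,
  memory_scope_sub_group
};

enum class Scope { Invocation, Workgroup, Device, CrossDevice, Subgroup };

static Scope getSPIRVScope(CLMemoryScope ClScope) {
  switch (ClScope) {
  case CLMemoryScope::memory_scope_work_item:
    return Scope::Invocation;
  case CLMemoryScope::memory_scope_work_group:
    return Scope::Workgroup;
  case CLMemoryScope::memory_scope_device:
    return Scope::Device;
  case CLMemoryScope::memory_scope_all_svm_devices:
    return Scope::CrossDevice;
  case CLMemoryScope::memory_scope_sub_group:
    return Scope::Subgroup;
  }
  return Scope::Device; // all enumerators handled; silences -Wreturn-type
}
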
577 SPIRV::Scope::Scope Scope,
583 static_cast<SPIRV::CLMemoryScope>(getIConstVal(CLScopeRegister, MRI));
587 MRI->setRegClass(CLScopeRegister, &SPIRV::iIDRegClass);
600 SpvType ? GR->getRegClass(SpvType) : &SPIRV::iIDRegClass);
615 MRI->setRegClass(SemanticsRegister, &SPIRV::iIDRegClass);
623 const SPIRV::IncomingCall *Call,
638 static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call,
641 return buildOpFromWrapper(MIRBuilder, SPIRV::OpStore, Call, Register(0));
645 MIRBuilder.buildInstr(SPIRV::OpStore)
652 static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call,
657 return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicLoad, Call, TypeReg);
666 : buildConstantIntReg32(SPIRV::Scope::Device, MIRBuilder, GR);
673 SPIRV::MemorySemantics::SequentiallyConsistent |
678 MIRBuilder.buildInstr(SPIRV::OpAtomicLoad)
688 static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call,
692 return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicStore, Call, Register(0));
695 buildConstantIntReg32(SPIRV::Scope::Device, MIRBuilder, GR);
698 SPIRV::MemorySemantics::SequentiallyConsistent |
701 MIRBuilder.buildInstr(SPIRV::OpAtomicStore)
711 const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin,
727 SPIRV::OpTypePointer);
730 assert(IsCmpxchg ? ExpectedType == SPIRV::OpTypeInt
731 : ExpectedType == SPIRV::OpTypePointer);
732 assert(GR->isScalarOfType(Desired, SPIRV::OpTypeInt));
735 assert(SpvObjectPtrTy->getOperand(2).isReg() && "SPIRV type is expected");
736 auto StorageClass = static_cast<SPIRV::StorageClass::StorageClass>(
744 ? SPIRV::MemorySemantics::None
745 : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
748 ? SPIRV::MemorySemantics::None
749 : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
770 auto Scope = IsCmpxchg ? SPIRV::Scope::Workgroup : SPIRV::Scope::Device;
774 auto ClScope = static_cast<SPIRV::CLMemoryScope>(
805 MIRBuilder.buildInstr(SPIRV::OpStore).addUse(ExpectedArg).addUse(Tmp);
812 static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
825 ScopeRegister = buildScopeReg(ScopeRegister, SPIRV::Scope::Workgroup,
829 unsigned Semantics = SPIRV::MemorySemantics::None;
837 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeFloat) {
838 if (Opcode == SPIRV::OpAtomicIAdd) {
839 Opcode = SPIRV::OpAtomicFAddEXT;
840 } else if (Opcode == SPIRV::OpAtomicISub) {
843 Opcode = SPIRV::OpAtomicFAddEXT;
868 static bool buildAtomicFloatingRMWInst(const SPIRV::IncomingCall *Call,
890 static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call,
893 bool IsSet = Opcode == SPIRV::OpAtomicFlagTestAndSet;
901 unsigned Semantics = SPIRV::MemorySemantics::SequentiallyConsistent;
907 assert((Opcode != SPIRV::OpAtomicFlagClear ||
908 (Semantics != SPIRV::MemorySemantics::Acquire &&
909 Semantics != SPIRV::MemorySemantics::AcquireRelease)) &&
915 buildScopeReg(ScopeRegister, SPIRV::Scope::Device, MIRBuilder, GR, MRI);
927 static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
930 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
933 if ((Opcode == SPIRV::OpControlBarrierArriveINTEL ||
934 Opcode == SPIRV::OpControlBarrierWaitINTEL) &&
935 !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_split_barrier)) {
947 unsigned MemSemantics = SPIRV::MemorySemantics::None;
949 if (MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE)
950 MemSemantics |= SPIRV::MemorySemantics::WorkgroupMemory;
952 if (MemFlags & SPIRV::CLK_GLOBAL_MEM_FENCE)
953 MemSemantics |= SPIRV::MemorySemantics::CrossWorkgroupMemory;
955 if (MemFlags & SPIRV::CLK_IMAGE_MEM_FENCE)
956 MemSemantics |= SPIRV::MemorySemantics::ImageMemory;
958 if (Opcode == SPIRV::OpMemoryBarrier)
962 else if (Opcode == SPIRV::OpControlBarrierArriveINTEL)
963 MemSemantics |= SPIRV::MemorySemantics::Release;
964 else if (Opcode == SPIRV::OpControlBarrierWaitINTEL)
965 MemSemantics |= SPIRV::MemorySemantics::Acquire;
967 MemSemantics |= SPIRV::MemorySemantics::SequentiallyConsistent;
974 SPIRV::Scope::Scope Scope = SPIRV::Scope::Workgroup;
975 SPIRV::Scope::Scope MemScope = Scope;
978 ((Opcode != SPIRV::OpMemoryBarrier && Call->Arguments.size() == 2) ||
979 (Opcode == SPIRV::OpMemoryBarrier && Call->Arguments.size() == 3)) &&
981 Register ScopeArg = (Opcode == SPIRV::OpMemoryBarrier) ? Call->Arguments[2]
983 SPIRV::CLMemoryScope CLScope =
984 static_cast<SPIRV::CLMemoryScope>(getIConstVal(ScopeArg, MRI));
986 if (!(MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE) ||
987 (Opcode == SPIRV::OpMemoryBarrier))
997 if (Opcode != SPIRV::OpMemoryBarrier)
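buildBarrierInst (927-997) folds the OpenCL fence flags into a SPIR-V memory-semantics mask, then adds ordering bits that depend on the barrier flavour. A compilable sketch of just that translation: the CLK_* values follow the standard OpenCL C encoding and the semantics bits come from the SPIR-V spec, while the BarrierOp enum is an illustrative stand-in for the opcode checks at 958-967.

#include <cstdint>

constexpr uint32_t CLK_LOCAL_MEM_FENCE = 0x1;
constexpr uint32_t CLK_GLOBAL_MEM_FENCE = 0x2;
constexpr uint32_t CLK_IMAGE_MEM_FENCE = 0x4;

constexpr uint32_t SemAcquire = 0x2;
constexpr uint32_t SemRelease = 0x4;
constexpr uint32_t SemSequentiallyConsistent = 0x10;
constexpr uint32_t SemWorkgroupMemory = 0x100;
constexpr uint32_t SemCrossWorkgroupMemory = 0x200;
constexpr uint32_t SemImageMemory = 0x800;

enum class BarrierOp { ControlBarrier, MemoryBarrier, ArriveINTEL, WaitINTEL };

uint32_t barrierSemantics(uint32_t MemFlags, BarrierOp Op) {
  uint32_t Sem = 0; // SPIRV::MemorySemantics::None
  if (MemFlags & CLK_LOCAL_MEM_FENCE)
    Sem |= SemWorkgroupMemory;
  if (MemFlags & CLK_GLOBAL_MEM_FENCE)
    Sem |= SemCrossWorkgroupMemory;
  if (MemFlags & CLK_IMAGE_MEM_FENCE)
    Sem |= SemImageMemory;
  switch (Op) {
  case BarrierOp::MemoryBarrier:
    break; // ordering comes from the builtin's explicit order argument
  case BarrierOp::ArriveINTEL:
    Sem |= SemRelease; // split barrier: release on arrive
    break;
  case BarrierOp::WaitINTEL:
    Sem |= SemAcquire; // split barrier: acquire on wait
    break;
  case BarrierOp::ControlBarrier:
    Sem |= SemSequentiallyConsistent;
    break;
  }
  return Sem;
}
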
1003 static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim) {
1005 case SPIRV::Dim::DIM_1D:
1006 case SPIRV::Dim::DIM_Buffer:
1008 case SPIRV::Dim::DIM_2D:
1009 case SPIRV::Dim::DIM_Cube:
1010 case SPIRV::Dim::DIM_Rect:
1012 case SPIRV::Dim::DIM_3D:
1021 assert(imgType->getOpcode() == SPIRV::OpTypeImage);
1022 auto dim = static_cast<SPIRV::Dim::Dim>(imgType->getOperand(2).getImm());
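getNumComponentsForDim (1003-1012) is a pure table from image dimensionality to the number of size components, applied right after (1021-1022) to the Dim operand of an OpTypeImage. Standalone, with a stand-in Dim enum:

enum class Dim { DIM_1D, DIM_2D, DIM_3D, DIM_Cube, DIM_Rect, DIM_Buffer };

static unsigned getNumComponentsForDim(Dim dim) {
  switch (dim) {
  case Dim::DIM_1D:
  case Dim::DIM_Buffer:
    return 1;
  case Dim::DIM_2D:
  case Dim::DIM_Cube:
  case Dim::DIM_Rect:
    return 2;
  case Dim::DIM_3D:
    return 3;
  }
  return 0; // dims not listed above don't reach this helper
}
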
1032 static bool generateExtInst(const SPIRV::IncomingCall *Call,
1036 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1038 SPIRV::lookupExtendedBuiltin(Builtin->Name, Builtin->Set)->Number;
1042 MIRBuilder.buildInstr(SPIRV::OpExtInst)
1045 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
1053 static bool generateRelationalInst(const SPIRV::IncomingCall *Call,
1057 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1059 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1079 static bool generateGroupInst(const SPIRV::IncomingCall *Call,
1082 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1083 const SPIRV::GroupBuiltin *GroupBuiltin =
1084 SPIRV::lookupGroupBuiltin(Builtin->Name);
1119 if (BoolRegType->getOpcode() != SPIRV::OpTypeBool)
1123 if (BoolRegType->getOpcode() == SPIRV::OpTypeInt) {
1125 MRI->setRegClass(Arg0, &SPIRV::iIDRegClass);
1131 } else if (BoolRegType->getOpcode() != SPIRV::OpTypeBool) {
1152 auto Scope = Builtin->Name.starts_with("sub_group") ? SPIRV::Scope::Subgroup
1153 : SPIRV::Scope::Workgroup;
1157 if (GroupBuiltin->Opcode == SPIRV::OpGroupBroadcast &&
1166 if (!ElemType || ElemType->getOpcode() != SPIRV::OpTypeInt)
1171 MRI->setRegClass(VecReg, &SPIRV::vIDRegClass);
1210 static bool generateIntelSubgroupsInst(const SPIRV::IncomingCall *Call,
1213 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1216 const SPIRV::IntelSubgroupsBuiltin *IntelSubgroups =
1217 SPIRV::lookupIntelSubgroupsBuiltin(Builtin->Name);
1220 !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_media_block_io)) {
1226 !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1235 bool IsSet = OpCode != SPIRV::OpSubgroupBlockWriteINTEL &&
1236 OpCode != SPIRV::OpSubgroupImageBlockWriteINTEL &&
1237 OpCode != SPIRV::OpSubgroupImageMediaBlockWriteINTEL;
1246 if (Arg0Type->getOpcode() == SPIRV::OpTypeImage) {
1252 case SPIRV::OpSubgroupBlockReadINTEL:
1253 OpCode = SPIRV::OpSubgroupImageBlockReadINTEL;
1255 case SPIRV::OpSubgroupBlockWriteINTEL:
1256 OpCode = SPIRV::OpSubgroupImageBlockWriteINTEL;
1284 static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call,
1287 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1291 SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
1297 const SPIRV::GroupUniformBuiltin *GroupUniform =
1298 SPIRV::lookupGroupUniformBuiltin(Builtin->Name);
1328 static bool generateKernelClockInst(const SPIRV::IncomingCall *Call,
1331 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1334 if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock)) {
1344 SPIRV::Scope::Scope ScopeArg =
1345 StringSwitch<SPIRV::Scope::Scope>(Builtin->Name)
1346 .EndsWith("device", SPIRV::Scope::Scope::Device)
1347 .EndsWith("work_group", SPIRV::Scope::Scope::Workgroup)
1348 .EndsWith("sub_group", SPIRV::Scope::Scope::Subgroup);
1351 MIRBuilder.buildInstr(SPIRV::OpReadClockKHR)
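generateKernelClockInst (1328-1351) picks the OpReadClockKHR scope from the builtin's name suffix via a StringSwitch (1344-1348). The same selection in plain C++20, with std::string::ends_with standing in for StringSwitch::EndsWith:

#include <string>

enum class Scope { Device, Workgroup, Subgroup };

Scope clockScopeFromName(const std::string &Name) {
  if (Name.ends_with("device"))
    return Scope::Device;
  if (Name.ends_with("work_group"))
    return Scope::Workgroup;
  return Scope::Subgroup; // the remaining clock builtins end in "sub_group"
}
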
1385 static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call,
1388 SPIRV::BuiltIn::BuiltIn BuiltinValue,
1411 MRI->setRegClass(DefaultReg, &SPIRV::iIDRegClass);
1429 MRI->setRegClass(Extracted, &SPIRV::iIDRegClass);
1448 MRI->setRegClass(CompareRegister, &SPIRV::iIDRegClass);
1465 MRI->setRegClass(SelectionResult, &SPIRV::iIDRegClass);
1483 static bool generateBuiltinVar(const SPIRV::IncomingCall *Call,
1487 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1488 SPIRV::BuiltIn::BuiltIn Value =
1489 SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1491 if (Value == SPIRV::BuiltIn::GlobalInvocationId)
1497 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeVector)
1507 static bool generateAtomicInst(const SPIRV::IncomingCall *Call,
1511 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1513 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1516 case SPIRV::OpStore:
1518 case SPIRV::OpAtomicLoad:
1520 case SPIRV::OpAtomicStore:
1522 case SPIRV::OpAtomicCompareExchange:
1523 case SPIRV::OpAtomicCompareExchangeWeak:
1526 case SPIRV::OpAtomicIAdd:
1527 case SPIRV::OpAtomicISub:
1528 case SPIRV::OpAtomicOr:
1529 case SPIRV::OpAtomicXor:
1530 case SPIRV::OpAtomicAnd:
1531 case SPIRV::OpAtomicExchange:
1533 case SPIRV::OpMemoryBarrier:
1534 return buildBarrierInst(Call, SPIRV::OpMemoryBarrier, MIRBuilder, GR);
1535 case SPIRV::OpAtomicFlagTestAndSet:
1536 case SPIRV::OpAtomicFlagClear:
1546 static bool generateAtomicFloatingInst(const SPIRV::IncomingCall *Call,
1550 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1551 unsigned Opcode = SPIRV::lookupAtomicFloatingBuiltin(Builtin->Name)->Opcode;
1554 case SPIRV::OpAtomicFAddEXT:
1555 case SPIRV::OpAtomicFMinEXT:
1556 case SPIRV::OpAtomicFMaxEXT:
1563 static bool generateBarrierInst(const SPIRV::IncomingCall *Call,
1567 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1569 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1574 static bool generateCastToPtrInst(const SPIRV::IncomingCall *Call,
1582 static bool generateDotOrFMulInst(const SPIRV::IncomingCall *Call,
1586 return buildOpFromWrapper(MIRBuilder, SPIRV::OpDot, Call,
1589 bool IsVec = Opcode == SPIRV::OpTypeVector;
1591 MIRBuilder.buildInstr(IsVec ? SPIRV::OpDot : SPIRV::OpFMulS)
1599 static bool generateWaveInst(const SPIRV::IncomingCall *Call,
1602 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1603 SPIRV::BuiltIn::BuiltIn Value =
1604 SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1607 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt);
1621 static bool generateICarryBorrowInst(const SPIRV::IncomingCall *Call,
1624 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1626 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1633 if (RetType->getOpcode() != SPIRV::OpTypeStruct)
1641 if (OpType1->getOpcode() == SPIRV::OpTypeVector)
1643 case SPIRV::OpIAddCarryS:
1644 Opcode = SPIRV::OpIAddCarryV;
1646 case SPIRV::OpISubBorrowS:
1647 Opcode = SPIRV::OpISubBorrowV;
1652 Register ResReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1666 MIRBuilder.buildInstr(SPIRV::OpStore).addUse(SRetReg).addUse(ResReg);
1670 static bool generateGetQueryInst(const SPIRV::IncomingCall *Call,
1674 SPIRV::BuiltIn::BuiltIn Value =
1675 SPIRV::lookupGetBuiltin(Call->Builtin->Name, Call->Builtin->Set)->Value;
1676 uint64_t IsDefault = (Value == SPIRV::BuiltIn::GlobalSize ||
1677 Value == SPIRV::BuiltIn::WorkgroupSize ||
1678 Value == SPIRV::BuiltIn::EnqueuedWorkgroupSize);
1682 static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call,
1686 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1688 SPIRV::lookupImageQueryBuiltin(Builtin->Name, Builtin->Set)->Component;
1693 unsigned NumExpectedRetComponents = RetTy->getOpcode() == SPIRV::OpTypeVector
1704 MIRBuilder.getMRI()->setRegClass(QueryResult, &SPIRV::vIDRegClass);
1710 bool IsDimBuf = ImgType->getOperand(2).getImm() == SPIRV::Dim::DIM_Buffer;
1712 IsDimBuf ? SPIRV::OpImageQuerySize : SPIRV::OpImageQuerySizeLod;
1729 if (QueryResultType->getOpcode() == SPIRV::OpTypeVector) {
1735 MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
1745 auto MIB = MIRBuilder.buildInstr(SPIRV::OpVectorShuffle)
1756 static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call,
1759 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt &&
1763 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1765 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1768 SPIRV::Dim::Dim ImageDimensionality = static_cast<SPIRV::Dim::Dim>(
1773 case SPIRV::OpImageQuerySamples:
1774 assert(ImageDimensionality == SPIRV::Dim::DIM_2D &&
1777 case SPIRV::OpImageQueryLevels:
1778 assert((ImageDimensionality == SPIRV::Dim::DIM_1D ||
1779 ImageDimensionality == SPIRV::Dim::DIM_2D ||
1780 ImageDimensionality == SPIRV::Dim::DIM_3D ||
1781 ImageDimensionality == SPIRV::Dim::DIM_Cube) &&
1794 static SPIRV::SamplerAddressingMode::SamplerAddressingMode
1796 switch (Bitmask & SPIRV::CLK_ADDRESS_MODE_MASK) {
1797 case SPIRV::CLK_ADDRESS_CLAMP:
1798 return SPIRV::SamplerAddressingMode::Clamp;
1799 case SPIRV::CLK_ADDRESS_CLAMP_TO_EDGE:
1800 return SPIRV::SamplerAddressingMode::ClampToEdge;
1801 case SPIRV::CLK_ADDRESS_REPEAT:
1802 return SPIRV::SamplerAddressingMode::Repeat;
1803 case SPIRV::CLK_ADDRESS_MIRRORED_REPEAT:
1804 return SPIRV::SamplerAddressingMode::RepeatMirrored;
1805 case SPIRV::CLK_ADDRESS_NONE:
1806 return SPIRV::SamplerAddressingMode::None;
1813 return (Bitmask & SPIRV::CLK_NORMALIZED_COORDS_TRUE) ? 1 : 0;
1816 static SPIRV::SamplerFilterMode::SamplerFilterMode
1818 if (Bitmask & SPIRV::CLK_FILTER_LINEAR)
1819 return SPIRV::SamplerFilterMode::Linear;
1820 if (Bitmask & SPIRV::CLK_FILTER_NEAREST)
1821 return SPIRV::SamplerFilterMode::Nearest;
1822 return SPIRV::SamplerFilterMode::Nearest;
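The three sampler helpers (1794-1822) decode one OpenCL sampler bitmask into the SPIR-V addressing mode, coordinate-normalization flag, and filter mode. A self-contained version follows; the CLK_* constants use the standard OpenCL C sampler encoding, and the result enums are stand-ins for the SPIR-V ones.

#include <cstdint>

constexpr uint64_t CLK_NORMALIZED_COORDS_TRUE = 0x1;
constexpr uint64_t CLK_ADDRESS_NONE = 0x0;
constexpr uint64_t CLK_ADDRESS_CLAMP_TO_EDGE = 0x2;
constexpr uint64_t CLK_ADDRESS_CLAMP = 0x4;
constexpr uint64_t CLK_ADDRESS_REPEAT = 0x6;
constexpr uint64_t CLK_ADDRESS_MIRRORED_REPEAT = 0x8;
constexpr uint64_t CLK_ADDRESS_MODE_MASK = 0xE;
constexpr uint64_t CLK_FILTER_LINEAR = 0x20;

enum class AddressingMode { None, Clamp, ClampToEdge, Repeat, RepeatMirrored };
enum class FilterMode { Nearest, Linear };

AddressingMode getSamplerAddressingMode(uint64_t Bitmask) {
  switch (Bitmask & CLK_ADDRESS_MODE_MASK) {
  case CLK_ADDRESS_CLAMP:
    return AddressingMode::Clamp;
  case CLK_ADDRESS_CLAMP_TO_EDGE:
    return AddressingMode::ClampToEdge;
  case CLK_ADDRESS_REPEAT:
    return AddressingMode::Repeat;
  case CLK_ADDRESS_MIRRORED_REPEAT:
    return AddressingMode::RepeatMirrored;
  case CLK_ADDRESS_NONE:
  default:
    return AddressingMode::None;
  }
}

unsigned getSamplerParamFromBitmask(uint64_t Bitmask) {
  return (Bitmask & CLK_NORMALIZED_COORDS_TRUE) ? 1 : 0;
}

FilterMode getSamplerFilterMode(uint64_t Bitmask) {
  if (Bitmask & CLK_FILTER_LINEAR)
    return FilterMode::Linear;
  return FilterMode::Nearest; // nearest is also the fallback (1820-1822)
}
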
1826 const SPIRV::IncomingCall *Call,
1836 if (!GR->isScalarOfType(Sampler, SPIRV::OpTypeSampler) &&
1848 Register SampledImage = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1850 MIRBuilder.buildInstr(SPIRV::OpSampledImage)
1859 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeVector) {
1866 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
1871 .addImm(SPIRV::ImageOperand::Lod)
1873 MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
1879 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
1884 .addImm(SPIRV::ImageOperand::Lod)
1888 MIRBuilder.buildInstr(SPIRV::OpImageRead)
1893 .addImm(SPIRV::ImageOperand::Sample)
1896 MIRBuilder.buildInstr(SPIRV::OpImageRead)
1905 static bool generateWriteImageInst(const SPIRV::IncomingCall *Call,
1908 MIRBuilder.buildInstr(SPIRV::OpImageWrite)
1916 const SPIRV::IncomingCall *Call,
1938 : MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1939 MIRBuilder.buildInstr(SPIRV::OpSampledImage)
1959 "Unable to recognize SPIRV type name: " + ReturnType;
1962 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
1967 .addImm(SPIRV::ImageOperand::Lod)
1974 static bool generateSelectInst(const SPIRV::IncomingCall *Call,
1981 static bool generateConstructInst(const SPIRV::IncomingCall *Call,
1984 return buildOpFromWrapper(MIRBuilder, SPIRV::OpCompositeConstruct, Call,
1988 static bool generateCoopMatrInst(const SPIRV::IncomingCall *Call,
1991 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1993 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1994 bool IsSet = Opcode != SPIRV::OpCooperativeMatrixStoreKHR &&
1995 Opcode != SPIRV::OpCooperativeMatrixStoreCheckedINTEL &&
1996 Opcode != SPIRV::OpCooperativeMatrixPrefetchINTEL;
2001 case SPIRV::OpCooperativeMatrixLoadKHR:
2004 case SPIRV::OpCooperativeMatrixStoreKHR:
2007 case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
2010 case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
2014 case SPIRV::OpCooperativeMatrixMulAddKHR:
2021 if (Opcode == SPIRV::OpCooperativeMatrixPrefetchINTEL) {
2023 auto MIB = MIRBuilder.buildInstr(SPIRV::OpCooperativeMatrixPrefetchINTEL)
2040 if (Opcode == SPIRV::OpCooperativeMatrixLengthKHR) {
2054 static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call,
2058 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2060 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2064 case SPIRV::OpSpecConstant: {
2068 buildOpDecorate(Call->ReturnRegister, MIRBuilder, SPIRV::Decoration::SpecId,
2079 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeBool) {
2082 ? SPIRV::OpSpecConstantTrue
2083 : SPIRV::OpSpecConstantFalse;
2089 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) {
2097 case SPIRV::OpSpecConstantComposite: {
2110 static bool buildNDRange(const SPIRV::IncomingCall *Call,
2115 assert(PtrType->getOpcode() == SPIRV::OpTypePointer &&
2120 Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2133 if (SpvTy->getOpcode() == SPIRV::OpTypePointer) {
2144 GlobalWorkSize = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2146 MIRBuilder.buildInstr(SPIRV::OpLoad)
2163 MIRBuilder.buildInstr(SPIRV::OpBuildNDRange)
2169 return MIRBuilder.buildInstr(SPIRV::OpStore)
2179 unsigned SC1 = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
2184 static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call,
2209 unsigned SC = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
2212 Int32Ty, MIRBuilder, SPIRV::StorageClass::Function);
2214 Register Reg = MRI->createVirtualRegister(&SPIRV::pIDRegClass);
2228 // SPIRV OpEnqueueKernel instruction has 10+ arguments.
2229 auto MIB = MIRBuilder.buildInstr(SPIRV::OpEnqueueKernel)
2269 static bool generateEnqueueInst(const SPIRV::IncomingCall *Call,
2273 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2275 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2278 case SPIRV::OpRetainEvent:
2279 case SPIRV::OpReleaseEvent:
2281 case SPIRV::OpCreateUserEvent:
2282 case SPIRV::OpGetDefaultQueue:
2286 case SPIRV::OpIsValidEvent:
2291 case SPIRV::OpSetUserEventStatus:
2295 case SPIRV::OpCaptureEventProfilingInfo:
2300 case SPIRV::OpBuildNDRange:
2302 case SPIRV::OpEnqueueKernel:
2309 static bool generateAsyncCopy(const SPIRV::IncomingCall *Call,
2313 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2315 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2317 bool IsSet = Opcode == SPIRV::OpGroupAsyncCopy;
2323 auto Scope = buildConstantIntReg32(SPIRV::Scope::Workgroup, MIRBuilder, GR);
2326 case SPIRV::OpGroupAsyncCopy: {
2328 Call->ReturnType->getOpcode() == SPIRV::OpTypeEvent
2350 case SPIRV::OpGroupWaitEvents:
2361 const SPIRV::IncomingCall *Call,
2365 const SPIRV::ConvertBuiltin *Builtin =
2366 SPIRV::lookupConvertBuiltin(Call->Builtin->Name, Call->Builtin->Set);
2369 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2371 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2378 SPIRV::Decoration::SaturatedConversion, {});
2381 SPIRV::Decoration::FPRoundingMode,
2386 unsigned Opcode = SPIRV::OpNop;
2387 if (GR->isScalarOrVectorOfType(Call->Arguments[0], SPIRV::OpTypeInt)) {
2389 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2392 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpSatConvertUToS
2393 : SPIRV::OpSatConvertSToU;
2395 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpUConvert
2396 : SPIRV::OpSConvert;
2398 SPIRV::OpTypeFloat)) {
2404 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2409 Opcode = SPIRV::OpConvertBF16ToFINTEL;
2413 Opcode = IsSourceSigned ? SPIRV::OpConvertSToF : SPIRV::OpConvertUToF;
2417 SPIRV::OpTypeFloat)) {
2419 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2425 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2430 Opcode = SPIRV::OpConvertFToBF16INTEL;
2432 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpConvertFToS
2433 : SPIRV::OpConvertFToU;
2436 SPIRV::OpTypeFloat)) {
2438 Opcode = SPIRV::OpFConvert;
2455 assert(Opcode != SPIRV::OpNop &&
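The convert-builtin matches (2386-2438) show a decision tree over the source and destination scalar kinds plus saturation/signedness flags. Summarized below as a toy function that returns the opcode name; the Kind enum and flags struct are illustrative, and the bfloat16 INTEL extension paths (2409, 2430) are omitted.

#include <string>

enum class Kind { Int, Float };

struct ConvertFlags {
  bool IsSaturated;
  bool IsDestinationSigned;
  bool IsSourceSigned;
};

std::string pickConvertOpcode(Kind Src, Kind Dst, const ConvertFlags &F) {
  if (Src == Kind::Int && Dst == Kind::Int) // lines 2389-2396
    return F.IsSaturated
               ? (F.IsDestinationSigned ? "OpSatConvertUToS"
                                        : "OpSatConvertSToU")
               : (F.IsDestinationSigned ? "OpUConvert" : "OpSConvert");
  if (Src == Kind::Int && Dst == Kind::Float) // line 2413
    return F.IsSourceSigned ? "OpConvertSToF" : "OpConvertUToF";
  if (Src == Kind::Float && Dst == Kind::Int) // lines 2432-2433
    return F.IsDestinationSigned ? "OpConvertFToS" : "OpConvertFToU";
  return "OpFConvert"; // float -> float, line 2438
}
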
2465 static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call,
2469 const SPIRV::VectorLoadStoreBuiltin *Builtin =
2470 SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2474 MIRBuilder.buildInstr(SPIRV::OpExtInst)
2477 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
2491 static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call,
2495 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2497 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2498 bool IsLoad = Opcode == SPIRV::OpLoad;
2520 namespace SPIRV {
2530 SPIRV::InstructionSet::InstructionSet Set) {
2539 case SPIRV::Relational:
2540 case SPIRV::Atomic:
2541 case SPIRV::Barrier:
2542 case SPIRV::CastToPtr:
2543 case SPIRV::ImageMiscQuery:
2544 case SPIRV::SpecConstant:
2545 case SPIRV::Enqueue:
2546 case SPIRV::AsyncCopy:
2547 case SPIRV::LoadStore:
2548 case SPIRV::CoopMatr:
2550 SPIRV::lookupNativeBuiltin(Call->Builtin->Name, Call->Builtin->Set))
2553 case SPIRV::Extended:
2554 if (const auto *R = SPIRV::lookupExtendedBuiltin(Call->Builtin->Name,
2558 case SPIRV::VectorLoadStore:
2559 if (const auto *R = SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2561 return std::make_tuple(SPIRV::Extended, 0, R->Number);
2563 case SPIRV::Group:
2564 if (const auto *R = SPIRV::lookupGroupBuiltin(Call->Builtin->Name))
2567 case SPIRV::AtomicFloating:
2568 if (const auto *R = SPIRV::lookupAtomicFloatingBuiltin(Call->Builtin->Name))
2571 case SPIRV::IntelSubgroups:
2572 if (const auto *R = SPIRV::lookupIntelSubgroupsBuiltin(Call->Builtin->Name))
2575 case SPIRV::GroupUniform:
2576 if (const auto *R = SPIRV::lookupGroupUniformBuiltin(Call->Builtin->Name))
2579 case SPIRV::WriteImage:
2580 return std::make_tuple(Call->Builtin->Group, SPIRV::OpImageWrite, 0);
2581 case SPIRV::Select:
2583 case SPIRV::Construct:
2584 return std::make_tuple(Call->Builtin->Group, SPIRV::OpCompositeConstruct,
2586 case SPIRV::KernelClock:
2587 return std::make_tuple(Call->Builtin->Group, SPIRV::OpReadClockKHR, 0);
2595 SPIRV::InstructionSet::InstructionSet Set,
2621 case SPIRV::Extended:
2623 case SPIRV::Relational:
2625 case SPIRV::Group:
2627 case SPIRV::Variable:
2629 case SPIRV::Atomic:
2631 case SPIRV::AtomicFloating:
2633 case SPIRV::Barrier:
2635 case SPIRV::CastToPtr:
2637 case SPIRV::Dot:
2639 case SPIRV::Wave:
2641 case SPIRV::ICarryBorrow:
2643 case SPIRV::GetQuery:
2645 case SPIRV::ImageSizeQuery:
2647 case SPIRV::ImageMiscQuery:
2649 case SPIRV::ReadImage:
2651 case SPIRV::WriteImage:
2653 case SPIRV::SampleImage:
2655 case SPIRV::Select:
2657 case SPIRV::Construct:
2659 case SPIRV::SpecConstant:
2661 case SPIRV::Enqueue:
2663 case SPIRV::AsyncCopy:
2665 case SPIRV::Convert:
2667 case SPIRV::VectorLoadStore:
2669 case SPIRV::LoadStore:
2671 case SPIRV::IntelSubgroups:
2673 case SPIRV::GroupUniform:
2675 case SPIRV::KernelClock:
2677 case SPIRV::CoopMatr:
2710 // Unable to recognize SPIRV type name.
2767 } // namespace SPIRV
2790 const SPIRV::BuiltinType *TypeRecord,
2811 SPIRV::AccessQualifier::AccessQualifier(
2833 const SPIRV::AccessQualifier::AccessQualifier Qualifier,
2843 SPIRV::AccessQualifier::AccessQualifier accessQualifier =
2844 SPIRV::AccessQualifier::None;
2846 accessQualifier = Qualifier == SPIRV::AccessQualifier::WriteOnly
2847 ? SPIRV::AccessQualifier::WriteOnly
2848 : SPIRV::AccessQualifier::AccessQualifier(
2855 SPIRV::Dim::Dim(ExtensionType->getIntParameter(0)),
2858 SPIRV::ImageFormat::ImageFormat(ExtensionType->getIntParameter(5)),
2866 OpaqueType, SPIRV::AccessQualifier::ReadOnly, MIRBuilder, GR);
2871 namespace SPIRV {
2880 const SPIRV::OpenCLType *OCLTypeRecord =
2881 SPIRV::lookupOpenCLType(NameWithParameters);
2922 SPIRV::AccessQualifier::AccessQualifier AccessQual,
2944 const SPIRV::BuiltinType *TypeRecord = SPIRV::lookupBuiltinType(Name);
2954 case SPIRV::OpTypeImage:
2957 case SPIRV::OpTypePipe:
2960 case SPIRV::OpTypeDeviceEvent:
2963 case SPIRV::OpTypeSampler:
2966 case SPIRV::OpTypeSampledImage:
2969 case SPIRV::OpTypeCooperativeMatrixKHR:
2985 } // namespace SPIRV