Lines matching defs:SPIRV (identifier cross-reference: each entry gives the line number in the defining source file, followed by that line of code)
15 #include "SPIRV.h"
27 namespace SPIRV {
165 } // namespace SPIRV
172 namespace SPIRV {
208 } // namespace SPIRV
220 static std::unique_ptr<const SPIRV::IncomingCall>
222 SPIRV::InstructionSet::InstructionSet Set,
225 std::string BuiltinName = SPIRV::lookupBuiltinNameHelper(DemangledCall);
234 const SPIRV::DemangledBuiltin *Builtin;
235 if ((Builtin = SPIRV::lookupBuiltin(BuiltinName, Set)))
236 return std::make_unique<SPIRV::IncomingCall>(
251 if (Set == SPIRV::InstructionSet::OpenCL_std)
253 else if (Set == SPIRV::InstructionSet::GLSL_std_450)
261 if (Set == SPIRV::InstructionSet::OpenCL_std)
263 else if (Set == SPIRV::InstructionSet::GLSL_std_450)
270 if (Set == SPIRV::InstructionSet::OpenCL_std ||
271 Set == SPIRV::InstructionSet::GLSL_std_450)
278 (Builtin = SPIRV::lookupBuiltin(Prefix + BuiltinName, Set)))
279 return std::make_unique<SPIRV::IncomingCall>(
309 (Builtin = SPIRV::lookupBuiltin(BuiltinName + Suffix, Set)))
310 return std::make_unique<SPIRV::IncomingCall>(
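The fragment above (lines 220-310) shows a three-step lookup: a direct table hit, a retry with a signedness prefix chosen per instruction set, and a retry with a return-type suffix. A condensed sketch of that chain follows; the function name is hypothetical, and the prefix spellings ("s_"/"u_" for OpenCL, "S"/"U" for GLSL) are inferred from the two instruction-set branches at lines 251-263, not quoted from the file:

    static const SPIRV::DemangledBuiltin *
    lookupWithRetries(const std::string &Name,
                      SPIRV::InstructionSet::InstructionSet Set, bool IsSigned,
                      const std::string &Suffix) {
      if (const auto *B = SPIRV::lookupBuiltin(Name, Set))
        return B; // 1. direct table hit (line 235)
      std::string Prefix;
      if (Set == SPIRV::InstructionSet::OpenCL_std)
        Prefix = IsSigned ? "s_" : "u_";
      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = IsSigned ? "S" : "U";
      if (const auto *B = SPIRV::lookupBuiltin(Prefix + Name, Set))
        return B; // 2. signedness-prefixed retry (line 278)
      return SPIRV::lookupBuiltin(Name + Suffix, Set); // 3. suffixed retry (line 309)
    }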
398 if (ResultType->getOpcode() == SPIRV::OpTypeVector) {
411 MIRBuilder.getMRI()->setRegClass(ResultRegister, &SPIRV::IDRegClass);
424 if (ReturnType->getOpcode() == SPIRV::OpTypeVector) {
445 DestinationReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
460 SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType,
463 MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::IDRegClass);
467 VariableType, MIRBuilder, SPIRV::StorageClass::Input);
473 SPIRV::StorageClass::Input, nullptr, /* isConst= */ isConst,
474 /* HasLinkageTy */ hasLinkageTy, SPIRV::LinkageType::Import, MIRBuilder,
495 static SPIRV::MemorySemantics::MemorySemantics
499 return SPIRV::MemorySemantics::None;
501 return SPIRV::MemorySemantics::Acquire;
503 return SPIRV::MemorySemantics::Release;
505 return SPIRV::MemorySemantics::AcquireRelease;
507 return SPIRV::MemorySemantics::SequentiallyConsistent;
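The match elides this function's case labels. Assuming the argument is a std::memory_order (OpenCL's memory_order enumerators map onto it one-for-one), the full mapping plausibly reads as the sketch below:

    // Sketch; requires <atomic> for std::memory_order.
    static SPIRV::MemorySemantics::MemorySemantics
    getSPIRVMemSemantics(std::memory_order MemOrder) {
      switch (MemOrder) {
      case std::memory_order_relaxed:
        return SPIRV::MemorySemantics::None;
      case std::memory_order_acquire:
        return SPIRV::MemorySemantics::Acquire;
      case std::memory_order_release:
        return SPIRV::MemorySemantics::Release;
      case std::memory_order_acq_rel:
        return SPIRV::MemorySemantics::AcquireRelease;
      case std::memory_order_seq_cst:
        return SPIRV::MemorySemantics::SequentiallyConsistent;
      default: // e.g. memory_order_consume
        llvm_unreachable("unhandled memory order");
      }
    }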
513 static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope) {
515 case SPIRV::CLMemoryScope::memory_scope_work_item:
516 return SPIRV::Scope::Invocation;
517 case SPIRV::CLMemoryScope::memory_scope_work_group:
518 return SPIRV::Scope::Workgroup;
519 case SPIRV::CLMemoryScope::memory_scope_device:
520 return SPIRV::Scope::Device;
521 case SPIRV::CLMemoryScope::memory_scope_all_svm_devices:
522 return SPIRV::Scope::CrossDevice;
523 case SPIRV::CLMemoryScope::memory_scope_sub_group:
524 return SPIRV::Scope::Subgroup;
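All five CL memory scopes map onto distinct SPIR-V scopes, so the switch above is total. For reference, the SPIR-V specification (section 3.27) encodes the Scope operand with these literals; the enum below is an illustrative mirror, not the backend's own type:

    enum class ScopeLiteral : uint32_t {
      CrossDevice = 0, // memory_scope_all_svm_devices
      Device = 1,      // memory_scope_device
      Workgroup = 2,   // memory_scope_work_group
      Subgroup = 3,    // memory_scope_sub_group
      Invocation = 4,  // memory_scope_work_item
    };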
537 SPIRV::Scope::Scope Scope,
543 static_cast<SPIRV::CLMemoryScope>(getIConstVal(CLScopeRegister, MRI));
547 MRI->setRegClass(CLScopeRegister, &SPIRV::IDRegClass);
567 MRI->setRegClass(SemanticsRegister, &SPIRV::IDRegClass);
575 const SPIRV::IncomingCall *Call,
586 MRI->setRegClass(ArgReg, &SPIRV::IDRegClass);
595 static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call,
598 return buildOpFromWrapper(MIRBuilder, SPIRV::OpStore, Call, Register(0));
602 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
603 MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
604 MIRBuilder.buildInstr(SPIRV::OpStore)
611 static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call,
616 return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicLoad, Call, TypeReg);
619 MIRBuilder.getMRI()->setRegClass(PtrRegister, &SPIRV::IDRegClass);
626 MIRBuilder.getMRI()->setRegClass(ScopeRegister, &SPIRV::IDRegClass);
628 ScopeRegister = buildConstantIntReg(SPIRV::Scope::Device, MIRBuilder, GR);
634 MIRBuilder.getMRI()->setRegClass(MemSemanticsReg, &SPIRV::IDRegClass);
637 SPIRV::MemorySemantics::SequentiallyConsistent |
642 MIRBuilder.buildInstr(SPIRV::OpAtomicLoad)
652 static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call,
656 return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicStore, Call, Register(0));
659 buildConstantIntReg(SPIRV::Scope::Device, MIRBuilder, GR);
661 MIRBuilder.getMRI()->setRegClass(PtrRegister, &SPIRV::IDRegClass);
663 SPIRV::MemorySemantics::SequentiallyConsistent |
666 MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
667 MIRBuilder.buildInstr(SPIRV::OpAtomicStore)
677 const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin,
689 MRI->setRegClass(ObjectPtr, &SPIRV::IDRegClass);
690 MRI->setRegClass(ExpectedArg, &SPIRV::IDRegClass);
691 MRI->setRegClass(Desired, &SPIRV::IDRegClass);
696 SPIRV::OpTypePointer);
699 assert(IsCmpxchg ? ExpectedType == SPIRV::OpTypeInt
700 : ExpectedType == SPIRV::OpTypePointer);
701 assert(GR->isScalarOfType(Desired, SPIRV::OpTypeInt));
704 assert(SpvObjectPtrTy->getOperand(2).isReg() && "SPIRV type is expected");
705 auto StorageClass = static_cast<SPIRV::StorageClass::StorageClass>(
713 ? SPIRV::MemorySemantics::None
714 : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
717 ? SPIRV::MemorySemantics::None
718 : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
732 MRI->setRegClass(Call->Arguments[3], &SPIRV::IDRegClass);
733 MRI->setRegClass(Call->Arguments[4], &SPIRV::IDRegClass);
741 auto Scope = IsCmpxchg ? SPIRV::Scope::Workgroup : SPIRV::Scope::Device;
745 auto ClScope = static_cast<SPIRV::CLMemoryScope>(
750 MRI->setRegClass(Call->Arguments[5], &SPIRV::IDRegClass);
763 MRI->setRegClass(Tmp, &SPIRV::IDRegClass);
777 MIRBuilder.buildInstr(SPIRV::OpStore).addUse(ExpectedArg).addUse(Tmp);
784 static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
797 ScopeRegister = buildScopeReg(ScopeRegister, SPIRV::Scope::Workgroup,
801 unsigned Semantics = SPIRV::MemorySemantics::None;
802 MRI->setRegClass(PtrRegister, &SPIRV::IDRegClass);
807 MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
811 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeFloat) {
812 if (Opcode == SPIRV::OpAtomicIAdd) {
813 Opcode = SPIRV::OpAtomicFAddEXT;
814 } else if (Opcode == SPIRV::OpAtomicISub) {
817 Opcode = SPIRV::OpAtomicFAddEXT;
820 MRI->setRegClass(NegValueReg, &SPIRV::IDRegClass);
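SPIR-V has no atomic float-subtract opcode, so the OpTypeFloat branch above rewrites the subtraction as an addition of the negated operand and reuses OpAtomicFAddEXT. Schematically (register names illustrative):

    // Rewrite performed at lines 811-820:
    //   atomic_sub(ptr, v), v floating-point
    //     ==>  NegValueReg = fneg v
    //          OpAtomicFAddEXT ptr, Scope, Semantics, NegValueReg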
842 static bool buildAtomicFloatingRMWInst(const SPIRV::IncomingCall *Call,
852 MRI->setRegClass(PtrReg, &SPIRV::IDRegClass);
855 MRI->setRegClass(ScopeReg, &SPIRV::IDRegClass);
858 MRI->setRegClass(MemSemanticsReg, &SPIRV::IDRegClass);
861 MRI->setRegClass(ValueReg, &SPIRV::IDRegClass);
875 static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call,
878 bool IsSet = Opcode == SPIRV::OpAtomicFlagTestAndSet;
886 unsigned Semantics = SPIRV::MemorySemantics::SequentiallyConsistent;
892 assert((Opcode != SPIRV::OpAtomicFlagClear ||
893 (Semantics != SPIRV::MemorySemantics::Acquire &&
894 Semantics != SPIRV::MemorySemantics::AcquireRelease)) &&
900 buildScopeReg(ScopeRegister, SPIRV::Scope::Device, MIRBuilder, GR, MRI);
912 static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
920 unsigned MemSemantics = SPIRV::MemorySemantics::None;
922 if (MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE)
923 MemSemantics |= SPIRV::MemorySemantics::WorkgroupMemory;
925 if (MemFlags & SPIRV::CLK_GLOBAL_MEM_FENCE)
926 MemSemantics |= SPIRV::MemorySemantics::CrossWorkgroupMemory;
928 if (MemFlags & SPIRV::CLK_IMAGE_MEM_FENCE)
929 MemSemantics |= SPIRV::MemorySemantics::ImageMemory;
931 if (Opcode == SPIRV::OpMemoryBarrier) {
936 MemSemantics |= SPIRV::MemorySemantics::SequentiallyConsistent;
942 MRI->setRegClass(MemSemanticsReg, &SPIRV::IDRegClass);
947 SPIRV::Scope::Scope Scope = SPIRV::Scope::Workgroup;
948 SPIRV::Scope::Scope MemScope = Scope;
951 ((Opcode != SPIRV::OpMemoryBarrier && Call->Arguments.size() == 2) ||
952 (Opcode == SPIRV::OpMemoryBarrier && Call->Arguments.size() == 3)) &&
954 Register ScopeArg = (Opcode == SPIRV::OpMemoryBarrier) ? Call->Arguments[2]
956 SPIRV::CLMemoryScope CLScope =
957 static_cast<SPIRV::CLMemoryScope>(getIConstVal(ScopeArg, MRI));
959 if (!(MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE) ||
960 (Opcode == SPIRV::OpMemoryBarrier))
965 MRI->setRegClass(ScopeReg, &SPIRV::IDRegClass);
973 if (Opcode != SPIRV::OpMemoryBarrier)
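A worked example of the flag arithmetic above for barrier(CLK_LOCAL_MEM_FENCE | CLK_GLOBAL_MEM_FENCE). The constants are restated from the OpenCL C and SPIR-V specifications (section 3.25) for a self-contained check; they are not quoted from this file:

    constexpr unsigned CLK_LOCAL_MEM_FENCE = 0x1;     // OpenCL C spec
    constexpr unsigned CLK_GLOBAL_MEM_FENCE = 0x2;    // OpenCL C spec
    constexpr unsigned WorkgroupMemory = 0x100;       // SPIR-V spec, 3.25
    constexpr unsigned CrossWorkgroupMemory = 0x200;  // SPIR-V spec, 3.25
    constexpr unsigned SequentiallyConsistent = 0x10; // SPIR-V spec, 3.25
    // OpControlBarrier (i.e. not OpMemoryBarrier) also ORs in
    // SequentiallyConsistent, per line 936:
    static_assert((WorkgroupMemory | CrossWorkgroupMemory |
                   SequentiallyConsistent) == 0x310,
                  "semantics literal emitted for barrier(local | global)");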
979 static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim) {
981 case SPIRV::Dim::DIM_1D:
982 case SPIRV::Dim::DIM_Buffer:
984 case SPIRV::Dim::DIM_2D:
985 case SPIRV::Dim::DIM_Cube:
986 case SPIRV::Dim::DIM_Rect:
988 case SPIRV::Dim::DIM_3D:
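The match hides the return statements; given how the Dim labels are grouped, the obvious completion is 1, 2, and 3 coordinate components respectively. A sketch (the return values and the error path are inferred, not quoted):

    static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim) {
      switch (dim) {
      case SPIRV::Dim::DIM_1D:
      case SPIRV::Dim::DIM_Buffer:
        return 1;
      case SPIRV::Dim::DIM_2D:
      case SPIRV::Dim::DIM_Cube:
      case SPIRV::Dim::DIM_Rect:
        return 2;
      case SPIRV::Dim::DIM_3D:
        return 3;
      default:
        report_fatal_error("Cannot get number of components for given Dim");
      }
    }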
997 assert(imgType->getOpcode() == SPIRV::OpTypeImage);
998 auto dim = static_cast<SPIRV::Dim::Dim>(imgType->getOperand(2).getImm());
1008 static bool generateExtInst(const SPIRV::IncomingCall *Call,
1012 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1014 SPIRV::lookupExtendedBuiltin(Builtin->Name, Builtin->Set)->Number;
1018 MIRBuilder.buildInstr(SPIRV::OpExtInst)
1021 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
1029 static bool generateRelationalInst(const SPIRV::IncomingCall *Call,
1033 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1035 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1055 static bool generateGroupInst(const SPIRV::IncomingCall *Call,
1058 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1059 const SPIRV::GroupBuiltin *GroupBuiltin =
1060 SPIRV::lookupGroupBuiltin(Builtin->Name);
1077 MRI->setRegClass(ScopeReg, &SPIRV::IDRegClass);
1086 MRI->setRegClass(ArgReg, &SPIRV::IDRegClass);
1101 SPIRV::OpTypeBool)
1120 auto Scope = Builtin->Name.starts_with("sub_group") ? SPIRV::Scope::Subgroup
1121 : SPIRV::Scope::Workgroup;
1134 MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1137 MRI->setRegClass(Call->Arguments[i], &SPIRV::IDRegClass);
1148 static bool generateIntelSubgroupsInst(const SPIRV::IncomingCall *Call,
1151 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1154 if (!ST->canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1160 const SPIRV::IntelSubgroupsBuiltin *IntelSubgroups =
1161 SPIRV::lookupIntelSubgroupsBuiltin(Builtin->Name);
1165 bool IsSet = OpCode != SPIRV::OpSubgroupBlockWriteINTEL &&
1166 OpCode != SPIRV::OpSubgroupImageBlockWriteINTEL;
1176 if (Arg0Type->getOpcode() == SPIRV::OpTypeImage) {
1182 case SPIRV::OpSubgroupBlockReadINTEL:
1183 OpCode = SPIRV::OpSubgroupImageBlockReadINTEL;
1185 case SPIRV::OpSubgroupBlockWriteINTEL:
1186 OpCode = SPIRV::OpSubgroupImageBlockWriteINTEL;
1211 MRI->setRegClass(Call->Arguments[i], &SPIRV::IDRegClass);
1217 static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call,
1220 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1224 SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
1230 const SPIRV::GroupUniformBuiltin *GroupUniform =
1231 SPIRV::lookupGroupUniformBuiltin(Builtin->Name);
1235 MRI->setRegClass(GroupResultReg, &SPIRV::IDRegClass);
1239 MRI->setRegClass(ScopeReg, &SPIRV::IDRegClass);
1256 MRI->setRegClass(ValueReg, &SPIRV::IDRegClass);
1268 static bool generateKernelClockInst(const SPIRV::IncomingCall *Call,
1271 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1274 if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock)) {
1283 MRI->setRegClass(ResultReg, &SPIRV::IDRegClass);
1286 SPIRV::Scope::Scope ScopeArg =
1287 StringSwitch<SPIRV::Scope::Scope>(Builtin->Name)
1288 .EndsWith("device", SPIRV::Scope::Scope::Device)
1289 .EndsWith("work_group", SPIRV::Scope::Scope::Workgroup)
1290 .EndsWith("sub_group", SPIRV::Scope::Scope::Subgroup);
1293 MIRBuilder.buildInstr(SPIRV::OpReadClockKHR)
1327 static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call,
1330 SPIRV::BuiltIn::BuiltIn BuiltinValue,
1353 MRI->setRegClass(DefaultReg, &SPIRV::IDRegClass);
1371 MRI->setRegClass(Extracted, &SPIRV::IDRegClass);
1390 MRI->setRegClass(CompareRegister, &SPIRV::IDRegClass);
1407 MRI->setRegClass(SelectionResult, &SPIRV::IDRegClass);
1425 static bool generateBuiltinVar(const SPIRV::IncomingCall *Call,
1429 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1430 SPIRV::BuiltIn::BuiltIn Value =
1431 SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1433 if (Value == SPIRV::BuiltIn::GlobalInvocationId)
1439 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeVector)
1449 static bool generateAtomicInst(const SPIRV::IncomingCall *Call,
1453 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1455 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1458 case SPIRV::OpStore:
1460 case SPIRV::OpAtomicLoad:
1462 case SPIRV::OpAtomicStore:
1464 case SPIRV::OpAtomicCompareExchange:
1465 case SPIRV::OpAtomicCompareExchangeWeak:
1468 case SPIRV::OpAtomicIAdd:
1469 case SPIRV::OpAtomicISub:
1470 case SPIRV::OpAtomicOr:
1471 case SPIRV::OpAtomicXor:
1472 case SPIRV::OpAtomicAnd:
1473 case SPIRV::OpAtomicExchange:
1475 case SPIRV::OpMemoryBarrier:
1476 return buildBarrierInst(Call, SPIRV::OpMemoryBarrier, MIRBuilder, GR);
1477 case SPIRV::OpAtomicFlagTestAndSet:
1478 case SPIRV::OpAtomicFlagClear:
1488 static bool generateAtomicFloatingInst(const SPIRV::IncomingCall *Call,
1492 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1493 unsigned Opcode = SPIRV::lookupAtomicFloatingBuiltin(Builtin->Name)->Opcode;
1496 case SPIRV::OpAtomicFAddEXT:
1497 case SPIRV::OpAtomicFMinEXT:
1498 case SPIRV::OpAtomicFMaxEXT:
1505 static bool generateBarrierInst(const SPIRV::IncomingCall *Call,
1509 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1511 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1516 static bool generateCastToPtrInst(const SPIRV::IncomingCall *Call,
1524 static bool generateDotOrFMulInst(const SPIRV::IncomingCall *Call,
1528 return buildOpFromWrapper(MIRBuilder, SPIRV::OpDot, Call,
1531 bool IsVec = Opcode == SPIRV::OpTypeVector;
1533 MIRBuilder.buildInstr(IsVec ? SPIRV::OpDot : SPIRV::OpFMulS)
1541 static bool generateWaveInst(const SPIRV::IncomingCall *Call,
1544 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1545 SPIRV::BuiltIn::BuiltIn Value =
1546 SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1549 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt);
1557 static bool generateGetQueryInst(const SPIRV::IncomingCall *Call,
1561 SPIRV::BuiltIn::BuiltIn Value =
1562 SPIRV::lookupGetBuiltin(Call->Builtin->Name, Call->Builtin->Set)->Value;
1563 uint64_t IsDefault = (Value == SPIRV::BuiltIn::GlobalSize ||
1564 Value == SPIRV::BuiltIn::WorkgroupSize ||
1565 Value == SPIRV::BuiltIn::EnqueuedWorkgroupSize);
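The IsDefault flag feeds genWorkgroupQuery's out-of-range fallback: per the OpenCL spec, size-style queries fold a missing dimension to 1, id-style queries to 0. Concretely:

    // get_global_size(3) in a 3-dimensional launch: the GlobalSize builtin
    // vector has no fourth component, so the compare/select at lines
    // 1390-1407 yields the default — 1 here, because GlobalSize is one of
    // the IsDefault builtins above; get_global_id(3) would yield 0 instead.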
1569 static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call,
1573 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1575 SPIRV::lookupImageQueryBuiltin(Builtin->Name, Builtin->Set)->Component;
1580 unsigned NumExpectedRetComponents = RetTy->getOpcode() == SPIRV::OpTypeVector
1591 MIRBuilder.getMRI()->setRegClass(QueryResult, &SPIRV::IDRegClass);
1597 bool IsDimBuf = ImgType->getOperand(2).getImm() == SPIRV::Dim::DIM_Buffer;
1599 IsDimBuf ? SPIRV::OpImageQuerySize : SPIRV::OpImageQuerySizeLod;
1600 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1617 if (QueryResultType->getOpcode() == SPIRV::OpTypeVector) {
1623 MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
1633 auto MIB = MIRBuilder.buildInstr(SPIRV::OpVectorShuffle)
1644 static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call,
1647 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt &&
1651 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1653 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1656 MIRBuilder.getMRI()->setRegClass(Image, &SPIRV::IDRegClass);
1657 SPIRV::Dim::Dim ImageDimensionality = static_cast<SPIRV::Dim::Dim>(
1662 case SPIRV::OpImageQuerySamples:
1663 assert(ImageDimensionality == SPIRV::Dim::DIM_2D &&
1666 case SPIRV::OpImageQueryLevels:
1667 assert((ImageDimensionality == SPIRV::Dim::DIM_1D ||
1668 ImageDimensionality == SPIRV::Dim::DIM_2D ||
1669 ImageDimensionality == SPIRV::Dim::DIM_3D ||
1670 ImageDimensionality == SPIRV::Dim::DIM_Cube) &&
1683 static SPIRV::SamplerAddressingMode::SamplerAddressingMode
1685 switch (Bitmask & SPIRV::CLK_ADDRESS_MODE_MASK) {
1686 case SPIRV::CLK_ADDRESS_CLAMP:
1687 return SPIRV::SamplerAddressingMode::Clamp;
1688 case SPIRV::CLK_ADDRESS_CLAMP_TO_EDGE:
1689 return SPIRV::SamplerAddressingMode::ClampToEdge;
1690 case SPIRV::CLK_ADDRESS_REPEAT:
1691 return SPIRV::SamplerAddressingMode::Repeat;
1692 case SPIRV::CLK_ADDRESS_MIRRORED_REPEAT:
1693 return SPIRV::SamplerAddressingMode::RepeatMirrored;
1694 case SPIRV::CLK_ADDRESS_NONE:
1695 return SPIRV::SamplerAddressingMode::None;
1702 return (Bitmask & SPIRV::CLK_NORMALIZED_COORDS_TRUE) ? 1 : 0;
1705 static SPIRV::SamplerFilterMode::SamplerFilterMode
1707 if (Bitmask & SPIRV::CLK_FILTER_LINEAR)
1708 return SPIRV::SamplerFilterMode::Linear;
1709 if (Bitmask & SPIRV::CLK_FILTER_NEAREST)
1710 return SPIRV::SamplerFilterMode::Nearest;
1711 return SPIRV::SamplerFilterMode::Nearest;
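One OpenCL sampler bitmask feeds all three helpers above, producing the three trailing literals of OpConstantSampler (addressing mode, normalized-coords param, filter mode, in that order per the SPIR-V spec). The helper names are elided by the match; the decomposition itself is what matters:

    //   M = CLK_ADDRESS_CLAMP_TO_EDGE | CLK_NORMALIZED_COORDS_TRUE
    //     | CLK_FILTER_LINEAR
    //   addressing-mode helper (1683) -> SamplerAddressingMode::ClampToEdge
    //   normalized-param helper (1702) -> 1
    //   filter-mode helper (1705)      -> SamplerFilterMode::Linear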
1715 const SPIRV::IncomingCall *Call,
1720 MRI->setRegClass(Image, &SPIRV::IDRegClass);
1721 MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
1725 MRI->setRegClass(Call->Arguments[2], &SPIRV::IDRegClass);
1729 if (!GR->isScalarOfType(Sampler, SPIRV::OpTypeSampler) &&
1741 Register SampledImage = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1743 MIRBuilder.buildInstr(SPIRV::OpSampledImage)
1753 if (TempType->getOpcode() != SPIRV::OpTypeVector) {
1760 MRI->setRegClass(TempRegister, &SPIRV::IDRegClass);
1763 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
1768 .addImm(SPIRV::ImageOperand::Lod)
1772 MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
1778 MIRBuilder.buildInstr(SPIRV::OpImageRead)
1783 .addImm(SPIRV::ImageOperand::Sample)
1786 MIRBuilder.buildInstr(SPIRV::OpImageRead)
1795 static bool generateWriteImageInst(const SPIRV::IncomingCall *Call,
1798 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1799 MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
1800 MIRBuilder.getMRI()->setRegClass(Call->Arguments[2], &SPIRV::IDRegClass);
1801 MIRBuilder.buildInstr(SPIRV::OpImageWrite)
1809 const SPIRV::IncomingCall *Call,
1831 : MRI->createVirtualRegister(&SPIRV::IDRegClass);
1832 MIRBuilder.buildInstr(SPIRV::OpSampledImage)
1852 "Unable to recognize SPIRV type name: " + ReturnType;
1855 MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1856 MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
1857 MRI->setRegClass(Call->Arguments[3], &SPIRV::IDRegClass);
1859 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
1864 .addImm(SPIRV::ImageOperand::Lod)
1871 static bool generateSelectInst(const SPIRV::IncomingCall *Call,
1878 static bool generateConstructInst(const SPIRV::IncomingCall *Call,
1881 return buildOpFromWrapper(MIRBuilder, SPIRV::OpCompositeConstruct, Call,
1885 static bool generateCoopMatrInst(const SPIRV::IncomingCall *Call,
1888 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1890 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1891 bool IsSet = Opcode != SPIRV::OpCooperativeMatrixStoreKHR;
1894 if (Opcode == SPIRV::OpCooperativeMatrixLoadKHR && ArgSz > 3)
1896 else if (Opcode == SPIRV::OpCooperativeMatrixStoreKHR && ArgSz > 4)
1903 if (Opcode == SPIRV::OpCooperativeMatrixLengthKHR) {
1917 static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call,
1921 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1923 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1927 case SPIRV::OpSpecConstant: {
1931 buildOpDecorate(Call->ReturnRegister, MIRBuilder, SPIRV::Decoration::SpecId,
1942 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeBool) {
1945 ? SPIRV::OpSpecConstantTrue
1946 : SPIRV::OpSpecConstantFalse;
1952 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) {
1960 case SPIRV::OpSpecConstantComposite: {
1973 static bool buildNDRange(const SPIRV::IncomingCall *Call,
1977 MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1979 assert(PtrType->getOpcode() == SPIRV::OpTypePointer &&
1984 Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1991 MRI->setRegClass(GlobalWorkSize, &SPIRV::IDRegClass);
1995 MRI->setRegClass(LocalWorkSize, &SPIRV::IDRegClass);
1998 MRI->setRegClass(GlobalWorkOffset, &SPIRV::IDRegClass);
2002 if (SpvTy->getOpcode() == SPIRV::OpTypePointer) {
2008 MRI->setRegClass(GWSPtr, &SPIRV::IDRegClass);
2015 GlobalWorkSize = MRI->createVirtualRegister(&SPIRV::IDRegClass);
2017 MIRBuilder.buildInstr(SPIRV::OpLoad)
2034 MIRBuilder.buildInstr(SPIRV::OpBuildNDRange)
2040 return MIRBuilder.buildInstr(SPIRV::OpStore)
2055 unsigned SC0 = storageClassToAddressSpace(SPIRV::StorageClass::Function);
2056 unsigned SC1 = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
2061 static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call,
2086 unsigned SC = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
2089 Int32Ty, MIRBuilder, SPIRV::StorageClass::Function);
2091 Register Reg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
2105 // SPIRV OpEnqueueKernel instruction has 10+ arguments.
2106 auto MIB = MIRBuilder.buildInstr(SPIRV::OpEnqueueKernel)
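For context on the "10+ arguments" comment: per the SPIR-V specification, the builder chain above must reproduce this operand layout after result type/result id:

    //   Queue, Flags, ND Range, Num Events, Wait Events, Ret Event,
    //   Invoke, Param, Param Size, Param Align, [Local Size ...]
    // The trailing Local Size operands — one per __local argument of the
    // invoked block — are what push the instruction past ten operands.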
2146 static bool generateEnqueueInst(const SPIRV::IncomingCall *Call,
2150 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2152 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2155 case SPIRV::OpRetainEvent:
2156 case SPIRV::OpReleaseEvent:
2157 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
2159 case SPIRV::OpCreateUserEvent:
2160 case SPIRV::OpGetDefaultQueue:
2164 case SPIRV::OpIsValidEvent:
2165 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
2170 case SPIRV::OpSetUserEventStatus:
2171 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
2172 MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
2176 case SPIRV::OpCaptureEventProfilingInfo:
2177 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
2178 MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
2179 MIRBuilder.getMRI()->setRegClass(Call->Arguments[2], &SPIRV::IDRegClass);
2184 case SPIRV::OpBuildNDRange:
2186 case SPIRV::OpEnqueueKernel:
2193 static bool generateAsyncCopy(const SPIRV::IncomingCall *Call,
2197 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2199 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2201 bool IsSet = Opcode == SPIRV::OpGroupAsyncCopy;
2207 auto Scope = buildConstantIntReg(SPIRV::Scope::Workgroup, MIRBuilder, GR);
2210 case SPIRV::OpGroupAsyncCopy: {
2212 Call->ReturnType->getOpcode() == SPIRV::OpTypeEvent
2234 case SPIRV::OpGroupWaitEvents:
2245 const SPIRV::IncomingCall *Call,
2249 const SPIRV::ConvertBuiltin *Builtin =
2250 SPIRV::lookupConvertBuiltin(Call->Builtin->Name, Call->Builtin->Set);
2253 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2255 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2262 SPIRV::Decoration::SaturatedConversion, {});
2265 SPIRV::Decoration::FPRoundingMode,
2270 unsigned Opcode = SPIRV::OpNop;
2271 if (GR->isScalarOrVectorOfType(Call->Arguments[0], SPIRV::OpTypeInt)) {
2273 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2276 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpSatConvertUToS
2277 : SPIRV::OpSatConvertSToU;
2279 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpUConvert
2280 : SPIRV::OpSConvert;
2282 SPIRV::OpTypeFloat)) {
2288 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2293 Opcode = SPIRV::OpConvertBF16ToFINTEL;
2297 Opcode = IsSourceSigned ? SPIRV::OpConvertSToF : SPIRV::OpConvertUToF;
2301 SPIRV::OpTypeFloat)) {
2303 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2309 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2314 Opcode = SPIRV::OpConvertFToBF16INTEL;
2316 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpConvertFToS
2317 : SPIRV::OpConvertFToU;
2320 SPIRV::OpTypeFloat)) {
2322 Opcode = SPIRV::OpFConvert;
2339 assert(Opcode != SPIRV::OpNop &&
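Representative resolutions of the opcode selection above (builtin names per OpenCL; each opcode appears in the branches at lines 2270-2322):

    // convert_float(int x)         -> OpConvertSToF   (signed source)
    // convert_float(uint x)        -> OpConvertUToF
    // convert_int(float x)         -> OpConvertFToS   (signed destination)
    // convert_uchar_sat(int x)     -> OpSatConvertSToU
    //                                 + SaturatedConversion decoration (2262)
    // convert_float_rte(double x)  -> OpFConvert
    //                                 + FPRoundingMode decoration (2265)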
2349 static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call,
2353 const SPIRV::VectorLoadStoreBuiltin *Builtin =
2354 SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2358 MIRBuilder.buildInstr(SPIRV::OpExtInst)
2361 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
2375 static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call,
2379 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2381 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2382 bool IsLoad = Opcode == SPIRV::OpLoad;
2392 MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
2396 MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
2402 MRI->setRegClass(Call->Arguments[IsLoad ? 1 : 2], &SPIRV::IDRegClass);
2406 MRI->setRegClass(Call->Arguments[IsLoad ? 2 : 3], &SPIRV::IDRegClass);
2411 namespace SPIRV {
2421 SPIRV::InstructionSet::InstructionSet Set) {
2430 case SPIRV::Relational:
2431 case SPIRV::Atomic:
2432 case SPIRV::Barrier:
2433 case SPIRV::CastToPtr:
2434 case SPIRV::ImageMiscQuery:
2435 case SPIRV::SpecConstant:
2436 case SPIRV::Enqueue:
2437 case SPIRV::AsyncCopy:
2438 case SPIRV::LoadStore:
2439 case SPIRV::CoopMatr:
2441 SPIRV::lookupNativeBuiltin(Call->Builtin->Name, Call->Builtin->Set))
2444 case SPIRV::Extended:
2445 if (const auto *R = SPIRV::lookupExtendedBuiltin(Call->Builtin->Name,
2449 case SPIRV::VectorLoadStore:
2450 if (const auto *R = SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2452 return std::make_tuple(SPIRV::Extended, 0, R->Number);
2454 case SPIRV::Group:
2455 if (const auto *R = SPIRV::lookupGroupBuiltin(Call->Builtin->Name))
2458 case SPIRV::AtomicFloating:
2459 if (const auto *R = SPIRV::lookupAtomicFloatingBuiltin(Call->Builtin->Name))
2462 case SPIRV::IntelSubgroups:
2463 if (const auto *R = SPIRV::lookupIntelSubgroupsBuiltin(Call->Builtin->Name))
2466 case SPIRV::GroupUniform:
2467 if (const auto *R = SPIRV::lookupGroupUniformBuiltin(Call->Builtin->Name))
2470 case SPIRV::WriteImage:
2471 return std::make_tuple(Call->Builtin->Group, SPIRV::OpImageWrite, 0);
2472 case SPIRV::Select:
2474 case SPIRV::Construct:
2475 return std::make_tuple(Call->Builtin->Group, SPIRV::OpCompositeConstruct,
2477 case SPIRV::KernelClock:
2478 return std::make_tuple(Call->Builtin->Group, SPIRV::OpReadClockKHR, 0);
2486 SPIRV::InstructionSet::InstructionSet Set,
2499 MIRBuilder.getMRI()->setRegClass(ReturnRegister, &SPIRV::IDRegClass);
2523 case SPIRV::Extended:
2525 case SPIRV::Relational:
2527 case SPIRV::Group:
2529 case SPIRV::Variable:
2531 case SPIRV::Atomic:
2533 case SPIRV::AtomicFloating:
2535 case SPIRV::Barrier:
2537 case SPIRV::CastToPtr:
2539 case SPIRV::Dot:
2541 case SPIRV::Wave:
2543 case SPIRV::GetQuery:
2545 case SPIRV::ImageSizeQuery:
2547 case SPIRV::ImageMiscQuery:
2549 case SPIRV::ReadImage:
2551 case SPIRV::WriteImage:
2553 case SPIRV::SampleImage:
2555 case SPIRV::Select:
2557 case SPIRV::Construct:
2559 case SPIRV::SpecConstant:
2561 case SPIRV::Enqueue:
2563 case SPIRV::AsyncCopy:
2565 case SPIRV::Convert:
2567 case SPIRV::VectorLoadStore:
2569 case SPIRV::LoadStore:
2571 case SPIRV::IntelSubgroups:
2573 case SPIRV::GroupUniform:
2575 case SPIRV::KernelClock:
2577 case SPIRV::CoopMatr:
2619 // Unable to recognize SPIRV type name.
2653 } // namespace SPIRV
2676 const SPIRV::BuiltinType *TypeRecord,
2697 SPIRV::AccessQualifier::AccessQualifier(
2719 const SPIRV::AccessQualifier::AccessQualifier Qualifier,
2730 SPIRV::Dim::Dim(ExtensionType->getIntParameter(0)),
2733 SPIRV::ImageFormat::ImageFormat(ExtensionType->getIntParameter(5)),
2734 Qualifier == SPIRV::AccessQualifier::WriteOnly
2735 ? SPIRV::AccessQualifier::WriteOnly
2736 : SPIRV::AccessQualifier::AccessQualifier(
2744 OpaqueType, SPIRV::AccessQualifier::ReadOnly, MIRBuilder, GR);
2749 namespace SPIRV {
2758 const SPIRV::OpenCLType *OCLTypeRecord =
2759 SPIRV::lookupOpenCLType(NameWithParameters);
2800 SPIRV::AccessQualifier::AccessQualifier AccessQual,
2822 const SPIRV::BuiltinType *TypeRecord = SPIRV::lookupBuiltinType(Name);
2832 case SPIRV::OpTypeImage:
2835 case SPIRV::OpTypePipe:
2838 case SPIRV::OpTypeDeviceEvent:
2841 case SPIRV::OpTypeSampler:
2844 case SPIRV::OpTypeSampledImage:
2847 case SPIRV::OpTypeCooperativeMatrixKHR:
2863 } // namespace SPIRV
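End-to-end sketch of the dispatch above for one OpenCL opaque type. The extension-type parameter positions are inferred from getIntParameter(0) = Dim and getIntParameter(5) = ImageFormat at lines 2730-2733, and the handler names are elided by the match, so treat this as a reading aid rather than a quotation:

    //   "opencl.image2d_ro_t"
    //     -> lookupOpenCLType (2759) resolves it to a "spirv.Image"-style
    //        target extension type carrying Dim/format/access parameters
    //     -> lookupBuiltinType (2822) maps that to a record whose Opcode
    //        is OpTypeImage
    //     -> the OpTypeImage case (2832) builds the image type with
    //        AccessQualifier::ReadOnly (cf. the WriteOnly special case at
    //        lines 2734-2736)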