Lines Matching full:call
173 /// Parses the name part of the demangled builtin call.
182 // Extract the builtin function name and types of arguments from the call
210 /// Looks up the demangled builtin call in the SPIRVBuiltins.td records using
219 /// \returns Wrapper around the demangled call and found builtin definition.
239 // If the initial lookup was unsuccessful and the demangled call takes at
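The comments above (source lines 173-239) cover how a demangled builtin call is split into a name part and argument type strings before the name is looked up in the SPIRVBuiltins.td records, with a retried lookup when the first attempt fails. Below is a minimal standalone sketch of the splitting step, assuming a demangled string of the form "name(type1, type2, ...)"; it is an illustration only, not the in-tree parser.

// Hedged sketch (illustration only): split a demangled builtin call such as
//   "atomic_load_explicit(volatile __generic atomic_int*, memory_order)"
// into its name part and the list of argument type strings.
#include <cstddef>
#include <string>
#include <string_view>
#include <vector>

struct DemangledCallParts {
  std::string Name;                  // e.g. "atomic_load_explicit"
  std::vector<std::string> ArgTypes; // e.g. {"volatile __generic atomic_int*", "memory_order"}
};

static DemangledCallParts splitDemangledCall(std::string_view Call) {
  DemangledCallParts Parts;
  size_t Open = Call.find('(');
  Parts.Name = std::string(Call.substr(0, Open));
  if (Open == std::string_view::npos)
    return Parts; // No argument list present.
  std::string_view Args = Call.substr(Open + 1);
  if (!Args.empty() && Args.back() == ')')
    Args.remove_suffix(1);
  size_t Start = 0;
  int Depth = 0; // Track '<...>' and '(...)' nesting so commas inside them are kept.
  for (size_t I = 0; I <= Args.size(); ++I) {
    if (I == Args.size() || (Args[I] == ',' && Depth == 0)) {
      std::string_view Arg = Args.substr(Start, I - Start);
      while (!Arg.empty() && Arg.front() == ' ')
        Arg.remove_prefix(1);
      if (!Arg.empty())
        Parts.ArgTypes.emplace_back(Arg);
      Start = I + 1;
    } else if (Args[I] == '<' || Args[I] == '(') {
      ++Depth;
    } else if (Args[I] == '>' || Args[I] == ')') {
      --Depth;
    }
  }
  return Parts;
}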
373 // the function call be an intrinsic, which it is not. Instead, we rely on being
575 const SPIRV::IncomingCall *Call,
581 MIB.addDef(Call->ReturnRegister).addUse(TypeReg);
582 unsigned Sz = Call->Arguments.size() - ImmArgs.size();
584 Register ArgReg = Call->Arguments[i];
595 static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call,
597 if (Call->isSpirvOp())
598 return buildOpFromWrapper(MIRBuilder, SPIRV::OpStore, Call, Register(0));
600 assert(Call->Arguments.size() == 2 &&
602 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
603 MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
605 .addUse(Call->Arguments[0])
606 .addUse(Call->Arguments[1]);
611 static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call,
614 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
615 if (Call->isSpirvOp())
616 return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicLoad, Call, TypeReg);
618 Register PtrRegister = Call->Arguments[0];
620 // TODO: if true, insert a call to __translate_ocl_memory_scope before
624 if (Call->Arguments.size() > 1) {
625 ScopeRegister = Call->Arguments[1];
631 if (Call->Arguments.size() > 2) {
632 // TODO: Insert a call to __translate_ocl_memory_order before OpAtomicLoad.
633 MemSemanticsReg = Call->Arguments[2];
643 .addDef(Call->ReturnRegister)
652 static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call,
655 if (Call->isSpirvOp())
656 return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicStore, Call, Register(0));
660 Register PtrRegister = Call->Arguments[0];
666 MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
671 .addUse(Call->Arguments[1]);
677 const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin,
679 if (Call->isSpirvOp())
680 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
681 GR->getSPIRVTypeID(Call->ReturnType));
683 bool IsCmpxchg = Call->Builtin->Name.contains("cmpxchg");
686 Register ObjectPtr = Call->Arguments[0]; // Pointer (volatile A *object).
687 Register ExpectedArg = Call->Arguments[1]; // Comparator (C* expected).
688 Register Desired = Call->Arguments[2]; // Value (C Desired).
719 if (Call->Arguments.size() >= 4) {
720 assert(Call->Arguments.size() >= 5 &&
723 static_cast<std::memory_order>(getIConstVal(Call->Arguments[3], MRI));
725 static_cast<std::memory_order>(getIConstVal(Call->Arguments[4], MRI));
729 MemSemEqualReg = Call->Arguments[3];
731 MemSemUnequalReg = Call->Arguments[4];
732 MRI->setRegClass(Call->Arguments[3], &SPIRV::IDRegClass);
733 MRI->setRegClass(Call->Arguments[4], &SPIRV::IDRegClass);
742 if (Call->Arguments.size() >= 6) {
743 assert(Call->Arguments.size() == 6 &&
746 getIConstVal(Call->Arguments[5], MRI));
749 ScopeReg = Call->Arguments[5];
750 MRI->setRegClass(Call->Arguments[5], &SPIRV::IDRegClass);
761 : Call->ReturnRegister;
778 MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Call->ReturnRegister, Tmp, Expected);
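buildAtomicCompareExchangeInst above (source lines 677-778) reads the success and failure std::memory_order arguments as integer constants and folds them into SPIR-V memory-semantics operands. The following is a hedged sketch of that ordering-to-semantics mapping; the bit values come from the SPIR-V specification, the helper name is illustrative, and the storage-class bit that the in-tree code also ORs in is omitted.

#include <atomic>
#include <cstdint>

// Hedged sketch: map a C/C++ memory order onto the ordering bits of a SPIR-V
// Memory Semantics operand.
enum SPIRVMemorySemantics : uint32_t {
  None = 0x0,
  Acquire = 0x2,
  Release = 0x4,
  AcquireRelease = 0x8,
  SequentiallyConsistent = 0x10,
};

static uint32_t memoryOrderToSemantics(std::memory_order Order) {
  switch (Order) {
  case std::memory_order_relaxed:
    return None;
  case std::memory_order_consume:
  case std::memory_order_acquire:
    return Acquire;
  case std::memory_order_release:
    return Release;
  case std::memory_order_acq_rel:
    return AcquireRelease;
  case std::memory_order_seq_cst:
  default:
    return SequentiallyConsistent;
  }
}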
784 static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
787 if (Call->isSpirvOp())
788 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
789 GR->getSPIRVTypeID(Call->ReturnType));
793 Call->Arguments.size() >= 4 ? Call->Arguments[3] : Register();
795 assert(Call->Arguments.size() <= 4 &&
800 Register PtrRegister = Call->Arguments[0];
804 Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
807 MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
808 Register ValueReg = Call->Arguments[1];
809 Register ValueTypeReg = GR->getSPIRVTypeID(Call->ReturnType);
811 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeFloat) {
821 GR->assignSPIRVTypeToVReg(Call->ReturnType, NegValueReg,
826 insertAssignInstr(NegValueReg, nullptr, Call->ReturnType, GR, MIRBuilder,
832 .addDef(Call->ReturnRegister)
842 static bool buildAtomicFloatingRMWInst(const SPIRV::IncomingCall *Call,
846 assert(Call->Arguments.size() == 4 &&
851 Register PtrReg = Call->Arguments[0];
854 Register ScopeReg = Call->Arguments[1];
857 Register MemSemanticsReg = Call->Arguments[2];
860 Register ValueReg = Call->Arguments[3];
864 .addDef(Call->ReturnRegister)
865 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
875 static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call,
879 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
880 if (Call->isSpirvOp())
881 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
885 Register PtrRegister = Call->Arguments[0];
888 Call->Arguments.size() >= 2 ? Call->Arguments[1] : Register();
898 Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
904 MIB.addDef(Call->ReturnRegister).addUse(TypeReg);
912 static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
915 if (Call->isSpirvOp())
916 return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));
919 unsigned MemFlags = getIConstVal(Call->Arguments[0], MRI);
933 static_cast<std::memory_order>(getIConstVal(Call->Arguments[1], MRI));
941 MemSemanticsReg = Call->Arguments[0];
949 if (Call->Arguments.size() >= 2) {
951 ((Opcode != SPIRV::OpMemoryBarrier && Call->Arguments.size() == 2) ||
952 (Opcode == SPIRV::OpMemoryBarrier && Call->Arguments.size() == 3)) &&
954 Register ScopeArg = (Opcode == SPIRV::OpMemoryBarrier) ? Call->Arguments[2]
955 : Call->Arguments[1];
964 ScopeReg = Call->Arguments[1];
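buildBarrierInst above (source lines 912-964) turns the cl_mem_fence_flags constant in the first argument into the Memory Semantics operand of OpControlBarrier/OpMemoryBarrier. Here is a hedged sketch of that flag translation; the CLK_* values follow clang's OpenCL headers, the SPIR-V bits follow the specification, and the ordering bits handled by the surrounding code are left out.

#include <cstdint>

// Hedged sketch: translate OpenCL memory-fence flags into the storage-class
// bits of a SPIR-V Memory Semantics operand.
constexpr uint32_t CLK_LOCAL_MEM_FENCE = 0x1;
constexpr uint32_t CLK_GLOBAL_MEM_FENCE = 0x2;
constexpr uint32_t CLK_IMAGE_MEM_FENCE = 0x4;

constexpr uint32_t WorkgroupMemorySemantics = 0x100;
constexpr uint32_t CrossWorkgroupMemorySemantics = 0x200;
constexpr uint32_t ImageMemorySemantics = 0x800;

static uint32_t memFenceFlagsToSemantics(uint32_t MemFlags) {
  uint32_t Semantics = 0;
  if (MemFlags & CLK_LOCAL_MEM_FENCE)
    Semantics |= WorkgroupMemorySemantics;
  if (MemFlags & CLK_GLOBAL_MEM_FENCE)
    Semantics |= CrossWorkgroupMemorySemantics;
  if (MemFlags & CLK_IMAGE_MEM_FENCE)
    Semantics |= ImageMemorySemantics;
  return Semantics;
}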
1008 static bool generateExtInst(const SPIRV::IncomingCall *Call,
1012 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1019 .addDef(Call->ReturnRegister)
1020 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1024 for (auto Argument : Call->Arguments)
1029 static bool generateRelationalInst(const SPIRV::IncomingCall *Call,
1033 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1040 buildBoolRegister(MIRBuilder, Call->ReturnType, GR);
1047 for (auto Argument : Call->Arguments)
1051 return buildSelectInst(MIRBuilder, Call->ReturnRegister, CompareRegister,
1052 Call->ReturnType, GR);
1055 static bool generateGroupInst(const SPIRV::IncomingCall *Call,
1058 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1063 if (Call->isSpirvOp()) {
1065 return buildOpFromWrapper(MIRBuilder, GroupBuiltin->Opcode, Call,
1066 GR->getSPIRVTypeID(Call->ReturnType));
1069 Register GroupOpReg = Call->Arguments[1];
1075 Register ScopeReg = Call->Arguments[0];
1079 .addDef(Call->ReturnRegister)
1080 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1083 for (unsigned i = 2; i < Call->Arguments.size(); ++i) {
1084 Register ArgReg = Call->Arguments[i];
1094 Register ConstRegister = Call->Arguments[0];
1100 if (GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode() !=
1106 Register GroupResultRegister = Call->ReturnRegister;
1107 SPIRVType *GroupResultType = Call->ReturnType;
1118 buildBoolRegister(MIRBuilder, Call->ReturnType, GR);
1132 if (Call->Arguments.size() > 0) {
1133 MIB.addUse(Arg0.isValid() ? Arg0 : Call->Arguments[0]);
1134 MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1135 for (unsigned i = 1; i < Call->Arguments.size(); i++) {
1136 MIB.addUse(Call->Arguments[i]);
1137 MRI->setRegClass(Call->Arguments[i], &SPIRV::IDRegClass);
1143 buildSelectInst(MIRBuilder, Call->ReturnRegister, GroupResultRegister,
1144 Call->ReturnType, GR);
1148 static bool generateIntelSubgroupsInst(const SPIRV::IncomingCall *Call,
1151 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1164 if (Call->isSpirvOp()) {
1167 return buildOpFromWrapper(MIRBuilder, OpCode, Call,
1168 IsSet ? GR->getSPIRVTypeID(Call->ReturnType)
1175 if (SPIRVType *Arg0Type = GR->getSPIRVTypeForVReg(Call->Arguments[0])) {
1207 .addDef(Call->ReturnRegister)
1208 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1209 for (size_t i = 0; i < Call->Arguments.size(); ++i) {
1210 MIB.addUse(Call->Arguments[i]);
1211 MRI->setRegClass(Call->Arguments[i], &SPIRV::IDRegClass);
1217 static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call,
1220 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1234 Register GroupResultReg = Call->ReturnRegister;
1238 Register ScopeReg = Call->Arguments[0];
1242 Register ConstGroupOpReg = Call->Arguments[1];
1255 Register ValueReg = Call->Arguments[2];
1260 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1268 static bool generateKernelClockInst(const SPIRV::IncomingCall *Call,
1271 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1282 Register ResultReg = Call->ReturnRegister;
1295 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1327 static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call,
1332 Register IndexRegister = Call->Arguments[0];
1333 const unsigned ResultWidth = Call->ReturnType->getOperand(1).getImm();
1341 Register ToTruncate = Call->ReturnRegister;
1350 Register DefaultReg = Call->ReturnRegister;
1368 Register Extracted = Call->ReturnRegister;
1403 Register SelectionResult = Call->ReturnRegister;
1421 MIRBuilder.buildZExtOrTrunc(Call->ReturnRegister, ToTruncate);
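genWorkgroupQuery above (source lines 1327-1421) lowers queries such as get_global_size(dim): it loads the corresponding built-in vector, extracts the requested component, substitutes a default value when the dimension index is out of range, and zero-extends or truncates to the declared return width. A hedged scalar model of the computed semantics (not the MIR lowering itself) follows.

#include <cstdint>

// Hedged model of what the generated MIR computes: out-of-range dimensions
// yield DefaultValue (1 for *_size-style queries, 0 for *_id-style queries),
// and the result is narrowed or widened to the builtin's return width.
static uint32_t workgroupQuery(const uint64_t BuiltinVec[3], uint64_t Dim,
                               uint64_t DefaultValue) {
  uint64_t Value = Dim < 3 ? BuiltinVec[Dim] : DefaultValue;
  return static_cast<uint32_t>(Value); // ZExt/Trunc to the declared width.
}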
1425 static bool generateBuiltinVar(const SPIRV::IncomingCall *Call,
1429 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1434 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, 0);
1437 unsigned BitWidth = GR->getScalarOrVectorBitWidth(Call->ReturnType);
1439 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeVector)
1441 LLT::fixed_vector(Call->ReturnType->getOperand(2).getImm(), BitWidth);
1445 return buildBuiltinVariableLoad(MIRBuilder, Call->ReturnType, GR, Value,
1446 LLType, Call->ReturnRegister);
1449 static bool generateAtomicInst(const SPIRV::IncomingCall *Call,
1453 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1459 return buildAtomicInitInst(Call, MIRBuilder);
1461 return buildAtomicLoadInst(Call, MIRBuilder, GR);
1463 return buildAtomicStoreInst(Call, MIRBuilder, GR);
1466 return buildAtomicCompareExchangeInst(Call, Builtin, Opcode, MIRBuilder,
1474 return buildAtomicRMWInst(Call, Opcode, MIRBuilder, GR);
1476 return buildBarrierInst(Call, SPIRV::OpMemoryBarrier, MIRBuilder, GR);
1479 return buildAtomicFlagInst(Call, Opcode, MIRBuilder, GR);
1481 if (Call->isSpirvOp())
1482 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
1483 GR->getSPIRVTypeID(Call->ReturnType));
1488 static bool generateAtomicFloatingInst(const SPIRV::IncomingCall *Call,
1492 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1499 return buildAtomicFloatingRMWInst(Call, Opcode, MIRBuilder, GR);
1505 static bool generateBarrierInst(const SPIRV::IncomingCall *Call,
1509 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1513 return buildBarrierInst(Call, Opcode, MIRBuilder, GR);
1516 static bool generateCastToPtrInst(const SPIRV::IncomingCall *Call,
1519 .addDef(Call->ReturnRegister)
1520 .addUse(Call->Arguments[0]);
1524 static bool generateDotOrFMulInst(const SPIRV::IncomingCall *Call,
1527 if (Call->isSpirvOp())
1528 return buildOpFromWrapper(MIRBuilder, SPIRV::OpDot, Call,
1529 GR->getSPIRVTypeID(Call->ReturnType));
1530 unsigned Opcode = GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode();
1534 .addDef(Call->ReturnRegister)
1535 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1536 .addUse(Call->Arguments[0])
1537 .addUse(Call->Arguments[1]);
1541 static bool generateWaveInst(const SPIRV::IncomingCall *Call,
1544 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1549 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt);
1550 LLT LLType = LLT::scalar(GR->getScalarOrVectorBitWidth(Call->ReturnType));
1553 MIRBuilder, Call->ReturnType, GR, Value, LLType, Call->ReturnRegister,
1557 static bool generateGetQueryInst(const SPIRV::IncomingCall *Call,
1562 SPIRV::lookupGetBuiltin(Call->Builtin->Name, Call->Builtin->Set)->Value;
1566 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, IsDefault ? 1 : 0);
1569 static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call,
1573 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1579 SPIRVType *RetTy = Call->ReturnType;
1584 SPIRVType *ImgType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
1586 Register QueryResult = Call->ReturnRegister;
1587 SPIRVType *QueryResultType = Call->ReturnType;
1600 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1604 .addUse(Call->Arguments[0]);
1615 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
1624 .addDef(Call->ReturnRegister)
1629 insertAssignInstr(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
1634 .addDef(Call->ReturnRegister)
1635 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1644 static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call,
1647 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt &&
1651 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1655 Register Image = Call->Arguments[0];
1676 .addDef(Call->ReturnRegister)
1677 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1715 const SPIRV::IncomingCall *Call,
1718 Register Image = Call->Arguments[0];
1721 MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
1725 MRI->setRegClass(Call->Arguments[2], &SPIRV::IDRegClass);
1727 Register Sampler = Call->Arguments[1];
1751 SPIRVType *TempType = Call->ReturnType;
1755 GR->getOrCreateSPIRVVectorType(Call->ReturnType, 4, MIRBuilder);
1764 .addDef(NeedsExtraction ? TempRegister : Call->ReturnRegister)
1767 .addUse(Call->Arguments[2]) // Coordinate.
1773 .addDef(Call->ReturnRegister)
1774 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1779 .addDef(Call->ReturnRegister)
1780 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1782 .addUse(Call->Arguments[1]) // Coordinate.
1784 .addUse(Call->Arguments[2]);
1787 .addDef(Call->ReturnRegister)
1788 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1790 .addUse(Call->Arguments[1]); // Coordinate.
1795 static bool generateWriteImageInst(const SPIRV::IncomingCall *Call,
1798 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1799 MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
1800 MIRBuilder.getMRI()->setRegClass(Call->Arguments[2], &SPIRV::IDRegClass);
1802 .addUse(Call->Arguments[0]) // Image.
1803 .addUse(Call->Arguments[1]) // Coordinate.
1804 .addUse(Call->Arguments[2]); // Texel.
1809 const SPIRV::IncomingCall *Call,
1813 if (Call->Builtin->Name.contains_insensitive(
1816 uint64_t Bitmask = getIConstVal(Call->Arguments[0], MRI);
1818 Call->ReturnRegister, getSamplerAddressingModeFromBitmask(Bitmask),
1820 getSamplerFilterModeFromBitmask(Bitmask), MIRBuilder, Call->ReturnType);
1822 } else if (Call->Builtin->Name.contains_insensitive("__spirv_SampledImage")) {
1824 Register Image = Call->Arguments[0];
1829 Call->ReturnRegister.isValid()
1830 ? Call->ReturnRegister
1836 .addUse(Call->Arguments[1]); // Sampler.
1838 } else if (Call->Builtin->Name.contains_insensitive(
1847 Call->ReturnType
1848 ? Call->ReturnType
1855 MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1856 MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
1857 MRI->setRegClass(Call->Arguments[3], &SPIRV::IDRegClass);
1860 .addDef(Call->ReturnRegister)
1862 .addUse(Call->Arguments[0]) // Image.
1863 .addUse(Call->Arguments[1]) // Coordinate.
1865 .addUse(Call->Arguments[3]);
1871 static bool generateSelectInst(const SPIRV::IncomingCall *Call,
1873 MIRBuilder.buildSelect(Call->ReturnRegister, Call->Arguments[0],
1874 Call->Arguments[1], Call->Arguments[2]);
1878 static bool generateConstructInst(const SPIRV::IncomingCall *Call,
1881 return buildOpFromWrapper(MIRBuilder, SPIRV::OpCompositeConstruct, Call,
1882 GR->getSPIRVTypeID(Call->ReturnType));
1885 static bool generateCoopMatrInst(const SPIRV::IncomingCall *Call,
1888 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1892 unsigned ArgSz = Call->Arguments.size();
1901 ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[LiteralIdx], MRI));
1902 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
1904 SPIRVType *CoopMatrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
1908 .addDef(Call->ReturnRegister)
1913 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
1917 static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call,
1921 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1930 static_cast<unsigned>(getIConstVal(Call->Arguments[0], MRI));
1931 buildOpDecorate(Call->ReturnRegister, MIRBuilder, SPIRV::Decoration::SpecId,
1934 Register ConstRegister = Call->Arguments[1];
1942 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeBool) {
1949 .addDef(Call->ReturnRegister)
1950 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1952 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) {
1962 .addDef(Call->ReturnRegister)
1963 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1964 for (unsigned i = 0; i < Call->Arguments.size(); i++)
1965 MIB.addUse(Call->Arguments[i]);
1973 static bool buildNDRange(const SPIRV::IncomingCall *Call,
1977 MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1978 SPIRVType *PtrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
1988 unsigned NumArgs = Call->Arguments.size();
1990 Register GlobalWorkSize = Call->Arguments[NumArgs < 4 ? 1 : 2];
1993 NumArgs == 2 ? Register(0) : Call->Arguments[NumArgs < 4 ? 2 : 3];
1996 Register GlobalWorkOffset = NumArgs <= 3 ? Register(0) : Call->Arguments[1];
2010 unsigned Size = Call->Builtin->Name == "ndrange_3D" ? 3 : 2;
2041 .addUse(Call->Arguments[0])
2061 static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call,
2066 bool IsSpirvOp = Call->isSpirvOp();
2067 bool HasEvents = Call->Builtin->Name.contains("events") || IsSpirvOp;
2074 if (Call->Builtin->Name.contains("_varargs") || IsSpirvOp) {
2076 Register GepReg = Call->Arguments[LocalSizeArrayIdx];
2107 .addDef(Call->ReturnRegister)
2113 MIB.addUse(Call->Arguments[i]);
2115 // If there are no event arguments in the original call, add dummy ones.
2124 MachineInstr *BlockMI = getBlockStructInstr(Call->Arguments[BlockFIdx], MRI);
2129 Register BlockLiteralReg = Call->Arguments[BlockFIdx + 1];
2146 static bool generateEnqueueInst(const SPIRV::IncomingCall *Call,
2150 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2157 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
2158 return MIRBuilder.buildInstr(Opcode).addUse(Call->Arguments[0]);
2162 .addDef(Call->ReturnRegister)
2163 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2165 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
2167 .addDef(Call->ReturnRegister)
2168 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2169 .addUse(Call->Arguments[0]);
2171 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
2172 MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
2174 .addUse(Call->Arguments[0])
2175 .addUse(Call->Arguments[1]);
2177 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
2178 MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
2179 MIRBuilder.getMRI()->setRegClass(Call->Arguments[2], &SPIRV::IDRegClass);
2181 .addUse(Call->Arguments[0])
2182 .addUse(Call->Arguments[1])
2183 .addUse(Call->Arguments[2]);
2185 return buildNDRange(Call, MIRBuilder, GR);
2187 return buildEnqueueKernel(Call, MIRBuilder, GR);
2193 static bool generateAsyncCopy(const SPIRV::IncomingCall *Call,
2197 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2202 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2203 if (Call->isSpirvOp())
2204 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2212 Call->ReturnType->getOpcode() == SPIRV::OpTypeEvent
2215 Register TypeReg = GR->getSPIRVTypeID(NewType ? NewType : Call->ReturnType);
2216 unsigned NumArgs = Call->Arguments.size();
2217 Register EventReg = Call->Arguments[NumArgs - 1];
2219 .addDef(Call->ReturnRegister)
2222 .addUse(Call->Arguments[0])
2223 .addUse(Call->Arguments[1])
2224 .addUse(Call->Arguments[2])
2225 .addUse(Call->Arguments.size() > 4
2226 ? Call->Arguments[3]
2230 insertAssignInstr(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
2237 .addUse(Call->Arguments[0])
2238 .addUse(Call->Arguments[1]);
2245 const SPIRV::IncomingCall *Call,
2250 SPIRV::lookupConvertBuiltin(Call->Builtin->Name, Call->Builtin->Set);
2252 if (!Builtin && Call->isSpirvOp()) {
2253 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2256 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2257 GR->getSPIRVTypeID(Call->ReturnType));
2261 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2264 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2271 if (GR->isScalarOrVectorOfType(Call->Arguments[0], SPIRV::OpTypeInt)) {
2273 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2281 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2291 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2292 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2300 } else if (GR->isScalarOrVectorOfType(Call->Arguments[0],
2303 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2312 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2313 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2319 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2343 .addDef(Call->ReturnRegister)
2344 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2345 .addUse(Call->Arguments[0]);
2349 static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call,
2354 SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2355 Call->Builtin->Set);
2359 .addDef(Call->ReturnRegister)
2360 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2363 for (auto Argument : Call->Arguments)
2375 static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call,
2379 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2386 MIB.addDef(Call->ReturnRegister);
2387 MIB.addUse(GR->getSPIRVTypeID(Call->ReturnType));
2390 MIB.addUse(Call->Arguments[0]);
2392 MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
2395 MIB.addUse(Call->Arguments[1]);
2396 MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
2399 unsigned NumArgs = Call->Arguments.size();
2401 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 1 : 2], MRI));
2402 MRI->setRegClass(Call->Arguments[IsLoad ? 1 : 2], &SPIRV::IDRegClass);
2405 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 2 : 3], MRI));
2406 MRI->setRegClass(Call->Arguments[IsLoad ? 2 : 3], &SPIRV::IDRegClass);
2424 std::unique_ptr<const IncomingCall> Call =
2426 if (!Call)
2429 switch (Call->Builtin->Group) {
2441 SPIRV::lookupNativeBuiltin(Call->Builtin->Name, Call->Builtin->Set))
2442 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2445 if (const auto *R = SPIRV::lookupExtendedBuiltin(Call->Builtin->Name,
2446 Call->Builtin->Set))
2447 return std::make_tuple(Call->Builtin->Group, 0, R->Number);
2450 if (const auto *R = SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2451 Call->Builtin->Set))
2455 if (const auto *R = SPIRV::lookupGroupBuiltin(Call->Builtin->Name))
2456 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2459 if (const auto *R = SPIRV::lookupAtomicFloatingBuiltin(Call->Builtin->Name))
2460 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2463 if (const auto *R = SPIRV::lookupIntelSubgroupsBuiltin(Call->Builtin->Name))
2464 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2467 if (const auto *R = SPIRV::lookupGroupUniformBuiltin(Call->Builtin->Name))
2468 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2471 return std::make_tuple(Call->Builtin->Group, SPIRV::OpImageWrite, 0);
2473 return std::make_tuple(Call->Builtin->Group, TargetOpcode::G_SELECT, 0);
2475 return std::make_tuple(Call->Builtin->Group, SPIRV::OpCompositeConstruct,
2478 return std::make_tuple(Call->Builtin->Group, SPIRV::OpReadClockKHR, 0);
2491 LLVM_DEBUG(dbgs() << "Lowering builtin call: " << DemangledCall << "\n");
2507 std::unique_ptr<const IncomingCall> Call =
2510 if (!Call) {
2516 assert(Args.size() >= Call->Builtin->MinNumArgs &&
2518 if (Call->Builtin->MaxNumArgs && Args.size() > Call->Builtin->MaxNumArgs)
2522 switch (Call->Builtin->Group) {
2524 return generateExtInst(Call.get(), MIRBuilder, GR);
2526 return generateRelationalInst(Call.get(), MIRBuilder, GR);
2528 return generateGroupInst(Call.get(), MIRBuilder, GR);
2530 return generateBuiltinVar(Call.get(), MIRBuilder, GR);
2532 return generateAtomicInst(Call.get(), MIRBuilder, GR);
2534 return generateAtomicFloatingInst(Call.get(), MIRBuilder, GR);
2536 return generateBarrierInst(Call.get(), MIRBuilder, GR);
2538 return generateCastToPtrInst(Call.get(), MIRBuilder);
2540 return generateDotOrFMulInst(Call.get(), MIRBuilder, GR);
2542 return generateWaveInst(Call.get(), MIRBuilder, GR);
2544 return generateGetQueryInst(Call.get(), MIRBuilder, GR);
2546 return generateImageSizeQueryInst(Call.get(), MIRBuilder, GR);
2548 return generateImageMiscQueryInst(Call.get(), MIRBuilder, GR);
2550 return generateReadImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
2552 return generateWriteImageInst(Call.get(), MIRBuilder, GR);
2554 return generateSampleImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
2556 return generateSelectInst(Call.get(), MIRBuilder);
2558 return generateConstructInst(Call.get(), MIRBuilder, GR);
2560 return generateSpecConstantInst(Call.get(), MIRBuilder, GR);
2562 return generateEnqueueInst(Call.get(), MIRBuilder, GR);
2564 return generateAsyncCopy(Call.get(), MIRBuilder, GR);
2566 return generateConvertInst(DemangledCall, Call.get(), MIRBuilder, GR);
2568 return generateVectorLoadStoreInst(Call.get(), MIRBuilder, GR);
2570 return generateLoadStoreInst(Call.get(), MIRBuilder, GR);
2572 return generateIntelSubgroupsInst(Call.get(), MIRBuilder, GR);
2574 return generateGroupUniformInst(Call.get(), MIRBuilder, GR);
2576 return generateKernelClockInst(Call.get(), MIRBuilder, GR);
2578 return generateCoopMatrInst(Call.get(), MIRBuilder, GR);
2595 // OpenCL builtin types in demangled call strings have the following format:
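The last match (source line 2595) begins the comment on how OpenCL builtin types are spelled in demangled call strings; such names typically look like "ocl_image2d_ro", i.e. an "ocl_" prefix plus an optional access-qualifier suffix. Below is a hedged sketch of splitting such a name; it is illustrative only, since the in-tree code matches these names against TableGen records.

#include <string>
#include <string_view>

// Hedged sketch: split a demangled OpenCL builtin type name such as
// "ocl_image2d_ro" into its base type and access qualifier.
struct OCLBuiltinTypeName {
  std::string BaseType;        // e.g. "image2d"
  std::string AccessQualifier; // "ro", "wo", "rw", or empty
};

static bool parseOCLBuiltinTypeName(std::string_view Name,
                                    OCLBuiltinTypeName &Out) {
  constexpr std::string_view Prefix = "ocl_";
  if (Name.substr(0, Prefix.size()) != Prefix)
    return false;
  Name.remove_prefix(Prefix.size());
  for (std::string_view Suffix : {"_ro", "_wo", "_rw"}) {
    if (Name.size() > Suffix.size() &&
        Name.substr(Name.size() - Suffix.size()) == Suffix) {
      Out.BaseType = std::string(Name.substr(0, Name.size() - Suffix.size()));
      Out.AccessQualifier = std::string(Suffix.substr(1));
      return true;
    }
  }
  Out.BaseType = std::string(Name);
  Out.AccessQualifier.clear();
  return true;
}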