Lines Matching defs:Call
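All of the matches below come from the SPIR-V builtin-lowering helpers (the build*/generate* functions) and dereference the same SPIRV::IncomingCall object. For orientation, here is a minimal sketch of the shape these lines rely on, reconstructed only from the members visible in the matches; the real declaration differs (it uses LLVM's Register, SPIRVType and SmallVector types), so treat this as a hypothetical stand-in, not the source.

// Hypothetical reconstruction for orientation only, not the real declaration.
#include <string>
#include <vector>

using Register = unsigned; // stand-in for llvm::Register
struct SPIRVType;          // opaque here; a SPIR-V type instruction in the real code

struct DemangledBuiltin {
  std::string Name;               // demangled builtin name, e.g. "atomic_load"
  unsigned Set = 0;               // builtin set the name was matched in
  unsigned Group = 0;             // dispatch group used by the switch near the end
  unsigned MinNumArgs = 0, MaxNumArgs = 0;
};

struct IncomingCall {
  const DemangledBuiltin *Builtin = nullptr;
  Register ReturnRegister = 0;     // destination vreg of the call
  SPIRVType *ReturnType = nullptr; // SPIR-V result type
  std::vector<Register> Arguments; // lowered argument vregs
  bool isSpirvOp() const {         // assumed semantics: a direct __spirv_* wrapper call
    return Builtin && Builtin->Name.rfind("__spirv_", 0) == 0;
  }
};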
623 const SPIRV::IncomingCall *Call,
628 MIB.addDef(Call->ReturnRegister).addUse(TypeReg);
629 unsigned Sz = Call->Arguments.size() - ImmArgs.size();
631 MIB.addUse(Call->Arguments[i]);
638 static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call,
640 if (Call->isSpirvOp())
641 return buildOpFromWrapper(MIRBuilder, SPIRV::OpStore, Call, Register(0));
643 assert(Call->Arguments.size() == 2 &&
646 .addUse(Call->Arguments[0])
647 .addUse(Call->Arguments[1]);
652 static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call,
655 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
656 if (Call->isSpirvOp())
657 return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicLoad, Call, TypeReg);
659 Register PtrRegister = Call->Arguments[0];
664 Call->Arguments.size() > 1
665 ? Call->Arguments[1]
668 if (Call->Arguments.size() > 2) {
670 MemSemanticsReg = Call->Arguments[2];
679 .addDef(Call->ReturnRegister)
688 static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call,
691 if (Call->isSpirvOp())
692 return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicStore, Call, Register(0));
696 Register PtrRegister = Call->Arguments[0];
705 .addUse(Call->Arguments[1]);
711 const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin,
713 if (Call->isSpirvOp())
714 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
715 GR->getSPIRVTypeID(Call->ReturnType));
717 bool IsCmpxchg = Call->Builtin->Name.contains("cmpxchg");
720 Register ObjectPtr = Call->Arguments[0]; // Pointer (volatile A *object.)
721 Register ExpectedArg = Call->Arguments[1]; // Comparator (C* expected).
722 Register Desired = Call->Arguments[2]; // Value (C Desired).
750 if (Call->Arguments.size() >= 4) {
751 assert(Call->Arguments.size() >= 5 &&
754 static_cast<std::memory_order>(getIConstVal(Call->Arguments[3], MRI));
756 static_cast<std::memory_order>(getIConstVal(Call->Arguments[4], MRI));
760 MemSemEqualReg = Call->Arguments[3];
762 MemSemUnequalReg = Call->Arguments[4];
771 if (Call->Arguments.size() >= 6) {
772 assert(Call->Arguments.size() == 6 &&
775 getIConstVal(Call->Arguments[5], MRI));
778 ScopeReg = Call->Arguments[5];
789 : Call->ReturnRegister;
806 MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Call->ReturnRegister, Tmp, Expected);
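The casts at source lines 754 and 756 read the success/failure orders of an explicit atomic compare-exchange as std::memory_order constants, which are then folded into the instruction's two memory-semantics operands. A self-contained sketch of the standard C/C++-order-to-SPIR-V mapping follows; the bit values are from the SPIR-V MemorySemantics specification, while the helper name is hypothetical and the extra memory-class bits the real code may OR in are omitted.

#include <atomic>
#include <cstdint>

// SPIR-V MemorySemantics ordering bits (values per the SPIR-V specification).
enum MemorySemantics : uint32_t {
  SemNone = 0x0,
  SemAcquire = 0x2,
  SemRelease = 0x4,
  SemAcquireRelease = 0x8,
  SemSequentiallyConsistent = 0x10,
};

// Hypothetical helper showing the usual lowering of a C/C++ memory order.
static uint32_t toMemorySemantics(std::memory_order Order) {
  switch (Order) {
  case std::memory_order_relaxed: return SemNone;
  case std::memory_order_consume:
  case std::memory_order_acquire: return SemAcquire;
  case std::memory_order_release: return SemRelease;
  case std::memory_order_acq_rel: return SemAcquireRelease;
  case std::memory_order_seq_cst: return SemSequentiallyConsistent;
  }
  return SemSequentiallyConsistent;
}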
812 static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
815 if (Call->isSpirvOp())
816 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
817 GR->getSPIRVTypeID(Call->ReturnType));
821 Call->Arguments.size() >= 4 ? Call->Arguments[3] : Register();
823 assert(Call->Arguments.size() <= 4 &&
828 Register PtrRegister = Call->Arguments[0];
831 Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
834 Register ValueReg = Call->Arguments[1];
835 Register ValueTypeReg = GR->getSPIRVTypeID(Call->ReturnType);
837 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeFloat) {
846 MRI->setRegClass(NegValueReg, GR->getRegClass(Call->ReturnType));
847 GR->assignSPIRVTypeToVReg(Call->ReturnType, NegValueReg,
852 insertAssignInstr(NegValueReg, nullptr, Call->ReturnType, GR, MIRBuilder,
858 .addDef(Call->ReturnRegister)
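The float branch at source lines 837-852 builds a negated copy of the value operand, which suggests the subtract form is rewritten as an add of the negated value; the SPIR-V float-atomic extension provides an add opcode but no subtract. A one-line illustration of the equivalence that rewrite relies on (C++20 for the floating-point fetch_add; this is not the MIR lowering itself):

#include <atomic>

// x.fetch_sub(v) and x.fetch_add(-v) give the same result for IEEE floats,
// which is why the lowering can negate the operand and emit the add opcode.
static float atomicFloatSub(std::atomic<float> &Obj, float V) {
  return Obj.fetch_add(-V); // C++20 floating-point atomic add
}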
868 static bool buildAtomicFloatingRMWInst(const SPIRV::IncomingCall *Call,
872 assert(Call->Arguments.size() == 4 &&
874 Register PtrReg = Call->Arguments[0];
875 Register ScopeReg = Call->Arguments[1];
876 Register MemSemanticsReg = Call->Arguments[2];
877 Register ValueReg = Call->Arguments[3];
879 .addDef(Call->ReturnRegister)
880 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
890 static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call,
894 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
895 if (Call->isSpirvOp())
896 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
900 Register PtrRegister = Call->Arguments[0];
903 Call->Arguments.size() >= 2 ? Call->Arguments[1] : Register();
913 Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
919 MIB.addDef(Call->ReturnRegister).addUse(TypeReg);
927 static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
930 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
942 if (Call->isSpirvOp())
943 return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));
946 unsigned MemFlags = getIConstVal(Call->Arguments[0], MRI);
960 getIConstVal(Call->Arguments[1], MRI))) |
971 ? Call->Arguments[0]
976 if (Call->Arguments.size() >= 2) {
978 ((Opcode != SPIRV::OpMemoryBarrier && Call->Arguments.size() == 2) ||
979 (Opcode == SPIRV::OpMemoryBarrier && Call->Arguments.size() == 3)) &&
981 Register ScopeArg = (Opcode == SPIRV::OpMemoryBarrier) ? Call->Arguments[2]
982 : Call->Arguments[1];
990 ScopeReg = Call->Arguments[1];
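Source line 946 reads the barrier's cl_mem_fence_flags argument as an integer constant; those flags are then translated into SPIR-V memory-semantics bits. A sketch of that flag translation, assuming the flag values clang's OpenCL headers define; the real builder typically also ORs in an ordering bit (for example sequentially consistent for work_group_barrier), which this sketch leaves out.

#include <cstdint>

// OpenCL fence flags as defined in clang's opencl-c-base.h.
constexpr uint32_t CLK_LOCAL_MEM_FENCE = 0x1;
constexpr uint32_t CLK_GLOBAL_MEM_FENCE = 0x2;
constexpr uint32_t CLK_IMAGE_MEM_FENCE = 0x4;

// SPIR-V MemorySemantics memory-class bits (values per the SPIR-V spec).
constexpr uint32_t SemWorkgroupMemory = 0x100;
constexpr uint32_t SemCrossWorkgroupMemory = 0x200;
constexpr uint32_t SemImageMemory = 0x800;

// Hypothetical translation of the constant read at source line 946.
static uint32_t fenceFlagsToSemantics(uint32_t MemFlags) {
  uint32_t Sem = 0;
  if (MemFlags & CLK_LOCAL_MEM_FENCE)
    Sem |= SemWorkgroupMemory;
  if (MemFlags & CLK_GLOBAL_MEM_FENCE)
    Sem |= SemCrossWorkgroupMemory;
  if (MemFlags & CLK_IMAGE_MEM_FENCE)
    Sem |= SemImageMemory;
  return Sem;
}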
1032 static bool generateExtInst(const SPIRV::IncomingCall *Call,
1036 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1043 .addDef(Call->ReturnRegister)
1044 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1048 for (auto Argument : Call->Arguments)
1053 static bool generateRelationalInst(const SPIRV::IncomingCall *Call,
1057 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1064 buildBoolRegister(MIRBuilder, Call->ReturnType, GR);
1071 for (auto Argument : Call->Arguments)
1075 return buildSelectInst(MIRBuilder, Call->ReturnRegister, CompareRegister,
1076 Call->ReturnType, GR);
1079 static bool generateGroupInst(const SPIRV::IncomingCall *Call,
1082 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1087 if (Call->isSpirvOp()) {
1089 return buildOpFromWrapper(MIRBuilder, GroupBuiltin->Opcode, Call,
1090 GR->getSPIRVTypeID(Call->ReturnType));
1093 Register GroupOpReg = Call->Arguments[1];
1099 Register ScopeReg = Call->Arguments[0];
1101 .addDef(Call->ReturnRegister)
1102 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1105 for (unsigned i = 2; i < Call->Arguments.size(); ++i)
1106 MIB.addUse(Call->Arguments[i]);
1113 Register BoolReg = Call->Arguments[0];
1138 Register GroupResultRegister = Call->ReturnRegister;
1139 SPIRVType *GroupResultType = Call->ReturnType;
1150 buildBoolRegister(MIRBuilder, Call->ReturnType, GR);
1158 Call->Arguments.size() > 2) {
1164 Register ElemReg = Call->Arguments[1];
1168 unsigned VecLen = Call->Arguments.size() - 1;
1177 for (unsigned i = 1; i < Call->Arguments.size(); i++) {
1178 MIB.addUse(Call->Arguments[i]);
1179 setRegClassIfNull(Call->Arguments[i], MRI, GR);
1193 if (Call->Arguments.size() > 0) {
1194 MIB.addUse(Arg0.isValid() ? Arg0 : Call->Arguments[0]);
1195 setRegClassIfNull(Call->Arguments[0], MRI, GR);
1199 for (unsigned i = 1; i < Call->Arguments.size(); i++)
1200 MIB.addUse(Call->Arguments[i]);
1205 buildSelectInst(MIRBuilder, Call->ReturnRegister, GroupResultRegister,
1206 Call->ReturnType, GR);
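The bool plumbing visible here (BoolReg taken from argument 0 at source line 1113, buildBoolRegister at 1150, and the final buildSelectInst at 1205) reflects that the OpenCL predicate group builtins traffic in int while the SPIR-V group instructions consume and produce bool. A scalar illustration of that int-to-bool-and-back bridging; opGroupAll is a stand-in for the emitted group instruction, not a real API.

// e.g. work_group_all(int predicate) -> int, lowered around a bool-typed
// SPIR-V group operation: compare the predicate against zero on the way in,
// widen the bool result back to int on the way out.
static int workGroupAll(bool (*opGroupAll)(bool), int Predicate) {
  bool GroupResult = opGroupAll(Predicate != 0);
  return GroupResult ? 1 : 0;
}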
1210 static bool generateIntelSubgroupsInst(const SPIRV::IncomingCall *Call,
1213 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1234 if (Call->isSpirvOp()) {
1238 return buildOpFromWrapper(MIRBuilder, OpCode, Call,
1239 IsSet ? GR->getSPIRVTypeID(Call->ReturnType)
1245 if (SPIRVType *Arg0Type = GR->getSPIRVTypeForVReg(Call->Arguments[0])) {
1277 .addDef(Call->ReturnRegister)
1278 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1279 for (size_t i = 0; i < Call->Arguments.size(); ++i)
1280 MIB.addUse(Call->Arguments[i]);
1284 static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call,
1287 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1301 Register GroupResultReg = Call->ReturnRegister;
1302 Register ScopeReg = Call->Arguments[0];
1303 Register ValueReg = Call->Arguments[2];
1306 Register ConstGroupOpReg = Call->Arguments[1];
1320 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1328 static bool generateKernelClockInst(const SPIRV::IncomingCall *Call,
1331 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1341 Register ResultReg = Call->ReturnRegister;
1353 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1385 static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call,
1390 Register IndexRegister = Call->Arguments[0];
1391 const unsigned ResultWidth = Call->ReturnType->getOperand(1).getImm();
1399 Register ToTruncate = Call->ReturnRegister;
1408 Register DefaultReg = Call->ReturnRegister;
1426 Register Extracted = Call->ReturnRegister;
1461 Register SelectionResult = Call->ReturnRegister;
1479 MIRBuilder.buildZExtOrTrunc(Call->ReturnRegister, ToTruncate);
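The registers in genWorkgroupQuery (IndexRegister, DefaultReg, Extracted, SelectionResult, plus the final zext/trunc at source line 1479) implement the OpenCL work-item query contract: return component i of the built-in vector when the dimension index is in range, otherwise a caller-chosen default (source line 1679 passes 1 for size-style queries and 0 for id-style ones), then widen or truncate to the declared return width. A scalar sketch of that contract, not of the MIR-building code:

#include <array>
#include <cstdint>

// Illustrative semantics only: e.g. get_global_size(d) with Default = 1,
// get_global_id(d) with Default = 0. The real code emits an extract plus a
// select (or folds it when the index is a compile-time constant).
static uint64_t workgroupQuery(const std::array<uint64_t, 3> &BuiltinVec,
                               uint32_t Dim, uint64_t Default) {
  return Dim < BuiltinVec.size() ? BuiltinVec[Dim] : Default;
}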
1483 static bool generateBuiltinVar(const SPIRV::IncomingCall *Call,
1487 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1492 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, 0);
1495 unsigned BitWidth = GR->getScalarOrVectorBitWidth(Call->ReturnType);
1497 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeVector)
1499 LLT::fixed_vector(Call->ReturnType->getOperand(2).getImm(), BitWidth);
1503 return buildBuiltinVariableLoad(MIRBuilder, Call->ReturnType, GR, Value,
1504 LLType, Call->ReturnRegister);
1507 static bool generateAtomicInst(const SPIRV::IncomingCall *Call,
1511 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1517 return buildAtomicInitInst(Call, MIRBuilder);
1519 return buildAtomicLoadInst(Call, MIRBuilder, GR);
1521 return buildAtomicStoreInst(Call, MIRBuilder, GR);
1524 return buildAtomicCompareExchangeInst(Call, Builtin, Opcode, MIRBuilder,
1532 return buildAtomicRMWInst(Call, Opcode, MIRBuilder, GR);
1534 return buildBarrierInst(Call, SPIRV::OpMemoryBarrier, MIRBuilder, GR);
1537 return buildAtomicFlagInst(Call, Opcode, MIRBuilder, GR);
1539 if (Call->isSpirvOp())
1540 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
1541 GR->getSPIRVTypeID(Call->ReturnType));
1546 static bool generateAtomicFloatingInst(const SPIRV::IncomingCall *Call,
1550 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1557 return buildAtomicFloatingRMWInst(Call, Opcode, MIRBuilder, GR);
1563 static bool generateBarrierInst(const SPIRV::IncomingCall *Call,
1567 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1571 return buildBarrierInst(Call, Opcode, MIRBuilder, GR);
1574 static bool generateCastToPtrInst(const SPIRV::IncomingCall *Call,
1577 .addDef(Call->ReturnRegister)
1578 .addUse(Call->Arguments[0]);
1582 static bool generateDotOrFMulInst(const SPIRV::IncomingCall *Call,
1585 if (Call->isSpirvOp())
1586 return buildOpFromWrapper(MIRBuilder, SPIRV::OpDot, Call,
1587 GR->getSPIRVTypeID(Call->ReturnType));
1588 unsigned Opcode = GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode();
1592 .addDef(Call->ReturnRegister)
1593 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1594 .addUse(Call->Arguments[0])
1595 .addUse(Call->Arguments[1]);
1599 static bool generateWaveInst(const SPIRV::IncomingCall *Call,
1602 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1607 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt);
1608 LLT LLType = LLT::scalar(GR->getScalarOrVectorBitWidth(Call->ReturnType));
1611 MIRBuilder, Call->ReturnType, GR, Value, LLType, Call->ReturnRegister,
1621 static bool generateICarryBorrowInst(const SPIRV::IncomingCall *Call,
1624 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1628 Register SRetReg = Call->Arguments[0];
1637 SPIRVType *OpType1 = GR->getSPIRVTypeForVReg(Call->Arguments[1]);
1638 SPIRVType *OpType2 = GR->getSPIRVTypeForVReg(Call->Arguments[2]);
1654 MRI->getRegClassOrNull(Call->Arguments[1])) {
1656 MRI->setType(ResReg, MRI->getType(Call->Arguments[1]));
1664 .addUse(Call->Arguments[1])
1665 .addUse(Call->Arguments[2]);
1670 static bool generateGetQueryInst(const SPIRV::IncomingCall *Call,
1675 SPIRV::lookupGetBuiltin(Call->Builtin->Name, Call->Builtin->Set)->Value;
1679 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, IsDefault ? 1 : 0);
1682 static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call,
1686 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1692 SPIRVType *RetTy = Call->ReturnType;
1697 SPIRVType *ImgType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
1699 Register QueryResult = Call->ReturnRegister;
1700 SPIRVType *QueryResultType = Call->ReturnType;
1716 .addUse(Call->Arguments[0]);
1727 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
1736 .addDef(Call->ReturnRegister)
1741 insertAssignInstr(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
1746 .addDef(Call->ReturnRegister)
1747 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1756 static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call,
1759 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt &&
1763 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1767 Register Image = Call->Arguments[0];
1787 .addDef(Call->ReturnRegister)
1788 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1826 const SPIRV::IncomingCall *Call,
1829 Register Image = Call->Arguments[0];
1834 Register Sampler = Call->Arguments[1];
1859 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeVector) {
1861 GR->getOrCreateSPIRVVectorType(Call->ReturnType, 4, MIRBuilder);
1870 .addUse(Call->Arguments[2]) // Coordinate.
1874 .addDef(Call->ReturnRegister)
1875 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1880 .addDef(Call->ReturnRegister)
1881 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1883 .addUse(Call->Arguments[2]) // Coordinate.
1889 .addDef(Call->ReturnRegister)
1890 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1892 .addUse(Call->Arguments[1]) // Coordinate.
1894 .addUse(Call->Arguments[2]);
1897 .addDef(Call->ReturnRegister)
1898 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1900 .addUse(Call->Arguments[1]); // Coordinate.
1905 static bool generateWriteImageInst(const SPIRV::IncomingCall *Call,
1909 .addUse(Call->Arguments[0]) // Image.
1910 .addUse(Call->Arguments[1]) // Coordinate.
1911 .addUse(Call->Arguments[2]); // Texel.
1916 const SPIRV::IncomingCall *Call,
1920 if (Call->Builtin->Name.contains_insensitive(
1923 uint64_t Bitmask = getIConstVal(Call->Arguments[0], MRI);
1925 Call->ReturnRegister, getSamplerAddressingModeFromBitmask(Bitmask),
1927 getSamplerFilterModeFromBitmask(Bitmask), MIRBuilder, Call->ReturnType);
1929 } else if (Call->Builtin->Name.contains_insensitive("__spirv_SampledImage")) {
1931 Register Image = Call->Arguments[0];
1936 Call->ReturnRegister.isValid()
1937 ? Call->ReturnRegister
1943 .addUse(Call->Arguments[1]); // Sampler.
1945 } else if (Call->Builtin->Name.contains_insensitive(
1954 Call->ReturnType
1955 ? Call->ReturnType
1963 .addDef(Call->ReturnRegister)
1965 .addUse(Call->Arguments[0]) // Image.
1966 .addUse(Call->Arguments[1]) // Coordinate.
1968 .addUse(Call->Arguments[3]);
1974 static bool generateSelectInst(const SPIRV::IncomingCall *Call,
1976 MIRBuilder.buildSelect(Call->ReturnRegister, Call->Arguments[0],
1977 Call->Arguments[1], Call->Arguments[2]);
1981 static bool generateConstructInst(const SPIRV::IncomingCall *Call,
1984 return buildOpFromWrapper(MIRBuilder, SPIRV::OpCompositeConstruct, Call,
1985 GR->getSPIRVTypeID(Call->ReturnType));
1988 static bool generateCoopMatrInst(const SPIRV::IncomingCall *Call,
1991 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1997 unsigned ArgSz = Call->Arguments.size();
2022 const uint32_t CacheLevel = getConstFromIntrinsic(Call->Arguments[3], MRI);
2024 .addUse(Call->Arguments[0]) // pointer
2025 .addUse(Call->Arguments[1]) // rows
2026 .addUse(Call->Arguments[2]) // columns
2028 .addUse(Call->Arguments[4]); // memory layout
2030 MIB.addUse(Call->Arguments[5]); // stride
2032 const uint32_t MemOp = getConstFromIntrinsic(Call->Arguments[6], MRI);
2038 ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[LiteralIdx], MRI));
2039 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2041 SPIRVType *CoopMatrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
2045 .addDef(Call->ReturnRegister)
2050 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2054 static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call,
2058 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2067 static_cast<unsigned>(getIConstVal(Call->Arguments[0], MRI));
2068 buildOpDecorate(Call->ReturnRegister, MIRBuilder, SPIRV::Decoration::SpecId,
2071 Register ConstRegister = Call->Arguments[1];
2079 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeBool) {
2086 .addDef(Call->ReturnRegister)
2087 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2089 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) {
2099 .addDef(Call->ReturnRegister)
2100 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2101 for (unsigned i = 0; i < Call->Arguments.size(); i++)
2102 MIB.addUse(Call->Arguments[i]);
2110 static bool buildNDRange(const SPIRV::IncomingCall *Call,
2114 SPIRVType *PtrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
2124 unsigned NumArgs = Call->Arguments.size();
2126 Register GlobalWorkSize = Call->Arguments[NumArgs < 4 ? 1 : 2];
2128 NumArgs == 2 ? Register(0) : Call->Arguments[NumArgs < 4 ? 2 : 3];
2129 Register GlobalWorkOffset = NumArgs <= 3 ? Register(0) : Call->Arguments[1];
2139 unsigned Size = Call->Builtin->Name == "ndrange_3D" ? 3 : 2;
2170 .addUse(Call->Arguments[0])
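Source lines 2124-2129 select the global-size, local-size and offset operands purely from the argument count, which is how the one-, two- and three-parameter ndrange_* overloads (global; global + local; offset + global + local, after the result pointer in argument 0) are told apart. A small sketch of exactly that index arithmetic; the helper is hypothetical, but the selection logic matches the lines above.

#include <cstddef>

struct NDRangeArgIdx {
  int GlobalWorkOffset; // -1 when the overload has no offset parameter
  int GlobalWorkSize;
  int LocalWorkSize;    // -1 when the overload has no local-size parameter
};

// Mirrors the selection at source lines 2126-2129; argument 0 is the ndrange
// result pointer, so NumArgs is 2, 3 or 4 depending on the overload.
static NDRangeArgIdx selectNDRangeArgs(size_t NumArgs) {
  NDRangeArgIdx Idx;
  Idx.GlobalWorkSize = NumArgs < 4 ? 1 : 2;
  Idx.LocalWorkSize = NumArgs == 2 ? -1 : (NumArgs < 4 ? 2 : 3);
  Idx.GlobalWorkOffset = NumArgs <= 3 ? -1 : 1;
  return Idx;
}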
2184 static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call,
2189 bool IsSpirvOp = Call->isSpirvOp();
2190 bool HasEvents = Call->Builtin->Name.contains("events") || IsSpirvOp;
2197 if (Call->Builtin->Name.contains("_varargs") || IsSpirvOp) {
2199 Register GepReg = Call->Arguments[LocalSizeArrayIdx];
2230 .addDef(Call->ReturnRegister)
2236 MIB.addUse(Call->Arguments[i]);
2247 MachineInstr *BlockMI = getBlockStructInstr(Call->Arguments[BlockFIdx], MRI);
2252 Register BlockLiteralReg = Call->Arguments[BlockFIdx + 1];
2269 static bool generateEnqueueInst(const SPIRV::IncomingCall *Call,
2273 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2280 return MIRBuilder.buildInstr(Opcode).addUse(Call->Arguments[0]);
2284 .addDef(Call->ReturnRegister)
2285 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2288 .addDef(Call->ReturnRegister)
2289 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2290 .addUse(Call->Arguments[0]);
2293 .addUse(Call->Arguments[0])
2294 .addUse(Call->Arguments[1]);
2297 .addUse(Call->Arguments[0])
2298 .addUse(Call->Arguments[1])
2299 .addUse(Call->Arguments[2]);
2301 return buildNDRange(Call, MIRBuilder, GR);
2303 return buildEnqueueKernel(Call, MIRBuilder, GR);
2309 static bool generateAsyncCopy(const SPIRV::IncomingCall *Call,
2313 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2318 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2319 if (Call->isSpirvOp())
2320 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2328 Call->ReturnType->getOpcode() == SPIRV::OpTypeEvent
2331 Register TypeReg = GR->getSPIRVTypeID(NewType ? NewType : Call->ReturnType);
2332 unsigned NumArgs = Call->Arguments.size();
2333 Register EventReg = Call->Arguments[NumArgs - 1];
2335 .addDef(Call->ReturnRegister)
2338 .addUse(Call->Arguments[0])
2339 .addUse(Call->Arguments[1])
2340 .addUse(Call->Arguments[2])
2341 .addUse(Call->Arguments.size() > 4
2342 ? Call->Arguments[3]
2346 insertAssignInstr(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
2353 .addUse(Call->Arguments[0])
2354 .addUse(Call->Arguments[1]);
2361 const SPIRV::IncomingCall *Call,
2366 SPIRV::lookupConvertBuiltin(Call->Builtin->Name, Call->Builtin->Set);
2368 if (!Builtin && Call->isSpirvOp()) {
2369 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2372 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2373 GR->getSPIRVTypeID(Call->ReturnType));
2377 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2380 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2387 if (GR->isScalarOrVectorOfType(Call->Arguments[0], SPIRV::OpTypeInt)) {
2389 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2397 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2407 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2408 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2416 } else if (GR->isScalarOrVectorOfType(Call->Arguments[0],
2419 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2428 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2429 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2435 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2459 .addDef(Call->ReturnRegister)
2460 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2461 .addUse(Call->Arguments[0]);
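The two buildOpDecorate calls at source lines 2377-2380 decorate the result register based on the demangled name; in SPIR-V, saturating and rounded OpenCL conversions are expressed with the SaturatedConversion and FPRoundingMode decorations, and OpenCL spells them as suffixes on the builtin (convert_<type>[_sat][_rte|_rtz|_rtp|_rtn]). A minimal sketch that makes the naming convention concrete; the parser is hypothetical, since the in-tree code reads these through its demangling tables rather than ad-hoc string scanning.

#include <optional>
#include <string>
#include <string_view>

struct ConvertSuffixes {
  bool Saturated = false;              // "_sat"  -> SaturatedConversion decoration
  std::optional<std::string> Rounding; // "_rte/_rtz/_rtp/_rtn" -> FPRoundingMode
};

// Hypothetical helper, e.g. "convert_uchar_sat_rte" -> {Saturated, "rte"}.
static ConvertSuffixes parseConvertSuffixes(std::string_view Name) {
  ConvertSuffixes S;
  S.Saturated = Name.find("_sat") != std::string_view::npos;
  for (std::string_view R : {"_rte", "_rtz", "_rtp", "_rtn"})
    if (Name.find(R) != std::string_view::npos)
      S.Rounding = std::string(R.substr(1));
  return S;
}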
2465 static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call,
2470 SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2471 Call->Builtin->Set);
2475 .addDef(Call->ReturnRegister)
2476 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2479 for (auto Argument : Call->Arguments)
2491 static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call,
2495 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2502 MIB.addDef(Call->ReturnRegister);
2503 MIB.addUse(GR->getSPIRVTypeID(Call->ReturnType));
2506 MIB.addUse(Call->Arguments[0]);
2510 MIB.addUse(Call->Arguments[1]);
2512 unsigned NumArgs = Call->Arguments.size();
2514 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 1 : 2], MRI));
2516 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 2 : 3], MRI));
2533 std::unique_ptr<const IncomingCall> Call =
2535 if (!Call)
2538 switch (Call->Builtin->Group) {
2550 SPIRV::lookupNativeBuiltin(Call->Builtin->Name, Call->Builtin->Set))
2551 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2554 if (const auto *R = SPIRV::lookupExtendedBuiltin(Call->Builtin->Name,
2555 Call->Builtin->Set))
2556 return std::make_tuple(Call->Builtin->Group, 0, R->Number);
2559 if (const auto *R = SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2560 Call->Builtin->Set))
2564 if (const auto *R = SPIRV::lookupGroupBuiltin(Call->Builtin->Name))
2565 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2568 if (const auto *R = SPIRV::lookupAtomicFloatingBuiltin(Call->Builtin->Name))
2569 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2572 if (const auto *R = SPIRV::lookupIntelSubgroupsBuiltin(Call->Builtin->Name))
2573 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2576 if (const auto *R = SPIRV::lookupGroupUniformBuiltin(Call->Builtin->Name))
2577 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2580 return std::make_tuple(Call->Builtin->Group, SPIRV::OpImageWrite, 0);
2582 return std::make_tuple(Call->Builtin->Group, TargetOpcode::G_SELECT, 0);
2584 return std::make_tuple(Call->Builtin->Group, SPIRV::OpCompositeConstruct,
2587 return std::make_tuple(Call->Builtin->Group, SPIRV::OpReadClockKHR, 0);
2605 std::unique_ptr<const IncomingCall> Call =
2608 if (!Call) {
2614 assert(Args.size() >= Call->Builtin->MinNumArgs &&
2616 if (Call->Builtin->MaxNumArgs && Args.size() > Call->Builtin->MaxNumArgs)
2620 switch (Call->Builtin->Group) {
2622 return generateExtInst(Call.get(), MIRBuilder, GR);
2624 return generateRelationalInst(Call.get(), MIRBuilder, GR);
2626 return generateGroupInst(Call.get(), MIRBuilder, GR);
2628 return generateBuiltinVar(Call.get(), MIRBuilder, GR);
2630 return generateAtomicInst(Call.get(), MIRBuilder, GR);
2632 return generateAtomicFloatingInst(Call.get(), MIRBuilder, GR);
2634 return generateBarrierInst(Call.get(), MIRBuilder, GR);
2636 return generateCastToPtrInst(Call.get(), MIRBuilder);
2638 return generateDotOrFMulInst(Call.get(), MIRBuilder, GR);
2640 return generateWaveInst(Call.get(), MIRBuilder, GR);
2642 return generateICarryBorrowInst(Call.get(), MIRBuilder, GR);
2644 return generateGetQueryInst(Call.get(), MIRBuilder, GR);
2646 return generateImageSizeQueryInst(Call.get(), MIRBuilder, GR);
2648 return generateImageMiscQueryInst(Call.get(), MIRBuilder, GR);
2650 return generateReadImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
2652 return generateWriteImageInst(Call.get(), MIRBuilder, GR);
2654 return generateSampleImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
2656 return generateSelectInst(Call.get(), MIRBuilder);
2658 return generateConstructInst(Call.get(), MIRBuilder, GR);
2660 return generateSpecConstantInst(Call.get(), MIRBuilder, GR);
2662 return generateEnqueueInst(Call.get(), MIRBuilder, GR);
2664 return generateAsyncCopy(Call.get(), MIRBuilder, GR);
2666 return generateConvertInst(DemangledCall, Call.get(), MIRBuilder, GR);
2668 return generateVectorLoadStoreInst(Call.get(), MIRBuilder, GR);
2670 return generateLoadStoreInst(Call.get(), MIRBuilder, GR);
2672 return generateIntelSubgroupsInst(Call.get(), MIRBuilder, GR);
2674 return generateGroupUniformInst(Call.get(), MIRBuilder, GR);
2676 return generateKernelClockInst(Call.get(), MIRBuilder, GR);
2678 return generateCoopMatrInst(Call.get(), MIRBuilder, GR);