/openbsd-src/gnu/llvm/llvm/lib/Target/AMDGPU/

R600ISelDAGToDAG.cpp
    100  case ISD::SCALAR_TO_VECTOR:  in Select()

SIISelLowering.cpp
    276  case ISD::SCALAR_TO_VECTOR:  in SITargetLowering()
    307  setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);  in SITargetLowering()
    308  AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);  in SITargetLowering()
    321  setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);  in SITargetLowering()
    322  AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v6i32);  in SITargetLowering()
    335  setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);  in SITargetLowering()
    336  AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v8i32);  in SITargetLowering()
    349  setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);  in SITargetLowering()
    350  AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v16i32);  in SITargetLowering()
    363  setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);  in SITargetLowering()
    [all …]

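The SIISelLowering.cpp hits show the usual Promote pairing: rather than handling SCALAR_TO_VECTOR on 64-bit-element vector types directly, the target promotes the node to an i32 vector of the same overall width and lets the legalizer insert the bitcasts. A minimal sketch of that registration, inside a hypothetical MyTargetLowering constructor (the class name and the chosen types are illustrative, not taken from the file):

    // Sketch only: mirrors the Promote + AddPromotedToType pairing above.
    #include "llvm/CodeGen/TargetLowering.h"

    namespace {
    class MyTargetLowering : public llvm::TargetLowering {
    public:
      explicit MyTargetLowering(const llvm::TargetMachine &TM)
          : llvm::TargetLowering(TM) {
        using namespace llvm;
        // v2i64 has the same bit width as v4i32, so SCALAR_TO_VECTOR of a
        // 64-bit element can be expressed as the v4i32 form plus bitcasts,
        // which the legalizer inserts automatically for Promote actions.
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Promote);
        AddPromotedToType(ISD::SCALAR_TO_VECTOR, MVT::v2i64, MVT::v4i32);
      }
    };
    } // end anonymous namespace
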
AMDGPUISelDAGToDAG.cpp
    230  SDValue TiedIn = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), VT, Lo);  in matchLoadD16FromBuildVector()
    479  assert(N->getOpcode() == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts);  in SelectBuildVector()
    552  case ISD::SCALAR_TO_VECTOR:  in Select()

/openbsd-src/gnu/llvm/llvm/include/llvm/CodeGen/

ISDOpcodes.h
    606  SCALAR_TO_VECTOR,  enumerator

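ISDOpcodes.h is where the opcode itself is declared. SCALAR_TO_VECTOR takes one scalar operand and produces a vector whose element 0 holds that scalar; the remaining lanes are undefined, which is what makes it cheaper to select than a full BUILD_VECTOR. A hedged sketch of building such a node during lowering (the helper name is made up; DAG, DL, and the types come from whatever context calls it):

    // Sketch: wrap a scalar into lane 0 of VecVT, leaving the other lanes
    // undefined.  For integer types the scalar may be wider than the
    // element type; only the low element-sized bits are meaningful.
    #include "llvm/CodeGen/SelectionDAG.h"
    #include <cassert>

    llvm::SDValue buildScalarToVec(llvm::SelectionDAG &DAG,
                                   const llvm::SDLoc &DL, llvm::EVT VecVT,
                                   llvm::SDValue Scalar) {
      assert(VecVT.isVector() && "result type must be a vector");
      return DAG.getNode(llvm::ISD::SCALAR_TO_VECTOR, DL, VecVT, Scalar);
    }
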
/openbsd-src/gnu/llvm/llvm/lib/Target/VE/

VECustomDAG.cpp
    257  case ISD::SCALAR_TO_VECTOR:  in getIdiomaticVectorType()

/openbsd-src/gnu/llvm/llvm/lib/CodeGen/SelectionDAG/

LegalizeVectorTypes.cpp
    63   case ISD::SCALAR_TO_VECTOR: R = ScalarizeVecRes_SCALAR_TO_VECTOR(N); break;  in ScalarizeVectorResult()
    293  ISD::SCALAR_TO_VECTOR, DL, OtherVT, SDValue(ScalarNode, OtherNo));  in ScalarizeVecRes_OverflowOp()
    739  return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Op);  in ScalarizeVecOp_UnaryOp()
    756  Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Res);  in ScalarizeVecOp_UnaryOp_StrictFP()
    822  return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Res);  in ScalarizeVecOp_VSETCC()
    853  return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Res);  in ScalarizeVecOp_FP_ROUND()
    868  Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Res);  in ScalarizeVecOp_STRICT_FP_ROUND()
    882  return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Res);  in ScalarizeVecOp_FP_EXTEND()
    897  Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Res);  in ScalarizeVecOp_STRICT_FP_EXTEND()
    967  case ISD::SCALAR_TO_VECTOR:  in SplitVectorResult()
    [all …]

LegalizeDAG.cpp
    407   SDValue ScVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,  in ExpandINSERT_VECTOR_ELT()
    1826  Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, V);  in ExpandBVWithShuffles()
    1934  return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Node->getOperand(0));  in ExpandBUILD_VECTOR()
    1989  SDValue Vec1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value1);  in ExpandBUILD_VECTOR()
    1992  Vec2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value2);  in ExpandBUILD_VECTOR()
    3055  case ISD::SCALAR_TO_VECTOR:  in ExpandNode()
    5039  case ISD::SCALAR_TO_VECTOR: {  in PromoteNode()

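The ExpandBUILD_VECTOR hits show the standard expansion trick: when only one or two distinct scalars feed a BUILD_VECTOR, the legalizer materializes each one with SCALAR_TO_VECTOR and then broadcasts or blends lanes with a shuffle. A rough, self-contained sketch of the single-value (splat) case, assuming the target can shuffle the result type (the helper name is made up):

    // Sketch: build a splat as scalar_to_vector + broadcast shuffle, the
    // same shape as the ExpandBUILD_VECTOR code referenced above.
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/CodeGen/SelectionDAG.h"

    llvm::SDValue emitSplat(llvm::SelectionDAG &DAG, const llvm::SDLoc &DL,
                            llvm::EVT VT, llvm::SDValue Scalar) {
      using namespace llvm;
      // Put the scalar into lane 0; every other lane is undefined so far.
      SDValue Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scalar);
      // An all-zero shuffle mask copies lane 0 into every result lane.
      SmallVector<int, 16> Mask(VT.getVectorNumElements(), 0);
      return DAG.getVectorShuffle(VT, DL, Vec, DAG.getUNDEF(VT), Mask);
    }
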
SelectionDAGDumper.cpp
    300  case ISD::SCALAR_TO_VECTOR: return "scalar_to_vector";  in getOperationName()

DAGCombiner.cpp
    1804   case ISD::SCALAR_TO_VECTOR: return visitSCALAR_TO_VECTOR(N);  in visit()
    5420   if ((HandOpcode == ISD::BITCAST || HandOpcode == ISD::SCALAR_TO_VECTOR) &&  in hoistLogicOpWithSameOpcodeHands()
    20308  if (CurVec.getOpcode() == ISD::SCALAR_TO_VECTOR && CurVec.hasOneUse()) {  in visitINSERT_VECTOR_ELT()
    20674  if (VecOp.getOpcode() == ISD::SCALAR_TO_VECTOR) {  in visitEXTRACT_VECTOR_ELT()
    20751  BCSrc.getOpcode() == ISD::SCALAR_TO_VECTOR) {  in visitEXTRACT_VECTOR_ELT()
    20895  } else if (VecOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&  in visitEXTRACT_VECTOR_ELT()
    22288  if (!LegalOperations && Scalar.getOpcode() == ISD::SCALAR_TO_VECTOR &&  in visitCONCAT_VECTORS()
    22322  SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), NVT, Scalar);  in visitCONCAT_VECTORS()
    23200  } else if (S.getOpcode() == ISD::SCALAR_TO_VECTOR) {  in combineShuffleOfScalars()
    23872  SDValue Insert = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, NewBO);  in visitVECTOR_SHUFFLE()
    [all …]

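Several of these combines exploit the fact that only lane 0 of a SCALAR_TO_VECTOR is defined. The visitEXTRACT_VECTOR_ELT hits, for example, cover the classic fold "extract lane 0 of (scalar_to_vector x) is just x". A hedged sketch of that fold in isolation (the real combiner also handles wider integer scalars, bitcasts, and other indices):

    // Sketch of the lane-0 fold: extract (scalar_to_vector x), 0 --> x,
    // guarded by a type check so no implicit truncation is lost.
    #include "llvm/CodeGen/SelectionDAG.h"
    #include <cassert>

    llvm::SDValue foldExtractOfScalarToVector(llvm::SDNode *Extract) {
      using namespace llvm;
      assert(Extract->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
             "expected an EXTRACT_VECTOR_ELT node");
      SDValue Vec = Extract->getOperand(0);
      auto *Idx = dyn_cast<ConstantSDNode>(Extract->getOperand(1));
      if (Vec.getOpcode() == ISD::SCALAR_TO_VECTOR && Idx && Idx->isZero() &&
          Vec.getOperand(0).getValueType() == Extract->getValueType(0))
        return Vec.getOperand(0);
      return SDValue(); // no fold; the caller keeps the original node
    }
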
LegalizeIntegerTypes.cpp
    124   case ISD::SCALAR_TO_VECTOR:  in PromoteIntegerResult()
    1649  case ISD::SCALAR_TO_VECTOR:  in PromoteIntegerOperand()
    4829  case ISD::SCALAR_TO_VECTOR: Res = ExpandOp_SCALAR_TO_VECTOR(N); break;  in ExpandIntegerOperand()

SelectionDAG.cpp
    3103  case ISD::SCALAR_TO_VECTOR: {  in computeKnownBits()
    5554  case ISD::SCALAR_TO_VECTOR:  in getNode()

TargetLowering.cpp
    1128  case ISD::SCALAR_TO_VECTOR: {  in SimplifyDemandedBits()
    2877  case ISD::SCALAR_TO_VECTOR: {  in SimplifyDemandedVectorElts()

/openbsd-src/gnu/llvm/llvm/lib/Target/X86/

X86ISelLowering.cpp
    1090  setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);  in X86TargetLowering()
    1539  setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);  in X86TargetLowering()
    1879  setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);  in X86TargetLowering()
    2097  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v32f16, Custom);  in X86TargetLowering()
    2142  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8f16, Legal);  in X86TargetLowering()
    2143  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16f16, Custom);  in X86TargetLowering()
    2360  ISD::SCALAR_TO_VECTOR,  in X86TargetLowering()
    3245  ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,  in LowerReturn()
    3484  return DAG.getNode(ISD::SCALAR_TO_VECTOR, Dl, MVT::v1i1, ValReturned);  in lowerRegToMasks()
    3831  ? DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VA.getValVT(), Val)  in LowerMemArgument()
    [all …]

X86ISelDAGToDAG.cpp
    1199  SDValue Op0 = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT,  in PreprocessISelDAG()
    1201  SDValue Op1 = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT,  in PreprocessISelDAG()

X86FastISel.cpp
    2640  InputReg = fastEmit_r(MVT::i32, MVT::v4i32, ISD::SCALAR_TO_VECTOR,  in fastLowerIntrinsicCall()

/openbsd-src/gnu/llvm/llvm/lib/Target/PowerPC/

README_ALTIVEC.txt
    56  We currently codegen SCALAR_TO_VECTOR as a store of the scalar to a 16-byte

PPCISelLowering.cpp
    862   setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);  in PPCTargetLowering()
    969   setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);  in PPCTargetLowering()
    970   setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);  in PPCTargetLowering()
    984   setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);  in PPCTargetLowering()
    987   setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);  in PPCTargetLowering()
    991   setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);  in PPCTargetLowering()
    992   setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);  in PPCTargetLowering()
    993   setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);  in PPCTargetLowering()
    994   setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);  in PPCTargetLowering()
    2985  UI->getOpcode() != ISD::SCALAR_TO_VECTOR &&  in usePartialVectorLoads()
    [all …]

PPCISelDAGToDAG.cpp
    5780  Op1.getOpcode() == ISD::SCALAR_TO_VECTOR &&  in Select()

/openbsd-src/gnu/llvm/llvm/lib/Target/SystemZ/

SystemZISelLowering.cpp
    382   setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);  in SystemZTargetLowering()
    501   setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);  in SystemZTargetLowering()
    502   setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);  in SystemZTargetLowering()
    5183  return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value);  in buildScalarToVector()
    5446  if (isOperationLegal(ISD::SCALAR_TO_VECTOR, VT) && isScalarToVector(Op))  in lowerBUILD_VECTOR()
    5470  if ((Index == 0 && Op0.getOpcode() == ISD::SCALAR_TO_VECTOR) ||  in lowerVECTOR_SHUFFLE()
    5642  if ((Index == 0 && VSNOp0.getOpcode() == ISD::SCALAR_TO_VECTOR) ||  in lowerShift()
    5793  case ISD::SCALAR_TO_VECTOR:  in LowerOperation()

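SystemZ marks SCALAR_TO_VECTOR Custom for most vector types and Legal only where the ISA has a direct instruction, so the node reaches the target through LowerOperation. A simplified sketch of that dispatch, using a hypothetical MyTargetLowering subclass rather than the SystemZ code itself, with the lowering written generically as an insert into lane 0 of an undef vector (a real target would emit its native instruction instead):

    // Sketch of the Custom-action path: LowerOperation catches the node
    // and rewrites it; returning SDValue() would ask for default expansion.
    #include "llvm/CodeGen/SelectionDAG.h"
    #include "llvm/CodeGen/TargetLowering.h"
    #include "llvm/Support/ErrorHandling.h"

    namespace {
    class MyTargetLowering : public llvm::TargetLowering {
    public:
      using llvm::TargetLowering::TargetLowering;

      llvm::SDValue LowerOperation(llvm::SDValue Op,
                                   llvm::SelectionDAG &DAG) const override {
        using namespace llvm;
        switch (Op.getOpcode()) {
        case ISD::SCALAR_TO_VECTOR: {
          // Generic fallback: drop the scalar into lane 0 of an undef vector.
          SDLoc DL(Op);
          EVT VT = Op.getValueType();
          return DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, DAG.getUNDEF(VT),
                             Op.getOperand(0), DAG.getVectorIdxConstant(0, DL));
        }
        default:
          llvm_unreachable("unexpected custom-lowered opcode");
        }
      }
    };
    } // end anonymous namespace
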
/openbsd-src/gnu/llvm/llvm/lib/Target/AArch64/

AArch64ISelLowering.cpp
    1708   setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Legal);  in addTypeForStreamingSVE()
    4931   return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, N);  in LowerINTRINSIC_WO_CHAIN()
    5755   SDValue Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f32, Load);  in LowerLOAD()
    11494  if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR)  in LowerVECTOR_SHUFFLE()
    12275  return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);  in LowerBUILD_VECTOR()
    12482  Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op0);  in LowerBUILD_VECTOR()
    20675  DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, SrcVT, N0.getOperand(0));  in performSelectCombine()
    20677  DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, SrcVT, N0.getOperand(1));  in performSelectCombine()

/openbsd-src/gnu/llvm/llvm/lib/Target/ARM/

ARMISelLowering.cpp
    337   setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Legal);  in addMVEVectorTypes()
    402   setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);  in addMVEVectorTypes()
    448   setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);  in addMVEVectorTypes()
    6029  Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0);  in LowerFCOPYSIGN()
    6031  Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1);  in LowerFCOPYSIGN()
    7964  return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);  in LowerBUILD_VECTOR()
    8763  if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {  in LowerVECTOR_SHUFFLE()

/openbsd-src/gnu/llvm/llvm/include/llvm/Target/

TargetSelectionDAG.td
    707  def scalar_to_vector : SDNode<"ISD::SCALAR_TO_VECTOR", SDTypeProfile<1, 1, []>,

/openbsd-src/gnu/llvm/llvm/lib/Target/Hexagon/

HexagonISelLowering.cpp
    1642  ISD::BUILD_VECTOR, ISD::SCALAR_TO_VECTOR,  in HexagonTargetLowering()