/openbsd-src/gnu/llvm/llvm/include/llvm/CodeGen/
SelectionDAGNodes.h
  1447  EVT MemVT, MachineMemOperand *MMO)
  1448      : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {
  2318  SDVTList VTs, ISD::MemIndexedMode AM, EVT MemVT,
  2320      : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
  2352  ISD::MemIndexedMode AM, ISD::LoadExtType ETy, EVT MemVT,
  2354      : LSBaseSDNode(ISD::LOAD, Order, dl, VTs, AM, MemVT, MMO) {
  2380  ISD::MemIndexedMode AM, bool isTrunc, EVT MemVT,
  2382      : LSBaseSDNode(ISD::STORE, Order, dl, VTs, AM, MemVT, MMO) {
  2414  ISD::MemIndexedMode AM, EVT MemVT,
  2416      : MemSDNode(NodeTy, Order, DL, VTs, MemVT, MMO) {
  [all …]
|
SelectionDAG.h
  1232  SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT,
  1238  SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain,
  1243  SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, EVT VT,
  1248  SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
  1258  EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment,
  1265  EVT MemVT, MachinePointerInfo PtrInfo,
  1271  return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, PtrInfo,
  1272      Alignment.value_or(getEVTAlign(MemVT)), Flags,
  1277  ArrayRef<SDValue> Ops, EVT MemVT,
  1322  SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT,
  [all …]
|
TargetLowering.h
  593   virtual bool storeOfVectorConstantIsCheap(EVT MemVT,  in storeOfVectorConstantIsCheap() argument
  602   virtual bool mergeStoresAfterLegalization(EVT MemVT) const {  in mergeStoresAfterLegalization() argument
  607   virtual bool canMergeStoresTo(unsigned AS, EVT MemVT,  in canMergeStoresTo() argument
  1298  EVT MemVT) const {  in getLoadExtAction() argument
  1299  if (ValVT.isExtended() || MemVT.isExtended()) return Expand;  in getLoadExtAction()
  1301  unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;  in getLoadExtAction()
  1309  bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {  in isLoadExtLegal() argument
  1310  return getLoadExtAction(ExtType, ValVT, MemVT) == Legal;  in isLoadExtLegal()
  1315  bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {  in isLoadExtLegalOrCustom() argument
  1316  return getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||  in isLoadExtLegalOrCustom()
  [all …]
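The TargetLowering.h hits above show the query path used throughout the DAG combiner and legalizer: getLoadExtAction() maps an (extension kind, result type, memory type) triple to a LegalizeAction, and isLoadExtLegal() / isLoadExtLegalOrCustom() are thin wrappers over it. A minimal sketch of how such a query is typically consulted before forming an extending load follows; the function name tryFoldZExtOfLoad and its surrounding combine context are assumptions for illustration, not code from the indexed files.

    #include "llvm/CodeGen/SelectionDAG.h"
    #include "llvm/CodeGen/SelectionDAGNodes.h"
    #include "llvm/CodeGen/TargetLowering.h"

    using namespace llvm;

    // Hypothetical combine step: fold (zext (load x)) into a single extending
    // load, but only when the target reports ZEXTLOAD of MemVT to VT as Legal.
    static SDValue tryFoldZExtOfLoad(SDNode *N, SelectionDAG &DAG,
                                     const TargetLowering &TLI) {
      SDValue Src = N->getOperand(0);   // N is assumed to be an ISD::ZERO_EXTEND
      auto *LN = dyn_cast<LoadSDNode>(Src);
      if (!LN || LN->getExtensionType() != ISD::NON_EXTLOAD || !LN->isSimple() ||
          !Src.hasOneUse())
        return SDValue();

      EVT VT = N->getValueType(0);      // type after extension
      EVT MemVT = LN->getMemoryVT();    // type actually read from memory
      if (!TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, MemVT))
        return SDValue();               // no single-instruction form; give up

      // Replace the pair with one ZEXTLOAD that still records MemVT in its MMO.
      return DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N), VT, LN->getChain(),
                            LN->getBasePtr(), MemVT, LN->getMemOperand());
    }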
|
/openbsd-src/gnu/llvm/llvm/lib/Target/AMDGPU/
R600ISelLowering.h
  49    bool canMergeStoresTo(unsigned AS, EVT MemVT,
  57    bool canCombineTruncStore(EVT ValVT, EVT MemVT,  in canCombineTruncStore() argument
  63    return isTruncStoreLegal(ValVT, MemVT);  in canCombineTruncStore()
|
R600ISelLowering.cpp
  1045  EVT MemVT = Store->getMemoryVT();  in lowerPrivateTruncStore() local
  1078  SDValue MaskedValue = DAG.getZeroExtendInReg(SExtValue, DL, MemVT);  in lowerPrivateTruncStore()
  1119  EVT MemVT = StoreNode->getMemoryVT();  in LowerSTORE() local
  1135  NewChain, DL, Value, Ptr, StoreNode->getPointerInfo(), MemVT,  in LowerSTORE()
  1145  if (Alignment < MemVT.getStoreSize() &&  in LowerSTORE()
  1146  !allowsMisalignedMemoryAccesses(MemVT, AS, Alignment,  in LowerSTORE()
  1161  if (MemVT == MVT::i8) {  in LowerSTORE()
  1164  assert(MemVT == MVT::i16);  in LowerSTORE()
  1192  Op->getVTList(), Args, MemVT,  in LowerSTORE()
  1211  if (MemVT.bitsLT(MVT::i32))  in LowerSTORE()
  [all …]
|
SIISelLowering.h
  52    SDValue lowerKernargMemParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
  126   ArrayRef<SDValue> Ops, EVT MemVT,
  140   SelectionDAG &DAG, EVT VT, EVT MemVT, const SDLoc &SL, SDValue Val,
  172   EVT MemVT,
  288   bool canMergeStoresTo(unsigned AS, EVT MemVT,
|
AMDGPUISelLowering.cpp
  160   for (auto MemVT :  in AMDGPUTargetLowering()
  162   setLoadExtAction({ISD::SEXTLOAD, ISD::ZEXTLOAD, ISD::EXTLOAD}, VT, MemVT,  in AMDGPUTargetLowering()
  830   bool AMDGPUTargetLowering::storeOfVectorConstantIsCheap(EVT MemVT,  in storeOfVectorConstantIsCheap() argument
  1061  EVT MemVT = ArgVT;  in analyzeFormalArgumentsCompute() local
  1070  MemVT = RegisterVT;  in analyzeFormalArgumentsCompute()
  1072  MemVT = ArgVT;  in analyzeFormalArgumentsCompute()
  1080  MemVT = RegisterVT;  in analyzeFormalArgumentsCompute()
  1085  MemVT = ArgVT.getScalarType();  in analyzeFormalArgumentsCompute()
  1088  MemVT = RegisterVT;  in analyzeFormalArgumentsCompute()
  1093  MemVT = EVT::getIntegerVT(State.getContext(), MemoryBits);  in analyzeFormalArgumentsCompute()
  [all …]
|
SIISelLowering.cpp
  1386  bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,  in canMergeStoresTo() argument
  1389  return (MemVT.getSizeInBits() <= 4 * 32);  in canMergeStoresTo()
  1392  return (MemVT.getSizeInBits() <= MaxPrivateBits);  in canMergeStoresTo()
  1394  return (MemVT.getSizeInBits() <= 2 * 32);  in canMergeStoresTo()
  1722  SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,  in convertArgType() argument
  1728  VT.getVectorNumElements() != MemVT.getVectorNumElements()) {  in convertArgType()
  1730  EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(),  in convertArgType()
  1738  VT.bitsLT(MemVT)) {  in convertArgType()
  1740  Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT));  in convertArgType()
  1743  if (MemVT.isFloatingPoint())  in convertArgType()
  [all …]
|
AMDGPUISelLowering.h
  196   bool storeOfVectorConstantIsCheap(EVT MemVT,
|
/openbsd-src/gnu/llvm/llvm/lib/Target/VE/
VEISelLowering.cpp
  1380  EVT MemVT = LdNode->getMemoryVT();  in lowerLoadI1() local
  1381  if (MemVT == MVT::v256i1 || MemVT == MVT::v4i64) {  in lowerLoadI1()
  1383  SDNode *VM = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MemVT);  in lowerLoadI1()
  1402  } else if (MemVT == MVT::v512i1 || MemVT == MVT::v8i64) {  in lowerLoadI1()
  1404  SDNode *VM = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MemVT);  in lowerLoadI1()
  1432  EVT MemVT = LdNode->getMemoryVT();  in lowerLOAD() local
  1435  if (MemVT.isVector() && !isMaskType(MemVT))  in lowerLOAD()
  1445  if (MemVT == MVT::f128)  in lowerLOAD()
  1447  if (isMaskType(MemVT))  in lowerLOAD()
  1505  EVT MemVT = StNode->getMemoryVT();  in lowerStoreI1() local
  [all …]
|
/openbsd-src/gnu/llvm/llvm/lib/CodeGen/SelectionDAG/
SelectionDAG.cpp
  3506  EVT MemVT = LD->getMemoryVT();  in computeKnownBits() local
  3507  KnownBits KnownFull(MemVT.getSizeInBits());  in computeKnownBits()
  7761  SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,  in getAtomic() argument
  7765  ID.AddInteger(MemVT.getRawBits());  in getAtomic()
  7776  VTList, MemVT, MMO);  in getAtomic()
  7785  EVT MemVT, SDVTList VTs, SDValue Chain,  in getAtomicCmpSwap() argument
  7793  return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);  in getAtomicCmpSwap()
  7796  SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,  in getAtomic() argument
  7825  return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);  in getAtomic()
  7828  SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,  in getAtomic() argument
  [all …]
|
DAGCombiner.cpp
  701   EVT &MemVT, unsigned ShAmt = 0);
  721   EVT MemVT, unsigned NumStores,
  750   EVT MemVT, SDNode *Root, bool AllowVectors);
  757   unsigned NumConsecutiveStores, EVT MemVT,
  763   unsigned NumConsecutiveStores, EVT MemVT,
  1291  EVT MemVT = LD->getMemoryVT();  in PromoteOperand() local
  1297  MemVT, LD->getMemOperand());  in PromoteOperand()
  1528  EVT MemVT = LD->getMemoryVT();  in PromoteLoad() local
  1533  MemVT, LD->getMemOperand());  in PromoteLoad()
  5770  ISD::LoadExtType ExtType, EVT &MemVT,  in isLegalNarrowLdSt() argument
  [all …]
|
SelectionDAGBuilder.cpp
  2530  EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), CB.CmpLHS->getType());  in visitSwitchCase() local
  2549  if (CondLHS.getValueType() != MemVT) {  in visitSwitchCase()
  2550  CondLHS = DAG.getPtrExtOrTrunc(CondLHS, getCurSDLoc(), MemVT);  in visitSwitchCase()
  2551  CondRHS = DAG.getPtrExtOrTrunc(CondRHS, getCurSDLoc(), MemVT);  in visitSwitchCase()
  3272  EVT MemVT =  in visitICmp() local
  3278  if (Op1.getValueType() != MemVT) {  in visitICmp()
  3279  Op1 = DAG.getPtrExtOrTrunc(Op1, getCurSDLoc(), MemVT);  in visitICmp()
  3280  Op2 = DAG.getPtrExtOrTrunc(Op2, getCurSDLoc(), MemVT);  in visitICmp()
  4630  MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();  in visitAtomicCmpXchg() local
  4631  SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);  in visitAtomicCmpXchg()
  [all …]
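For context on the visitAtomicCmpXchg() hits: MemVT is taken from the compare operand's type and combined with an i1 success flag in the node's value list. A condensed sketch of that pattern follows; the helper name buildCmpXchg and the caller-supplied operands are assumptions, not the builder code itself.

    #include "llvm/CodeGen/SelectionDAG.h"
    #include "llvm/CodeGen/SelectionDAGNodes.h"

    using namespace llvm;

    // The cmpxchg node returns (loaded value : MemVT, success flag : i1, chain).
    static SDValue buildCmpXchg(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain,
                                SDValue Ptr, SDValue Cmp, SDValue New,
                                MachineMemOperand *MMO) {
      MVT MemVT = Cmp.getSimpleValueType();   // width of the memory access
      SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
      return DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, DL, MemVT,
                                  VTs, Chain, Ptr, Cmp, New, MMO);
    }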
|
LegalizeDAG.cpp
  512   EVT MemVT = ST->getMemoryVT();  in LegalizeStoreOps() local
  514   if (!TLI.allowsMemoryAccessForAlignment(*DAG.getContext(), DL, MemVT,  in LegalizeStoreOps()
  622   EVT MemVT = ST->getMemoryVT();  in LegalizeStoreOps() local
  625   if (!TLI.allowsMemoryAccessForAlignment(*DAG.getContext(), DL, MemVT,  in LegalizeStoreOps()
  683   EVT MemVT = LD->getMemoryVT();  in LegalizeLoadOps() local
  687   if (!TLI.allowsMemoryAccessForAlignment(*DAG.getContext(), DL, MemVT,  in LegalizeLoadOps()
  868   EVT MemVT = LD->getMemoryVT();  in LegalizeLoadOps() local
  870   if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT,  in LegalizeLoadOps()
  1483  EVT MemVT = isa<BuildVectorSDNode>(Node) ? VT.getVectorElementType()  in ExpandVectorBuildThroughStack() local
  1493  unsigned TypeByteSize = MemVT.getSizeInBits() / 8;  in ExpandVectorBuildThroughStack()
  [all …]
|
LegalizeVectorTypes.cpp
  1165  void DAGTypeLegalizer::IncrementPointer(MemSDNode *N, EVT MemVT,  in IncrementPointer() argument
  1169  unsigned IncrementSize = MemVT.getSizeInBits().getKnownMinValue() / 8;  in IncrementPointer()
  1171  if (MemVT.isScalableVector()) {  in IncrementPointer()
  6637  for (EVT MemVT : reverse(MVT::integer_valuetypes())) {  in findMemType() local
  6638  unsigned MemVTWidth = MemVT.getSizeInBits();  in findMemType()
  6639  if (MemVT.getSizeInBits() <= WidenEltWidth)  in findMemType()
  6641  auto Action = TLI.getTypeAction(*DAG.getContext(), MemVT);  in findMemType()
  6649  return MemVT;  in findMemType()
  6650  RetVT = MemVT;  in findMemType()
  6658  for (EVT MemVT : reverse(MVT::vector_valuetypes())) {  in findMemType() local
  [all …]
|
/openbsd-src/gnu/llvm/llvm/lib/Target/X86/
X86ISelDAGToDAG.cpp
  1311  MVT MemVT = (N->getOpcode() == ISD::FP_ROUND) ? DstVT : SrcVT;  in PreprocessISelDAG() local
  1312  SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);  in PreprocessISelDAG()
  1321  CurDAG->getEntryNode(), dl, N->getOperand(0), MemTmp, MPI, MemVT);  in PreprocessISelDAG()
  1323  MemTmp, MPI, MemVT);  in PreprocessISelDAG()
  1367  MVT MemVT = (N->getOpcode() == ISD::STRICT_FP_ROUND) ? DstVT : SrcVT;  in PreprocessISelDAG() local
  1368  SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);  in PreprocessISelDAG()
  1381  Store = CurDAG->getMemIntrinsicNode(X86ISD::FST, dl, VTs, Ops, MemVT,  in PreprocessISelDAG()
  1390  assert(SrcVT == MemVT && "Unexpected VT!");  in PreprocessISelDAG()
  1399  X86ISD::FLD, dl, VTs, Ops, MemVT, MPI,  in PreprocessISelDAG()
  1407  assert(DstVT == MemVT && "Unexpected VT!");  in PreprocessISelDAG()
  [all …]
|
X86ISelLowering.h
  1078  bool mergeStoresAfterLegalization(EVT MemVT) const override {  in mergeStoresAfterLegalization() argument
  1079  return !MemVT.isVector();  in mergeStoresAfterLegalization()
  1082  bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
  1435  bool storeOfVectorConstantIsCheap(EVT MemVT, unsigned NumElem,  in storeOfVectorConstantIsCheap() argument
|
/openbsd-src/gnu/llvm/llvm/lib/Target/SystemZ/
SystemZISelDAGToDAG.cpp
  1351  EVT MemVT = StoreNode->getMemoryVT();  in tryFoldLoadStoreIntoMemOperand() local
  1361  if (MemVT == MVT::i32)  in tryFoldLoadStoreIntoMemOperand()
  1363  else if (MemVT == MVT::i64)  in tryFoldLoadStoreIntoMemOperand()
  1372  if (MemVT == MVT::i32)  in tryFoldLoadStoreIntoMemOperand()
  1374  else if (MemVT == MVT::i64)  in tryFoldLoadStoreIntoMemOperand()
  1396  Operand = CurDAG->getTargetConstant(OperandV, DL, MemVT);  in tryFoldLoadStoreIntoMemOperand()
|
SystemZISelLowering.cpp
  4257  EVT MemVT = Node->getMemoryVT();  in lowerATOMIC_LOAD_SUB() local
  4258  if (MemVT == MVT::i32 || MemVT == MVT::i64) {  in lowerATOMIC_LOAD_SUB()
  4260  assert(Op.getValueType() == MemVT && "Mismatched VTs");  in lowerATOMIC_LOAD_SUB()
  4270  NegSrc2 = DAG.getConstant(Value, DL, MemVT);  in lowerATOMIC_LOAD_SUB()
  4273  NegSrc2 = DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, DL, MemVT),  in lowerATOMIC_LOAD_SUB()
  4277  return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MemVT,  in lowerATOMIC_LOAD_SUB()
  6391  EVT MemVT = SN->getMemoryVT();  in combineSTORE() local
  6396  if (MemVT.isInteger() && SN->isTruncatingStore()) {  in combineSTORE()
  6398  combineTruncateExtract(SDLoc(N), MemVT, SN->getValue(), DCI)) {  in combineSTORE()
  6424  Ops, MemVT, SN->getMemOperand());  in combineSTORE()
  [all …]
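The lowerATOMIC_LOAD_SUB() hits above follow a common lowering trick: when only an atomic-add form is available for MemVT, an ATOMIC_LOAD_SUB is rewritten as an ATOMIC_LOAD_ADD of the negated operand. A generic sketch of that rewrite, illustrative rather than the SystemZ code verbatim (it omits the constant-operand fast path visible at line 4270):

    #include "llvm/CodeGen/SelectionDAG.h"
    #include "llvm/CodeGen/SelectionDAGNodes.h"

    using namespace llvm;

    static SDValue lowerAtomicSubAsAdd(AtomicSDNode *Node, SelectionDAG &DAG) {
      SDLoc DL(Node);
      EVT MemVT = Node->getMemoryVT();
      SDValue Src2 = Node->getVal();          // value being subtracted
      // Negate the operand (0 - Src2) and emit the add form instead.
      SDValue NegSrc2 =
          DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, DL, MemVT), Src2);
      return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MemVT, Node->getChain(),
                           Node->getBasePtr(), NegSrc2, Node->getMemOperand());
    }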
|
/openbsd-src/gnu/llvm/llvm/lib/Target/AArch64/
AArch64ISelLowering.cpp
  5379  EVT MemVT = MGT->getMemoryVT();  in LowerMGATHER() local
  5388  DAG.getMaskedGather(MGT->getVTList(), MemVT, DL, Ops,  in LowerMGATHER()
  5400  if (IsScaled && ScaleVal != MemVT.getScalarStoreSize()) {  in LowerMGATHER()
  5408  return DAG.getMaskedGather(MGT->getVTList(), MemVT, DL, Ops,  in LowerMGATHER()
  5419  MemVT = MemVT.changeVectorElementTypeToInteger();  in LowerMGATHER()
  5441  MemVT = ContainerVT.changeVectorElementType(MemVT.getVectorElementType());  in LowerMGATHER()
  5450  DAG.getMaskedGather(DAG.getVTList(ContainerVT, MVT::Other), MemVT, DL,  in LowerMGATHER()
  5478  EVT MemVT = MSC->getMemoryVT();  in LowerMSCATTER() local
  5488  if (IsScaled && ScaleVal != MemVT.getScalarStoreSize()) {  in LowerMSCATTER()
  5496  return DAG.getMaskedScatter(MSC->getVTList(), MemVT, DL, Ops,  in LowerMSCATTER()
  [all …]
|
AArch64ISelLowering.h
  756   bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,  in canMergeStoresTo() argument
  764   return (MemVT.getSizeInBits() <= 64);  in canMergeStoresTo()
|
AArch64SVEInstrInfo.td
  2792  SDPatternOperator Load, ValueType PredTy, ValueType MemVT, ComplexPattern AddrCP> {
  2795  def : Pat<(Ty (Load (PredTy PPR:$gp), (AddrCP GPR64:$base, GPR64:$offset), MemVT)),
  2801  …def : Pat<(Ty (Load (PredTy PPR:$gp), (am_sve_indexed_s4 GPR64sp:$base, simm4s1:$offset), MemVT)),
  2806  def : Pat<(Ty (Load (PredTy PPR:$gp), GPR64:$base, MemVT)),
  2840  …ass ldnf1<Instruction I, ValueType Ty, SDPatternOperator Load, ValueType PredTy, ValueType MemVT> {
  2843  …def : Pat<(Ty (Load (PredTy PPR:$gp), (am_sve_indexed_s4 GPR64sp:$base, simm4s1:$offset), MemVT)),
  2848  def : Pat<(Ty (Load (PredTy PPR:$gp), GPR64:$base, MemVT)),
  2880  …tion I, ValueType Ty, SDPatternOperator Load, ValueType PredTy, ValueType MemVT, ComplexPattern Ad…
  2883  def : Pat<(Ty (Load (PredTy PPR:$gp), (AddrCP GPR64:$base, GPR64:$offset), MemVT)),
  2888  def : Pat<(Ty (Load (PredTy PPR:$gp), GPR64:$base, MemVT)),
  [all …]
|
/openbsd-src/gnu/llvm/llvm/lib/Target/ARM/
ARMISelLowering.h
  679   bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,  in canMergeStoresTo() argument
  682   return (MemVT.getSizeInBits() <= 32);  in canMergeStoresTo()
|
/openbsd-src/gnu/llvm/llvm/lib/Target/Mips/
MipsISelLowering.cpp
  2664  EVT VT = LD->getValueType(0), MemVT = LD->getMemoryVT();  in createLoadLR() local
  2674  return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, MemVT,  in createLoadLR()
  2681  EVT MemVT = LD->getMemoryVT();  in lowerLOAD() local
  2687  if ((LD->getAlign().value() >= (MemVT.getSizeInBits() / 8)) ||  in lowerLOAD()
  2688  ((MemVT != MVT::i32) && (MemVT != MVT::i64)))  in lowerLOAD()
  2746  EVT MemVT = SD->getMemoryVT(), BasePtrVT = Ptr.getValueType();  in createStoreLR() local
  2755  return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, MemVT,  in createStoreLR()
  2807  EVT MemVT = SD->getMemoryVT();  in lowerSTORE() local
  2811  (SD->getAlign().value() < (MemVT.getSizeInBits() / 8)) &&  in lowerSTORE()
  2812  ((MemVT == MVT::i32) || (MemVT == MVT::i64))  in lowerSTORE()
|
/openbsd-src/gnu/llvm/llvm/lib/Target/PowerPC/
PPCISelLowering.cpp
  2959  EVT MemVT = LD->getMemoryVT();  in usePartialVectorLoads() local
  2960  if (!MemVT.isSimple())  in usePartialVectorLoads()
  2962  switch(MemVT.getSimpleVT().SimpleTy) {  in usePartialVectorLoads()
  8269  bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,  in canReuseLoadAddress() argument
  8293  if (LD->getMemoryVT() != MemVT)  in canReuseLoadAddress()
  9151  EVT MemVT = InputNode->getMemoryVT();  in isValidSplatLoad() local
  9157  (MemVT == Ty.getVectorElementType()))  in isValidSplatLoad()
  9163  if (MemVT == MVT::i32) {  in isValidSplatLoad()
  10878 EVT MemVT = AtomicNode->getMemoryVT();  in LowerATOMIC_CMP_SWAP() local
  10879 if (MemVT.getSizeInBits() >= 32)  in LowerATOMIC_CMP_SWAP()
  [all …]
|