
Searched refs:VT (Results 1 – 25 of 441) sorted by relevance

/netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/AArch64/
AArch64ISelDAGToDAG.cpp
184 template<MVT::SimpleValueType VT>
186 return SelectSVEAddSubImm(N, VT, Imm, Shift); in SelectSVEAddSubImm()
189 template <MVT::SimpleValueType VT, bool Invert = false>
191 return SelectSVELogicalImm(N, VT, Imm, Invert); in SelectSVELogicalImm()
194 template <MVT::SimpleValueType VT>
196 return SelectSVEArithImm(N, VT, Imm); in SelectSVEArithImm()
327 bool SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift);
329 bool SelectSVELogicalImm(SDValue N, MVT VT, SDValue &Imm, bool Invert);
335 bool SelectSVEArithImm(SDValue N, MVT VT, SDValue &Imm);
1250 EVT VT = N->getValueType(0); in SelectTable() local
[all …]
AArch64ISelLowering.cpp
129 static inline EVT getPackedSVEVectorVT(EVT VT) { in getPackedSVEVectorVT() argument
130 switch (VT.getSimpleVT().SimpleTy) { in getPackedSVEVectorVT()
169 static inline EVT getPromotedVTForPredicate(EVT VT) { in getPromotedVTForPredicate() argument
170 assert(VT.isScalableVector() && (VT.getVectorElementType() == MVT::i1) && in getPromotedVTForPredicate()
172 switch (VT.getVectorMinNumElements()) { in getPromotedVTForPredicate()
191 static inline bool isPackedVectorType(EVT VT, SelectionDAG &DAG) { in isPackedVectorType() argument
192 assert(VT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT) && in isPackedVectorType()
194 return VT.isFixedLengthVector() || in isPackedVectorType()
195 VT.getSizeInBits().getKnownMinSize() == AArch64::SVEBitsPerBlock; in isPackedVectorType()
308 for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) in AArch64TargetLowering() local
[all …]
AArch64FastISel.cpp
182 bool isTypeLegal(Type *Ty, MVT &VT);
183 bool isTypeSupported(Type *Ty, MVT &VT, bool IsVectorAllowed = false);
187 bool simplifyAddress(Address &Addr, MVT VT);
225 unsigned emitLoad(MVT VT, MVT ResultVT, Address Addr, bool WantZExt = true,
227 bool emitStore(MVT VT, unsigned SrcReg, Address Addr,
229 bool emitStoreRelease(MVT VT, unsigned SrcReg, unsigned AddrReg,
236 unsigned emitAdd_ri_(MVT VT, unsigned Op0, int64_t Imm);
265 unsigned materializeInt(const ConstantInt *CI, MVT VT);
266 unsigned materializeFP(const ConstantFP *CFP, MVT VT);
318 static unsigned getImplicitScaleFactor(MVT VT) { in getImplicitScaleFactor() argument
[all …]
/netbsd-src/external/apache2/llvm/dist/llvm/lib/CodeGen/
TargetLoweringBase.cpp
473 MVT VT) { in getOUTLINE_ATOMIC() argument
475 switch (VT.SimpleTy) { in getOUTLINE_ATOMIC()
549 RTLIB::Libcall RTLIB::getSYNC(unsigned Opc, MVT VT) { in getSYNC() argument
552 switch (VT.SimpleTy) { \ in getSYNC()
717 for (MVT VT : MVT::fp_valuetypes()) { in initActions() local
718 MVT IntVT = MVT::getIntegerVT(VT.getFixedSizeInBits()); in initActions()
720 setOperationAction(ISD::ATOMIC_SWAP, VT, Promote); in initActions()
721 AddPromotedToType(ISD::ATOMIC_SWAP, VT, IntVT); in initActions()
726 for (MVT VT : MVT::all_valuetypes()) { in initActions() local
730 setIndexedLoadAction(IM, VT, Expand); in initActions()
[all …]
CallingConvLower.cpp
95 MVT ArgVT = Ins[i].VT; in AnalyzeFormalArguments()
108 MVT VT = Outs[i].VT; in CheckReturn() local
110 if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this)) in CheckReturn()
122 MVT VT = Outs[i].VT; in AnalyzeReturn() local
124 if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this)) in AnalyzeReturn()
135 MVT ArgVT = Outs[i].VT; in AnalyzeCallOperands()
170 MVT VT = Ins[i].VT; in AnalyzeCallResult() local
172 if (Fn(i, VT, VT, CCValAssign::Full, Flags, *this)) { in AnalyzeCallResult()
175 << EVT(VT).getEVTString() << '\n'; in AnalyzeCallResult()
183 void CCState::AnalyzeCallResult(MVT VT, CCAssignFn Fn) { in AnalyzeCallResult() argument
[all …]
/netbsd-src/external/apache2/llvm/dist/llvm/include/llvm/CodeGen/
ValueTypes.h
45 bool operator==(EVT VT) const {
46 return !(*this != VT);
48 bool operator!=(EVT VT) const {
49 if (V.SimpleTy != VT.V.SimpleTy)
52 return LLVMTy != VT.LLVMTy;
74 static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements,
76 MVT M = MVT::getVectorVT(VT.V, NumElements, IsScalable);
79 return getExtendedVectorVT(Context, VT, NumElements, IsScalable);
84 static EVT getVectorVT(LLVMContext &Context, EVT VT, ElementCount EC) { in getVectorVT()
85 MVT M = MVT::getVectorVT(VT.V, EC); in getVectorVT()
[all …]
TargetLowering.h
432 getPreferredVectorAction(MVT VT) const { in getPreferredVectorAction() argument
434 if (VT.getVectorElementCount().isScalar()) in getPreferredVectorAction()
437 if (!VT.isPow2VectorType()) in getPreferredVectorAction()
468 virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const { return false; } in isIntDivCheap() argument
471 virtual bool hasStandaloneRem(EVT VT) const { in hasStandaloneRem() argument
492 int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const;
498 int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const;
504 int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const;
510 int getDivRefinementSteps(EVT VT, MachineFunction &MF) const;
610 virtual unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const { in getCustomCtpopCost() argument
[all …]
SelectionDAG.h
103 SDVTListNode(const FoldingSetNodeIDRef ID, const EVT *VT, unsigned int Num) :
104 FastID(ID), VTs(VT), NumVTs(Num) {
602 SDVTList getVTList(EVT VT);
617 SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
619 SDValue getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
622 SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget = false,
624 return getConstant(APInt::getAllOnesValue(VT.getScalarSizeInBits()), DL,
625 VT, IsTarget, IsOpaque);
628 SDValue getConstant(const ConstantInt &Val, const SDLoc &DL, EVT VT,
632 SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL,
[all …]
/netbsd-src/external/apache2/llvm/dist/llvm/lib/CodeGen/SelectionDAG/
TargetLowering.cpp
207 EVT VT = getOptimalMemOpType(Op, FuncAttributes); in findOptimalMemOpLowering() local
209 if (VT == MVT::Other) { in findOptimalMemOpLowering()
213 VT = MVT::i64; in findOptimalMemOpLowering()
215 while (Op.getDstAlign() < (VT.getSizeInBits() / 8) && in findOptimalMemOpLowering()
216 !allowsMisalignedMemoryAccesses(VT, DstAS, Op.getDstAlign())) in findOptimalMemOpLowering()
217 VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1); in findOptimalMemOpLowering()
218 assert(VT.isInteger()); in findOptimalMemOpLowering()
228 if (VT.bitsGT(LVT)) in findOptimalMemOpLowering()
229 VT = LVT; in findOptimalMemOpLowering()
235 unsigned VTSize = VT.getSizeInBits() / 8; in findOptimalMemOpLowering()
[all …]
DAGCombiner.cpp
249 for (MVT VT : MVT::all_valuetypes()) in DAGCombiner() local
250 if (EVT(VT).isSimple() && VT != MVT::Other && in DAGCombiner()
251 TLI.isTypeLegal(EVT(VT)) && in DAGCombiner()
252 VT.getSizeInBits().getKnownMinSize() >= MaximumLegalStoreInBits) in DAGCombiner()
253 MaximumLegalStoreInBits = VT.getSizeInBits().getKnownMinSize(); in DAGCombiner()
554 SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
564 SDValue CombineConsecutiveLoads(SDNode *N, EVT VT);
764 bool hasOperation(unsigned Opcode, EVT VT) { in hasOperation() argument
765 return TLI.isOperationLegalOrCustom(Opcode, VT, LegalOperations); in hasOperation()
783 bool isTypeLegal(const EVT &VT) { in isTypeLegal() argument
[all …]
SelectionDAG.cpp
124 bool ConstantFPSDNode::isValueValidForType(EVT VT, in isValueValidForType() argument
126 assert(VT.isFloatingPoint() && "Can only convert between FP types"); in isValueValidForType()
131 (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT), in isValueValidForType()
931 EVT VT = N->getValueType(0); in VerifySDNode() local
933 assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) && in VerifySDNode()
938 assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() && in VerifySDNode()
940 assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() && in VerifySDNode()
1007 EVT VT = cast<VTSDNode>(N)->getVT(); in RemoveNodeFromCSEMaps() local
1008 if (VT.isExtended()) { in RemoveNodeFromCSEMaps()
1009 Erased = ExtendedValueTypeNodes.erase(VT); in RemoveNodeFromCSEMaps()
[all …]
LegalizeVectorOps.cpp
603 MVT VT = Node->getSimpleValueType(0); in Promote() local
604 MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT); in Promote()
625 if ((VT.isFloatingPoint() && NVT.isFloatingPoint()) || in Promote()
626 (VT.isVector() && VT.getVectorElementType().isFloatingPoint() && in Promote()
628 Res = DAG.getNode(ISD::FP_ROUND, dl, VT, Res, DAG.getIntPtrConstant(0, dl)); in Promote()
630 Res = DAG.getNode(ISD::BITCAST, dl, VT, Res); in Promote()
640 MVT VT = Node->getOperand(IsStrict ? 1 : 0).getSimpleValueType(); in PromoteINT_TO_FP() local
641 MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT); in PromoteINT_TO_FP()
642 assert(NVT.getVectorNumElements() == VT.getVectorNumElements() && in PromoteINT_TO_FP()
678 MVT VT = Node->getSimpleValueType(0); in PromoteFP_TO_INT() local
[all …]
/netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/X86/
X86ISelLowering.cpp
188 for (MVT VT : MVT::integer_valuetypes()) in X86TargetLowering() local
189 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); in X86TargetLowering()
202 for (auto VT : {MVT::f32, MVT::f64, MVT::f80}) { in X86TargetLowering()
203 setCondCodeAction(ISD::SETOEQ, VT, Expand); in X86TargetLowering()
204 setCondCodeAction(ISD::SETUNE, VT, Expand); in X86TargetLowering()
300 for (MVT VT : { MVT::i8, MVT::i16, MVT::i32 }) { in X86TargetLowering()
301 setOperationAction(ISD::FP_TO_UINT_SAT, VT, Custom); in X86TargetLowering()
302 setOperationAction(ISD::FP_TO_SINT_SAT, VT, Custom); in X86TargetLowering()
336 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) { in X86TargetLowering()
337 setOperationAction(ISD::MULHS, VT, Expand); in X86TargetLowering()
[all …]
X86InterleavedAccess.cpp
233 static MVT scaleVectorType(MVT VT) { in scaleVectorType() argument
234 unsigned ScalarSize = VT.getVectorElementType().getScalarSizeInBits() * 2; in scaleVectorType()
236 VT.getVectorNumElements() / 2); in scaleVectorType()
261 static void genShuffleBland(MVT VT, ArrayRef<int> Mask, in genShuffleBland() argument
264 assert(VT.getSizeInBits() >= 256 && in genShuffleBland()
266 unsigned NumOfElm = VT.getVectorNumElements(); in genShuffleBland()
291 static void reorderSubVector(MVT VT, SmallVectorImpl<Value *> &TransposedMatrix, in reorderSubVector() argument
306 genShuffleBland(VT, VPShuf, OptimizeShuf, (i / Stride) * 16, in reorderSubVector()
331 MVT VT = MVT::v8i16; in interleave8bitStride4VF8() local
342 createUnpackShuffleMask(VT, MaskLowTemp1, true, false); in interleave8bitStride4VF8()
[all …]
X86ISelLowering.h
906 MVT getScalarShiftAmountTy(const DataLayout &, EVT VT) const override { in getScalarShiftAmountTy() argument
938 bool isSafeMemOpType(MVT VT) const override;
942 bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment,
962 bool isTypeDesirableForOp(unsigned Opc, EVT VT) const override;
1000 bool hasBitPreservingFPLogic(EVT VT) const override { in hasBitPreservingFPLogic() argument
1001 return VT == MVT::f32 || VT == MVT::f64 || VT.isVector(); in hasBitPreservingFPLogic()
1045 auto VTIsOk = [](EVT VT) -> bool { in shouldTransformSignedTruncationCheck()
1046 return VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 || in shouldTransformSignedTruncationCheck()
1047 VT == MVT::i64; in shouldTransformSignedTruncationCheck()
1058 bool shouldSplatInsEltVarIndex(EVT VT) const override;
[all …]
/netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/RISCV/
RISCVISelLowering.cpp
111 auto addRegClassForRVV = [this](MVT VT) { in RISCVTargetLowering() argument
112 unsigned Size = VT.getSizeInBits().getKnownMinValue(); in RISCVTargetLowering()
124 addRegisterClass(VT, RC); in RISCVTargetLowering()
127 for (MVT VT : BoolVecVTs) in RISCVTargetLowering() local
128 addRegClassForRVV(VT); in RISCVTargetLowering()
129 for (MVT VT : IntVecVTs) in RISCVTargetLowering() local
130 addRegClassForRVV(VT); in RISCVTargetLowering()
133 for (MVT VT : F16VecVTs) in RISCVTargetLowering() local
134 addRegClassForRVV(VT); in RISCVTargetLowering()
137 for (MVT VT : F32VecVTs) in RISCVTargetLowering() local
[all …]
RISCVISelDAGToDAG.cpp
167 MVT VT = Node->getSimpleValueType(0); in selectVLSEG() local
168 unsigned ScalarSize = VT.getScalarSizeInBits(); in selectVLSEG()
169 RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT); in selectVLSEG()
195 unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I); in selectVLSEG()
197 CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg)); in selectVLSEG()
207 MVT VT = Node->getSimpleValueType(0); in selectVLSEGFF() local
209 unsigned ScalarSize = VT.getScalarSizeInBits(); in selectVLSEGFF()
210 RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT); in selectVLSEGFF()
238 unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I); in selectVLSEGFF()
240 CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg)); in selectVLSEGFF()
[all …]
/netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/AMDGPU/
AMDGPUISelLowering.cpp
38 EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) { in getEquivalentMemType() argument
39 unsigned StoreSize = VT.getStoreSizeInBits(); in getEquivalentMemType()
48 EVT VT = Op.getValueType(); in numBitsUnsigned() local
50 return VT.getSizeInBits() - Known.countMinLeadingZeros(); in numBitsUnsigned()
54 EVT VT = Op.getValueType(); in numBitsSigned() local
58 return VT.getSizeInBits() - DAG.ComputeNumSignBits(Op); in numBitsSigned()
122 for (MVT VT : MVT::integer_valuetypes()) { in AMDGPUTargetLowering() local
123 setLoadExtAction(ISD::EXTLOAD, MVT::i64, VT, Expand); in AMDGPUTargetLowering()
124 setLoadExtAction(ISD::SEXTLOAD, MVT::i64, VT, Expand); in AMDGPUTargetLowering()
125 setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, VT, Expand); in AMDGPUTargetLowering()
[all …]
/netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/AVR/
AVRISelLowering.cpp
59 for (MVT VT : MVT::integer_valuetypes()) { in AVRTargetLowering() local
61 setLoadExtAction(N, VT, MVT::i1, Promote); in AVRTargetLowering()
62 setLoadExtAction(N, VT, MVT::i8, Expand); in AVRTargetLowering()
68 for (MVT VT : MVT::integer_valuetypes()) { in AVRTargetLowering() local
69 setOperationAction(ISD::ADDC, VT, Legal); in AVRTargetLowering()
70 setOperationAction(ISD::SUBC, VT, Legal); in AVRTargetLowering()
71 setOperationAction(ISD::ADDE, VT, Legal); in AVRTargetLowering()
72 setOperationAction(ISD::SUBE, VT, Legal); in AVRTargetLowering()
134 for (MVT VT : MVT::integer_valuetypes()) { in AVRTargetLowering() local
135 setOperationAction(ISD::ATOMIC_SWAP, VT, Expand); in AVRTargetLowering()
[all …]
/netbsd-src/sys/arch/amiga/dev/
rtmons
7 VBS 401 VSS 402 VSE 409 VBE 418 VT 418
13 VBS 401 VSS 402 VSE 409 VBE 418 VT 418
19 VBS 481 VSS 482 VSE 490 VBE 502 VT 502
25 VBS 601 VSS 602 VSE 612 VBE 628 VT 628
31 VBS 601 VSS 602 VSE 612 VBE 628 VT 628
37 VBS 769 VSS 770 VSE 783 VBE 805 VT 805
43 VBS 601 VSS 602 VSE 612 VBE 628 VT 628
49 VBS 513 VSS 514 VSE 522 VBE 535 VT 535
55 VBS 641 VSS 642 VSE 653 VBE 670 VT 670
61 VBS 601 VSS 602 VSE 612 VBE 628 VT 628
[all …]
/netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/ARM/
ARMISelLowering.cpp
157 void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT, in addTypeForNEON() argument
159 if (VT != PromotedLdStVT) { in addTypeForNEON()
160 setOperationAction(ISD::LOAD, VT, Promote); in addTypeForNEON()
161 AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT); in addTypeForNEON()
163 setOperationAction(ISD::STORE, VT, Promote); in addTypeForNEON()
164 AddPromotedToType (ISD::STORE, VT, PromotedLdStVT); in addTypeForNEON()
167 MVT ElemTy = VT.getVectorElementType(); in addTypeForNEON()
169 setOperationAction(ISD::SETCC, VT, Custom); in addTypeForNEON()
170 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); in addTypeForNEON()
171 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); in addTypeForNEON()
[all …]
ARMFastISel.cpp
187 bool isTypeLegal(Type *Ty, MVT &VT);
188 bool isLoadTypeLegal(Type *Ty, MVT &VT);
191 bool ARMEmitLoad(MVT VT, Register &ResultReg, Address &Addr,
194 bool ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
197 void ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3);
202 unsigned ARMMaterializeFP(const ConstantFP *CFP, MVT VT);
203 unsigned ARMMaterializeInt(const Constant *C, MVT VT);
204 unsigned ARMMaterializeGV(const GlobalValue *GV, MVT VT);
205 unsigned ARMMoveToFPReg(MVT VT, unsigned SrcReg);
206 unsigned ARMMoveToIntReg(MVT VT, unsigned SrcReg);
[all …]
/netbsd-src/external/apache2/llvm/dist/llvm/lib/Support/
LowLevelType.cpp
18 LLT::LLT(MVT VT) { in LLT() argument
19 if (VT.isVector()) { in LLT()
20 init(/*IsPointer=*/false, VT.getVectorNumElements() > 1, in LLT()
21 VT.getVectorNumElements(), VT.getVectorElementType().getSizeInBits(), in LLT()
23 } else if (VT.isValid()) { in LLT()
26 assert(VT.getSizeInBits().isNonZero() && "invalid zero-sized type"); in LLT()
28 VT.getSizeInBits(), /*AddressSpace=*/0); in LLT()
/netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/BPF/
BPFISelLowering.cpp
84 for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) { in BPFTargetLowering()
85 if (VT == MVT::i32) { in BPFTargetLowering()
89 setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom); in BPFTargetLowering()
92 setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom); in BPFTargetLowering()
93 setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom); in BPFTargetLowering()
94 setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom); in BPFTargetLowering()
95 setOperationAction(ISD::ATOMIC_SWAP, VT, Custom); in BPFTargetLowering()
96 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom); in BPFTargetLowering()
99 for (auto VT : { MVT::i32, MVT::i64 }) { in BPFTargetLowering()
100 if (VT == MVT::i32 && !STI.getHasAlu32()) in BPFTargetLowering()
[all …]
/netbsd-src/external/apache2/llvm/dist/llvm/include/llvm/Support/
MachineValueType.h
1044 bool knownBitsGT(MVT VT) const { in knownBitsGT() argument
1045 return TypeSize::isKnownGT(getSizeInBits(), VT.getSizeInBits()); in knownBitsGT()
1050 bool knownBitsGE(MVT VT) const { in knownBitsGE() argument
1051 return TypeSize::isKnownGE(getSizeInBits(), VT.getSizeInBits()); in knownBitsGE()
1055 bool knownBitsLT(MVT VT) const { in knownBitsLT() argument
1056 return TypeSize::isKnownLT(getSizeInBits(), VT.getSizeInBits()); in knownBitsLT()
1061 bool knownBitsLE(MVT VT) const { in knownBitsLE() argument
1062 return TypeSize::isKnownLE(getSizeInBits(), VT.getSizeInBits()); in knownBitsLE()
1066 bool bitsGT(MVT VT) const { in bitsGT() argument
1067 assert(isScalableVector() == VT.isScalableVector() && in bitsGT()
[all …]