Lines Matching defs:AArch64TargetLowering (llvm/lib/Target/AArch64/AArch64ISelLowering.cpp)

9 // This file implements the AArch64TargetLowering class.
152 // See [AArch64TargetLowering::fallBackToDAGISel] for implementation details.
376 AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
1829 void AArch64TargetLowering::addTypeForNEON(MVT VT) {
1954 bool AArch64TargetLowering::shouldExpandGetActiveLaneMask(EVT ResVT,
1974 bool AArch64TargetLowering::shouldExpandCttzElements(EVT VT) const {
1985 void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) {
2127 void AArch64TargetLowering::addDRType(MVT VT) {
2133 void AArch64TargetLowering::addQRType(MVT VT) {
2139 EVT AArch64TargetLowering::getSetCCResultType(const DataLayout &,
2262 bool AArch64TargetLowering::targetShrinkDemandedConstant(
2307 void AArch64TargetLowering::computeKnownBitsForTargetNode(
2434 unsigned AArch64TargetLowering::ComputeNumSignBitsForTargetNode(
2466 MVT AArch64TargetLowering::getScalarShiftAmountTy(const DataLayout &DL,
2471 bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(
2496 bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(
2522 AArch64TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
2527 const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
2863 AArch64TargetLowering::EmitF128CSEL(MachineInstr &MI,
2922 MachineBasicBlock *AArch64TargetLowering::EmitLoweredCatchRet(
2931 AArch64TargetLowering::EmitDynamicProbedAlloc(MachineInstr &MI,
2947 AArch64TargetLowering::EmitTileLoad(unsigned Opc, unsigned BaseReg,
2965 AArch64TargetLowering::EmitFill(MachineInstr &MI, MachineBasicBlock *BB) const {
2980 MachineBasicBlock *AArch64TargetLowering::EmitZTInstr(MachineInstr &MI,
2997 AArch64TargetLowering::EmitZAInstr(unsigned Opc, unsigned BaseReg,
3031 AArch64TargetLowering::EmitZero(MachineInstr &MI, MachineBasicBlock *BB) const {
3048 AArch64TargetLowering::EmitInitTPIDR2Object(MachineInstr &MI,
3078 AArch64TargetLowering::EmitAllocateZABuffer(MachineInstr &MI,
3121 MachineBasicBlock *AArch64TargetLowering::EmitInstrWithCustomInserter(
4064 SDValue AArch64TargetLowering::LowerXOR(SDValue Op, SelectionDAG &DAG) const {
4270 SDValue AArch64TargetLowering::LowerFP_EXTEND(SDValue Op,
4283 SDValue AArch64TargetLowering::LowerFP_ROUND(SDValue Op,
4366 SDValue AArch64TargetLowering::LowerVectorFP_TO_INT(SDValue Op,
4454 SDValue AArch64TargetLowering::LowerFP_TO_INT(SDValue Op,
4487 AArch64TargetLowering::LowerVectorFP_TO_INT_SAT(SDValue Op,
4565 SDValue AArch64TargetLowering::LowerFP_TO_INT_SAT(SDValue Op,
4622 SDValue AArch64TargetLowering::LowerVectorXRINT(SDValue Op,
4642 SDValue AArch64TargetLowering::LowerVectorINT_TO_FP(SDValue Op,
4731 SDValue AArch64TargetLowering::LowerINT_TO_FP(SDValue Op,
4870 SDValue AArch64TargetLowering::LowerFSINCOS(SDValue Op,
4906 SDValue AArch64TargetLowering::LowerBITCAST(SDValue Op,
5089 SDValue AArch64TargetLowering::LowerGET_ROUNDING(SDValue Op,
5112 SDValue AArch64TargetLowering::LowerSET_ROUNDING(SDValue Op,
5156 SDValue AArch64TargetLowering::LowerGET_FPMODE(SDValue Op,
5175 SDValue AArch64TargetLowering::LowerSET_FPMODE(SDValue Op,
5190 SDValue AArch64TargetLowering::LowerRESET_FPMODE(SDValue Op,
5278 SDValue AArch64TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
5440 SDValue AArch64TargetLowering::getRuntimePStateSM(SelectionDAG &DAG,
5542 SDValue AArch64TargetLowering::LowerINTRINSIC_VOID(SDValue Op,
5584 SDValue AArch64TargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
5617 SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
6085 bool AArch64TargetLowering::shouldExtendGSIndex(EVT VT, EVT &EltTy) const {
6094 bool AArch64TargetLowering::shouldRemoveExtendFromGSIndex(SDValue Extend,
6110 bool AArch64TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
6185 SDValue AArch64TargetLowering::LowerMGATHER(SDValue Op,
6284 SDValue AArch64TargetLowering::LowerMSCATTER(SDValue Op,
6365 SDValue AArch64TargetLowering::LowerMLOAD(SDValue Op, SelectionDAG &DAG) const {
6426 SDValue AArch64TargetLowering::LowerSTORE(SDValue Op,
6504 SDValue AArch64TargetLowering::LowerStore128(SDValue Op,
6535 SDValue AArch64TargetLowering::LowerLOAD(SDValue Op,
6593 SDValue AArch64TargetLowering::LowerABS(SDValue Op, SelectionDAG &DAG) const {
6695 SDValue AArch64TargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
6705 SDValue AArch64TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
6745 SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
7091 bool AArch64TargetLowering::mergeStoresAfterLegalization(EVT VT) const {
7095 bool AArch64TargetLowering::useSVEForFixedLengthVectorVT(
7159 bool AArch64TargetLowering::isReassocProfitable(SelectionDAG &DAG, SDValue N0,
7176 CCAssignFn *AArch64TargetLowering::CCAssignFnForCall(CallingConv::ID CC,
7238 AArch64TargetLowering::CCAssignFnForReturn(CallingConv::ID CC) const {
7256 SDValue AArch64TargetLowering::LowerFormalArguments(
7708 void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
7806 SDValue AArch64TargetLowering::LowerCallResult(
7904 static void analyzeCallOperands(const AArch64TargetLowering &TLI,
7955 bool AArch64TargetLowering::isEligibleForTailCallOptimization(
8111 SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain,
8142 bool AArch64TargetLowering::DoesCalleeRestoreStack(CallingConv::ID CallCC,
8160 void AArch64TargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
8202 SDValue AArch64TargetLowering::changeStreamingMode(SelectionDAG &DAG, SDLoc DL,
8246 AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
9002 bool AArch64TargetLowering::CanLowerReturn(
9012 AArch64TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
9162 SDValue AArch64TargetLowering::getTargetNode(GlobalAddressSDNode *N, EVT Ty,
9169 SDValue AArch64TargetLowering::getTargetNode(JumpTableSDNode *N, EVT Ty,
9175 SDValue AArch64TargetLowering::getTargetNode(ConstantPoolSDNode *N, EVT Ty,
9182 SDValue AArch64TargetLowering::getTargetNode(BlockAddressSDNode* N, EVT Ty,
9188 SDValue AArch64TargetLowering::getTargetNode(ExternalSymbolSDNode *N, EVT Ty,
9196 SDValue AArch64TargetLowering::getGOT(NodeTy *N, SelectionDAG &DAG,
9198 LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getGOT\n");
9209 SDValue AArch64TargetLowering::getAddrLarge(NodeTy *N, SelectionDAG &DAG,
9211 LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddrLarge\n");
9225 SDValue AArch64TargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
9227 LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddr\n");
9239 SDValue AArch64TargetLowering::getAddrTiny(NodeTy *N, SelectionDAG &DAG,
9241 LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddrTiny\n");
9248 SDValue AArch64TargetLowering::LowerGlobalAddress(SDValue Op,
9310 AArch64TargetLowering::LowerDarwinGlobalTLSAddress(SDValue Op,
9376 SDValue AArch64TargetLowering::LowerELFTLSLocalExec(const GlobalValue *GV,
9482 SDValue AArch64TargetLowering::LowerELFTLSDescCallSeq(SDValue SymAddr,
9498 AArch64TargetLowering::LowerELFGlobalTLSAddress(SDValue Op,
9585 AArch64TargetLowering::LowerWindowsGlobalTLSAddress(SDValue Op,
9643 SDValue AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
9688 SDValue AArch64TargetLowering::LowerPtrAuthGlobalAddressStatically(
9712 AArch64TargetLowering::LowerPtrAuthGlobalAddress(SDValue Op,
9801 SDValue AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
9938 SDValue AArch64TargetLowering::LowerFCOPYSIGN(SDValue Op,
10031 SDValue AArch64TargetLowering::LowerCTPOP_PARITY(SDValue Op,
10137 SDValue AArch64TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) const {
10148 SDValue AArch64TargetLowering::LowerMinMax(SDValue Op,
10195 SDValue AArch64TargetLowering::LowerBitreverse(SDValue Op,
10309 SDValue AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
10397 SDValue AArch64TargetLowering::LowerSETCCCARRY(SDValue Op,
10427 SDValue AArch64TargetLowering::LowerSELECT_CC(ISD::CondCode CC, SDValue LHS,
10653 SDValue AArch64TargetLowering::LowerVECTOR_SPLICE(SDValue Op,
10692 SDValue AArch64TargetLowering::LowerSELECT_CC(SDValue Op,
10703 SDValue AArch64TargetLowering::LowerSELECT(SDValue Op,
10783 SDValue AArch64TargetLowering::LowerJumpTable(SDValue Op,
10798 SDValue AArch64TargetLowering::LowerBR_JT(SDValue Op,
10841 SDValue AArch64TargetLowering::LowerBRIND(SDValue Op, SelectionDAG &DAG) const {
10868 SDValue AArch64TargetLowering::LowerConstantPool(SDValue Op,
10885 SDValue AArch64TargetLowering::LowerBlockAddress(SDValue Op,
10920 SDValue AArch64TargetLowering::LowerDarwin_VASTART(SDValue Op,
10934 SDValue AArch64TargetLowering::LowerWin64_VASTART(SDValue Op,
10965 SDValue AArch64TargetLowering::LowerAAPCS_VASTART(SDValue Op,
11044 SDValue AArch64TargetLowering::LowerVASTART(SDValue Op,
11057 SDValue AArch64TargetLowering::LowerVACOPY(SDValue Op,
11077 SDValue AArch64TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
11147 SDValue AArch64TargetLowering::LowerFRAMEADDR(SDValue Op,
11168 SDValue AArch64TargetLowering::LowerSPONENTRY(SDValue Op,
11183 Register AArch64TargetLowering::
11199 SDValue AArch64TargetLowering::LowerADDROFRETURNADDR(SDValue Op,
11213 SDValue AArch64TargetLowering::LowerRETURNADDR(SDValue Op,
11254 SDValue AArch64TargetLowering::LowerShiftParts(SDValue Op,
11261 bool AArch64TargetLowering::isOffsetFoldingLegal(
11268 bool AArch64TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
11346 AArch64TargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG,
11356 AArch64TargetLowering::getSqrtResultForDenormInput(SDValue Op,
11361 SDValue AArch64TargetLowering::getSqrtEstimate(SDValue Operand,
11394 SDValue AArch64TargetLowering::getRecipEstimate(SDValue Operand,
11448 const char *AArch64TargetLowering::LowerXConstraint(EVT ConstraintVT) const {
11562 SDValue AArch64TargetLowering::LowerAsmOutputForConstraint(
11595 AArch64TargetLowering::ConstraintType
11596 AArch64TargetLowering::getConstraintType(StringRef Constraint) const {
11635 AArch64TargetLowering::getSingleConstraintMatchWeight(
11668 AArch64TargetLowering::getRegForInlineAsmConstraint(
11774 EVT AArch64TargetLowering::getAsmOperandValueType(const DataLayout &DL,
11785 void AArch64TargetLowering::LowerAsmOperandForConstraint(
12044 SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
12047 LLVM_DEBUG(dbgs() << "AArch64TargetLowering::ReconstructShuffle\n");
13120 AArch64TargetLowering::LowerZERO_EXTEND_VECTOR_INREG(SDValue Op,
13137 SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
13313 SDValue AArch64TargetLowering::LowerSPLAT_VECTOR(SDValue Op,
13344 SDValue AArch64TargetLowering::LowerDUPQLane(SDValue Op,
13788 SDValue AArch64TargetLowering::LowerVectorOR(SDValue Op,
13941 SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
14386 SDValue AArch64TargetLowering::LowerCONCAT_VECTORS(SDValue Op,
14423 SDValue AArch64TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
14458 AArch64TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
14510 SDValue AArch64TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
14563 SDValue AArch64TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op,
14683 SDValue AArch64TargetLowering::LowerDIV(SDValue Op, SelectionDAG &DAG) const {
14732 bool AArch64TargetLowering::shouldExpandBuildVectorWithShuffles(
14739 bool AArch64TargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
14772 bool AArch64TargetLowering::isVectorClearMaskLegal(ArrayRef<int> M,
14820 SDValue AArch64TargetLowering::LowerTRUNCATE(SDValue Op,
14889 SDValue AArch64TargetLowering::LowerVectorSRA_SRL_SHL(SDValue Op,
15070 SDValue AArch64TargetLowering::LowerVSETCC(SDValue Op,
15268 SDValue AArch64TargetLowering::LowerVECREDUCE(SDValue Op,
15343 SDValue AArch64TargetLowering::LowerATOMIC_LOAD_AND(SDValue Op,
15363 AArch64TargetLowering::LowerWindowsDYNAMIC_STACKALLOC(SDValue Op,
15429 AArch64TargetLowering::LowerInlineDYNAMIC_STACKALLOC(SDValue Op,
15456 AArch64TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
15468 SDValue AArch64TargetLowering::LowerAVG(SDValue Op, SelectionDAG &DAG,
15477 SDValue AArch64TargetLowering::LowerVSCALE(SDValue Op,
15491 setInfoSVEStN(const AArch64TargetLowering &TLI, const DataLayout &DL,
15492 AArch64TargetLowering::IntrinsicInfo &Info, const CallInst &CI) {
15516 bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
15692 bool AArch64TargetLowering::shouldReduceLoadWidth(SDNode *Load,
15727 bool AArch64TargetLowering::shouldRemoveRedundantExtend(SDValue Extend) const {
15743 bool AArch64TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
15750 bool AArch64TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
15761 bool AArch64TargetLowering::isProfitableToHoist(Instruction *I) const {
15787 bool AArch64TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
15794 bool AArch64TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
15802 bool AArch64TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
15817 bool AArch64TargetLowering::isExtFreeImpl(const Instruction *Ext) const {
16018 bool AArch64TargetLowering::shouldSinkOperands(
16467 bool AArch64TargetLowering::optimizeExtendOrTruncateConversion(
16584 bool AArch64TargetLowering::hasPairedLoad(EVT LoadedType,
16597 unsigned AArch64TargetLowering::getNumInterleavedAccesses(
16608 AArch64TargetLowering::getTargetMMOFlags(const Instruction &I) const {
16615 bool AArch64TargetLowering::isLegalInterleavedAccessType(
16733 bool AArch64TargetLowering::lowerInterleavedLoad(
16907 bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI,
17057 bool AArch64TargetLowering::lowerDeinterleaveIntrinsicToLoad(
17132 bool AArch64TargetLowering::lowerInterleaveIntrinsicToStore(
17195 EVT AArch64TargetLowering::getOptimalMemOpType(
17225 LLT AArch64TargetLowering::getOptimalMemOpLLT(
17256 bool AArch64TargetLowering::isLegalAddImmediate(int64_t Immed) const {
17271 bool AArch64TargetLowering::isLegalAddScalableImmediate(int64_t Imm) const {
17306 bool AArch64TargetLowering::isMulAddWithConstProfitable(
17334 bool AArch64TargetLowering::isLegalICmpImmediate(int64_t Immed) const {
17340 bool AArch64TargetLowering::isLegalAddressingMode(const DataLayout &DL,
17419 AArch64TargetLowering::getPreferredLargeGEPBaseOffset(int64_t MinOffset,
17430 bool AArch64TargetLowering::shouldConsiderGEPOffsetSplit() const {
17435 bool AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(
17455 bool AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(const Function &F,
17466 bool AArch64TargetLowering::generateFMAsInMachineCombiner(
17473 AArch64TargetLowering::getScratchRegisters(CallingConv::ID) const {
17483 ArrayRef<MCPhysReg> AArch64TargetLowering::getRoundingControlRegisters() const {
17489 AArch64TargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
17519 bool AArch64TargetLowering::isDesirableToCommuteXorWithShift(
17543 bool AArch64TargetLowering::shouldFoldConstantShiftPairToMask(
17565 bool AArch64TargetLowering::shouldFoldSelectWithIdentityConstant(
17570 bool AArch64TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
17592 bool AArch64TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
17937 AArch64TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
17967 AArch64TargetLowering::BuildSREMPow2(SDNode *N, const APInt &Divisor,
18651 const AArch64TargetLowering &TLI) {
18830 const AArch64TargetLowering &TLI) {
22376 const AArch64TargetLowering &TLI,
25255 SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
25607 bool AArch64TargetLowering::isUsedByReturnOnly(SDNode *N,
25644 bool AArch64TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
25648 bool AArch64TargetLowering::isIndexingLegal(MachineInstr &MI, Register Base,
25661 bool AArch64TargetLowering::getIndexedAddressParts(SDNode *N, SDNode *Op,
25711 bool AArch64TargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
25732 bool AArch64TargetLowering::getPostIndexedAddressParts(
25799 void AArch64TargetLowering::ReplaceBITCASTResults(
25913 void AArch64TargetLowering::ReplaceExtractSubVectorResults(
26182 void AArch64TargetLowering::ReplaceNodeResults(
26408 bool AArch64TargetLowering::useLoadStackGuardNode() const {
26414 unsigned AArch64TargetLowering::combineRepeatedFPDivisors() const {
26421 AArch64TargetLowering::getPreferredVectorAction(MVT VT) const {
26433 bool AArch64TargetLowering::isOpSuitableForLDPSTP(const Instruction *I) const {
26448 bool AArch64TargetLowering::isOpSuitableForLSE128(const Instruction *I) const {
26470 bool AArch64TargetLowering::isOpSuitableForRCPC3(const Instruction *I) const {
26487 bool AArch64TargetLowering::shouldInsertFencesForAtomic(
26498 bool AArch64TargetLowering::shouldInsertTrailingFenceForAtomicStore(
26526 AArch64TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
26543 AArch64TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
26576 AArch64TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
26625 AArch64TargetLowering::shouldExpandAtomicCmpXchgInIR(
26647 Value *AArch64TargetLowering::emitLoadLinked(IRBuilderBase &Builder,
26686 void AArch64TargetLowering::emitAtomicCmpXchgNoStoreLLBalance(
26692 Value *AArch64TargetLowering::emitStoreConditional(IRBuilderBase &Builder,
26730 bool AArch64TargetLowering::functionArgumentNeedsConsecutiveRegisters(
26744 bool AArch64TargetLowering::shouldNormalizeToSelectSequence(LLVMContext &,
26759 Value *AArch64TargetLowering::getIRStackGuard(IRBuilderBase &IRB) const {
26774 void AArch64TargetLowering::insertSSPDeclarations(Module &M) const {
26795 Value *AArch64TargetLowering::getSDagStackGuard(const Module &M) const {
26802 Function *AArch64TargetLowering::getSSPStackGuardCheck(const Module &M) const {
26810 AArch64TargetLowering::getSafeStackPointerLocation(IRBuilderBase &IRB) const {
26825 bool AArch64TargetLowering::isMaskAndCmp0FoldingBeneficial(
26838 bool AArch64TargetLowering::
26852 AArch64TargetLowering::preferredShiftLegalizationStrategy(
26861 void AArch64TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
26867 void AArch64TargetLowering::insertCopiesSplitCSR(
26908 bool AArch64TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
26920 bool AArch64TargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
26925 bool AArch64TargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
26937 AArch64TargetLowering::EmitKCFICheck(MachineBasicBlock &MBB,
26965 bool AArch64TargetLowering::enableAggressiveFMAFusion(EVT VT) const {
26970 AArch64TargetLowering::getVaListSizeInBits(const DataLayout &DL) const {
26977 void AArch64TargetLowering::finalizeLowering(MachineFunction &MF) const {
27001 bool AArch64TargetLowering::needsFixedCatchObjects() const {
27005 bool AArch64TargetLowering::shouldLocalize(
27084 bool AArch64TargetLowering::fallBackToDAGISel(const Instruction &Inst) const {
27226 SDValue AArch64TargetLowering::LowerFixedLengthVectorLoadToSVE(
27284 SDValue AArch64TargetLowering::LowerFixedLengthVectorMLoadToSVE(
27335 SDValue AArch64TargetLowering::LowerFixedLengthVectorStoreToSVE(
27368 SDValue AArch64TargetLowering::LowerFixedLengthVectorMStoreToSVE(
27385 SDValue AArch64TargetLowering::LowerFixedLengthVectorIntDivideToSVE(
27451 SDValue AArch64TargetLowering::LowerFixedLengthVectorIntExtendToSVE(
27487 SDValue AArch64TargetLowering::LowerFixedLengthVectorTruncateToSVE(
27523 SDValue AArch64TargetLowering::LowerFixedLengthExtractVectorElt(
27536 SDValue AArch64TargetLowering::LowerFixedLengthInsertVectorElt(
27555 SDValue AArch64TargetLowering::LowerToPredicatedOp(SDValue Op,
27612 SDValue AArch64TargetLowering::LowerToScalableOp(SDValue Op,
27641 SDValue AArch64TargetLowering::LowerVECREDUCE_SEQ_FADD(SDValue ScalarOp,
27669 SDValue AArch64TargetLowering::LowerPredReductionToSVE(SDValue ReduceOp,
27712 SDValue AArch64TargetLowering::LowerReductionToSVE(unsigned Opcode,
27746 AArch64TargetLowering::LowerFixedLengthVectorSelectToSVE(SDValue Op,
27770 SDValue AArch64TargetLowering::LowerFixedLengthVectorSetccToSVE(
27795 AArch64TargetLowering::LowerFixedLengthBitcastToSVE(SDValue Op,
27809 SDValue AArch64TargetLowering::LowerFixedLengthConcatVectorsToSVE(
27844 AArch64TargetLowering::LowerFixedLengthFPExtendToSVE(SDValue Op,
27869 AArch64TargetLowering::LowerFixedLengthFPRoundToSVE(SDValue Op,
27893 AArch64TargetLowering::LowerFixedLengthIntToFPToSVE(SDValue Op,
27937 AArch64TargetLowering::LowerVECTOR_DEINTERLEAVE(SDValue Op,
27950 SDValue AArch64TargetLowering::LowerVECTOR_INTERLEAVE(SDValue Op,
27964 SDValue AArch64TargetLowering::LowerVECTOR_HISTOGRAM(SDValue Op,
28023 AArch64TargetLowering::LowerFixedLengthFPToIntToSVE(SDValue Op,
28177 SDValue AArch64TargetLowering::LowerFixedLengthVECTOR_SHUFFLEToSVE(
28335 SDValue AArch64TargetLowering::getSVESafeBitCast(EVT VT, SDValue Op,
28377 bool AArch64TargetLowering::isAllActivePredicate(SelectionDAG &DAG,
28382 EVT AArch64TargetLowering::getPromotedVTForPredicate(EVT VT) const {
28386 bool AArch64TargetLowering::SimplifyDemandedBitsForTargetNode(
28466 bool AArch64TargetLowering::isTargetCanonicalConstantNode(SDValue Op) const {
28474 bool AArch64TargetLowering::isComplexDeinterleavingSupported() const {
28479 bool AArch64TargetLowering::isComplexDeinterleavingOperationSupported(
28511 Value *AArch64TargetLowering::createComplexDeinterleavingIR(
28609 bool AArch64TargetLowering::preferScalarizeSplat(SDNode *N) const {
28619 unsigned AArch64TargetLowering::getMinimumJumpTableEntries() const {
28623 MVT AArch64TargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
28639 unsigned AArch64TargetLowering::getNumRegistersForCallingConv(
28653 unsigned AArch64TargetLowering::getVectorTypeBreakdownForCallingConv(
28719 bool AArch64TargetLowering::hasInlineStackProbe(
28726 void AArch64TargetLowering::verifyTargetSDNode(const SDNode *N) const {
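Taken together, the listing shows the two kinds of entry points this file defines: Lower* routines that custom-lower individual SDNodes, and predicate hooks (isTruncateFree, isZExtFree, shouldExpand*, ...) that the target-independent layer queries for ISA facts. For orientation only, below is a minimal, self-contained C++ sketch of that hook pattern. The class and method shapes here are invented for illustration and are not LLVM's API; the two answers, however, encode real AArch64 behavior that the hooks at lines 15743-15802 above report: narrowing an integer register is free (read the W subregister), and an i32-to-i64 zero-extension is free (a write to a W register zeroes the upper 32 bits of the X register).

#include <iostream>

// Toy stand-in for the TargetLowering base class: conservative defaults,
// with targets overriding whichever hooks their ISA can answer better.
struct TargetLoweringModel {
  virtual ~TargetLoweringModel() = default;
  virtual bool isTruncateFree(unsigned FromBits, unsigned ToBits) const {
    (void)FromBits; (void)ToBits;
    return false; // default: assume no conversion is free
  }
  virtual bool isZExtFree(unsigned FromBits, unsigned ToBits) const {
    (void)FromBits; (void)ToBits;
    return false;
  }
};

struct AArch64Model : TargetLoweringModel {
  // Narrowing an integer value costs nothing on AArch64: the low bits are
  // already in place in the W subregister (cf. isTruncateFree above).
  bool isTruncateFree(unsigned FromBits, unsigned ToBits) const override {
    return FromBits > ToBits;
  }
  // i32 -> i64 zero-extension is free: any W-register write implicitly
  // zeroes the top 32 bits of the X register (cf. isZExtFree above).
  bool isZExtFree(unsigned FromBits, unsigned ToBits) const override {
    return FromBits == 32 && ToBits == 64;
  }
};

int main() {
  AArch64Model TLI;
  std::cout << TLI.isTruncateFree(64, 32) << ' '  // 1: i64 -> i32 is free
            << TLI.isZExtFree(32, 64) << '\n';    // 1: i32 -> i64 zext is free
}

In the real file, the generic SelectionDAG layer calls hundreds of such hooks during legalization and combining; the listing above is effectively an index of which ones AArch64 chooses to answer.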