//===- ARMISelLowering.h - ARM DAG Lowering Interface -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H
#define LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H

#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/MachineValueType.h"
#include <optional>
#include <utility>

namespace llvm {

class ARMSubtarget;
class DataLayout;
class FastISel;
class FunctionLoweringInfo;
class GlobalValue;
class InstrItineraryData;
class Instruction;
class MachineBasicBlock;
class MachineInstr;
class SelectionDAG;
class TargetLibraryInfo;
class TargetMachine;
class TargetRegisterInfo;
class VectorType;

namespace ARMISD {

  // ARM Specific DAG Nodes
  enum NodeType : unsigned {
    // Start the numbering where the builtin ops and target ops leave off.
    FIRST_NUMBER = ISD::BUILTIN_OP_END,

    Wrapper,    // Wrapper - A wrapper node for TargetConstantPool,
                // TargetExternalSymbol, and TargetGlobalAddress.
    WrapperPIC, // WrapperPIC - A wrapper node for TargetGlobalAddress in
                // PIC mode.
    WrapperJT,  // WrapperJT - A wrapper node for TargetJumpTable

    // Add pseudo op to model memcpy for struct byval.
    COPY_STRUCT_BYVAL,

    CALL,      // Function call.
    CALL_PRED, // Function call that's predicable.
    CALL_NOLINK, // Function call with branch not branch-and-link.
    tSECALL,     // CMSE non-secure function call.
    t2CALL_BTI,  // Thumb function call followed by BTI instruction.
    BRCOND,      // Conditional branch.
    BR_JT,       // Jumptable branch.
    BR2_JT,      // Jumptable branch (2 level - jumptable entry is a jump).
    RET_FLAG,    // Return with a flag operand.
    SERET_FLAG,  // CMSE Entry function return with a flag operand.
    INTRET_FLAG, // Interrupt return with an LR-offset and a flag operand.

    PIC_ADD, // Add with a PC operand and a PIC label.

    ASRL, // MVE long arithmetic shift right.
    LSRL, // MVE long shift right.
    LSLL, // MVE long shift left.

    CMP,      // ARM compare instructions.
    CMN,      // ARM CMN instructions.
    CMPZ,     // ARM compare that sets only Z flag.
    CMPFP,    // ARM VFP compare instruction, sets FPSCR.
    CMPFPE,   // ARM VFP signalling compare instruction, sets FPSCR.
    CMPFPw0,  // ARM VFP compare against zero instruction, sets FPSCR.
    CMPFPEw0, // ARM VFP signalling compare against zero instruction, sets
              // FPSCR.
    FMSTAT,   // ARM fmstat instruction.

    CMOV, // ARM conditional move instructions.
    SUBS, // Flag-setting subtraction.

    SSAT, // Signed saturation
    USAT, // Unsigned saturation

    BCC_i64,

    SRL_FLAG, // V,Flag = srl_flag X -> srl X, 1 + save carry out.
    SRA_FLAG, // V,Flag = sra_flag X -> sra X, 1 + save carry out.
    RRX,      // V = RRX X, Flag -> srl X, 1 + shift in carry flag.

    ADDC, // Add with carry
    ADDE, // Add using carry
    SUBC, // Sub with carry
    SUBE, // Sub using carry
    LSLS, // Shift left producing carry

    VMOVRRD, // double to two gprs.
    VMOVDRR, // Two gprs to double.
    VMOVSR,  // move gpr to single, used for f32 literal constructed in a gpr

    EH_SJLJ_SETJMP,         // SjLj exception handling setjmp.
    EH_SJLJ_LONGJMP,        // SjLj exception handling longjmp.
    EH_SJLJ_SETUP_DISPATCH, // SjLj exception handling setup_dispatch.

    TC_RETURN, // Tail call return pseudo.

    THREAD_POINTER,

    DYN_ALLOC, // Dynamic allocation on the stack.

    MEMBARRIER_MCR, // Memory barrier (MCR)

    PRELOAD, // Preload

    WIN__CHKSTK, // Windows' __chkstk call to do stack probing.
    WIN__DBZCHK, // Windows' divide by zero check

    WLS,      // Low-overhead loops, While Loop Start branch. See t2WhileLoopStart
    WLSSETUP, // Setup for the iteration count of a WLS. See t2WhileLoopSetup.
    LOOP_DEC, // Really a part of LE, performs the sub
    LE,       // Low-overhead loops, Loop End

    PREDICATE_CAST,  // Predicate cast for MVE i1 types
    VECTOR_REG_CAST, // Reinterpret the current contents of a vector register

    MVESEXT,  // Legalization aids for extending a vector into two/four vectors.
    MVEZEXT,  //  or truncating two/four vectors into one. Eventually becomes
    MVETRUNC, //  stack store/load sequence, if not optimized to anything else.

    VCMP,  // Vector compare.
    VCMPZ, // Vector compare to zero.
    VTST,  // Vector test bits.

    // Vector shift by vector
    VSHLs, // ...left/right by signed
    VSHLu, // ...left/right by unsigned

    // Vector shift by immediate:
    VSHLIMM,  // ...left
    VSHRsIMM, // ...right (signed)
    VSHRuIMM, // ...right (unsigned)

    // Vector rounding shift by immediate:
    VRSHRsIMM, // ...right (signed)
    VRSHRuIMM, // ...right (unsigned)
    VRSHRNIMM, // ...right narrow

    // Vector saturating shift by immediate:
    VQSHLsIMM,   // ...left (signed)
    VQSHLuIMM,   // ...left (unsigned)
    VQSHLsuIMM,  // ...left (signed to unsigned)
    VQSHRNsIMM,  // ...right narrow (signed)
    VQSHRNuIMM,  // ...right narrow (unsigned)
    VQSHRNsuIMM, // ...right narrow (signed to unsigned)

    // Vector saturating rounding shift by immediate:
    VQRSHRNsIMM,  // ...right narrow (signed)
    VQRSHRNuIMM,  // ...right narrow (unsigned)
    VQRSHRNsuIMM, // ...right narrow (signed to unsigned)

    // Vector shift and insert:
    VSLIIMM, // ...left
    VSRIIMM, // ...right

    // Vector get lane (VMOV scalar to ARM core register)
    // (These are used for 8- and 16-bit element types only.)
    VGETLANEu, // zero-extend vector extract element
    VGETLANEs, // sign-extend vector extract element

    // Vector move immediate and move negated immediate:
    VMOVIMM,
    VMVNIMM,

    // Vector move f32 immediate:
    VMOVFPIMM,

    // Move H <-> R, clearing top 16 bits
    VMOVrh,
    VMOVhr,

    // Vector duplicate:
    VDUP,
    VDUPLANE,

    // Vector shuffles:
    VEXT,   // extract
    VREV64, // reverse elements within 64-bit doublewords
    VREV32, // reverse elements within 32-bit words
    VREV16, // reverse elements within 16-bit halfwords
    VZIP,   // zip (interleave)
    VUZP,   // unzip (deinterleave)
    VTRN,   // transpose
    VTBL1,  // 1-register shuffle with mask
    VTBL2,  // 2-register shuffle with mask
    VMOVN,  // MVE vmovn

    // MVE Saturating truncates
    VQMOVNs, // Vector (V) Saturating (Q) Move and Narrow (N), signed (s)
    VQMOVNu, // Vector (V) Saturating (Q) Move and Narrow (N), unsigned (u)

    // MVE float <> half converts
    VCVTN, // MVE vcvt f32 -> f16, truncating into either the bottom or top
           // lanes
    VCVTL, // MVE vcvt f16 -> f32, extending from either the bottom or top lanes

    // MVE VIDUP instruction, taking a start value and increment.
    VIDUP,

    // Vector multiply long:
    VMULLs, // ...signed
    VMULLu, // ...unsigned

    VQDMULH, // MVE vqdmulh instruction

    // MVE reductions
    VADDVs,  // sign- or zero-extend the elements of a vector to i32,
    VADDVu,  //   add them all together, and return an i32 of their sum
    VADDVps, // Same as VADDV[su] but with a v4i1 predicate mask
    VADDVpu,
    VADDLVs,  // sign- or zero-extend elements to i64 and sum, returning
    VADDLVu,  //   the low and high 32-bit halves of the sum
    VADDLVAs, // Same as VADDLV[su] but also add an input accumulator
    VADDLVAu, //   provided as low and high halves
    VADDLVps, // Same as VADDLV[su] but with a v4i1 predicate mask
    VADDLVpu,
    VADDLVAps, // Same as VADDLVp[su] but with a v4i1 predicate mask
    VADDLVApu,
    VMLAVs, // sign- or zero-extend the elements of two vectors to i32, multiply
            // them
    VMLAVu,   //   and add the results together, returning an i32 of their sum
    VMLAVps,  // Same as VMLAV[su] with a v4i1 predicate mask
    VMLAVpu,
    VMLALVs,  // Same as VMLAV but with i64, returning the low and
    VMLALVu,  //   high 32-bit halves of the sum
    VMLALVps, // Same as VMLALV[su] with a v4i1 predicate mask
    VMLALVpu,
    VMLALVAs,  // Same as VMLALV but also add an input accumulator
    VMLALVAu,  //   provided as low and high halves
    VMLALVAps, // Same as VMLALVA[su] with a v4i1 predicate mask
    VMLALVApu,
    VMINVu, // Find minimum unsigned value of a vector and register
    VMINVs, // Find minimum signed value of a vector and register
    VMAXVu, // Find maximum unsigned value of a vector and register
    VMAXVs, // Find maximum signed value of a vector and register

    SMULWB,  // Signed multiply word by half word, bottom
    SMULWT,  // Signed multiply word by half word, top
    UMLAL,   // 64bit Unsigned Accumulate Multiply
    SMLAL,   // 64bit Signed Accumulate Multiply
    UMAAL,   // 64-bit Unsigned Accumulate Accumulate Multiply
    SMLALBB, // 64-bit signed accumulate multiply bottom, bottom 16
    SMLALBT, // 64-bit signed accumulate multiply bottom, top 16
    SMLALTB, // 64-bit signed accumulate multiply top, bottom 16
    SMLALTT, // 64-bit signed accumulate multiply top, top 16
    SMLALD,  // Signed multiply accumulate long dual
    SMLALDX, // Signed multiply accumulate long dual exchange
    SMLSLD,  // Signed multiply subtract long dual
    SMLSLDX, // Signed multiply subtract long dual exchange
    SMMLAR,  // Signed multiply long, round and add
    SMMLSR,  // Signed multiply long, subtract and round

    // Single Lane QADD8 and QADD16. Only the bottom lane. That's what the b
    // stands for.
    QADD8b,
    QSUB8b,
    QADD16b,
    QSUB16b,
    UQADD8b,
    UQSUB8b,
    UQADD16b,
    UQSUB16b,

    // Operands of the standard BUILD_VECTOR node are not legalized, which
    // is fine if BUILD_VECTORs are always lowered to shuffles or other
    // operations, but for ARM some BUILD_VECTORs are legal as-is and their
    // operands need to be legalized. Define an ARM-specific version of
    // BUILD_VECTOR for this purpose.
    BUILD_VECTOR,

    // Bit-field insert
    BFI,

    // Vector OR with immediate
    VORRIMM,
    // Vector AND with NOT of immediate
    VBICIMM,

    // Pseudo vector bitwise select
    VBSP,

    // Pseudo-instruction representing a memory copy using ldm/stm
    // instructions.
    MEMCPY,

    // Pseudo-instruction representing a memory copy using a tail predicated
    // loop
    MEMCPYLOOP,
    // Pseudo-instruction representing a memset using a tail predicated
    // loop
    MEMSETLOOP,

    // V8.1-M Mainline condition select
    CSINV, // Conditional select invert.
    CSNEG, // Conditional select negate.
    CSINC, // Conditional select increment.

    // Vector load N-element structure to all lanes:
    VLD1DUP = ISD::FIRST_TARGET_MEMORY_OPCODE,
    VLD2DUP,
    VLD3DUP,
    VLD4DUP,

    // NEON loads with post-increment base updates:
    VLD1_UPD,
    VLD2_UPD,
    VLD3_UPD,
    VLD4_UPD,
    VLD2LN_UPD,
    VLD3LN_UPD,
    VLD4LN_UPD,
    VLD1DUP_UPD,
    VLD2DUP_UPD,
    VLD3DUP_UPD,
    VLD4DUP_UPD,
    VLD1x2_UPD,
    VLD1x3_UPD,
    VLD1x4_UPD,

    // NEON stores with post-increment base updates:
    VST1_UPD,
    VST2_UPD,
    VST3_UPD,
    VST4_UPD,
    VST2LN_UPD,
    VST3LN_UPD,
    VST4LN_UPD,
    VST1x2_UPD,
    VST1x3_UPD,
    VST1x4_UPD,

    // Load/Store of dual registers
    LDRD,
    STRD
  };

} // end namespace ARMISD

namespace ARM {
/// Possible values of current rounding mode, which is specified in bits
/// 23:22 of FPSCR.
enum Rounding {
  RN = 0,    // Round to Nearest
  RP = 1,    // Round towards Plus infinity
  RM = 2,    // Round towards Minus infinity
  RZ = 3,    // Round towards Zero
  rmMask = 3 // Bit mask selecting rounding mode
};

// Bit position of rounding mode bits in FPSCR.
const unsigned RoundingBitsPos = 22;
} // namespace ARM

/// Define some predicates that are used for node matching.
namespace ARM {

bool isBitFieldInvertedMask(unsigned v);

} // end namespace ARM

//===--------------------------------------------------------------------===//
//  ARMTargetLowering - ARM Implementation of the TargetLowering interface

class ARMTargetLowering : public TargetLowering {
public:
  explicit ARMTargetLowering(const TargetMachine &TM,
                             const ARMSubtarget &STI);

  unsigned getJumpTableEncoding() const override;
  bool useSoftFloat() const override;

  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  /// ReplaceNodeResults - Replace the results of node with an illegal result
  /// type with new values built out of custom code.
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  bool isSelectSupported(SelectSupportKind Kind) const override {
    // ARM does not support scalar condition selects on vectors.
    return (Kind != ScalarCondVectorVal);
  }

  bool isReadOnly(const GlobalValue *GV) const;

  /// getSetCCResultType - Return the value type to use for ISD::SETCC.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *MBB) const override;

  void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                     SDNode *Node) const override;

  SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const;
  SDValue PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const;
  SDValue PerformCMOVToBFICombine(SDNode *N, SelectionDAG &DAG) const;
  SDValue PerformIntrinsicCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue PerformMVEExtCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue PerformMVETruncCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  bool SimplifyDemandedBitsForTargetNode(SDValue Op,
                                         const APInt &OriginalDemandedBits,
                                         const APInt &OriginalDemandedElts,
                                         KnownBits &Known,
                                         TargetLoweringOpt &TLO,
                                         unsigned Depth) const override;

  bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const override;

  /// allowsMisalignedMemoryAccesses - Returns true if the target allows
  /// unaligned memory accesses of the specified type. Returns whether it
  /// is "fast" by reference in the second argument.
  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
                                      Align Alignment,
                                      MachineMemOperand::Flags Flags,
                                      unsigned *Fast) const override;

  EVT getOptimalMemOpType(const MemOp &Op,
                          const AttributeList &FuncAttributes) const override;

  bool isTruncateFree(Type *SrcTy, Type *DstTy) const override;
  bool isTruncateFree(EVT SrcVT, EVT DstVT) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;
  bool shouldSinkOperands(Instruction *I,
                          SmallVectorImpl<Use *> &Ops) const override;
  Type* shouldConvertSplatType(ShuffleVectorInst* SVI) const override;

  bool isFNegFree(EVT VT) const override;

  bool isVectorLoadExtDesirable(SDValue ExtVal) const override;

  bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;


  /// isLegalAddressingMode - Return true if the addressing mode represented
  /// by AM is legal for this target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                             Type *Ty, unsigned AS,
                             Instruction *I = nullptr) const override;

  bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

  /// Returns true if the addressing mode represented by AM is legal
  /// for the Thumb1 target, for a load/store of the specified type.
  bool isLegalT1ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

  /// isLegalICmpImmediate - Return true if the specified immediate is legal
  /// icmp immediate, that is the target has icmp instructions which can
  /// compare a register against the immediate without having to materialize
  /// the immediate into a register.
  bool isLegalICmpImmediate(int64_t Imm) const override;

  /// isLegalAddImmediate - Return true if the specified immediate is legal
  /// add immediate, that is the target has add instructions which can
  /// add a register and the immediate without having to materialize
  /// the immediate into a register.
  bool isLegalAddImmediate(int64_t Imm) const override;

  /// getPreIndexedAddressParts - returns true by value, base pointer and
  /// offset pointer and addressing mode by reference if the node's address
  /// can be legally represented as pre-indexed load / store address.
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;

  /// getPostIndexedAddressParts - returns true by value, base pointer and
  /// offset pointer and addressing mode by reference if this node can be
  /// combined with a load / store to form a post-indexed load / store.
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth) const override;

  bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
                                    const APInt &DemandedElts,
                                    TargetLoweringOpt &TLO) const override;

  bool ExpandInlineAsm(CallInst *CI) const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  const char *LowerXConstraint(EVT ConstraintVT) const override;

  /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
  /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
  /// true it means one of the asm constraint of the inline asm instruction
  /// being processed is 'm'.
  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  unsigned
  getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "Q")
      return InlineAsm::Constraint_Q;
    else if (ConstraintCode.size() == 2) {
      if (ConstraintCode[0] == 'U') {
        switch(ConstraintCode[1]) {
        default:
          break;
        case 'm':
          return InlineAsm::Constraint_Um;
        case 'n':
          return InlineAsm::Constraint_Un;
        case 'q':
          return InlineAsm::Constraint_Uq;
        case 's':
          return InlineAsm::Constraint_Us;
        case 't':
          return InlineAsm::Constraint_Ut;
        case 'v':
          return InlineAsm::Constraint_Uv;
        case 'y':
          return InlineAsm::Constraint_Uy;
        }
      }
    }
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  const ARMSubtarget* getSubtarget() const {
    return Subtarget;
  }

  /// getRegClassFor - Return the register class that should be used for the
  /// specified value type.
  const TargetRegisterClass *
  getRegClassFor(MVT VT, bool isDivergent = false) const override;

  bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
                              Align &PrefAlign) const override;

  /// createFastISel - This method returns a target specific FastISel object,
  /// or null if the target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  Sched::Preference getSchedulingPreference(SDNode *N) const override;

  bool preferZeroCompareBranch() const override { return true; }

  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;

  bool
  isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;
  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  /// isFPImmLegal - Returns true if the target can instruction select the
  /// specified FP immediate natively. If false, the legalizer will
  /// materialize the FP immediate as a load from a constant pool.
  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize = false) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info,
                          const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;

  /// Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
  /// with this index.
  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                               unsigned Index) const override;

  bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
                            bool MathUsed) const override {
    // Using overflow ops for overflow checks only should be beneficial on ARM.
    return TargetLowering::shouldFormOverflowOp(Opcode, VT, true);
  }

  /// Returns true if an argument of type Ty needs to be passed in a
  /// contiguous block of registers in calling convention CallConv.
  bool functionArgumentNeedsConsecutiveRegisters(
      Type *Ty, CallingConv::ID CallConv, bool isVarArg,
      const DataLayout &DL) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override;

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

  Instruction *makeDMB(IRBuilderBase &Builder, ARM_MB::MemBOpt Domain) const;
  Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr,
                              AtomicOrdering Ord) const override;

  void
  emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const override;

  Instruction *emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
                                AtomicOrdering Ord) const override;
  Instruction *emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
                                 AtomicOrdering Ord) const override;

  unsigned getMaxSupportedInterleaveFactor() const override;

  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;
  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;

  bool shouldInsertFencesForAtomic(const Instruction *I) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode() const override;

  void insertSSPDeclarations(Module &M) const override;
  Value *getSDagStackGuard(const Module &M) const override;
  Function *getSSPStackGuardCheck(const Module &M) const override;

  bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                 unsigned &Cost) const override;

  bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                        const MachineFunction &MF) const override {
    // Do not merge to larger than i32.
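    // (i.e. merging is allowed only when the merged value type is at most
    // 32 bits wide.)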
    return (MemVT.getSizeInBits() <= 32);
  }

  bool isCheapToSpeculateCttz(Type *Ty) const override;
  bool isCheapToSpeculateCtlz(Type *Ty) const override;

  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
    return VT.isScalarInteger();
  }

  bool supportSwiftError() const override {
    return true;
  }

  bool hasStandaloneRem(EVT VT) const override {
    return HasStandaloneRem;
  }

  ShiftLegalizationStrategy
  preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N,
                                     unsigned ExpansionFactor) const override;

  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool isVarArg) const;
  CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool isVarArg) const;

  /// Returns true if \p VecTy is a legal interleaved access type. This
  /// function checks the vector element type and the overall width of the
  /// vector.
  bool isLegalInterleavedAccessType(unsigned Factor, FixedVectorType *VecTy,
                                    Align Alignment,
                                    const DataLayout &DL) const;

  bool isMulAddWithConstProfitable(SDValue AddNode,
                                   SDValue ConstNode) const override;

  bool alignLoopsWithOptSize() const override;

  /// Returns the number of interleaved accesses that will be generated when
  /// lowering accesses of the given type.
  unsigned getNumInterleavedAccesses(VectorType *VecTy,
                                     const DataLayout &DL) const;

  void finalizeLowering(MachineFunction &MF) const override;

  /// Return the correct alignment for the current calling convention.
  Align getABIAlignmentForCallingConv(Type *ArgTy,
                                      const DataLayout &DL) const override;

  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  bool isDesirableToCommuteXorWithShift(const SDNode *N) const override;

  bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                         CombineLevel Level) const override;

  bool preferIncOfAddToSubOfNot(EVT VT) const override;

  bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override;

  bool isComplexDeinterleavingSupported() const override;
  bool isComplexDeinterleavingOperationSupported(
      ComplexDeinterleavingOperation Operation, Type *Ty) const override;

  Value *createComplexDeinterleavingIR(
      Instruction *I, ComplexDeinterleavingOperation OperationType,
      ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB,
      Value *Accumulator = nullptr) const override;

protected:
  std::pair<const TargetRegisterClass *, uint8_t>
  findRepresentativeClass(const TargetRegisterInfo *TRI,
                          MVT VT) const override;

private:
  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;

  const TargetRegisterInfo *RegInfo;

  const InstrItineraryData *Itins;

  /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
  unsigned ARMPCLabelIndex;

  // TODO: remove this, and have shouldInsertFencesForAtomic do the proper
  // check.
  bool InsertFencesForAtomic;

  bool HasStandaloneRem = true;

  void addTypeForNEON(MVT VT, MVT PromotedLdStVT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);
  std::pair<SDValue, SDValue> getARMXALUOOp(SDValue Op, SelectionDAG &DAG, SDValue &ARMcc) const;

  using RegsToPassVector = SmallVector<std::pair<unsigned, SDValue>, 8>;

  void PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG, SDValue Chain,
                        SDValue &Arg, RegsToPassVector &RegsToPass,
                        CCValAssign &VA, CCValAssign &NextVA,
                        SDValue &StackPtr,
                        SmallVectorImpl<SDValue> &MemOpChains,
                        bool IsTailCall,
                        int SPDiff) const;
  SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                               SDValue &Root, SelectionDAG &DAG,
                               const SDLoc &dl) const;

  CallingConv::ID getEffectiveCallingConv(CallingConv::ID CC,
                                          bool isVarArg) const;
  CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
                                bool isVarArg) const;
  std::pair<SDValue, MachinePointerInfo>
  computeAddrForCallArg(const SDLoc &dl, SelectionDAG &DAG,
                        const CCValAssign &VA, SDValue StackPtr,
                        bool IsTailCall, int SPDiff) const;
  SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG,
                              const ARMSubtarget *Subtarget) const;
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                  const ARMSubtarget *Subtarget) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddressWindows(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                        SelectionDAG &DAG) const;
  SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                               SelectionDAG &DAG,
                               TLSModel::Model model) const;
  SDValue LowerGlobalTLSAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddressWindows(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSignedALUO(SDValue Op, SelectionDAG &DAG) const;
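  // Unsigned counterpart of LowerSignedALUO above, covering the unsigned
  // overflow-checking arithmetic nodes (e.g. ISD::UADDO / ISD::USUBO).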
  SDValue LowerUnsignedALUO(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantFP(SDValue Op, SelectionDAG &DAG,
                          const ARMSubtarget *ST) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                            const ARMSubtarget *ST) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDivRem(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDIV_Windows(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  void ExpandDIV_Windows(SDValue Op, SelectionDAG &DAG, bool Signed,
                         SmallVectorImpl<SDValue> &Results) const;
  SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG,
                        const ARMSubtarget *Subtarget) const;
  SDValue LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG, bool Signed,
                                 SDValue &Chain) const;
  SDValue LowerREM(SDNode *N, SelectionDAG &DAG) const;
  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const;
  void LowerLOAD(SDNode *N, SmallVectorImpl<SDValue> &Results,
                 SelectionDAG &DAG) const;

  Register getRegisterByName(const char* RegName, LLT VT,
                             const MachineFunction &MF) const override;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;

  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;

  SDValue MoveToHPR(const SDLoc &dl, SelectionDAG &DAG, MVT LocVT, MVT ValVT,
                    SDValue Val) const;
  SDValue MoveFromHPR(const SDLoc &dl, SelectionDAG &DAG, MVT LocVT,
                      MVT ValVT, SDValue Val) const;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &dl, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal) const;

  bool supportSplitCSR(MachineFunction *MF) const override {
    return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
        MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
  }

  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  bool splitValueIntoRegisterParts(
      SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
      unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC)
      const override;

  SDValue joinRegisterPartsIntoValue(
      SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts,
      unsigned NumParts, MVT PartVT, EVT ValueVT,
      std::optional<CallingConv::ID> CC) const override;

  SDValue
  LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                       const SmallVectorImpl<ISD::InputArg> &Ins,
                       const SDLoc &dl, SelectionDAG &DAG,
                       SmallVectorImpl<SDValue> &InVals) const override;

  int StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &dl,
                     SDValue &Chain, const Value *OrigArg,
                     unsigned InRegsParamRecordIdx, int ArgOffset,
                     unsigned ArgSize) const;

  void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                            const SDLoc &dl, SDValue &Chain,
                            unsigned ArgOffset, unsigned TotalArgRegsSaveSize,
                            bool ForceMutable = false) const;

  SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  /// HandleByVal - Target-specific cleanup for ByVal support.
  void HandleByVal(CCState *, unsigned &, Align) const override;

  /// IsEligibleForTailCallOptimization - Check whether the call is eligible
  /// for tail call optimization. Targets which want to do tail call
  /// optimization should implement this function.
  bool IsEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      bool isCalleeStructRet, bool isCallerStructRet,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG,
      const bool isIndirect) const;

  bool CanLowerReturn(CallingConv::ID CallConv,
                      MachineFunction &MF, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals,
                      const SDLoc &dl, SelectionDAG &DAG) const override;

  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;

  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;

  bool shouldConsiderGEPOffsetSplit() const override { return true; }

  bool isUnsupportedFloatingType(EVT VT) const;

  SDValue getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, SDValue TrueVal,
                  SDValue ARMcc, SDValue CCR, SDValue Cmp,
                  SelectionDAG &DAG) const;
  SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                    SDValue &ARMcc, SelectionDAG &DAG, const SDLoc &dl) const;
  SDValue getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
                    const SDLoc &dl, bool Signaling = false) const;
  SDValue duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const;

  SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;

  void SetupEntryBlockForSjLj(MachineInstr &MI, MachineBasicBlock *MBB,
                              MachineBasicBlock *DispatchBB, int FI) const;

  void EmitSjLjDispatchBlock(MachineInstr &MI, MachineBasicBlock *MBB) const;

  bool RemapAddSubWithFlags(MachineInstr &MI, MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitStructByval(MachineInstr &MI,
                                     MachineBasicBlock *MBB) const;

  MachineBasicBlock *EmitLowered__chkstk(MachineInstr &MI,
                                         MachineBasicBlock *MBB) const;
  MachineBasicBlock *EmitLowered__dbzchk(MachineInstr &MI,
                                         MachineBasicBlock *MBB) const;
  void addMVEVectorTypes(bool HasMVEFP);
  void addAllExtLoads(const MVT From, const MVT To, LegalizeAction Action);
  void setAllExpand(MVT VT);
};

enum VMOVModImmType {
  VMOVModImm,
  VMVNModImm,
  MVEVMVNModImm,
  OtherModImm
};

namespace ARM {

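/// Create an ARM-specific FastISel object (see
/// ARMTargetLowering::createFastISel above).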
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);

} // end namespace ARM

} // end namespace llvm

#endif // LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H