/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2014-2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2017-2024 NXP
 *
 */
#ifndef __DPAA_ETHDEV_H__
#define __DPAA_ETHDEV_H__

/* System headers */
#include <stdbool.h>
#include <ethdev_driver.h>
#include <rte_compat.h>
#include <rte_event_eth_rx_adapter.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <fsl_bman.h>
#include <dpaa_of.h>
#include <netcfg.h>

#define MAX_DPAA_CORES			4
#define DPAA_MBUF_HW_ANNOTATION		64
#define DPAA_FD_PTA_SIZE		64

/* We will re-use the HEADROOM for annotation in RX */
#define DPAA_HW_BUF_RESERVE	0
#define DPAA_PACKET_LAYOUT_ALIGN	64

/* Alignment to use for cpu-local structs to avoid coherency problems. */
#define MAX_CACHELINE			64

#define DPAA_MAX_RX_PKT_LEN	10240

#define DPAA_SGT_MAX_ENTRIES 16 /* Maximum number of entries in an SG table */

/* Maximum SG segments supported on all cores */
#define DPAA_MAX_SGS 128
/* SG pool size */
#define DPAA_POOL_SIZE 2048
/* SG pool cache size */
#define DPAA_POOL_CACHE_SIZE 256

/* RX queue tail drop threshold (CGR based) in frame count */
#define CGR_RX_PERFQ_THRESH 256
#define CGR_TX_CGR_THRESH 512

/* Maximum MAC filters for memac (8), including the primary MAC address */
#define DPAA_MAX_MAC_FILTER (MEMAC_NUM_OF_PADDRS + 1)

/* Maximum number of slots available in TX ring */
#define DPAA_TX_BURST_SIZE	7

/* Optimal burst size for RX and TX as default */
#define DPAA_DEF_RX_BURST_SIZE 7
#define DPAA_DEF_TX_BURST_SIZE DPAA_TX_BURST_SIZE

#ifndef VLAN_TAG_SIZE
#define VLAN_TAG_SIZE	4 /**< VLAN header length */
#endif

#define DPAA_ETH_MAX_LEN (RTE_ETHER_MTU + \
			  RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
			  VLAN_TAG_SIZE)
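
/*
 * For reference: with DPDK's standard RTE_ETHER_MTU of 1500 bytes,
 * DPAA_ETH_MAX_LEN above works out to
 * 1500 + 14 (Ethernet header) + 4 (CRC) + 4 (VLAN tag) = 1522 bytes.
 */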

/* PCD frame queues */
#define DPAA_DEFAULT_NUM_PCD_QUEUES 1
#define DPAA_VSP_PROFILE_MAX_NUM	8
#define DPAA_MAX_NUM_PCD_QUEUES	DPAA_VSP_PROFILE_MAX_NUM
			/* Same as VSP profile number */

#define DPAA_IF_TX_PRIORITY		3
#define DPAA_IF_RX_PRIORITY		0
#define DPAA_IF_DEBUG_PRIORITY		7

#define DPAA_IF_RX_ANNOTATION_STASH	1
#define DPAA_IF_RX_DATA_STASH		1
#define DPAA_IF_RX_CONTEXT_STASH	0

/* Each "debug" FQ is represented by one of these */
enum {
	DPAA_DEBUG_FQ_RX_ERROR,
	DPAA_DEBUG_FQ_TX_ERROR,
	DPAA_DEBUG_FQ_MAX_NUM
};

#define DPAA_RSS_OFFLOAD_ALL ( \
	RTE_ETH_RSS_L2_PAYLOAD | \
	RTE_ETH_RSS_IP | \
	RTE_ETH_RSS_UDP | \
	RTE_ETH_RSS_TCP | \
	RTE_ETH_RSS_SCTP)

#define DPAA_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM | \
				    RTE_MBUF_F_TX_TCP_CKSUM | \
				    RTE_MBUF_F_TX_UDP_CKSUM)

/* DPAA Frame descriptor macros */

#define DPAA_FD_CMD_FCO			0x80000000
/**< Frame queue Context Override */
#define DPAA_FD_CMD_RPD			0x40000000
/**< Read Prepended Data */
#define DPAA_FD_CMD_UPD			0x20000000
/**< Update Prepended Data */
#define DPAA_FD_CMD_DTC			0x10000000
/**< Do IP/TCP/UDP Checksum */
#define DPAA_FD_CMD_DCL4C		0x10000000
/**< Didn't calculate L4 Checksum */
#define DPAA_FD_CMD_CFQ			0x00ffffff
/**< Confirmation Frame Queue */
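
/*
 * Illustrative sketch (not part of the original header): a TX path could
 * combine DPAA_TX_CKSUM_OFFLOAD_MASK with the frame descriptor command
 * bits above to request hardware checksum generation. The mbuf pointer
 * and the fd_cmd variable are hypothetical placeholders; the actual
 * driver logic lives in the TX burst path.
 *
 *	uint32_t fd_cmd = 0;
 *
 *	if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK)
 *		fd_cmd |= DPAA_FD_CMD_RPD | DPAA_FD_CMD_DTC;
 */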

#define DPAA_1G_MAC_START_IDX	1
#define DPAA_10G_MAC_START_IDX	9
#define DPAA_2_5G_MAC_START_IDX	DPAA_10G_MAC_START_IDX

#define DPAA_DEFAULT_RXQ_VSP_ID	1

#define FMC_FILE "/tmp/fmc.bin"

extern struct rte_mempool *dpaa_tx_sg_pool;
extern int dpaa_ieee_1588;

/* Structure used to free external and indirect buffers. */
struct dpaa_sw_buf_free {
	/* To which packet this segment belongs */
	uint16_t pkt_id;
	/* The actual segment */
	struct rte_mbuf *seg;
};

/* Each network interface is represented by one of these */
struct dpaa_if {
	int valid;
	char *name;
	const struct fm_eth_port_cfg *cfg;
	struct qman_fq *rx_queues;
	struct qman_cgr *cgr_rx;
	struct qman_fq *tx_queues;
	struct qman_fq *tx_conf_queues;
	struct qman_cgr *cgr_tx;
	struct qman_fq debug_queues[DPAA_DEBUG_FQ_MAX_NUM];
	uint16_t nb_rx_queues;
	uint16_t nb_tx_queues;
	uint32_t ifid;
	struct dpaa_bp_info *bp_info;
	struct rte_eth_fc_conf *fc_conf;
	void *port_handle;
	void *netenv_handle;
	void *scheme_handle[2];
	uint32_t scheme_count;
	/* Timestamp of the last packet received on the device */
	uint64_t rx_timestamp;
	/* Timestamp of the last Tx confirmation packet received on the device */
	uint64_t tx_timestamp;
	/* Pointer to the next tx_conf queue that should be processed;
	 * it corresponds to the last packet transmitted.
	 */
	struct qman_fq *next_tx_conf_queue;

	void *vsp_handle[DPAA_VSP_PROFILE_MAX_NUM];
	uint32_t vsp_bpid[DPAA_VSP_PROFILE_MAX_NUM];
};
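
/*
 * Usage note (assumption based on the standard ethdev driver model, not
 * stated in the original header): the per-port private data of a DPAA
 * ethdev is expected to be a struct dpaa_if, so driver code would
 * typically retrieve it as:
 *
 *	struct dpaa_if *dpaa_intf = dev->data->dev_private;
 */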

struct dpaa_if_stats {
	/* Rx Statistics Counter */
	uint64_t reoct;		/**< Rx Eth Octets Counter */
	uint64_t roct;		/**< Rx Octet Counters */
	uint64_t raln;		/**< Rx Alignment Error Counter */
	uint64_t rxpf;		/**< Rx valid Pause Frame */
	uint64_t rfrm;		/**< Rx Frame counter */
	uint64_t rfcs;		/**< Rx frame check seq error */
	uint64_t rvlan;		/**< Rx VLAN Frame Counter */
	uint64_t rerr;		/**< Rx Frame error */
	uint64_t ruca;		/**< Rx Unicast */
	uint64_t rmca;		/**< Rx Multicast */
	uint64_t rbca;		/**< Rx Broadcast */
	uint64_t rdrp;		/**< Rx Dropped Packet */
	uint64_t rpkt;		/**< Rx packet */
	uint64_t rund;		/**< Rx undersized packets */
	uint32_t res_x[14];
	uint64_t rovr;		/**< Rx oversized but good */
	uint64_t rjbr;		/**< Rx oversized with bad csum */
	uint64_t rfrg;		/**< Rx fragment Packet */
	uint64_t rcnp;		/**< Rx control packets (0x8808) */
	uint64_t rdrntp;	/**< Rx dropped due to FIFO overflow */
	uint32_t res01d0[12];
	/* Tx Statistics Counter */
	uint64_t teoct;		/**< Tx eth octets */
	uint64_t toct;		/**< Tx Octets */
	uint32_t res0210[2];
	uint64_t txpf;		/**< Tx valid pause frame */
	uint64_t tfrm;		/**< Tx frame counter */
	uint64_t tfcs;		/**< Tx FCS error */
	uint64_t tvlan;		/**< Tx VLAN Frame */
	uint64_t terr;		/**< Tx frame error */
	uint64_t tuca;		/**< Tx Unicast */
	uint64_t tmca;		/**< Tx Multicast */
	uint64_t tbca;		/**< Tx Broadcast */
	uint32_t res0258[2];
	uint64_t tpkt;		/**< Tx Packet */
	uint64_t tund;		/**< Tx Undersized */
};

__rte_internal
int
dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id,
		u16 ch_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);

__rte_internal
int
dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id);

enum qman_cb_dqrr_result
dpaa_rx_cb_parallel(void *event,
		    struct qman_portal *qm __always_unused,
		    struct qman_fq *fq,
		    const struct qm_dqrr_entry *dqrr,
		    void **bufs);
enum qman_cb_dqrr_result
dpaa_rx_cb_atomic(void *event,
		  struct qman_portal *qm __always_unused,
		  struct qman_fq *fq,
		  const struct qm_dqrr_entry *dqrr,
		  void **bufs);
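
/*
 * Illustrative sketch (not from the original header): the DPAA event
 * device glue is expected to attach an ethdev Rx queue to an event queue
 * roughly as follows; eth_dev, rx_qid, ch_id and ev_qid are hypothetical
 * placeholders supplied by the eventdev/Rx adapter layer.
 *
 *	struct rte_event_eth_rx_adapter_queue_conf qconf = {
 *		.ev.queue_id = ev_qid,
 *		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *	};
 *
 *	ret = dpaa_eth_eventq_attach(eth_dev, rx_qid, ch_id, &qconf);
 *	...
 *	ret = dpaa_eth_eventq_detach(eth_dev, rx_qid);
 */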

struct dpaa_if_rx_bmi_stats {
	uint32_t fmbm_rstc;		/**< Rx Statistics Counters */
	uint32_t fmbm_rfrc;		/**< Rx Frame Counter */
	uint32_t fmbm_rfbc;		/**< Rx Bad Frames Counter */
	uint32_t fmbm_rlfc;		/**< Rx Large Frames Counter */
	uint32_t fmbm_rffc;		/**< Rx Filter Frames Counter */
	uint32_t fmbm_rfdc;		/**< Rx Frame Discard Counter */
	uint32_t fmbm_rfldec;		/**< Rx Frames List DMA Error Counter */
	uint32_t fmbm_rodc;		/**< Rx Out of Buffers Discard Counter */
	uint32_t fmbm_rbdc;		/**< Rx Buffers Deallocate Counter */
};

int
dpaa_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					struct timespec *timestamp);

int
dpaa_timesync_enable(struct rte_eth_dev *dev);

int
dpaa_timesync_disable(struct rte_eth_dev *dev);

int
dpaa_timesync_read_time(struct rte_eth_dev *dev,
			struct timespec *timestamp);

int
dpaa_timesync_write_time(struct rte_eth_dev *dev,
			const struct timespec *timestamp);
int
dpaa_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);

int
dpaa_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
				struct timespec *timestamp,
				uint32_t flags __rte_unused);

uint8_t
fm_default_vsp_id(struct fman_if *fif);

/* PMD related logs */
extern int dpaa_logtype_pmd;
#define RTE_LOGTYPE_DPAA_PMD dpaa_logtype_pmd

#define DPAA_PMD_LOG(level, ...) \
	RTE_LOG_LINE_PREFIX(level, DPAA_PMD, "%s(): ", __func__, __VA_ARGS__)

#define PMD_INIT_FUNC_TRACE() DPAA_PMD_LOG(DEBUG, " >>")

#define DPAA_PMD_DEBUG(fmt, ...) \
	DPAA_PMD_LOG(DEBUG, fmt, ## __VA_ARGS__)
#define DPAA_PMD_ERR(fmt, ...) \
	DPAA_PMD_LOG(ERR, fmt, ## __VA_ARGS__)
#define DPAA_PMD_INFO(fmt, ...) \
	DPAA_PMD_LOG(INFO, fmt, ## __VA_ARGS__)
#define DPAA_PMD_WARN(fmt, ...) \
	DPAA_PMD_LOG(WARNING, fmt, ## __VA_ARGS__)

/* DP logs, toggled out at compile time if level is lower than current level */
#define DPAA_DP_LOG(level, ...) \
	RTE_LOG_DP_LINE(level, DPAA_PMD, __VA_ARGS__)

#endif