/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2017-2020 NXP
 *
 */
/* System headers */
#include <stdio.h>
#include <inttypes.h>
#include <unistd.h>
#include <limits.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include <rte_string_fns.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_ring.h>

#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>
#include <dpaa_mempool.h>

#include <dpaa_ethdev.h>
#include <dpaa_rxtx.h>
#include <dpaa_flow.h>
#include <rte_pmd_dpaa.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <fsl_bman.h>
#include <fsl_fman.h>
#include <process.h>

/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER;

/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_RSS_HASH;

/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
		DEV_TX_OFFLOAD_MT_LOCKFREE |
		DEV_TX_OFFLOAD_MBUF_FAST_FREE;

/* Tx offloads which cannot be disabled */
static uint64_t dev_tx_offloads_nodis =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_TX_OFFLOAD_MULTI_SEGS;

/* Keep track of whether QMAN and BMAN have been globally initialized */
static int is_global_init;
static int fmc_q = 1;	/* Indicates the use of static fmc for distribution */
static int default_q;	/* use default queue - FMC is not executed */
/* At present we only allow up to 4 push mode queues by default - as each of
 * these queues needs a dedicated portal and we are short of portals.
 */
#define DPAA_MAX_PUSH_MODE_QUEUE       8
#define DPAA_DEFAULT_PUSH_MODE_QUEUE   4

static int dpaa_push_mode_max_queue = DPAA_DEFAULT_PUSH_MODE_QUEUE;
static int dpaa_push_queue_idx;	/* index of the next queue to use push mode */

/* Per RX FQ Taildrop in frame count */
static unsigned int td_threshold = CGR_RX_PERFQ_THRESH;

/* Per TX FQ Taildrop in frame count, disabled by default */
static unsigned int td_tx_threshold;

struct rte_dpaa_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint32_t offset;
};

static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
	{"rx_align_err",
		offsetof(struct dpaa_if_stats, raln)},
	{"rx_valid_pause",
		offsetof(struct dpaa_if_stats, rxpf)},
	{"rx_fcs_err",
		offsetof(struct dpaa_if_stats, rfcs)},
	{"rx_vlan_frame",
		offsetof(struct dpaa_if_stats, rvlan)},
	{"rx_frame_err",
		offsetof(struct dpaa_if_stats, rerr)},
	{"rx_drop_err",
		offsetof(struct dpaa_if_stats, rdrp)},
	{"rx_undersized",
		offsetof(struct dpaa_if_stats, rund)},
	{"rx_oversize_err",
		offsetof(struct dpaa_if_stats, rovr)},
	{"rx_fragment_pkt",
		offsetof(struct dpaa_if_stats, rfrg)},
	{"tx_valid_pause",
		offsetof(struct dpaa_if_stats, txpf)},
	{"tx_fcs_err",
		offsetof(struct dpaa_if_stats, terr)},
	{"tx_vlan_frame",
		offsetof(struct dpaa_if_stats, tvlan)},
	{"tx_undersized",
		offsetof(struct dpaa_if_stats, tund)},
};

static struct rte_dpaa_driver rte_dpaa_pmd;
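
/*
 * Usage sketch (illustrative only, not part of this driver): the table above
 * is exported through the generic xstats API, so an application can read
 * e.g. "rx_fcs_err" for a hypothetical port_id bound to this PMD with:
 *
 *	struct rte_eth_xstat_name names[32];
 *	struct rte_eth_xstat xstats[32];
 *	int cnt = rte_eth_xstats_get_names(port_id, names, 32);
 *	if (cnt > 0 && cnt <= 32)
 *		rte_eth_xstats_get(port_id, xstats, cnt);
 */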

static int
dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);

static int dpaa_eth_link_update(struct rte_eth_dev *dev,
				int wait_to_complete __rte_unused);

static void dpaa_interrupt_handler(void *param);

static inline void
dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
{
	memset(opts, 0, sizeof(struct qm_mcc_initfq));
	opts->we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
	opts->fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
			    QM_FQCTRL_PREFERINCACHE;
	opts->fqd.context_a.stashing.exclusive = 0;
	if (dpaa_svr_family != SVR_LS1046A_FAMILY)
		opts->fqd.context_a.stashing.annotation_cl =
						DPAA_IF_RX_ANNOTATION_STASH;
	opts->fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
	opts->fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;
}
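
/*
 * A minimal sketch (illustrative only) of how the defaults above are meant
 * to be consumed: the caller fills opts and then commits them to an
 * already-created frame queue, as done later in this file:
 *
 *	struct qm_mcc_initfq opts;
 *	dpaa_poll_queue_default_config(&opts);
 *	ret = qman_init_fq(&fq, 0, &opts);	// fq: a struct qman_fq
 */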

static int
dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
				+ VLAN_TAG_SIZE;
	uint32_t buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;

	PMD_INIT_FUNC_TRACE();

	if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
		return -EINVAL;
	/*
	 * Refuse mtu that requires the support of scattered packets
	 * when this feature has not been enabled before.
	 */
	if (dev->data->min_rx_buf_size &&
	    !dev->data->scattered_rx && frame_size > buffsz) {
		DPAA_PMD_ERR("SG not enabled, will not fit in one buffer");
		return -EINVAL;
	}

	/* check <seg size> * <max_seg> >= max_frame */
	if (dev->data->min_rx_buf_size && dev->data->scattered_rx &&
	    (frame_size > buffsz * DPAA_SGT_MAX_ENTRIES)) {
		DPAA_PMD_ERR("Too big to fit for Max SG list %d",
			     buffsz * DPAA_SGT_MAX_ENTRIES);
		return -EINVAL;
	}

	if (frame_size > RTE_ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
						~DEV_RX_OFFLOAD_JUMBO_FRAME;

	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	fman_if_set_maxfrm(dev->process_private, frame_size);

	return 0;
}
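
/*
 * Usage sketch (illustrative only): dpaa_mtu_set() is reached through the
 * generic ethdev API; port_id is a hypothetical port bound to this PMD.
 *
 *	if (rte_eth_dev_set_mtu(port_id, 9000) != 0)	// jumbo MTU
 *		printf("MTU rejected by the PMD\n");
 */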

static int
dpaa_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	uint64_t tx_offloads = eth_conf->txmode.offloads;
	struct rte_device *rdev = dev->device;
	struct rte_dpaa_device *dpaa_dev;
	struct fman_if *fif = dev->process_private;
	struct __fman_if *__fif;
	struct rte_intr_handle *intr_handle;
	int ret;

	PMD_INIT_FUNC_TRACE();

	dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
	intr_handle = &dpaa_dev->intr_handle;
	__fif = container_of(fif, struct __fman_if, __if);

	/* Rx offloads which are enabled by default */
	if (dev_rx_offloads_nodis & ~rx_offloads) {
		DPAA_PMD_INFO(
		"Some of rx offloads enabled by default - requested 0x%" PRIx64
		" fixed are 0x%" PRIx64,
		rx_offloads, dev_rx_offloads_nodis);
	}

	/* Tx offloads which are enabled by default */
	if (dev_tx_offloads_nodis & ~tx_offloads) {
		DPAA_PMD_INFO(
		"Some of tx offloads enabled by default - requested 0x%" PRIx64
		" fixed are 0x%" PRIx64,
		tx_offloads, dev_tx_offloads_nodis);
	}

	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		uint32_t max_len;

		DPAA_PMD_DEBUG("enabling jumbo");

		if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
		    DPAA_MAX_RX_PKT_LEN)
			max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
		else {
			DPAA_PMD_INFO("enabling jumbo override conf max len=%d "
				"supported is %d",
				dev->data->dev_conf.rxmode.max_rx_pkt_len,
				DPAA_MAX_RX_PKT_LEN);
			max_len = DPAA_MAX_RX_PKT_LEN;
		}

		fman_if_set_maxfrm(dev->process_private, max_len);
		dev->data->mtu = max_len
			- RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE;
	}

	if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
		DPAA_PMD_DEBUG("enabling scatter mode");
		fman_if_set_sg(dev->process_private, 1);
		dev->data->scattered_rx = 1;
	}

	/* if the interrupts were configured on this device */
	if (intr_handle && intr_handle->fd) {
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			rte_intr_callback_register(intr_handle,
						   dpaa_interrupt_handler,
						   (void *)dev);

		ret = dpaa_intr_enable(__fif->node_name, intr_handle->fd);
		if (ret) {
			if (dev->data->dev_conf.intr_conf.lsc != 0) {
				rte_intr_callback_unregister(intr_handle,
							dpaa_interrupt_handler,
							(void *)dev);
				if (ret == EINVAL)
					printf("Failed to enable interrupt: Not Supported\n");
				else
					printf("Failed to enable interrupt\n");
			}
			dev->data->dev_conf.intr_conf.lsc = 0;
			dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
		}
	}
	return 0;
}
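
/*
 * Usage sketch (illustrative only, hypothetical port_id): the checks above
 * run when the application configures the port, e.g.:
 *
 *	struct rte_eth_conf conf = {0};
 *	conf.rxmode.offloads = DEV_RX_OFFLOAD_JUMBO_FRAME |
 *			       DEV_RX_OFFLOAD_SCATTER;
 *	conf.rxmode.max_rx_pkt_len = 9600;
 *	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
 */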

static const uint32_t *
dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_SCTP
	};

	PMD_INIT_FUNC_TRACE();

	if (dev->rx_pkt_burst == dpaa_eth_queue_rx)
		return ptypes;
	return NULL;
}

static void dpaa_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	struct rte_device *rdev = dev->device;
	struct rte_dpaa_device *dpaa_dev;
	struct rte_intr_handle *intr_handle;
	uint64_t buf;
	int bytes_read;

	dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
	intr_handle = &dpaa_dev->intr_handle;

	bytes_read = read(intr_handle->fd, &buf, sizeof(uint64_t));
	if (bytes_read < 0)
		DPAA_PMD_ERR("Error reading eventfd\n");
	dpaa_eth_link_update(dev, 0);
	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	/* Change tx callback to the real one */
	if (dpaa_intf->cgr_tx)
		dev->tx_pkt_burst = dpaa_eth_queue_tx_slow;
	else
		dev->tx_pkt_burst = dpaa_eth_queue_tx;

	fman_if_enable_rx(dev->process_private);

	return 0;
}

static void dpaa_eth_dev_stop(struct rte_eth_dev *dev)
{
	struct fman_if *fif = dev->process_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_disable_rx(fif);
	dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
}

static void dpaa_eth_dev_close(struct rte_eth_dev *dev)
{
	struct fman_if *fif = dev->process_private;
	struct __fman_if *__fif;
	struct rte_device *rdev = dev->device;
	struct rte_dpaa_device *dpaa_dev;
	struct rte_intr_handle *intr_handle;

	PMD_INIT_FUNC_TRACE();

	dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
	intr_handle = &dpaa_dev->intr_handle;
	__fif = container_of(fif, struct __fman_if, __if);

	dpaa_eth_dev_stop(dev);

	if (intr_handle && intr_handle->fd &&
	    dev->data->dev_conf.intr_conf.lsc != 0) {
		dpaa_intr_disable(__fif->node_name);
		rte_intr_callback_unregister(intr_handle,
					     dpaa_interrupt_handler,
					     (void *)dev);
	}
}
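
/*
 * Usage sketch (illustrative only): the handler above raises the standard
 * LSC event, which an application consumes by registering a callback with
 * the hypothetical signature below (matching rte_eth_dev_cb_fn):
 *
 *	static int on_lsc(uint16_t port_id, enum rte_eth_event_type ev,
 *			  void *cb_arg, void *ret_param);
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      on_lsc, NULL);
 */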

static int
dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused,
		    char *fw_version,
		    size_t fw_size)
{
	int ret;
	FILE *svr_file = NULL;
	unsigned int svr_ver = 0;

	PMD_INIT_FUNC_TRACE();

	svr_file = fopen(DPAA_SOC_ID_FILE, "r");
	if (!svr_file) {
		DPAA_PMD_ERR("Unable to open SoC device");
		return -ENOTSUP; /* Not supported on this infra */
	}
	if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
		dpaa_svr_family = svr_ver & SVR_MASK;
	else
		DPAA_PMD_ERR("Unable to read SoC device");

	fclose(svr_file);

	ret = snprintf(fw_version, fw_size, "SVR:%x-fman-v%x",
		       svr_ver, fman_ip_rev);
	ret += 1; /* add the size of '\0' */

	if (fw_size < (uint32_t)ret)
		return ret;
	else
		return 0;
}
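
/*
 * Usage sketch (illustrative only): the function returns a string of the
 * form "SVR:<hex>-fman-v<hex>", retrieved via the generic API:
 *
 *	char fw[64];
 *	if (rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw)) == 0)
 *		printf("fw: %s\n", fw);
 */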

static int dpaa_eth_dev_info(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct fman_if *fif = dev->process_private;

	DPAA_PMD_DEBUG(": %s", dpaa_intf->name);

	dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
	dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
	dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
	dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vfs = 0;
	dev_info->max_vmdq_pools = ETH_16_POOLS;
	dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;

	if (fif->mac_type == fman_mac_1g) {
		dev_info->speed_capa = ETH_LINK_SPEED_1G;
	} else if (fif->mac_type == fman_mac_2_5g) {
		dev_info->speed_capa = ETH_LINK_SPEED_1G
					| ETH_LINK_SPEED_2_5G;
	} else if (fif->mac_type == fman_mac_10g) {
		dev_info->speed_capa = ETH_LINK_SPEED_1G
					| ETH_LINK_SPEED_2_5G
					| ETH_LINK_SPEED_10G;
	} else {
		DPAA_PMD_ERR("invalid link_speed: %s, %d",
			     dpaa_intf->name, fif->mac_type);
		return -EINVAL;
	}

	dev_info->rx_offload_capa = dev_rx_offloads_sup |
					dev_rx_offloads_nodis;
	dev_info->tx_offload_capa = dev_tx_offloads_sup |
					dev_tx_offloads_nodis;
	dev_info->default_rxportconf.burst_size = DPAA_DEF_RX_BURST_SIZE;
	dev_info->default_txportconf.burst_size = DPAA_DEF_TX_BURST_SIZE;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_txportconf.ring_size = CGR_TX_CGR_THRESH;
	dev_info->default_rxportconf.ring_size = CGR_RX_PERFQ_THRESH;

	return 0;
}
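
/*
 * Usage sketch (illustrative only): the limits and capability masks filled
 * above are what an application sees through:
 *
 *	struct rte_eth_dev_info info;
 *	if (rte_eth_dev_info_get(port_id, &info) == 0)
 *		printf("max rxq %u, rx offloads 0x%" PRIx64 "\n",
 *		       info.max_rx_queues, info.rx_offload_capa);
 */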

static int
dpaa_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
			__rte_unused uint16_t queue_id,
			struct rte_eth_burst_mode *mode)
{
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	int ret = -EINVAL;
	unsigned int i;
	const struct burst_info {
		uint64_t flags;
		const char *output;
	} rx_offload_map[] = {
			{DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
			{DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
			{DEV_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
			{DEV_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
			{DEV_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
			{DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
			{DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
	};

	/* Update Rx offload info */
	for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
		if (eth_conf->rxmode.offloads & rx_offload_map[i].flags) {
			snprintf(mode->info, sizeof(mode->info), "%s",
				 rx_offload_map[i].output);
			ret = 0;
			break;
		}
	}
	return ret;
}

static int
dpaa_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
			__rte_unused uint16_t queue_id,
			struct rte_eth_burst_mode *mode)
{
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	int ret = -EINVAL;
	unsigned int i;
	const struct burst_info {
		uint64_t flags;
		const char *output;
	} tx_offload_map[] = {
			{DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
			{DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
			{DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
			{DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
			{DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
			{DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
			{DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
			{DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
	};

	/* Update Tx offload info */
	for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
		if (eth_conf->txmode.offloads & tx_offload_map[i].flags) {
			snprintf(mode->info, sizeof(mode->info), "%s",
				 tx_offload_map[i].output);
			ret = 0;
			break;
		}
	}
	return ret;
}
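
/*
 * Usage sketch (illustrative only): the burst mode strings built above are
 * retrieved per queue through the generic API:
 *
 *	struct rte_eth_burst_mode bm;
 *	if (rte_eth_rx_burst_mode_get(port_id, 0, &bm) == 0)
 *		printf("rx burst mode: %s\n", bm.info);
 */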
"Up" : "Down"); 564e124a69fSShreyansh Jain return 0; 565e124a69fSShreyansh Jain } 566e124a69fSShreyansh Jain 567d5b0924bSMatan Azrad static int dpaa_eth_stats_get(struct rte_eth_dev *dev, 568e1ad3a05SShreyansh Jain struct rte_eth_stats *stats) 569e1ad3a05SShreyansh Jain { 570e1ad3a05SShreyansh Jain PMD_INIT_FUNC_TRACE(); 571e1ad3a05SShreyansh Jain 5726b10d1f7SNipun Gupta fman_if_stats_get(dev->process_private, stats); 573d5b0924bSMatan Azrad return 0; 574e1ad3a05SShreyansh Jain } 575e1ad3a05SShreyansh Jain 5769970a9adSIgor Romanov static int dpaa_eth_stats_reset(struct rte_eth_dev *dev) 577e1ad3a05SShreyansh Jain { 578e1ad3a05SShreyansh Jain PMD_INIT_FUNC_TRACE(); 579e1ad3a05SShreyansh Jain 5806b10d1f7SNipun Gupta fman_if_stats_reset(dev->process_private); 5819970a9adSIgor Romanov 5829970a9adSIgor Romanov return 0; 583e1ad3a05SShreyansh Jain } 58495ef603dSShreyansh Jain 585b21ed3e2SHemant Agrawal static int 586b21ed3e2SHemant Agrawal dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 587b21ed3e2SHemant Agrawal unsigned int n) 588b21ed3e2SHemant Agrawal { 589b21ed3e2SHemant Agrawal unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings); 590b21ed3e2SHemant Agrawal uint64_t values[sizeof(struct dpaa_if_stats) / 8]; 591b21ed3e2SHemant Agrawal 592b21ed3e2SHemant Agrawal if (n < num) 593b21ed3e2SHemant Agrawal return num; 594b21ed3e2SHemant Agrawal 595339c1025SHemant Agrawal if (xstats == NULL) 596339c1025SHemant Agrawal return 0; 597339c1025SHemant Agrawal 5986b10d1f7SNipun Gupta fman_if_stats_get_all(dev->process_private, values, 599b21ed3e2SHemant Agrawal sizeof(struct dpaa_if_stats) / 8); 600b21ed3e2SHemant Agrawal 601b21ed3e2SHemant Agrawal for (i = 0; i < num; i++) { 602b21ed3e2SHemant Agrawal xstats[i].id = i; 603b21ed3e2SHemant Agrawal xstats[i].value = values[dpaa_xstats_strings[i].offset / 8]; 604b21ed3e2SHemant Agrawal } 605b21ed3e2SHemant Agrawal return i; 606b21ed3e2SHemant Agrawal } 607b21ed3e2SHemant Agrawal 608b21ed3e2SHemant Agrawal static int 609b21ed3e2SHemant Agrawal dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 610b21ed3e2SHemant Agrawal struct rte_eth_xstat_name *xstats_names, 6115c3fc73eSHemant Agrawal unsigned int limit) 612b21ed3e2SHemant Agrawal { 613b21ed3e2SHemant Agrawal unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings); 614b21ed3e2SHemant Agrawal 6155c3fc73eSHemant Agrawal if (limit < stat_cnt) 6165c3fc73eSHemant Agrawal return stat_cnt; 6175c3fc73eSHemant Agrawal 618b21ed3e2SHemant Agrawal if (xstats_names != NULL) 619b21ed3e2SHemant Agrawal for (i = 0; i < stat_cnt; i++) 6206723c0fcSBruce Richardson strlcpy(xstats_names[i].name, 6216723c0fcSBruce Richardson dpaa_xstats_strings[i].name, 6226723c0fcSBruce Richardson sizeof(xstats_names[i].name)); 623b21ed3e2SHemant Agrawal 624b21ed3e2SHemant Agrawal return stat_cnt; 625b21ed3e2SHemant Agrawal } 626b21ed3e2SHemant Agrawal 627b21ed3e2SHemant Agrawal static int 628b21ed3e2SHemant Agrawal dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 629b21ed3e2SHemant Agrawal uint64_t *values, unsigned int n) 630b21ed3e2SHemant Agrawal { 631b21ed3e2SHemant Agrawal unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings); 632b21ed3e2SHemant Agrawal uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8]; 633b21ed3e2SHemant Agrawal 634b21ed3e2SHemant Agrawal if (!ids) { 635b21ed3e2SHemant Agrawal if (n < stat_cnt) 636b21ed3e2SHemant Agrawal return stat_cnt; 637b21ed3e2SHemant Agrawal 638b21ed3e2SHemant Agrawal if (!values) 639b21ed3e2SHemant Agrawal return 0; 

		fman_if_stats_get_all(dev->process_private, values_copy,
				      sizeof(struct dpaa_if_stats) / 8);

		for (i = 0; i < stat_cnt; i++)
			values[i] =
				values_copy[dpaa_xstats_strings[i].offset / 8];

		return stat_cnt;
	}

	dpaa_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);

	for (i = 0; i < n; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA_PMD_ERR("id value isn't valid");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}

static int
dpaa_xstats_get_names_by_id(
	struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	const uint64_t *ids,
	unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
	struct rte_eth_xstat_name xstats_names_copy[stat_cnt];

	if (!ids)
		return dpaa_xstats_get_names(dev, xstats_names, limit);

	dpaa_xstats_get_names(dev, xstats_names_copy, limit);

	for (i = 0; i < limit; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA_PMD_ERR("id value isn't valid");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return limit;
}

static int dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	fman_if_promiscuous_enable(dev->process_private);

	return 0;
}

static int dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	fman_if_promiscuous_disable(dev->process_private);

	return 0;
}

static int dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	fman_if_set_mcast_filter_table(dev->process_private);

	return 0;
}
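
/*
 * Usage sketch (illustrative only): the promiscuous and multicast-filter
 * toggles above are reached through the generic calls, e.g.:
 *
 *	rte_eth_promiscuous_enable(port_id);
 *	...
 *	rte_eth_promiscuous_disable(port_id);
 *	rte_eth_allmulticast_enable(port_id);
 */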

static int dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	fman_if_reset_mcast_filter_table(dev->process_private);

	return 0;
}

static
int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			    uint16_t nb_desc,
			    unsigned int socket_id __rte_unused,
			    const struct rte_eth_rxconf *rx_conf,
			    struct rte_mempool *mp)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct fman_if *fif = dev->process_private;
	struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx];
	struct qm_mcc_initfq opts = {0};
	u32 flags = 0;
	int ret;
	u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;

	PMD_INIT_FUNC_TRACE();

	if (queue_idx >= dev->data->nb_rx_queues) {
		rte_errno = EOVERFLOW;
		DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
			     (void *)dev, queue_idx, dev->data->nb_rx_queues);
		return -rte_errno;
	}

	/* Rx deferred start is not supported */
	if (rx_conf->rx_deferred_start) {
		DPAA_PMD_ERR("%p:Rx deferred start not supported", (void *)dev);
		return -EINVAL;
	}
	rxq->nb_desc = UINT16_MAX;
	rxq->offloads = rx_conf->offloads;

	DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)",
		      queue_idx, rxq->fqid);

	/* Max packet can fit in single buffer */
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= buffsz) {
		;
	} else if (dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_SCATTER) {
		if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
			buffsz * DPAA_SGT_MAX_ENTRIES) {
			DPAA_PMD_ERR("max RxPkt size %d too big to fit "
				     "MaxSGlist %d",
				     dev->data->dev_conf.rxmode.max_rx_pkt_len,
				     buffsz * DPAA_SGT_MAX_ENTRIES);
			rte_errno = EOVERFLOW;
			return -rte_errno;
		}
	} else {
		DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is"
			      " larger than a single mbuf (%u) and scattered"
			      " mode has not been requested",
			      dev->data->dev_conf.rxmode.max_rx_pkt_len,
			      buffsz - RTE_PKTMBUF_HEADROOM);
	}
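
	/* Descriptive note (added): the first queue that uses a new mempool
	 * programs the FMAN-side view of the pool below - internal-context
	 * params, frame-data offset and buffer-pool id/size - so hardware
	 * can allocate Rx buffers from it directly.
	 */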
	if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
		struct fman_if_ic_params icp;
		uint32_t fd_offset;
		uint32_t bp_size;

		if (!mp->pool_data) {
			DPAA_PMD_ERR("Not an offloaded buffer pool!");
			return -1;
		}
		dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

		memset(&icp, 0, sizeof(icp));
		/* set ICEOF to the default value, which is 0 */
		icp.iciof = DEFAULT_ICIOF;
		icp.iceof = DEFAULT_RX_ICEOF;
		icp.icsz = DEFAULT_ICSZ;
		fman_if_set_ic_params(fif, &icp);

		fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
		fman_if_set_fdoff(fif, fd_offset);

		/* Buffer pool size should be equal to Dataroom Size */
		bp_size = rte_pktmbuf_data_room_size(mp);
		fman_if_set_bp(fif, mp->size,
			       dpaa_intf->bp_info->bpid, bp_size);
		dpaa_intf->valid = 1;
		DPAA_PMD_DEBUG("if:%s fd_offset = %d offset = %d",
			       dpaa_intf->name, fd_offset,
			       fman_if_get_fdoff(fif));
	}
	DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name,
		       fman_if_get_sg_enable(fif),
		       dev->data->dev_conf.rxmode.max_rx_pkt_len);
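
	/* Descriptive note (added): queues below dpaa_push_mode_max_queue
	 * are initialized in push mode - the FQ is scheduled to a dedicated
	 * channel/portal and serviced via DQRR callbacks - instead of being
	 * polled in pull mode.
	 */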
	/* checking if push mode only, no error check for now */
	if (!rxq->is_static &&
	    dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
		struct qman_portal *qp;
		int q_fd;

		dpaa_push_queue_idx++;
		opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
		opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK |
				QM_FQCTRL_CTXASTASHING |
				QM_FQCTRL_PREFERINCACHE;
		opts.fqd.context_a.stashing.exclusive = 0;
		/* In a multicore scenario stashing becomes a bottleneck on
		 * LS1046, so do not enable stashing in this case.
		 */
		if (dpaa_svr_family != SVR_LS1046A_FAMILY)
			opts.fqd.context_a.stashing.annotation_cl =
						DPAA_IF_RX_ANNOTATION_STASH;
		opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
		opts.fqd.context_a.stashing.context_cl =
						DPAA_IF_RX_CONTEXT_STASH;

		/* Create a channel and associate given queue with the channel */
		qman_alloc_pool_range((u32 *)&rxq->ch_id, 1, 1, 0);
		opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
		opts.fqd.dest.channel = rxq->ch_id;
		opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
		flags = QMAN_INITFQ_FLAG_SCHED;

		/* Configure tail drop */
		if (dpaa_intf->cgr_rx) {
			opts.we_mask |= QM_INITFQ_WE_CGID;
			opts.fqd.cgid = dpaa_intf->cgr_rx[queue_idx].cgrid;
			opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
		}
		ret = qman_init_fq(rxq, flags, &opts);
		if (ret) {
			DPAA_PMD_ERR("Channel/Q association failed. fqid 0x%x "
				     "ret:%d(%s)", rxq->fqid, ret,
				     strerror(ret));
			return ret;
		}
		if (dpaa_svr_family == SVR_LS1043A_FAMILY) {
			rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb_no_prefetch;
		} else {
			rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
			rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
		}

		rxq->is_static = true;

		/* Allocate qman specific portals */
		qp = fsl_qman_fq_portal_create(&q_fd);
		if (!qp) {
			DPAA_PMD_ERR("Unable to alloc fq portal");
			return -1;
		}
		rxq->qp = qp;

		/* Set up the device interrupt handler */
		if (!dev->intr_handle) {
			struct rte_dpaa_device *dpaa_dev;
			struct rte_device *rdev = dev->device;

			dpaa_dev = container_of(rdev, struct rte_dpaa_device,
						device);
			dev->intr_handle = &dpaa_dev->intr_handle;
			dev->intr_handle->intr_vec = rte_zmalloc(NULL,
					dpaa_push_mode_max_queue, 0);
			if (!dev->intr_handle->intr_vec) {
				DPAA_PMD_ERR("intr_vec alloc failed");
				return -ENOMEM;
			}
			dev->intr_handle->nb_efd = dpaa_push_mode_max_queue;
			dev->intr_handle->max_intr = dpaa_push_mode_max_queue;
		}

		dev->intr_handle->type = RTE_INTR_HANDLE_EXT;
		dev->intr_handle->intr_vec[queue_idx] = queue_idx + 1;
		dev->intr_handle->efds[queue_idx] = q_fd;
		rxq->q_fd = q_fd;
	}
	rxq->bp_array = rte_dpaa_bpid_info;
	dev->data->rx_queues[queue_idx] = rxq;

	/* configure the CGR size as per the desc size */
	if (dpaa_intf->cgr_rx) {
		struct qm_mcc_initcgr cgr_opts = {0};

		rxq->nb_desc = nb_desc;
		/* Enable tail drop with cgr on this queue */
		qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, nb_desc, 0);
		ret = qman_modify_cgr(dpaa_intf->cgr_rx, 0, &cgr_opts);
		if (ret) {
			DPAA_PMD_WARN(
				"rx taildrop modify fail on fqid %d (ret=%d)",
				rxq->fqid, ret);
		}
	}

	return 0;
}
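
/*
 * Usage sketch (illustrative only): a typical application call reaching the
 * function above, with a hypothetical mempool mb_pool:
 *
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 512,
 *				     rte_socket_id(), NULL, mb_pool);
 */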

int
dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id,
		u16 ch_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	int ret;
	u32 flags = 0;
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
	struct qm_mcc_initfq opts = {0};

	if (dpaa_push_mode_max_queue)
		DPAA_PMD_WARN("PUSH mode q and EVENTDEV are not compatible\n"
			      "PUSH mode already enabled for first %d queues.\n"
			      "To disable set DPAA_PUSH_QUEUES_NUMBER to 0\n",
			      dpaa_push_mode_max_queue);

	dpaa_poll_queue_default_config(&opts);

	switch (queue_conf->ev.sched_type) {
	case RTE_SCHED_TYPE_ATOMIC:
		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
		/* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
		 * configuration with HOLD_ACTIVE setting
		 */
		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
		rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_atomic;
		break;
	case RTE_SCHED_TYPE_ORDERED:
		DPAA_PMD_ERR("Ordered queue schedule type is not supported\n");
		return -1;
	default:
		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
		rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_parallel;
		break;
	}

	opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
	opts.fqd.dest.channel = ch_id;
	opts.fqd.dest.wq = queue_conf->ev.priority;

	if (dpaa_intf->cgr_rx) {
		opts.we_mask |= QM_INITFQ_WE_CGID;
		opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
		opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
	}

	flags = QMAN_INITFQ_FLAG_SCHED;

	ret = qman_init_fq(rxq, flags, &opts);
	if (ret) {
		DPAA_PMD_ERR("Ev-Channel/Q association failed. fqid 0x%x "
			     "ret:%d(%s)", rxq->fqid, ret, strerror(ret));
		return ret;
	}

	/* copy configuration which needs to be filled during dequeue */
	memcpy(&rxq->ev, &queue_conf->ev, sizeof(struct rte_event));
	dev->data->rx_queues[eth_rx_queue_id] = rxq;

	return ret;
}

int
dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id)
{
	struct qm_mcc_initfq opts;
	int ret;
	u32 flags = 0;
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];

	dpaa_poll_queue_default_config(&opts);

	if (dpaa_intf->cgr_rx) {
		opts.we_mask |= QM_INITFQ_WE_CGID;
		opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
		opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
	}

	ret = qman_init_fq(rxq, flags, &opts);
	if (ret) {
		DPAA_PMD_ERR("init rx fqid %d failed with ret: %d",
			     rxq->fqid, ret);
	}

	rxq->cb.dqrr_dpdk_cb = NULL;
	dev->data->rx_queues[eth_rx_queue_id] = NULL;

	return 0;
}
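
/*
 * Usage sketch (illustrative only): attach/detach above are driven by the
 * eventdev Rx adapter when a queue is added; adapter_id is hypothetical:
 *
 *	struct rte_event_eth_rx_adapter_queue_conf qconf = {0};
 *	qconf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *	ret = rte_event_eth_rx_adapter_queue_add(adapter_id, port_id,
 *						 0, &qconf);
 */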

static
void dpaa_eth_rx_queue_release(void *rxq __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static
int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			    uint16_t nb_desc __rte_unused,
			    unsigned int socket_id __rte_unused,
			    const struct rte_eth_txconf *tx_conf)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct qman_fq *txq = &dpaa_intf->tx_queues[queue_idx];

	PMD_INIT_FUNC_TRACE();

	/* Tx deferred start is not supported */
	if (tx_conf->tx_deferred_start) {
		DPAA_PMD_ERR("%p:Tx deferred start not supported", (void *)dev);
		return -EINVAL;
	}
	txq->nb_desc = UINT16_MAX;
	txq->offloads = tx_conf->offloads;

	if (queue_idx >= dev->data->nb_tx_queues) {
		rte_errno = EOVERFLOW;
		DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
			     (void *)dev, queue_idx, dev->data->nb_tx_queues);
		return -rte_errno;
	}

	DPAA_PMD_INFO("Tx queue setup for queue index: %d fq_id (0x%x)",
		      queue_idx, txq->fqid);
	dev->data->tx_queues[queue_idx] = txq;

	return 0;
}

static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static uint32_t
dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct qman_fq *rxq = &dpaa_intf->rx_queues[rx_queue_id];
	u32 frm_cnt = 0;

	PMD_INIT_FUNC_TRACE();

	if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) {
		DPAA_PMD_DEBUG("RX frame count for q(%d) is %u",
			       rx_queue_id, frm_cnt);
	}
	return frm_cnt;
}
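
/*
 * Usage sketch (illustrative only): the frame count above backs the generic
 * queue-occupancy query:
 *
 *	int in_flight = rte_eth_rx_queue_count(port_id, 0);
 */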
1096f231d48dSRohit Raj dpaa_update_link_status(__fif->node_name, ETH_LINK_UP); 1097f231d48dSRohit Raj else 1098e124a69fSShreyansh Jain dpaa_eth_dev_start(dev); 1099e124a69fSShreyansh Jain return 0; 1100e124a69fSShreyansh Jain } 1101e124a69fSShreyansh Jain 1102fe6c6032SShreyansh Jain static int 110312a4678aSShreyansh Jain dpaa_flow_ctrl_set(struct rte_eth_dev *dev, 110412a4678aSShreyansh Jain struct rte_eth_fc_conf *fc_conf) 110512a4678aSShreyansh Jain { 110612a4678aSShreyansh Jain struct dpaa_if *dpaa_intf = dev->data->dev_private; 110712a4678aSShreyansh Jain struct rte_eth_fc_conf *net_fc; 110812a4678aSShreyansh Jain 110912a4678aSShreyansh Jain PMD_INIT_FUNC_TRACE(); 111012a4678aSShreyansh Jain 111112a4678aSShreyansh Jain if (!(dpaa_intf->fc_conf)) { 111212a4678aSShreyansh Jain dpaa_intf->fc_conf = rte_zmalloc(NULL, 111312a4678aSShreyansh Jain sizeof(struct rte_eth_fc_conf), MAX_CACHELINE); 111412a4678aSShreyansh Jain if (!dpaa_intf->fc_conf) { 111512a4678aSShreyansh Jain DPAA_PMD_ERR("unable to save flow control info"); 111612a4678aSShreyansh Jain return -ENOMEM; 111712a4678aSShreyansh Jain } 111812a4678aSShreyansh Jain } 111912a4678aSShreyansh Jain net_fc = dpaa_intf->fc_conf; 112012a4678aSShreyansh Jain 112112a4678aSShreyansh Jain if (fc_conf->high_water < fc_conf->low_water) { 112212a4678aSShreyansh Jain DPAA_PMD_ERR("Incorrect Flow Control Configuration"); 112312a4678aSShreyansh Jain return -EINVAL; 112412a4678aSShreyansh Jain } 112512a4678aSShreyansh Jain 112612a4678aSShreyansh Jain if (fc_conf->mode == RTE_FC_NONE) { 112712a4678aSShreyansh Jain return 0; 112812a4678aSShreyansh Jain } else if (fc_conf->mode == RTE_FC_TX_PAUSE || 112912a4678aSShreyansh Jain fc_conf->mode == RTE_FC_FULL) { 11306b10d1f7SNipun Gupta fman_if_set_fc_threshold(dev->process_private, 11316b10d1f7SNipun Gupta fc_conf->high_water, 113212a4678aSShreyansh Jain fc_conf->low_water, 113312a4678aSShreyansh Jain dpaa_intf->bp_info->bpid); 113412a4678aSShreyansh Jain if (fc_conf->pause_time) 11356b10d1f7SNipun Gupta fman_if_set_fc_quanta(dev->process_private, 113612a4678aSShreyansh Jain fc_conf->pause_time); 113712a4678aSShreyansh Jain } 113812a4678aSShreyansh Jain 113912a4678aSShreyansh Jain /* Save the information in dpaa device */ 114012a4678aSShreyansh Jain net_fc->pause_time = fc_conf->pause_time; 114112a4678aSShreyansh Jain net_fc->high_water = fc_conf->high_water; 114212a4678aSShreyansh Jain net_fc->low_water = fc_conf->low_water; 114312a4678aSShreyansh Jain net_fc->send_xon = fc_conf->send_xon; 114412a4678aSShreyansh Jain net_fc->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd; 114512a4678aSShreyansh Jain net_fc->mode = fc_conf->mode; 114612a4678aSShreyansh Jain net_fc->autoneg = fc_conf->autoneg; 114712a4678aSShreyansh Jain 114812a4678aSShreyansh Jain return 0; 114912a4678aSShreyansh Jain } 115012a4678aSShreyansh Jain 115112a4678aSShreyansh Jain static int 115212a4678aSShreyansh Jain dpaa_flow_ctrl_get(struct rte_eth_dev *dev, 115312a4678aSShreyansh Jain struct rte_eth_fc_conf *fc_conf) 115412a4678aSShreyansh Jain { 115512a4678aSShreyansh Jain struct dpaa_if *dpaa_intf = dev->data->dev_private; 115612a4678aSShreyansh Jain struct rte_eth_fc_conf *net_fc = dpaa_intf->fc_conf; 115712a4678aSShreyansh Jain int ret; 115812a4678aSShreyansh Jain 115912a4678aSShreyansh Jain PMD_INIT_FUNC_TRACE(); 116012a4678aSShreyansh Jain 116112a4678aSShreyansh Jain if (net_fc) { 116212a4678aSShreyansh Jain fc_conf->pause_time = net_fc->pause_time; 116312a4678aSShreyansh Jain fc_conf->high_water = net_fc->high_water; 
116412a4678aSShreyansh Jain fc_conf->low_water = net_fc->low_water; 116512a4678aSShreyansh Jain fc_conf->send_xon = net_fc->send_xon; 116612a4678aSShreyansh Jain fc_conf->mac_ctrl_frame_fwd = net_fc->mac_ctrl_frame_fwd; 116712a4678aSShreyansh Jain fc_conf->mode = net_fc->mode; 116812a4678aSShreyansh Jain fc_conf->autoneg = net_fc->autoneg; 116912a4678aSShreyansh Jain return 0; 117012a4678aSShreyansh Jain } 11716b10d1f7SNipun Gupta ret = fman_if_get_fc_threshold(dev->process_private); 117212a4678aSShreyansh Jain if (ret) { 117312a4678aSShreyansh Jain fc_conf->mode = RTE_FC_TX_PAUSE; 11746b10d1f7SNipun Gupta fc_conf->pause_time = 11756b10d1f7SNipun Gupta fman_if_get_fc_quanta(dev->process_private); 117612a4678aSShreyansh Jain } else { 117712a4678aSShreyansh Jain fc_conf->mode = RTE_FC_NONE; 117812a4678aSShreyansh Jain } 117912a4678aSShreyansh Jain 118012a4678aSShreyansh Jain return 0; 118112a4678aSShreyansh Jain } 118212a4678aSShreyansh Jain 118312a4678aSShreyansh Jain static int 1184fe6c6032SShreyansh Jain dpaa_dev_add_mac_addr(struct rte_eth_dev *dev, 11856d13ea8eSOlivier Matz struct rte_ether_addr *addr, 1186fe6c6032SShreyansh Jain uint32_t index, 1187fe6c6032SShreyansh Jain __rte_unused uint32_t pool) 1188fe6c6032SShreyansh Jain { 1189fe6c6032SShreyansh Jain int ret; 1190fe6c6032SShreyansh Jain 1191fe6c6032SShreyansh Jain PMD_INIT_FUNC_TRACE(); 1192fe6c6032SShreyansh Jain 11936b10d1f7SNipun Gupta ret = fman_if_add_mac_addr(dev->process_private, 11946b10d1f7SNipun Gupta addr->addr_bytes, index); 1195fe6c6032SShreyansh Jain 1196fe6c6032SShreyansh Jain if (ret) 1197b7c7ff6eSStephen Hemminger DPAA_PMD_ERR("Adding the MAC ADDR failed: err = %d", ret); 1198fe6c6032SShreyansh Jain return 0; 1199fe6c6032SShreyansh Jain } 1200fe6c6032SShreyansh Jain 1201fe6c6032SShreyansh Jain static void 1202fe6c6032SShreyansh Jain dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev, 1203fe6c6032SShreyansh Jain uint32_t index) 1204fe6c6032SShreyansh Jain { 1205fe6c6032SShreyansh Jain PMD_INIT_FUNC_TRACE(); 1206fe6c6032SShreyansh Jain 12076b10d1f7SNipun Gupta fman_if_clear_mac_addr(dev->process_private, index); 1208fe6c6032SShreyansh Jain } 1209fe6c6032SShreyansh Jain 1210caccf8b3SOlivier Matz static int 1211fe6c6032SShreyansh Jain dpaa_dev_set_mac_addr(struct rte_eth_dev *dev, 12126d13ea8eSOlivier Matz struct rte_ether_addr *addr) 1213fe6c6032SShreyansh Jain { 1214fe6c6032SShreyansh Jain int ret; 1215fe6c6032SShreyansh Jain 1216fe6c6032SShreyansh Jain PMD_INIT_FUNC_TRACE(); 1217fe6c6032SShreyansh Jain 12186b10d1f7SNipun Gupta ret = fman_if_add_mac_addr(dev->process_private, addr->addr_bytes, 0); 1219fe6c6032SShreyansh Jain if (ret) 1220b7c7ff6eSStephen Hemminger DPAA_PMD_ERR("Setting the MAC ADDR failed %d", ret); 1221caccf8b3SOlivier Matz 1222caccf8b3SOlivier Matz return ret; 1223fe6c6032SShreyansh Jain } 1224fe6c6032SShreyansh Jain 1225b1b5d6c9SNipun Gupta static int dpaa_dev_queue_intr_enable(struct rte_eth_dev *dev, 1226b1b5d6c9SNipun Gupta uint16_t queue_id) 1227b1b5d6c9SNipun Gupta { 1228b1b5d6c9SNipun Gupta struct dpaa_if *dpaa_intf = dev->data->dev_private; 1229b1b5d6c9SNipun Gupta struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id]; 1230b1b5d6c9SNipun Gupta 1231b1b5d6c9SNipun Gupta if (!rxq->is_static) 1232b1b5d6c9SNipun Gupta return -EINVAL; 1233b1b5d6c9SNipun Gupta 1234b1b5d6c9SNipun Gupta return qman_fq_portal_irqsource_add(rxq->qp, QM_PIRQ_DQRI); 1235b1b5d6c9SNipun Gupta } 1236b1b5d6c9SNipun Gupta 1237b1b5d6c9SNipun Gupta static int dpaa_dev_queue_intr_disable(struct rte_eth_dev *dev, 
1238b1b5d6c9SNipun Gupta uint16_t queue_id) 1239b1b5d6c9SNipun Gupta { 1240b1b5d6c9SNipun Gupta struct dpaa_if *dpaa_intf = dev->data->dev_private; 1241b1b5d6c9SNipun Gupta struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id]; 1242b1b5d6c9SNipun Gupta uint32_t temp; 1243b1b5d6c9SNipun Gupta ssize_t temp1; 1244b1b5d6c9SNipun Gupta 1245b1b5d6c9SNipun Gupta if (!rxq->is_static) 1246b1b5d6c9SNipun Gupta return -EINVAL; 1247b1b5d6c9SNipun Gupta 1248b1b5d6c9SNipun Gupta qman_fq_portal_irqsource_remove(rxq->qp, ~0); 1249b1b5d6c9SNipun Gupta 1250b1b5d6c9SNipun Gupta temp1 = read(rxq->q_fd, &temp, sizeof(temp)); 1251b1b5d6c9SNipun Gupta if (temp1 != sizeof(temp)) 1252df80d4f8SHemant Agrawal DPAA_PMD_ERR("irq read error"); 1253b1b5d6c9SNipun Gupta 1254b1b5d6c9SNipun Gupta qman_fq_portal_thread_irq(rxq->qp); 1255b1b5d6c9SNipun Gupta 1256b1b5d6c9SNipun Gupta return 0; 1257b1b5d6c9SNipun Gupta } 1258b1b5d6c9SNipun Gupta 12592cf9264fSHemant Agrawal static void 12602cf9264fSHemant Agrawal dpaa_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 12612cf9264fSHemant Agrawal struct rte_eth_rxq_info *qinfo) 12622cf9264fSHemant Agrawal { 12632cf9264fSHemant Agrawal struct dpaa_if *dpaa_intf = dev->data->dev_private; 12642cf9264fSHemant Agrawal struct qman_fq *rxq; 12652cf9264fSHemant Agrawal 12662cf9264fSHemant Agrawal rxq = dev->data->rx_queues[queue_id]; 12672cf9264fSHemant Agrawal 12682cf9264fSHemant Agrawal qinfo->mp = dpaa_intf->bp_info->mp; 12692cf9264fSHemant Agrawal qinfo->scattered_rx = dev->data->scattered_rx; 12702cf9264fSHemant Agrawal qinfo->nb_desc = rxq->nb_desc; 12712cf9264fSHemant Agrawal qinfo->conf.rx_free_thresh = 1; 12722cf9264fSHemant Agrawal qinfo->conf.rx_drop_en = 1; 12732cf9264fSHemant Agrawal qinfo->conf.rx_deferred_start = 0; 12742cf9264fSHemant Agrawal qinfo->conf.offloads = rxq->offloads; 12752cf9264fSHemant Agrawal } 12762cf9264fSHemant Agrawal 12772cf9264fSHemant Agrawal static void 12782cf9264fSHemant Agrawal dpaa_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 12792cf9264fSHemant Agrawal struct rte_eth_txq_info *qinfo) 12802cf9264fSHemant Agrawal { 12812cf9264fSHemant Agrawal struct qman_fq *txq; 12822cf9264fSHemant Agrawal 12832cf9264fSHemant Agrawal txq = dev->data->tx_queues[queue_id]; 12842cf9264fSHemant Agrawal 12852cf9264fSHemant Agrawal qinfo->nb_desc = txq->nb_desc; 12862cf9264fSHemant Agrawal qinfo->conf.tx_thresh.pthresh = 0; 12872cf9264fSHemant Agrawal qinfo->conf.tx_thresh.hthresh = 0; 12882cf9264fSHemant Agrawal qinfo->conf.tx_thresh.wthresh = 0; 12892cf9264fSHemant Agrawal 12902cf9264fSHemant Agrawal qinfo->conf.tx_free_thresh = 0; 12912cf9264fSHemant Agrawal qinfo->conf.tx_rs_thresh = 0; 12922cf9264fSHemant Agrawal qinfo->conf.offloads = txq->offloads; 12932cf9264fSHemant Agrawal qinfo->conf.tx_deferred_start = 0; 12942cf9264fSHemant Agrawal } 12952cf9264fSHemant Agrawal 1296ff9e112dSShreyansh Jain static struct eth_dev_ops dpaa_devops = { 1297ff9e112dSShreyansh Jain .dev_configure = dpaa_eth_dev_configure, 1298ff9e112dSShreyansh Jain .dev_start = dpaa_eth_dev_start, 1299ff9e112dSShreyansh Jain .dev_stop = dpaa_eth_dev_stop, 1300ff9e112dSShreyansh Jain .dev_close = dpaa_eth_dev_close, 1301799db456SShreyansh Jain .dev_infos_get = dpaa_eth_dev_info, 1302a7bdc3bdSShreyansh Jain .dev_supported_ptypes_get = dpaa_supported_ptypes_get, 130337f9b54bSShreyansh Jain 130437f9b54bSShreyansh Jain .rx_queue_setup = dpaa_eth_rx_queue_setup, 130537f9b54bSShreyansh Jain .tx_queue_setup = dpaa_eth_tx_queue_setup, 130637f9b54bSShreyansh Jain .rx_queue_release = 
dpaa_eth_rx_queue_release, 130737f9b54bSShreyansh Jain .tx_queue_release = dpaa_eth_tx_queue_release, 1308b005d729SHemant Agrawal .rx_queue_count = dpaa_dev_rx_queue_count, 13092e6f5657SApeksha Gupta .rx_burst_mode_get = dpaa_dev_rx_burst_mode_get, 13102e6f5657SApeksha Gupta .tx_burst_mode_get = dpaa_dev_tx_burst_mode_get, 13112cf9264fSHemant Agrawal .rxq_info_get = dpaa_rxq_info_get, 13122cf9264fSHemant Agrawal .txq_info_get = dpaa_txq_info_get, 13132cf9264fSHemant Agrawal 131412a4678aSShreyansh Jain .flow_ctrl_get = dpaa_flow_ctrl_get, 131512a4678aSShreyansh Jain .flow_ctrl_set = dpaa_flow_ctrl_set, 131612a4678aSShreyansh Jain 1317e124a69fSShreyansh Jain .link_update = dpaa_eth_link_update, 1318e1ad3a05SShreyansh Jain .stats_get = dpaa_eth_stats_get, 1319b21ed3e2SHemant Agrawal .xstats_get = dpaa_dev_xstats_get, 1320b21ed3e2SHemant Agrawal .xstats_get_by_id = dpaa_xstats_get_by_id, 1321b21ed3e2SHemant Agrawal .xstats_get_names_by_id = dpaa_xstats_get_names_by_id, 1322b21ed3e2SHemant Agrawal .xstats_get_names = dpaa_xstats_get_names, 1323b21ed3e2SHemant Agrawal .xstats_reset = dpaa_eth_stats_reset, 1324e1ad3a05SShreyansh Jain .stats_reset = dpaa_eth_stats_reset, 132595ef603dSShreyansh Jain .promiscuous_enable = dpaa_eth_promiscuous_enable, 132695ef603dSShreyansh Jain .promiscuous_disable = dpaa_eth_promiscuous_disable, 132744dd70a3SShreyansh Jain .allmulticast_enable = dpaa_eth_multicast_enable, 132844dd70a3SShreyansh Jain .allmulticast_disable = dpaa_eth_multicast_disable, 13290cbec027SShreyansh Jain .mtu_set = dpaa_mtu_set, 1330e124a69fSShreyansh Jain .dev_set_link_down = dpaa_link_down, 1331e124a69fSShreyansh Jain .dev_set_link_up = dpaa_link_up, 1332fe6c6032SShreyansh Jain .mac_addr_add = dpaa_dev_add_mac_addr, 1333fe6c6032SShreyansh Jain .mac_addr_remove = dpaa_dev_remove_mac_addr, 1334fe6c6032SShreyansh Jain .mac_addr_set = dpaa_dev_set_mac_addr, 1335fe6c6032SShreyansh Jain 1336cf0fab1dSHemant Agrawal .fw_version_get = dpaa_fw_version_get, 1337b1b5d6c9SNipun Gupta 1338b1b5d6c9SNipun Gupta .rx_queue_intr_enable = dpaa_dev_queue_intr_enable, 1339b1b5d6c9SNipun Gupta .rx_queue_intr_disable = dpaa_dev_queue_intr_disable, 1340ff9e112dSShreyansh Jain }; 1341ff9e112dSShreyansh Jain 13428c3495f5SHemant Agrawal static bool 13438c3495f5SHemant Agrawal is_device_supported(struct rte_eth_dev *dev, struct rte_dpaa_driver *drv) 13448c3495f5SHemant Agrawal { 13458c3495f5SHemant Agrawal if (strcmp(dev->device->driver->name, 13468c3495f5SHemant Agrawal drv->driver.name)) 13478c3495f5SHemant Agrawal return false; 13488c3495f5SHemant Agrawal 13498c3495f5SHemant Agrawal return true; 13508c3495f5SHemant Agrawal } 13518c3495f5SHemant Agrawal 13528c3495f5SHemant Agrawal static bool 13538c3495f5SHemant Agrawal is_dpaa_supported(struct rte_eth_dev *dev) 13548c3495f5SHemant Agrawal { 13558c3495f5SHemant Agrawal return is_device_supported(dev, &rte_dpaa_pmd); 13568c3495f5SHemant Agrawal } 13578c3495f5SHemant Agrawal 13581e06b6dcSHemant Agrawal int 13598c3495f5SHemant Agrawal rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on) 13608c3495f5SHemant Agrawal { 13618c3495f5SHemant Agrawal struct rte_eth_dev *dev; 13628c3495f5SHemant Agrawal 13638c3495f5SHemant Agrawal RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); 13648c3495f5SHemant Agrawal 13658c3495f5SHemant Agrawal dev = &rte_eth_devices[port]; 13668c3495f5SHemant Agrawal 13678c3495f5SHemant Agrawal if (!is_dpaa_supported(dev)) 13688c3495f5SHemant Agrawal return -ENOTSUP; 13698c3495f5SHemant Agrawal 13708c3495f5SHemant Agrawal if (on) 13716b10d1f7SNipun 
Gupta fman_if_loopback_enable(dev->process_private); 13728c3495f5SHemant Agrawal else 13736b10d1f7SNipun Gupta fman_if_loopback_disable(dev->process_private); 13748c3495f5SHemant Agrawal 13758c3495f5SHemant Agrawal return 0; 13768c3495f5SHemant Agrawal } 13778c3495f5SHemant Agrawal 13786b10d1f7SNipun Gupta static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf, 13796b10d1f7SNipun Gupta struct fman_if *fman_intf) 138012a4678aSShreyansh Jain { 138112a4678aSShreyansh Jain struct rte_eth_fc_conf *fc_conf; 138212a4678aSShreyansh Jain int ret; 138312a4678aSShreyansh Jain 138412a4678aSShreyansh Jain PMD_INIT_FUNC_TRACE(); 138512a4678aSShreyansh Jain 138612a4678aSShreyansh Jain if (!(dpaa_intf->fc_conf)) { 138712a4678aSShreyansh Jain dpaa_intf->fc_conf = rte_zmalloc(NULL, 138812a4678aSShreyansh Jain sizeof(struct rte_eth_fc_conf), MAX_CACHELINE); 138912a4678aSShreyansh Jain if (!dpaa_intf->fc_conf) { 139012a4678aSShreyansh Jain DPAA_PMD_ERR("unable to save flow control info"); 139112a4678aSShreyansh Jain return -ENOMEM; 139212a4678aSShreyansh Jain } 139312a4678aSShreyansh Jain } 139412a4678aSShreyansh Jain fc_conf = dpaa_intf->fc_conf; 13956b10d1f7SNipun Gupta ret = fman_if_get_fc_threshold(fman_intf); 139612a4678aSShreyansh Jain if (ret) { 139712a4678aSShreyansh Jain fc_conf->mode = RTE_FC_TX_PAUSE; 13986b10d1f7SNipun Gupta fc_conf->pause_time = fman_if_get_fc_quanta(fman_intf); 139912a4678aSShreyansh Jain } else { 140012a4678aSShreyansh Jain fc_conf->mode = RTE_FC_NONE; 140112a4678aSShreyansh Jain } 140212a4678aSShreyansh Jain 140312a4678aSShreyansh Jain return 0; 140412a4678aSShreyansh Jain } 140512a4678aSShreyansh Jain 140637f9b54bSShreyansh Jain /* Initialise an Rx FQ */ 140762f53995SHemant Agrawal static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx, 140837f9b54bSShreyansh Jain uint32_t fqid) 140937f9b54bSShreyansh Jain { 14108d804cf1SHemant Agrawal struct qm_mcc_initfq opts = {0}; 141137f9b54bSShreyansh Jain int ret; 1412f04e7139SHemant Agrawal u32 flags = QMAN_FQ_FLAG_NO_ENQUEUE; 141362f53995SHemant Agrawal struct qm_mcc_initcgr cgr_opts = { 141462f53995SHemant Agrawal .we_mask = QM_CGR_WE_CS_THRES | 141562f53995SHemant Agrawal QM_CGR_WE_CSTD_EN | 141662f53995SHemant Agrawal QM_CGR_WE_MODE, 141762f53995SHemant Agrawal .cgr = { 141862f53995SHemant Agrawal .cstd_en = QM_CGR_EN, 141962f53995SHemant Agrawal .mode = QMAN_CGR_MODE_FRAME 142062f53995SHemant Agrawal } 142162f53995SHemant Agrawal }; 142237f9b54bSShreyansh Jain 1423*4defbc8cSSachin Saxena if (fmc_q || default_q) { 142437f9b54bSShreyansh Jain ret = qman_reserve_fqid(fqid); 142537f9b54bSShreyansh Jain if (ret) { 1426*4defbc8cSSachin Saxena DPAA_PMD_ERR("reserve rx fqid 0x%x failed, ret: %d", 142737f9b54bSShreyansh Jain fqid, ret); 142837f9b54bSShreyansh Jain return -EINVAL; 142937f9b54bSShreyansh Jain } 1430f04e7139SHemant Agrawal } 1431*4defbc8cSSachin Saxena 14328d6fc8b6SHemant Agrawal DPAA_PMD_DEBUG("creating rx fq %p, fqid 0x%x", fq, fqid); 1433f04e7139SHemant Agrawal ret = qman_create_fq(fqid, flags, fq); 143437f9b54bSShreyansh Jain if (ret) { 14356fd3639aSHemant Agrawal DPAA_PMD_ERR("create rx fqid 0x%x failed with ret: %d", 143637f9b54bSShreyansh Jain fqid, ret); 143737f9b54bSShreyansh Jain return ret; 143837f9b54bSShreyansh Jain } 14390c504f69SHemant Agrawal fq->is_static = false; 14405e745593SSunil Kumar Kori 14415e745593SSunil Kumar Kori dpaa_poll_queue_default_config(&opts); 144237f9b54bSShreyansh Jain 144362f53995SHemant Agrawal if (cgr_rx) { 144462f53995SHemant Agrawal /* Enable tail drop with cgr 
on this queue */ 144562f53995SHemant Agrawal qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, td_threshold, 0); 144662f53995SHemant Agrawal cgr_rx->cb = NULL; 144762f53995SHemant Agrawal ret = qman_create_cgr(cgr_rx, QMAN_CGR_FLAG_USE_INIT, 144862f53995SHemant Agrawal &cgr_opts); 144962f53995SHemant Agrawal if (ret) { 145062f53995SHemant Agrawal DPAA_PMD_WARN( 14518d6fc8b6SHemant Agrawal "rx taildrop init fail on rx fqid 0x%x(ret=%d)", 1452f04e7139SHemant Agrawal fq->fqid, ret); 145362f53995SHemant Agrawal goto without_cgr; 145462f53995SHemant Agrawal } 145562f53995SHemant Agrawal opts.we_mask |= QM_INITFQ_WE_CGID; 145662f53995SHemant Agrawal opts.fqd.cgid = cgr_rx->cgrid; 145762f53995SHemant Agrawal opts.fqd.fq_ctrl |= QM_FQCTRL_CGE; 145862f53995SHemant Agrawal } 145962f53995SHemant Agrawal without_cgr: 1460f04e7139SHemant Agrawal ret = qman_init_fq(fq, 0, &opts); 146137f9b54bSShreyansh Jain if (ret) 14628d6fc8b6SHemant Agrawal DPAA_PMD_ERR("init rx fqid 0x%x failed with ret:%d", fqid, ret); 146337f9b54bSShreyansh Jain return ret; 146437f9b54bSShreyansh Jain } 146537f9b54bSShreyansh Jain 146637f9b54bSShreyansh Jain /* Initialise a Tx FQ */ 146737f9b54bSShreyansh Jain static int dpaa_tx_queue_init(struct qman_fq *fq, 14689124e65dSGagandeep Singh struct fman_if *fman_intf, 14699124e65dSGagandeep Singh struct qman_cgr *cgr_tx) 147037f9b54bSShreyansh Jain { 14718d804cf1SHemant Agrawal struct qm_mcc_initfq opts = {0}; 14729124e65dSGagandeep Singh struct qm_mcc_initcgr cgr_opts = { 14739124e65dSGagandeep Singh .we_mask = QM_CGR_WE_CS_THRES | 14749124e65dSGagandeep Singh QM_CGR_WE_CSTD_EN | 14759124e65dSGagandeep Singh QM_CGR_WE_MODE, 14769124e65dSGagandeep Singh .cgr = { 14779124e65dSGagandeep Singh .cstd_en = QM_CGR_EN, 14789124e65dSGagandeep Singh .mode = QMAN_CGR_MODE_FRAME 14799124e65dSGagandeep Singh } 14809124e65dSGagandeep Singh }; 148137f9b54bSShreyansh Jain int ret; 148237f9b54bSShreyansh Jain 148337f9b54bSShreyansh Jain ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID | 148437f9b54bSShreyansh Jain QMAN_FQ_FLAG_TO_DCPORTAL, fq); 148537f9b54bSShreyansh Jain if (ret) { 148637f9b54bSShreyansh Jain DPAA_PMD_ERR("create tx fq failed with ret: %d", ret); 148737f9b54bSShreyansh Jain return ret; 148837f9b54bSShreyansh Jain } 148937f9b54bSShreyansh Jain opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL | 149037f9b54bSShreyansh Jain QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA; 149137f9b54bSShreyansh Jain opts.fqd.dest.channel = fman_intf->tx_channel_id; 149237f9b54bSShreyansh Jain opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY; 149337f9b54bSShreyansh Jain opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE; 149437f9b54bSShreyansh Jain opts.fqd.context_b = 0; 149537f9b54bSShreyansh Jain /* no tx-confirmation */ 149637f9b54bSShreyansh Jain opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi; 149737f9b54bSShreyansh Jain opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo; 14988d6fc8b6SHemant Agrawal DPAA_PMD_DEBUG("init tx fq %p, fqid 0x%x", fq, fq->fqid); 14999124e65dSGagandeep Singh 15009124e65dSGagandeep Singh if (cgr_tx) { 15019124e65dSGagandeep Singh /* Enable tail drop with cgr on this queue */ 15029124e65dSGagandeep Singh qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, 15039124e65dSGagandeep Singh td_tx_threshold, 0); 15049124e65dSGagandeep Singh cgr_tx->cb = NULL; 15059124e65dSGagandeep Singh ret = qman_create_cgr(cgr_tx, QMAN_CGR_FLAG_USE_INIT, 15069124e65dSGagandeep Singh &cgr_opts); 15079124e65dSGagandeep Singh if (ret) { 15089124e65dSGagandeep Singh DPAA_PMD_WARN( 
15099124e65dSGagandeep Singh "rx taildrop init fail on rx fqid 0x%x(ret=%d)", 15109124e65dSGagandeep Singh fq->fqid, ret); 15119124e65dSGagandeep Singh goto without_cgr; 15129124e65dSGagandeep Singh } 15139124e65dSGagandeep Singh opts.we_mask |= QM_INITFQ_WE_CGID; 15149124e65dSGagandeep Singh opts.fqd.cgid = cgr_tx->cgrid; 15159124e65dSGagandeep Singh opts.fqd.fq_ctrl |= QM_FQCTRL_CGE; 15169124e65dSGagandeep Singh DPAA_PMD_DEBUG("Tx FQ tail drop enabled, threshold = %d\n", 15179124e65dSGagandeep Singh td_tx_threshold); 15189124e65dSGagandeep Singh } 15199124e65dSGagandeep Singh without_cgr: 152037f9b54bSShreyansh Jain ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts); 152137f9b54bSShreyansh Jain if (ret) 15228d6fc8b6SHemant Agrawal DPAA_PMD_ERR("init tx fqid 0x%x failed %d", fq->fqid, ret); 152337f9b54bSShreyansh Jain return ret; 152437f9b54bSShreyansh Jain } 152537f9b54bSShreyansh Jain 152605ba55bcSShreyansh Jain #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER 152705ba55bcSShreyansh Jain /* Initialise a DEBUG FQ ([rt]x_error, rx_default). */ 152805ba55bcSShreyansh Jain static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid) 152905ba55bcSShreyansh Jain { 15308d804cf1SHemant Agrawal struct qm_mcc_initfq opts = {0}; 153105ba55bcSShreyansh Jain int ret; 153205ba55bcSShreyansh Jain 153305ba55bcSShreyansh Jain PMD_INIT_FUNC_TRACE(); 153405ba55bcSShreyansh Jain 153505ba55bcSShreyansh Jain ret = qman_reserve_fqid(fqid); 153605ba55bcSShreyansh Jain if (ret) { 153705ba55bcSShreyansh Jain DPAA_PMD_ERR("Reserve debug fqid %d failed with ret: %d", 153805ba55bcSShreyansh Jain fqid, ret); 153905ba55bcSShreyansh Jain return -EINVAL; 154005ba55bcSShreyansh Jain } 154105ba55bcSShreyansh Jain /* "map" this Rx FQ to one of the interfaces Tx FQID */ 154205ba55bcSShreyansh Jain DPAA_PMD_DEBUG("Creating debug fq %p, fqid %d", fq, fqid); 154305ba55bcSShreyansh Jain ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq); 154405ba55bcSShreyansh Jain if (ret) { 154505ba55bcSShreyansh Jain DPAA_PMD_ERR("create debug fqid %d failed with ret: %d", 154605ba55bcSShreyansh Jain fqid, ret); 154705ba55bcSShreyansh Jain return ret; 154805ba55bcSShreyansh Jain } 154905ba55bcSShreyansh Jain opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL; 155005ba55bcSShreyansh Jain opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY; 155105ba55bcSShreyansh Jain ret = qman_init_fq(fq, 0, &opts); 155205ba55bcSShreyansh Jain if (ret) 155305ba55bcSShreyansh Jain DPAA_PMD_ERR("init debug fqid %d failed with ret: %d", 155405ba55bcSShreyansh Jain fqid, ret); 155505ba55bcSShreyansh Jain return ret; 155605ba55bcSShreyansh Jain } 155705ba55bcSShreyansh Jain #endif 155805ba55bcSShreyansh Jain 1559ff9e112dSShreyansh Jain /* Initialise a network interface */ 1560ff9e112dSShreyansh Jain static int 15616b10d1f7SNipun Gupta dpaa_dev_init_secondary(struct rte_eth_dev *eth_dev) 15626b10d1f7SNipun Gupta { 15636b10d1f7SNipun Gupta struct rte_dpaa_device *dpaa_device; 15646b10d1f7SNipun Gupta struct fm_eth_port_cfg *cfg; 15656b10d1f7SNipun Gupta struct dpaa_if *dpaa_intf; 15666b10d1f7SNipun Gupta struct fman_if *fman_intf; 15676b10d1f7SNipun Gupta int dev_id; 15686b10d1f7SNipun Gupta 15696b10d1f7SNipun Gupta PMD_INIT_FUNC_TRACE(); 15706b10d1f7SNipun Gupta 15716b10d1f7SNipun Gupta dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device); 15726b10d1f7SNipun Gupta dev_id = dpaa_device->id.dev_id; 15736b10d1f7SNipun Gupta cfg = dpaa_get_eth_port_cfg(dev_id); 15746b10d1f7SNipun Gupta fman_intf = cfg->fman_if; 15756b10d1f7SNipun Gupta eth_dev->process_private = 
fman_intf; 15766b10d1f7SNipun Gupta 15776b10d1f7SNipun Gupta /* Plugging of UCODE burst API not supported in Secondary */ 15786b10d1f7SNipun Gupta dpaa_intf = eth_dev->data->dev_private; 15796b10d1f7SNipun Gupta eth_dev->rx_pkt_burst = dpaa_eth_queue_rx; 15806b10d1f7SNipun Gupta if (dpaa_intf->cgr_tx) 15816b10d1f7SNipun Gupta eth_dev->tx_pkt_burst = dpaa_eth_queue_tx_slow; 15826b10d1f7SNipun Gupta else 15836b10d1f7SNipun Gupta eth_dev->tx_pkt_burst = dpaa_eth_queue_tx; 15846b10d1f7SNipun Gupta #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP 15856b10d1f7SNipun Gupta qman_set_fq_lookup_table( 15866b10d1f7SNipun Gupta dpaa_intf->rx_queues->qman_fq_lookup_table); 15876b10d1f7SNipun Gupta #endif 15886b10d1f7SNipun Gupta 15896b10d1f7SNipun Gupta return 0; 15906b10d1f7SNipun Gupta } 15916b10d1f7SNipun Gupta 15926b10d1f7SNipun Gupta /* Initialise a network interface */ 15936b10d1f7SNipun Gupta static int 1594ff9e112dSShreyansh Jain dpaa_dev_init(struct rte_eth_dev *eth_dev) 1595ff9e112dSShreyansh Jain { 1596af2828cfSAkhil Goyal int num_rx_fqs, fqid; 159737f9b54bSShreyansh Jain int loop, ret = 0; 1598ff9e112dSShreyansh Jain int dev_id; 1599ff9e112dSShreyansh Jain struct rte_dpaa_device *dpaa_device; 1600ff9e112dSShreyansh Jain struct dpaa_if *dpaa_intf; 160137f9b54bSShreyansh Jain struct fm_eth_port_cfg *cfg; 160237f9b54bSShreyansh Jain struct fman_if *fman_intf; 160337f9b54bSShreyansh Jain struct fman_if_bpool *bp, *tmp_bp; 160462f53995SHemant Agrawal uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES]; 16059124e65dSGagandeep Singh uint32_t cgrid_tx[MAX_DPAA_CORES]; 1606*4defbc8cSSachin Saxena uint32_t dev_rx_fqids[DPAA_MAX_NUM_PCD_QUEUES]; 1607ff9e112dSShreyansh Jain 1608ff9e112dSShreyansh Jain PMD_INIT_FUNC_TRACE(); 1609ff9e112dSShreyansh Jain 1610ff9e112dSShreyansh Jain dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device); 1611ff9e112dSShreyansh Jain dev_id = dpaa_device->id.dev_id; 1612ff9e112dSShreyansh Jain dpaa_intf = eth_dev->data->dev_private; 1613051ae3afSHemant Agrawal cfg = dpaa_get_eth_port_cfg(dev_id); 161437f9b54bSShreyansh Jain fman_intf = cfg->fman_if; 1615ff9e112dSShreyansh Jain 1616ff9e112dSShreyansh Jain dpaa_intf->name = dpaa_device->name; 1617ff9e112dSShreyansh Jain 161837f9b54bSShreyansh Jain /* save fman_if & cfg in the interface struture */ 16196b10d1f7SNipun Gupta eth_dev->process_private = fman_intf; 1620ff9e112dSShreyansh Jain dpaa_intf->ifid = dev_id; 162137f9b54bSShreyansh Jain dpaa_intf->cfg = cfg; 1622ff9e112dSShreyansh Jain 1623*4defbc8cSSachin Saxena memset((char *)dev_rx_fqids, 0, 1624*4defbc8cSSachin Saxena sizeof(uint32_t) * DPAA_MAX_NUM_PCD_QUEUES); 1625*4defbc8cSSachin Saxena 162637f9b54bSShreyansh Jain /* Initialize Rx FQ's */ 16278d6fc8b6SHemant Agrawal if (default_q) { 16288d6fc8b6SHemant Agrawal num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES; 1629*4defbc8cSSachin Saxena } else if (fmc_q) { 1630*4defbc8cSSachin Saxena num_rx_fqs = 1; 16318d6fc8b6SHemant Agrawal } else { 1632*4defbc8cSSachin Saxena /* FMCLESS mode, load balance to multiple cores.*/ 1633*4defbc8cSSachin Saxena num_rx_fqs = rte_lcore_count(); 16348d6fc8b6SHemant Agrawal } 16358d6fc8b6SHemant Agrawal 1636e4f931ccSHemant Agrawal /* Each device can not have more than DPAA_MAX_NUM_PCD_QUEUES RX 163737f9b54bSShreyansh Jain * queues. 
163837f9b54bSShreyansh Jain */ 1639*4defbc8cSSachin Saxena if (num_rx_fqs < 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) { 164037f9b54bSShreyansh Jain DPAA_PMD_ERR("Invalid number of RX queues\n"); 164137f9b54bSShreyansh Jain return -EINVAL; 164237f9b54bSShreyansh Jain } 164337f9b54bSShreyansh Jain 1644*4defbc8cSSachin Saxena if (num_rx_fqs > 0) { 164537f9b54bSShreyansh Jain dpaa_intf->rx_queues = rte_zmalloc(NULL, 164637f9b54bSShreyansh Jain sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE); 16470ff76833SYong Wang if (!dpaa_intf->rx_queues) { 16480ff76833SYong Wang DPAA_PMD_ERR("Failed to alloc mem for RX queues\n"); 16490ff76833SYong Wang return -ENOMEM; 16500ff76833SYong Wang } 1651*4defbc8cSSachin Saxena } else { 1652*4defbc8cSSachin Saxena dpaa_intf->rx_queues = NULL; 1653*4defbc8cSSachin Saxena } 165462f53995SHemant Agrawal 16559124e65dSGagandeep Singh memset(cgrid, 0, sizeof(cgrid)); 16569124e65dSGagandeep Singh memset(cgrid_tx, 0, sizeof(cgrid_tx)); 16579124e65dSGagandeep Singh 16589124e65dSGagandeep Singh /* if DPAA_TX_TAILDROP_THRESHOLD is set, use that value; if 0, it means 16599124e65dSGagandeep Singh * Tx tail drop is disabled. 16609124e65dSGagandeep Singh */ 16619124e65dSGagandeep Singh if (getenv("DPAA_TX_TAILDROP_THRESHOLD")) { 16629124e65dSGagandeep Singh td_tx_threshold = atoi(getenv("DPAA_TX_TAILDROP_THRESHOLD")); 16639124e65dSGagandeep Singh DPAA_PMD_DEBUG("Tail drop threshold env configured: %u", 16649124e65dSGagandeep Singh td_tx_threshold); 16659124e65dSGagandeep Singh /* if a very large value is being configured */ 16669124e65dSGagandeep Singh if (td_tx_threshold > UINT16_MAX) 16679124e65dSGagandeep Singh td_tx_threshold = CGR_RX_PERFQ_THRESH; 16689124e65dSGagandeep Singh } 16699124e65dSGagandeep Singh 167062f53995SHemant Agrawal /* If congestion control is enabled globally*/ 1671*4defbc8cSSachin Saxena if (num_rx_fqs > 0 && td_threshold) { 167262f53995SHemant Agrawal dpaa_intf->cgr_rx = rte_zmalloc(NULL, 167362f53995SHemant Agrawal sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE); 16740ff76833SYong Wang if (!dpaa_intf->cgr_rx) { 16750ff76833SYong Wang DPAA_PMD_ERR("Failed to alloc mem for cgr_rx\n"); 16760ff76833SYong Wang ret = -ENOMEM; 16770ff76833SYong Wang goto free_rx; 16780ff76833SYong Wang } 167962f53995SHemant Agrawal 168062f53995SHemant Agrawal ret = qman_alloc_cgrid_range(&cgrid[0], num_rx_fqs, 1, 0); 168162f53995SHemant Agrawal if (ret != num_rx_fqs) { 168262f53995SHemant Agrawal DPAA_PMD_WARN("insufficient CGRIDs available"); 16830ff76833SYong Wang ret = -EINVAL; 16840ff76833SYong Wang goto free_rx; 168562f53995SHemant Agrawal } 168662f53995SHemant Agrawal } else { 168762f53995SHemant Agrawal dpaa_intf->cgr_rx = NULL; 168862f53995SHemant Agrawal } 168962f53995SHemant Agrawal 1690*4defbc8cSSachin Saxena if (!fmc_q && !default_q) { 1691*4defbc8cSSachin Saxena ret = qman_alloc_fqid_range(dev_rx_fqids, num_rx_fqs, 1692*4defbc8cSSachin Saxena num_rx_fqs, 0); 1693*4defbc8cSSachin Saxena if (ret < 0) { 1694*4defbc8cSSachin Saxena DPAA_PMD_ERR("Failed to alloc rx fqid's\n"); 1695*4defbc8cSSachin Saxena goto free_rx; 1696*4defbc8cSSachin Saxena } 1697*4defbc8cSSachin Saxena } 1698*4defbc8cSSachin Saxena 169937f9b54bSShreyansh Jain for (loop = 0; loop < num_rx_fqs; loop++) { 17008d6fc8b6SHemant Agrawal if (default_q) 17018d6fc8b6SHemant Agrawal fqid = cfg->rx_def; 17028d6fc8b6SHemant Agrawal else 1703*4defbc8cSSachin Saxena fqid = dev_rx_fqids[loop]; 170462f53995SHemant Agrawal 170562f53995SHemant Agrawal if (dpaa_intf->cgr_rx) 170662f53995SHemant Agrawal 
dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop]; 170762f53995SHemant Agrawal 170862f53995SHemant Agrawal ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop], 170962f53995SHemant Agrawal dpaa_intf->cgr_rx ? &dpaa_intf->cgr_rx[loop] : NULL, 171062f53995SHemant Agrawal fqid); 171137f9b54bSShreyansh Jain if (ret) 17120ff76833SYong Wang goto free_rx; 171337f9b54bSShreyansh Jain dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf; 171437f9b54bSShreyansh Jain } 171537f9b54bSShreyansh Jain dpaa_intf->nb_rx_queues = num_rx_fqs; 171637f9b54bSShreyansh Jain 17170ff76833SYong Wang /* Initialise Tx FQs.free_rx Have as many Tx FQ's as number of cores */ 171837f9b54bSShreyansh Jain dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) * 1719af2828cfSAkhil Goyal MAX_DPAA_CORES, MAX_CACHELINE); 17200ff76833SYong Wang if (!dpaa_intf->tx_queues) { 17210ff76833SYong Wang DPAA_PMD_ERR("Failed to alloc mem for TX queues\n"); 17220ff76833SYong Wang ret = -ENOMEM; 17230ff76833SYong Wang goto free_rx; 17240ff76833SYong Wang } 172537f9b54bSShreyansh Jain 17269124e65dSGagandeep Singh /* If congestion control is enabled globally*/ 17279124e65dSGagandeep Singh if (td_tx_threshold) { 17289124e65dSGagandeep Singh dpaa_intf->cgr_tx = rte_zmalloc(NULL, 17299124e65dSGagandeep Singh sizeof(struct qman_cgr) * MAX_DPAA_CORES, 17309124e65dSGagandeep Singh MAX_CACHELINE); 17319124e65dSGagandeep Singh if (!dpaa_intf->cgr_tx) { 17329124e65dSGagandeep Singh DPAA_PMD_ERR("Failed to alloc mem for cgr_tx\n"); 17339124e65dSGagandeep Singh ret = -ENOMEM; 17349124e65dSGagandeep Singh goto free_rx; 17359124e65dSGagandeep Singh } 17369124e65dSGagandeep Singh 17379124e65dSGagandeep Singh ret = qman_alloc_cgrid_range(&cgrid_tx[0], MAX_DPAA_CORES, 17389124e65dSGagandeep Singh 1, 0); 17399124e65dSGagandeep Singh if (ret != MAX_DPAA_CORES) { 17409124e65dSGagandeep Singh DPAA_PMD_WARN("insufficient CGRIDs available"); 17419124e65dSGagandeep Singh ret = -EINVAL; 17429124e65dSGagandeep Singh goto free_rx; 17439124e65dSGagandeep Singh } 17449124e65dSGagandeep Singh } else { 17459124e65dSGagandeep Singh dpaa_intf->cgr_tx = NULL; 17469124e65dSGagandeep Singh } 17479124e65dSGagandeep Singh 17489124e65dSGagandeep Singh 1749af2828cfSAkhil Goyal for (loop = 0; loop < MAX_DPAA_CORES; loop++) { 17509124e65dSGagandeep Singh if (dpaa_intf->cgr_tx) 17519124e65dSGagandeep Singh dpaa_intf->cgr_tx[loop].cgrid = cgrid_tx[loop]; 17529124e65dSGagandeep Singh 175337f9b54bSShreyansh Jain ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop], 17549124e65dSGagandeep Singh fman_intf, 17559124e65dSGagandeep Singh dpaa_intf->cgr_tx ? 
&dpaa_intf->cgr_tx[loop] : NULL); 175637f9b54bSShreyansh Jain if (ret) 17570ff76833SYong Wang goto free_tx; 175837f9b54bSShreyansh Jain dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf; 175937f9b54bSShreyansh Jain } 1760af2828cfSAkhil Goyal dpaa_intf->nb_tx_queues = MAX_DPAA_CORES; 176137f9b54bSShreyansh Jain 176205ba55bcSShreyansh Jain #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER 176305ba55bcSShreyansh Jain dpaa_debug_queue_init(&dpaa_intf->debug_queues[ 176405ba55bcSShreyansh Jain DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err); 176505ba55bcSShreyansh Jain dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf; 176605ba55bcSShreyansh Jain dpaa_debug_queue_init(&dpaa_intf->debug_queues[ 176705ba55bcSShreyansh Jain DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err); 176805ba55bcSShreyansh Jain dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf; 176905ba55bcSShreyansh Jain #endif 177005ba55bcSShreyansh Jain 177137f9b54bSShreyansh Jain DPAA_PMD_DEBUG("All frame queues created"); 177237f9b54bSShreyansh Jain 177312a4678aSShreyansh Jain /* Get the initial configuration for flow control */ 17746b10d1f7SNipun Gupta dpaa_fc_set_default(dpaa_intf, fman_intf); 177512a4678aSShreyansh Jain 177637f9b54bSShreyansh Jain /* reset bpool list, initialize bpool dynamically */ 177737f9b54bSShreyansh Jain list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) { 177837f9b54bSShreyansh Jain list_del(&bp->node); 17794762b3d4SHemant Agrawal rte_free(bp); 178037f9b54bSShreyansh Jain } 178137f9b54bSShreyansh Jain 178237f9b54bSShreyansh Jain /* Populate ethdev structure */ 1783ff9e112dSShreyansh Jain eth_dev->dev_ops = &dpaa_devops; 178437f9b54bSShreyansh Jain eth_dev->rx_pkt_burst = dpaa_eth_queue_rx; 178537f9b54bSShreyansh Jain eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all; 178637f9b54bSShreyansh Jain 178737f9b54bSShreyansh Jain /* Allocate memory for storing MAC addresses */ 178837f9b54bSShreyansh Jain eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", 178935b2d13fSOlivier Matz RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0); 179037f9b54bSShreyansh Jain if (eth_dev->data->mac_addrs == NULL) { 179137f9b54bSShreyansh Jain DPAA_PMD_ERR("Failed to allocate %d bytes needed to " 179237f9b54bSShreyansh Jain "store MAC addresses", 179335b2d13fSOlivier Matz RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER); 17940ff76833SYong Wang ret = -ENOMEM; 17950ff76833SYong Wang goto free_tx; 179637f9b54bSShreyansh Jain } 179737f9b54bSShreyansh Jain 179837f9b54bSShreyansh Jain /* copy the primary mac address */ 1799538da7a1SOlivier Matz rte_ether_addr_copy(&fman_intf->mac_addr, ð_dev->data->mac_addrs[0]); 180037f9b54bSShreyansh Jain 1801*4defbc8cSSachin Saxena RTE_LOG(INFO, PMD, "net: dpaa: %s: %02x:%02x:%02x:%02x:%02x:%02x\n", 1802*4defbc8cSSachin Saxena dpaa_device->name, 1803*4defbc8cSSachin Saxena fman_intf->mac_addr.addr_bytes[0], 1804*4defbc8cSSachin Saxena fman_intf->mac_addr.addr_bytes[1], 1805*4defbc8cSSachin Saxena fman_intf->mac_addr.addr_bytes[2], 1806*4defbc8cSSachin Saxena fman_intf->mac_addr.addr_bytes[3], 1807*4defbc8cSSachin Saxena fman_intf->mac_addr.addr_bytes[4], 1808*4defbc8cSSachin Saxena fman_intf->mac_addr.addr_bytes[5]); 1809*4defbc8cSSachin Saxena 181037f9b54bSShreyansh Jain 181137f9b54bSShreyansh Jain /* Disable RX mode */ 181237f9b54bSShreyansh Jain fman_if_discard_rx_errors(fman_intf); 181337f9b54bSShreyansh Jain fman_if_disable_rx(fman_intf); 181437f9b54bSShreyansh Jain /* Disable promiscuous mode */ 181537f9b54bSShreyansh Jain fman_if_promiscuous_disable(fman_intf); 
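	/*
	 * The port is deliberately left quiesced at init time: Rx stays
	 * disabled, errored frames are discarded and promiscuous mode is
	 * turned off above, while the multicast filter and the interface
	 * statistics are cleared just below.  Traffic only flows once the
	 * application starts the port.
	 */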
181637f9b54bSShreyansh Jain /* Disable multicast */ 181737f9b54bSShreyansh Jain fman_if_reset_mcast_filter_table(fman_intf); 181837f9b54bSShreyansh Jain /* Reset interface statistics */ 181937f9b54bSShreyansh Jain fman_if_stats_reset(fman_intf); 182055576ac2SHemant Agrawal /* Disable SG by default */ 182155576ac2SHemant Agrawal fman_if_set_sg(fman_intf, 0); 182235b2d13fSOlivier Matz fman_if_set_maxfrm(fman_intf, RTE_ETHER_MAX_LEN + VLAN_TAG_SIZE); 1823ff9e112dSShreyansh Jain 1824ff9e112dSShreyansh Jain return 0; 18250ff76833SYong Wang 18260ff76833SYong Wang free_tx: 18270ff76833SYong Wang rte_free(dpaa_intf->tx_queues); 18280ff76833SYong Wang dpaa_intf->tx_queues = NULL; 18290ff76833SYong Wang dpaa_intf->nb_tx_queues = 0; 18300ff76833SYong Wang 18310ff76833SYong Wang free_rx: 18320ff76833SYong Wang rte_free(dpaa_intf->cgr_rx); 18339124e65dSGagandeep Singh rte_free(dpaa_intf->cgr_tx); 18340ff76833SYong Wang rte_free(dpaa_intf->rx_queues); 18350ff76833SYong Wang dpaa_intf->rx_queues = NULL; 18360ff76833SYong Wang dpaa_intf->nb_rx_queues = 0; 18370ff76833SYong Wang return ret; 1838ff9e112dSShreyansh Jain } 1839ff9e112dSShreyansh Jain 1840ff9e112dSShreyansh Jain static int 1841ff9e112dSShreyansh Jain dpaa_dev_uninit(struct rte_eth_dev *dev) 1842ff9e112dSShreyansh Jain { 1843ff9e112dSShreyansh Jain struct dpaa_if *dpaa_intf = dev->data->dev_private; 184462f53995SHemant Agrawal int loop; 1845ff9e112dSShreyansh Jain 1846ff9e112dSShreyansh Jain PMD_INIT_FUNC_TRACE(); 1847ff9e112dSShreyansh Jain 1848ff9e112dSShreyansh Jain if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1849ff9e112dSShreyansh Jain return -EPERM; 1850ff9e112dSShreyansh Jain 1851ff9e112dSShreyansh Jain if (!dpaa_intf) { 1852ff9e112dSShreyansh Jain DPAA_PMD_WARN("Already closed or not started"); 1853ff9e112dSShreyansh Jain return -1; 1854ff9e112dSShreyansh Jain } 1855ff9e112dSShreyansh Jain 1856*4defbc8cSSachin Saxena /* DPAA FM deconfig */ 1857*4defbc8cSSachin Saxena if (!(default_q || fmc_q)) { 1858*4defbc8cSSachin Saxena if (dpaa_fm_deconfig(dpaa_intf, dev->process_private)) 1859*4defbc8cSSachin Saxena DPAA_PMD_WARN("DPAA FM deconfig failed\n"); 1860*4defbc8cSSachin Saxena } 1861*4defbc8cSSachin Saxena 1862ff9e112dSShreyansh Jain dpaa_eth_dev_close(dev); 1863ff9e112dSShreyansh Jain 186437f9b54bSShreyansh Jain /* release configuration memory */ 186537f9b54bSShreyansh Jain if (dpaa_intf->fc_conf) 186637f9b54bSShreyansh Jain rte_free(dpaa_intf->fc_conf); 186737f9b54bSShreyansh Jain 186862f53995SHemant Agrawal /* Release RX congestion Groups */ 186962f53995SHemant Agrawal if (dpaa_intf->cgr_rx) { 187062f53995SHemant Agrawal for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++) 187162f53995SHemant Agrawal qman_delete_cgr(&dpaa_intf->cgr_rx[loop]); 187262f53995SHemant Agrawal 187362f53995SHemant Agrawal qman_release_cgrid_range(dpaa_intf->cgr_rx[loop].cgrid, 187462f53995SHemant Agrawal dpaa_intf->nb_rx_queues); 187562f53995SHemant Agrawal } 187662f53995SHemant Agrawal 187762f53995SHemant Agrawal rte_free(dpaa_intf->cgr_rx); 187862f53995SHemant Agrawal dpaa_intf->cgr_rx = NULL; 187962f53995SHemant Agrawal 18809124e65dSGagandeep Singh /* Release TX congestion Groups */ 18819124e65dSGagandeep Singh if (dpaa_intf->cgr_tx) { 18829124e65dSGagandeep Singh for (loop = 0; loop < MAX_DPAA_CORES; loop++) 18839124e65dSGagandeep Singh qman_delete_cgr(&dpaa_intf->cgr_tx[loop]); 18849124e65dSGagandeep Singh 18859124e65dSGagandeep Singh qman_release_cgrid_range(dpaa_intf->cgr_tx[loop].cgrid, 18869124e65dSGagandeep Singh MAX_DPAA_CORES); 
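	/*
	 * One Tx CGR was allocated per core (MAX_DPAA_CORES entries): each
	 * CGR object has been deleted in the loop above and the CGR ID range
	 * obtained in dpaa_dev_init() handed back to QMan; the backing array
	 * is freed next.
	 */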
18879124e65dSGagandeep Singh rte_free(dpaa_intf->cgr_tx); 18889124e65dSGagandeep Singh dpaa_intf->cgr_tx = NULL; 18899124e65dSGagandeep Singh } 18909124e65dSGagandeep Singh 189137f9b54bSShreyansh Jain rte_free(dpaa_intf->rx_queues); 189237f9b54bSShreyansh Jain dpaa_intf->rx_queues = NULL; 189337f9b54bSShreyansh Jain 189437f9b54bSShreyansh Jain rte_free(dpaa_intf->tx_queues); 189537f9b54bSShreyansh Jain dpaa_intf->tx_queues = NULL; 189637f9b54bSShreyansh Jain 1897ff9e112dSShreyansh Jain dev->dev_ops = NULL; 1898ff9e112dSShreyansh Jain dev->rx_pkt_burst = NULL; 1899ff9e112dSShreyansh Jain dev->tx_pkt_burst = NULL; 1900ff9e112dSShreyansh Jain 1901ff9e112dSShreyansh Jain return 0; 1902ff9e112dSShreyansh Jain } 1903ff9e112dSShreyansh Jain 1904ff9e112dSShreyansh Jain static int 1905*4defbc8cSSachin Saxena rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv, 1906ff9e112dSShreyansh Jain struct rte_dpaa_device *dpaa_dev) 1907ff9e112dSShreyansh Jain { 1908ff9e112dSShreyansh Jain int diag; 1909ff9e112dSShreyansh Jain int ret; 1910ff9e112dSShreyansh Jain struct rte_eth_dev *eth_dev; 1911ff9e112dSShreyansh Jain 1912ff9e112dSShreyansh Jain PMD_INIT_FUNC_TRACE(); 1913ff9e112dSShreyansh Jain 191447854c18SHemant Agrawal if ((DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE) > 191547854c18SHemant Agrawal RTE_PKTMBUF_HEADROOM) { 191647854c18SHemant Agrawal DPAA_PMD_ERR( 191747854c18SHemant Agrawal "RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA Annotation req(%d)", 191847854c18SHemant Agrawal RTE_PKTMBUF_HEADROOM, 191947854c18SHemant Agrawal DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE); 192047854c18SHemant Agrawal 192147854c18SHemant Agrawal return -1; 192247854c18SHemant Agrawal } 192347854c18SHemant Agrawal 1924ff9e112dSShreyansh Jain /* In case of secondary process, the device is already configured 1925ff9e112dSShreyansh Jain * and no further action is required, except portal initialization 1926ff9e112dSShreyansh Jain * and verifying secondary attachment to port name. 
1927ff9e112dSShreyansh Jain */ 1928ff9e112dSShreyansh Jain if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 1929ff9e112dSShreyansh Jain eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name); 1930ff9e112dSShreyansh Jain if (!eth_dev) 1931ff9e112dSShreyansh Jain return -ENOMEM; 1932d1c3ab22SFerruh Yigit eth_dev->device = &dpaa_dev->device; 1933d1c3ab22SFerruh Yigit eth_dev->dev_ops = &dpaa_devops; 19346b10d1f7SNipun Gupta 19356b10d1f7SNipun Gupta ret = dpaa_dev_init_secondary(eth_dev); 19366b10d1f7SNipun Gupta if (ret != 0) { 19376b10d1f7SNipun Gupta RTE_LOG(ERR, PMD, "secondary dev init failed\n"); 19386b10d1f7SNipun Gupta return ret; 19396b10d1f7SNipun Gupta } 19406b10d1f7SNipun Gupta 1941fbe90cddSThomas Monjalon rte_eth_dev_probing_finish(eth_dev); 1942ff9e112dSShreyansh Jain return 0; 1943ff9e112dSShreyansh Jain } 1944ff9e112dSShreyansh Jain 1945af2828cfSAkhil Goyal if (!is_global_init && (rte_eal_process_type() == RTE_PROC_PRIMARY)) { 19468d6fc8b6SHemant Agrawal if (access("/tmp/fmc.bin", F_OK) == -1) { 1947b7c7ff6eSStephen Hemminger DPAA_PMD_INFO("* FMC not configured.Enabling default mode"); 19488d6fc8b6SHemant Agrawal default_q = 1; 19498d6fc8b6SHemant Agrawal } 19508d6fc8b6SHemant Agrawal 1951*4defbc8cSSachin Saxena if (!(default_q || fmc_q)) { 1952*4defbc8cSSachin Saxena if (dpaa_fm_init()) { 1953*4defbc8cSSachin Saxena DPAA_PMD_ERR("FM init failed\n"); 1954*4defbc8cSSachin Saxena return -1; 1955*4defbc8cSSachin Saxena } 1956*4defbc8cSSachin Saxena } 1957*4defbc8cSSachin Saxena 1958e507498dSHemant Agrawal /* disabling the default push mode for LS1043 */ 1959e507498dSHemant Agrawal if (dpaa_svr_family == SVR_LS1043A_FAMILY) 1960e507498dSHemant Agrawal dpaa_push_mode_max_queue = 0; 1961e507498dSHemant Agrawal 1962e507498dSHemant Agrawal /* if push mode queues to be enabled. Currenly we are allowing 1963e507498dSHemant Agrawal * only one queue per thread. 
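 * The DPAA_PUSH_QUEUES_NUMBER environment variable overrides that
 * default and is clamped to DPAA_MAX_PUSH_MODE_QUEUE just below.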
1964e507498dSHemant Agrawal */ 1965e507498dSHemant Agrawal if (getenv("DPAA_PUSH_QUEUES_NUMBER")) { 1966e507498dSHemant Agrawal dpaa_push_mode_max_queue = 1967e507498dSHemant Agrawal atoi(getenv("DPAA_PUSH_QUEUES_NUMBER")); 1968e507498dSHemant Agrawal if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE) 1969e507498dSHemant Agrawal dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE; 1970e507498dSHemant Agrawal } 1971e507498dSHemant Agrawal 1972ff9e112dSShreyansh Jain is_global_init = 1; 1973ff9e112dSShreyansh Jain } 1974ff9e112dSShreyansh Jain 1975e5872221SRohit Raj if (unlikely(!DPAA_PER_LCORE_PORTAL)) { 1976ff9e112dSShreyansh Jain ret = rte_dpaa_portal_init((void *)1); 1977ff9e112dSShreyansh Jain if (ret) { 1978ff9e112dSShreyansh Jain DPAA_PMD_ERR("Unable to initialize portal"); 1979ff9e112dSShreyansh Jain return ret; 1980ff9e112dSShreyansh Jain } 19815d944582SNipun Gupta } 1982ff9e112dSShreyansh Jain 19836b10d1f7SNipun Gupta eth_dev = rte_eth_dev_allocate(dpaa_dev->name); 1984af2828cfSAkhil Goyal if (!eth_dev) 1985af2828cfSAkhil Goyal return -ENOMEM; 1986ff9e112dSShreyansh Jain 19876b10d1f7SNipun Gupta eth_dev->data->dev_private = 19886b10d1f7SNipun Gupta rte_zmalloc("ethdev private structure", 1989ff9e112dSShreyansh Jain sizeof(struct dpaa_if), 1990ff9e112dSShreyansh Jain RTE_CACHE_LINE_SIZE); 1991ff9e112dSShreyansh Jain if (!eth_dev->data->dev_private) { 1992ff9e112dSShreyansh Jain DPAA_PMD_ERR("Cannot allocate memzone for port data"); 1993ff9e112dSShreyansh Jain rte_eth_dev_release_port(eth_dev); 1994ff9e112dSShreyansh Jain return -ENOMEM; 1995ff9e112dSShreyansh Jain } 19966b10d1f7SNipun Gupta 1997ff9e112dSShreyansh Jain eth_dev->device = &dpaa_dev->device; 1998ff9e112dSShreyansh Jain dpaa_dev->eth_dev = eth_dev; 1999ff9e112dSShreyansh Jain 20009124e65dSGagandeep Singh qman_ern_register_cb(dpaa_free_mbuf); 20019124e65dSGagandeep Singh 20022aa10990SRohit Raj if (dpaa_drv->drv_flags & RTE_DPAA_DRV_INTR_LSC) 20032aa10990SRohit Raj eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC; 20042aa10990SRohit Raj 2005ff9e112dSShreyansh Jain /* Invoke PMD device initialization function */ 2006ff9e112dSShreyansh Jain diag = dpaa_dev_init(eth_dev); 2007fbe90cddSThomas Monjalon if (diag == 0) { 2008fbe90cddSThomas Monjalon rte_eth_dev_probing_finish(eth_dev); 2009ff9e112dSShreyansh Jain return 0; 2010fbe90cddSThomas Monjalon } 2011ff9e112dSShreyansh Jain 2012ff9e112dSShreyansh Jain rte_eth_dev_release_port(eth_dev); 2013ff9e112dSShreyansh Jain return diag; 2014ff9e112dSShreyansh Jain } 2015ff9e112dSShreyansh Jain 2016ff9e112dSShreyansh Jain static int 2017ff9e112dSShreyansh Jain rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev) 2018ff9e112dSShreyansh Jain { 2019ff9e112dSShreyansh Jain struct rte_eth_dev *eth_dev; 2020ff9e112dSShreyansh Jain 2021ff9e112dSShreyansh Jain PMD_INIT_FUNC_TRACE(); 2022ff9e112dSShreyansh Jain 2023ff9e112dSShreyansh Jain eth_dev = dpaa_dev->eth_dev; 2024ff9e112dSShreyansh Jain dpaa_dev_uninit(eth_dev); 2025ff9e112dSShreyansh Jain 2026ff9e112dSShreyansh Jain rte_eth_dev_release_port(eth_dev); 2027ff9e112dSShreyansh Jain 2028ff9e112dSShreyansh Jain return 0; 2029ff9e112dSShreyansh Jain } 2030ff9e112dSShreyansh Jain 2031*4defbc8cSSachin Saxena static void __attribute__((destructor(102))) dpaa_finish(void) 2032*4defbc8cSSachin Saxena { 2033*4defbc8cSSachin Saxena /* For secondary, primary will do all the cleanup */ 2034*4defbc8cSSachin Saxena if (rte_eal_process_type() != RTE_PROC_PRIMARY) 2035*4defbc8cSSachin Saxena return; 2036*4defbc8cSSachin Saxena 2037*4defbc8cSSachin 
Saxena if (!(default_q || fmc_q)) { 2038*4defbc8cSSachin Saxena unsigned int i; 2039*4defbc8cSSachin Saxena 2040*4defbc8cSSachin Saxena for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 2041*4defbc8cSSachin Saxena if (rte_eth_devices[i].dev_ops == &dpaa_devops) { 2042*4defbc8cSSachin Saxena struct rte_eth_dev *dev = &rte_eth_devices[i]; 2043*4defbc8cSSachin Saxena struct dpaa_if *dpaa_intf = 2044*4defbc8cSSachin Saxena dev->data->dev_private; 2045*4defbc8cSSachin Saxena struct fman_if *fif = 2046*4defbc8cSSachin Saxena dev->process_private; 2047*4defbc8cSSachin Saxena if (dpaa_intf->port_handle) 2048*4defbc8cSSachin Saxena if (dpaa_fm_deconfig(dpaa_intf, fif)) 2049*4defbc8cSSachin Saxena DPAA_PMD_WARN("DPAA FM " 2050*4defbc8cSSachin Saxena "deconfig failed\n"); 2051*4defbc8cSSachin Saxena } 2052*4defbc8cSSachin Saxena } 2053*4defbc8cSSachin Saxena if (is_global_init) 2054*4defbc8cSSachin Saxena if (dpaa_fm_term()) 2055*4defbc8cSSachin Saxena DPAA_PMD_WARN("DPAA FM term failed\n"); 2056*4defbc8cSSachin Saxena 2057*4defbc8cSSachin Saxena is_global_init = 0; 2058*4defbc8cSSachin Saxena 2059*4defbc8cSSachin Saxena DPAA_PMD_INFO("DPAA fman cleaned up"); 2060*4defbc8cSSachin Saxena } 2061*4defbc8cSSachin Saxena } 2062*4defbc8cSSachin Saxena 2063ff9e112dSShreyansh Jain static struct rte_dpaa_driver rte_dpaa_pmd = { 20642aa10990SRohit Raj .drv_flags = RTE_DPAA_DRV_INTR_LSC, 2065ff9e112dSShreyansh Jain .drv_type = FSL_DPAA_ETH, 2066ff9e112dSShreyansh Jain .probe = rte_dpaa_probe, 2067ff9e112dSShreyansh Jain .remove = rte_dpaa_remove, 2068ff9e112dSShreyansh Jain }; 2069ff9e112dSShreyansh Jain 2070ff9e112dSShreyansh Jain RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd); 20719c99878aSJerin Jacob RTE_LOG_REGISTER(dpaa_logtype_pmd, pmd.net.dpaa, NOTICE); 2072
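/*
 * Minimal usage sketch (added illustration, not part of the upstream
 * driver): it shows how an application could exercise some of the ops
 * wired up in dpaa_devops through the generic ethdev API, plus the
 * DPAA-specific Tx loopback helper from rte_pmd_dpaa.h.  The function
 * name and the water-mark/pause values are hypothetical examples, not
 * values required by the driver.
 */
static __rte_unused int
dpaa_example_configure_port(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;
	int ret;

	/* Read the current flow control state (handled by dpaa_flow_ctrl_get). */
	memset(&fc_conf, 0, sizeof(fc_conf));
	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	if (ret)
		return ret;

	/*
	 * Request Tx pause frames.  high_water must not be lower than
	 * low_water or dpaa_flow_ctrl_set() rejects the request with -EINVAL.
	 */
	fc_conf.mode = RTE_FC_TX_PAUSE;
	fc_conf.high_water = 1024;	/* illustrative thresholds only */
	fc_conf.low_water = 512;
	fc_conf.pause_time = 100;
	ret = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
	if (ret)
		return ret;

	/*
	 * Rx interrupts are only available on queues running in "static"
	 * (push) mode; otherwise dpaa_dev_queue_intr_enable() returns
	 * -EINVAL, which an application may treat as "not supported here".
	 */
	ret = rte_eth_dev_rx_intr_enable(port_id, 0);
	if (ret && ret != -EINVAL)
		return ret;

	/* DPAA-specific call: put the MAC into Tx->Rx loopback for self test. */
	return rte_pmd_dpaa_set_tx_loopback((uint8_t)port_id, 1);
}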