1d81734caSHemant Agrawal /* SPDX-License-Identifier: BSD-3-Clause 2ff9e112dSShreyansh Jain * 3ff9e112dSShreyansh Jain * Copyright 2016 Freescale Semiconductor, Inc. All rights reserved. 49124e65dSGagandeep Singh * Copyright 2017-2020 NXP 5ff9e112dSShreyansh Jain * 6ff9e112dSShreyansh Jain */ 7ff9e112dSShreyansh Jain /* System headers */ 8ff9e112dSShreyansh Jain #include <stdio.h> 9ff9e112dSShreyansh Jain #include <inttypes.h> 10ff9e112dSShreyansh Jain #include <unistd.h> 11ff9e112dSShreyansh Jain #include <limits.h> 12ff9e112dSShreyansh Jain #include <sched.h> 13ff9e112dSShreyansh Jain #include <signal.h> 14ff9e112dSShreyansh Jain #include <pthread.h> 15ff9e112dSShreyansh Jain #include <sys/types.h> 16ff9e112dSShreyansh Jain #include <sys/syscall.h> 17ff9e112dSShreyansh Jain 186723c0fcSBruce Richardson #include <rte_string_fns.h> 19ff9e112dSShreyansh Jain #include <rte_byteorder.h> 20ff9e112dSShreyansh Jain #include <rte_common.h> 21ff9e112dSShreyansh Jain #include <rte_interrupts.h> 22ff9e112dSShreyansh Jain #include <rte_log.h> 23ff9e112dSShreyansh Jain #include <rte_debug.h> 24ff9e112dSShreyansh Jain #include <rte_pci.h> 25ff9e112dSShreyansh Jain #include <rte_atomic.h> 26ff9e112dSShreyansh Jain #include <rte_branch_prediction.h> 27ff9e112dSShreyansh Jain #include <rte_memory.h> 28ff9e112dSShreyansh Jain #include <rte_tailq.h> 29ff9e112dSShreyansh Jain #include <rte_eal.h> 30ff9e112dSShreyansh Jain #include <rte_alarm.h> 31ff9e112dSShreyansh Jain #include <rte_ether.h> 32ffc905f3SFerruh Yigit #include <rte_ethdev_driver.h> 33ff9e112dSShreyansh Jain #include <rte_malloc.h> 34ff9e112dSShreyansh Jain #include <rte_ring.h> 35ff9e112dSShreyansh Jain 36ff9e112dSShreyansh Jain #include <rte_dpaa_bus.h> 37ff9e112dSShreyansh Jain #include <rte_dpaa_logs.h> 3837f9b54bSShreyansh Jain #include <dpaa_mempool.h> 39ff9e112dSShreyansh Jain 40ff9e112dSShreyansh Jain #include <dpaa_ethdev.h> 4137f9b54bSShreyansh Jain #include <dpaa_rxtx.h> 424defbc8cSSachin Saxena #include <dpaa_flow.h> 438c3495f5SHemant Agrawal #include <rte_pmd_dpaa.h> 4437f9b54bSShreyansh Jain 4537f9b54bSShreyansh Jain #include <fsl_usd.h> 4637f9b54bSShreyansh Jain #include <fsl_qman.h> 4737f9b54bSShreyansh Jain #include <fsl_bman.h> 4837f9b54bSShreyansh Jain #include <fsl_fman.h> 492aa10990SRohit Raj #include <process.h> 50ff9e112dSShreyansh Jain 51c5836218SSunil Kumar Kori /* Supported Rx offloads */ 52c5836218SSunil Kumar Kori static uint64_t dev_rx_offloads_sup = 5355576ac2SHemant Agrawal DEV_RX_OFFLOAD_JUMBO_FRAME | 5455576ac2SHemant Agrawal DEV_RX_OFFLOAD_SCATTER; 55c5836218SSunil Kumar Kori 56c5836218SSunil Kumar Kori /* Rx offloads which cannot be disabled */ 57c5836218SSunil Kumar Kori static uint64_t dev_rx_offloads_nodis = 58c5836218SSunil Kumar Kori DEV_RX_OFFLOAD_IPV4_CKSUM | 59c5836218SSunil Kumar Kori DEV_RX_OFFLOAD_UDP_CKSUM | 60c5836218SSunil Kumar Kori DEV_RX_OFFLOAD_TCP_CKSUM | 618b945a7fSPavan Nikhilesh DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | 628b945a7fSPavan Nikhilesh DEV_RX_OFFLOAD_RSS_HASH; 63c5836218SSunil Kumar Kori 64c5836218SSunil Kumar Kori /* Supported Tx offloads */ 651cd8d4ceSHemant Agrawal static uint64_t dev_tx_offloads_sup = 661cd8d4ceSHemant Agrawal DEV_TX_OFFLOAD_MT_LOCKFREE | 671cd8d4ceSHemant Agrawal DEV_TX_OFFLOAD_MBUF_FAST_FREE; 68c5836218SSunil Kumar Kori 69c5836218SSunil Kumar Kori /* Tx offloads which cannot be disabled */ 70c5836218SSunil Kumar Kori static uint64_t dev_tx_offloads_nodis = 71c5836218SSunil Kumar Kori DEV_TX_OFFLOAD_IPV4_CKSUM | 72c5836218SSunil Kumar Kori DEV_TX_OFFLOAD_UDP_CKSUM | 73c5836218SSunil Kumar Kori DEV_TX_OFFLOAD_TCP_CKSUM | 74c5836218SSunil Kumar Kori DEV_TX_OFFLOAD_SCTP_CKSUM | 75c5836218SSunil Kumar Kori DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | 761cd8d4ceSHemant Agrawal DEV_TX_OFFLOAD_MULTI_SEGS; 77c5836218SSunil Kumar Kori 78ff9e112dSShreyansh Jain /* Keep track of whether QMAN and BMAN have been globally initialized */ 79ff9e112dSShreyansh Jain static int is_global_init; 804defbc8cSSachin Saxena static int fmc_q = 1; /* Indicates the use of static fmc for distribution */ 818d6fc8b6SHemant Agrawal static int default_q; /* use default queue - FMC is not executed*/ 820b5deefbSShreyansh Jain /* At present we only allow up to 4 push mode queues as default - as each of 830b5deefbSShreyansh Jain * this queue need dedicated portal and we are short of portals. 840c504f69SHemant Agrawal */ 850b5deefbSShreyansh Jain #define DPAA_MAX_PUSH_MODE_QUEUE 8 860b5deefbSShreyansh Jain #define DPAA_DEFAULT_PUSH_MODE_QUEUE 4 870c504f69SHemant Agrawal 880b5deefbSShreyansh Jain static int dpaa_push_mode_max_queue = DPAA_DEFAULT_PUSH_MODE_QUEUE; 890c504f69SHemant Agrawal static int dpaa_push_queue_idx; /* Queue index which are in push mode*/ 900c504f69SHemant Agrawal 91ff9e112dSShreyansh Jain 929124e65dSGagandeep Singh /* Per RX FQ Taildrop in frame count */ 9362f53995SHemant Agrawal static unsigned int td_threshold = CGR_RX_PERFQ_THRESH; 9462f53995SHemant Agrawal 959124e65dSGagandeep Singh /* Per TX FQ Taildrop in frame count, disabled by default */ 969124e65dSGagandeep Singh static unsigned int td_tx_threshold; 979124e65dSGagandeep Singh 98b21ed3e2SHemant Agrawal struct rte_dpaa_xstats_name_off { 99b21ed3e2SHemant Agrawal char name[RTE_ETH_XSTATS_NAME_SIZE]; 100b21ed3e2SHemant Agrawal uint32_t offset; 101b21ed3e2SHemant Agrawal }; 102b21ed3e2SHemant Agrawal 103b21ed3e2SHemant Agrawal static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = { 104b21ed3e2SHemant Agrawal {"rx_align_err", 105b21ed3e2SHemant Agrawal offsetof(struct dpaa_if_stats, raln)}, 106b21ed3e2SHemant Agrawal {"rx_valid_pause", 107b21ed3e2SHemant Agrawal offsetof(struct dpaa_if_stats, rxpf)}, 108b21ed3e2SHemant Agrawal {"rx_fcs_err", 109b21ed3e2SHemant Agrawal offsetof(struct dpaa_if_stats, rfcs)}, 110b21ed3e2SHemant Agrawal {"rx_vlan_frame", 111b21ed3e2SHemant Agrawal offsetof(struct dpaa_if_stats, rvlan)}, 112b21ed3e2SHemant Agrawal {"rx_frame_err", 113b21ed3e2SHemant Agrawal offsetof(struct dpaa_if_stats, rerr)}, 114b21ed3e2SHemant Agrawal {"rx_drop_err", 115b21ed3e2SHemant Agrawal offsetof(struct dpaa_if_stats, rdrp)}, 116b21ed3e2SHemant Agrawal {"rx_undersized", 117b21ed3e2SHemant Agrawal offsetof(struct dpaa_if_stats, rund)}, 118b21ed3e2SHemant Agrawal {"rx_oversize_err", 119b21ed3e2SHemant Agrawal offsetof(struct dpaa_if_stats, rovr)}, 120b21ed3e2SHemant Agrawal {"rx_fragment_pkt", 121b21ed3e2SHemant Agrawal offsetof(struct dpaa_if_stats, rfrg)}, 122b21ed3e2SHemant Agrawal {"tx_valid_pause", 123b21ed3e2SHemant Agrawal offsetof(struct dpaa_if_stats, txpf)}, 124b21ed3e2SHemant Agrawal {"tx_fcs_err", 125b21ed3e2SHemant Agrawal offsetof(struct dpaa_if_stats, terr)}, 126b21ed3e2SHemant Agrawal {"tx_vlan_frame", 127b21ed3e2SHemant Agrawal offsetof(struct dpaa_if_stats, tvlan)}, 128b21ed3e2SHemant Agrawal {"rx_undersized", 129b21ed3e2SHemant Agrawal offsetof(struct dpaa_if_stats, tund)}, 130b21ed3e2SHemant Agrawal }; 131b21ed3e2SHemant Agrawal 1328c3495f5SHemant Agrawal static struct rte_dpaa_driver rte_dpaa_pmd; 1338c3495f5SHemant Agrawal 134bdad90d1SIvan Ilchenko static int 13516e2c27fSSunil Kumar Kori dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info); 13616e2c27fSSunil Kumar Kori 1372aa10990SRohit Raj static int dpaa_eth_link_update(struct rte_eth_dev *dev, 1382aa10990SRohit Raj int wait_to_complete __rte_unused); 1392aa10990SRohit Raj 1402aa10990SRohit Raj static void dpaa_interrupt_handler(void *param); 1412aa10990SRohit Raj 1425e745593SSunil Kumar Kori static inline void 1435e745593SSunil Kumar Kori dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts) 1445e745593SSunil Kumar Kori { 1455e745593SSunil Kumar Kori memset(opts, 0, sizeof(struct qm_mcc_initfq)); 1465e745593SSunil Kumar Kori opts->we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA; 1475e745593SSunil Kumar Kori opts->fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING | 1485e745593SSunil Kumar Kori QM_FQCTRL_PREFERINCACHE; 1495e745593SSunil Kumar Kori opts->fqd.context_a.stashing.exclusive = 0; 1505e745593SSunil Kumar Kori if (dpaa_svr_family != SVR_LS1046A_FAMILY) 1515e745593SSunil Kumar Kori opts->fqd.context_a.stashing.annotation_cl = 1525e745593SSunil Kumar Kori DPAA_IF_RX_ANNOTATION_STASH; 1535e745593SSunil Kumar Kori opts->fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH; 1545e745593SSunil Kumar Kori opts->fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH; 1555e745593SSunil Kumar Kori } 1565e745593SSunil Kumar Kori 157ff9e112dSShreyansh Jain static int 1580cbec027SShreyansh Jain dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 1590cbec027SShreyansh Jain { 16035b2d13fSOlivier Matz uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN 1619658ac3aSAshish Jain + VLAN_TAG_SIZE; 16255576ac2SHemant Agrawal uint32_t buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM; 1630cbec027SShreyansh Jain 1640cbec027SShreyansh Jain PMD_INIT_FUNC_TRACE(); 1650cbec027SShreyansh Jain 16635b2d13fSOlivier Matz if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN) 1670cbec027SShreyansh Jain return -EINVAL; 16855576ac2SHemant Agrawal /* 16955576ac2SHemant Agrawal * Refuse mtu that requires the support of scattered packets 17055576ac2SHemant Agrawal * when this feature has not been enabled before. 17155576ac2SHemant Agrawal */ 17255576ac2SHemant Agrawal if (dev->data->min_rx_buf_size && 17355576ac2SHemant Agrawal !dev->data->scattered_rx && frame_size > buffsz) { 17455576ac2SHemant Agrawal DPAA_PMD_ERR("SG not enabled, will not fit in one buffer"); 17555576ac2SHemant Agrawal return -EINVAL; 17655576ac2SHemant Agrawal } 17755576ac2SHemant Agrawal 17855576ac2SHemant Agrawal /* check <seg size> * <max_seg> >= max_frame */ 17955576ac2SHemant Agrawal if (dev->data->min_rx_buf_size && dev->data->scattered_rx && 18055576ac2SHemant Agrawal (frame_size > buffsz * DPAA_SGT_MAX_ENTRIES)) { 18155576ac2SHemant Agrawal DPAA_PMD_ERR("Too big to fit for Max SG list %d", 18255576ac2SHemant Agrawal buffsz * DPAA_SGT_MAX_ENTRIES); 18355576ac2SHemant Agrawal return -EINVAL; 18455576ac2SHemant Agrawal } 18555576ac2SHemant Agrawal 18635b2d13fSOlivier Matz if (frame_size > RTE_ETHER_MAX_LEN) 18740c79ea0SApeksha Gupta dev->data->dev_conf.rxmode.offloads |= 18816e2c27fSSunil Kumar Kori DEV_RX_OFFLOAD_JUMBO_FRAME; 18925f85419SShreyansh Jain else 19016e2c27fSSunil Kumar Kori dev->data->dev_conf.rxmode.offloads &= 19116e2c27fSSunil Kumar Kori ~DEV_RX_OFFLOAD_JUMBO_FRAME; 19225f85419SShreyansh Jain 1939658ac3aSAshish Jain dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 1940cbec027SShreyansh Jain 1956b10d1f7SNipun Gupta fman_if_set_maxfrm(dev->process_private, frame_size); 1960cbec027SShreyansh Jain 1970cbec027SShreyansh Jain return 0; 1980cbec027SShreyansh Jain } 1990cbec027SShreyansh Jain 2000cbec027SShreyansh Jain static int 20116e2c27fSSunil Kumar Kori dpaa_eth_dev_configure(struct rte_eth_dev *dev) 202ff9e112dSShreyansh Jain { 20316e2c27fSSunil Kumar Kori struct rte_eth_conf *eth_conf = &dev->data->dev_conf; 20416e2c27fSSunil Kumar Kori uint64_t rx_offloads = eth_conf->rxmode.offloads; 20516e2c27fSSunil Kumar Kori uint64_t tx_offloads = eth_conf->txmode.offloads; 2062aa10990SRohit Raj struct rte_device *rdev = dev->device; 2072aa10990SRohit Raj struct rte_dpaa_device *dpaa_dev; 2082aa10990SRohit Raj struct fman_if *fif = dev->process_private; 2092aa10990SRohit Raj struct __fman_if *__fif; 2102aa10990SRohit Raj struct rte_intr_handle *intr_handle; 2112aa10990SRohit Raj int ret; 2129658ac3aSAshish Jain 213ff9e112dSShreyansh Jain PMD_INIT_FUNC_TRACE(); 214ff9e112dSShreyansh Jain 2152aa10990SRohit Raj dpaa_dev = container_of(rdev, struct rte_dpaa_device, device); 2162aa10990SRohit Raj intr_handle = &dpaa_dev->intr_handle; 2172aa10990SRohit Raj __fif = container_of(fif, struct __fman_if, __if); 2182aa10990SRohit Raj 2191cd8d4ceSHemant Agrawal /* Rx offloads which are enabled by default */ 220c5836218SSunil Kumar Kori if (dev_rx_offloads_nodis & ~rx_offloads) { 2211cd8d4ceSHemant Agrawal DPAA_PMD_INFO( 2221cd8d4ceSHemant Agrawal "Some of rx offloads enabled by default - requested 0x%" PRIx64 2231cd8d4ceSHemant Agrawal " fixed are 0x%" PRIx64, 224c5836218SSunil Kumar Kori rx_offloads, dev_rx_offloads_nodis); 22516e2c27fSSunil Kumar Kori } 22616e2c27fSSunil Kumar Kori 2271cd8d4ceSHemant Agrawal /* Tx offloads which are enabled by default */ 228c5836218SSunil Kumar Kori if (dev_tx_offloads_nodis & ~tx_offloads) { 2291cd8d4ceSHemant Agrawal DPAA_PMD_INFO( 2301cd8d4ceSHemant Agrawal "Some of tx offloads enabled by default - requested 0x%" PRIx64 2311cd8d4ceSHemant Agrawal " fixed are 0x%" PRIx64, 232c5836218SSunil Kumar Kori tx_offloads, dev_tx_offloads_nodis); 23316e2c27fSSunil Kumar Kori } 23416e2c27fSSunil Kumar Kori 23516e2c27fSSunil Kumar Kori if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { 236deeec8efSHemant Agrawal uint32_t max_len; 237deeec8efSHemant Agrawal 238deeec8efSHemant Agrawal DPAA_PMD_DEBUG("enabling jumbo"); 239deeec8efSHemant Agrawal 24025f85419SShreyansh Jain if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= 241deeec8efSHemant Agrawal DPAA_MAX_RX_PKT_LEN) 242deeec8efSHemant Agrawal max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len; 243deeec8efSHemant Agrawal else { 244deeec8efSHemant Agrawal DPAA_PMD_INFO("enabling jumbo override conf max len=%d " 245deeec8efSHemant Agrawal "supported is %d", 246deeec8efSHemant Agrawal dev->data->dev_conf.rxmode.max_rx_pkt_len, 247deeec8efSHemant Agrawal DPAA_MAX_RX_PKT_LEN); 248deeec8efSHemant Agrawal max_len = DPAA_MAX_RX_PKT_LEN; 24925f85419SShreyansh Jain } 250deeec8efSHemant Agrawal 2516b10d1f7SNipun Gupta fman_if_set_maxfrm(dev->process_private, max_len); 252deeec8efSHemant Agrawal dev->data->mtu = max_len 25335b2d13fSOlivier Matz - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE; 2549658ac3aSAshish Jain } 25555576ac2SHemant Agrawal 25655576ac2SHemant Agrawal if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) { 25755576ac2SHemant Agrawal DPAA_PMD_DEBUG("enabling scatter mode"); 2586b10d1f7SNipun Gupta fman_if_set_sg(dev->process_private, 1); 25955576ac2SHemant Agrawal dev->data->scattered_rx = 1; 26055576ac2SHemant Agrawal } 26155576ac2SHemant Agrawal 262*f5fe3eedSJun Yang if (!(default_q || fmc_q)) { 263*f5fe3eedSJun Yang if (dpaa_fm_config(dev, 264*f5fe3eedSJun Yang eth_conf->rx_adv_conf.rss_conf.rss_hf)) { 265*f5fe3eedSJun Yang dpaa_write_fm_config_to_file(); 266*f5fe3eedSJun Yang DPAA_PMD_ERR("FM port configuration: Failed\n"); 267*f5fe3eedSJun Yang return -1; 268*f5fe3eedSJun Yang } 269*f5fe3eedSJun Yang dpaa_write_fm_config_to_file(); 270*f5fe3eedSJun Yang } 271*f5fe3eedSJun Yang 2722aa10990SRohit Raj /* if the interrupts were configured on this devices*/ 2732aa10990SRohit Raj if (intr_handle && intr_handle->fd) { 2742aa10990SRohit Raj if (dev->data->dev_conf.intr_conf.lsc != 0) 2752aa10990SRohit Raj rte_intr_callback_register(intr_handle, 2762aa10990SRohit Raj dpaa_interrupt_handler, 2772aa10990SRohit Raj (void *)dev); 2782aa10990SRohit Raj 2792aa10990SRohit Raj ret = dpaa_intr_enable(__fif->node_name, intr_handle->fd); 2802aa10990SRohit Raj if (ret) { 2812aa10990SRohit Raj if (dev->data->dev_conf.intr_conf.lsc != 0) { 2822aa10990SRohit Raj rte_intr_callback_unregister(intr_handle, 2832aa10990SRohit Raj dpaa_interrupt_handler, 2842aa10990SRohit Raj (void *)dev); 2852aa10990SRohit Raj if (ret == EINVAL) 2862aa10990SRohit Raj printf("Failed to enable interrupt: Not Supported\n"); 2872aa10990SRohit Raj else 2882aa10990SRohit Raj printf("Failed to enable interrupt\n"); 2892aa10990SRohit Raj } 2902aa10990SRohit Raj dev->data->dev_conf.intr_conf.lsc = 0; 2912aa10990SRohit Raj dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC; 2922aa10990SRohit Raj } 2932aa10990SRohit Raj } 294ff9e112dSShreyansh Jain return 0; 295ff9e112dSShreyansh Jain } 296ff9e112dSShreyansh Jain 297a7bdc3bdSShreyansh Jain static const uint32_t * 298a7bdc3bdSShreyansh Jain dpaa_supported_ptypes_get(struct rte_eth_dev *dev) 299a7bdc3bdSShreyansh Jain { 300a7bdc3bdSShreyansh Jain static const uint32_t ptypes[] = { 301a7bdc3bdSShreyansh Jain RTE_PTYPE_L2_ETHER, 302ec503d8fSHemant Agrawal RTE_PTYPE_L2_ETHER_VLAN, 303ec503d8fSHemant Agrawal RTE_PTYPE_L2_ETHER_ARP, 304ec503d8fSHemant Agrawal RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 305ec503d8fSHemant Agrawal RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 306ec503d8fSHemant Agrawal RTE_PTYPE_L4_ICMP, 307ec503d8fSHemant Agrawal RTE_PTYPE_L4_TCP, 308ec503d8fSHemant Agrawal RTE_PTYPE_L4_UDP, 309ec503d8fSHemant Agrawal RTE_PTYPE_L4_FRAG, 310a7bdc3bdSShreyansh Jain RTE_PTYPE_L4_TCP, 311a7bdc3bdSShreyansh Jain RTE_PTYPE_L4_UDP, 312a7bdc3bdSShreyansh Jain RTE_PTYPE_L4_SCTP 313a7bdc3bdSShreyansh Jain }; 314a7bdc3bdSShreyansh Jain 315a7bdc3bdSShreyansh Jain PMD_INIT_FUNC_TRACE(); 316a7bdc3bdSShreyansh Jain 317a7bdc3bdSShreyansh Jain if (dev->rx_pkt_burst == dpaa_eth_queue_rx) 318a7bdc3bdSShreyansh Jain return ptypes; 319a7bdc3bdSShreyansh Jain return NULL; 320a7bdc3bdSShreyansh Jain } 321a7bdc3bdSShreyansh Jain 3222aa10990SRohit Raj static void dpaa_interrupt_handler(void *param) 3232aa10990SRohit Raj { 3242aa10990SRohit Raj struct rte_eth_dev *dev = param; 3252aa10990SRohit Raj struct rte_device *rdev = dev->device; 3262aa10990SRohit Raj struct rte_dpaa_device *dpaa_dev; 3272aa10990SRohit Raj struct rte_intr_handle *intr_handle; 3282aa10990SRohit Raj uint64_t buf; 3292aa10990SRohit Raj int bytes_read; 3302aa10990SRohit Raj 3312aa10990SRohit Raj dpaa_dev = container_of(rdev, struct rte_dpaa_device, device); 3322aa10990SRohit Raj intr_handle = &dpaa_dev->intr_handle; 3332aa10990SRohit Raj 3342aa10990SRohit Raj bytes_read = read(intr_handle->fd, &buf, sizeof(uint64_t)); 3352aa10990SRohit Raj if (bytes_read < 0) 3362aa10990SRohit Raj DPAA_PMD_ERR("Error reading eventfd\n"); 3372aa10990SRohit Raj dpaa_eth_link_update(dev, 0); 3382aa10990SRohit Raj _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); 3392aa10990SRohit Raj } 3402aa10990SRohit Raj 341ff9e112dSShreyansh Jain static int dpaa_eth_dev_start(struct rte_eth_dev *dev) 342ff9e112dSShreyansh Jain { 34337f9b54bSShreyansh Jain struct dpaa_if *dpaa_intf = dev->data->dev_private; 34437f9b54bSShreyansh Jain 345ff9e112dSShreyansh Jain PMD_INIT_FUNC_TRACE(); 346ff9e112dSShreyansh Jain 347*f5fe3eedSJun Yang if (!(default_q || fmc_q)) 348*f5fe3eedSJun Yang dpaa_write_fm_config_to_file(); 349*f5fe3eedSJun Yang 350ff9e112dSShreyansh Jain /* Change tx callback to the real one */ 3519124e65dSGagandeep Singh if (dpaa_intf->cgr_tx) 3529124e65dSGagandeep Singh dev->tx_pkt_burst = dpaa_eth_queue_tx_slow; 3539124e65dSGagandeep Singh else 35437f9b54bSShreyansh Jain dev->tx_pkt_burst = dpaa_eth_queue_tx; 3559124e65dSGagandeep Singh 3566b10d1f7SNipun Gupta fman_if_enable_rx(dev->process_private); 357ff9e112dSShreyansh Jain 358ff9e112dSShreyansh Jain return 0; 359ff9e112dSShreyansh Jain } 360ff9e112dSShreyansh Jain 361ff9e112dSShreyansh Jain static void dpaa_eth_dev_stop(struct rte_eth_dev *dev) 362ff9e112dSShreyansh Jain { 3636b10d1f7SNipun Gupta struct fman_if *fif = dev->process_private; 36437f9b54bSShreyansh Jain 36537f9b54bSShreyansh Jain PMD_INIT_FUNC_TRACE(); 36637f9b54bSShreyansh Jain 367133332f0SRadu Bulie if (!fif->is_shared_mac) 3686b10d1f7SNipun Gupta fman_if_disable_rx(fif); 36937f9b54bSShreyansh Jain dev->tx_pkt_burst = dpaa_eth_tx_drop_all; 370ff9e112dSShreyansh Jain } 371ff9e112dSShreyansh Jain 37237f9b54bSShreyansh Jain static void dpaa_eth_dev_close(struct rte_eth_dev *dev) 37337f9b54bSShreyansh Jain { 3742aa10990SRohit Raj struct fman_if *fif = dev->process_private; 3752aa10990SRohit Raj struct __fman_if *__fif; 3762aa10990SRohit Raj struct rte_device *rdev = dev->device; 3772aa10990SRohit Raj struct rte_dpaa_device *dpaa_dev; 3782aa10990SRohit Raj struct rte_intr_handle *intr_handle; 3792aa10990SRohit Raj 38037f9b54bSShreyansh Jain PMD_INIT_FUNC_TRACE(); 38137f9b54bSShreyansh Jain 3822aa10990SRohit Raj dpaa_dev = container_of(rdev, struct rte_dpaa_device, device); 3832aa10990SRohit Raj intr_handle = &dpaa_dev->intr_handle; 3842aa10990SRohit Raj __fif = container_of(fif, struct __fman_if, __if); 3852aa10990SRohit Raj 38637f9b54bSShreyansh Jain dpaa_eth_dev_stop(dev); 3872aa10990SRohit Raj 3882aa10990SRohit Raj if (intr_handle && intr_handle->fd && 3892aa10990SRohit Raj dev->data->dev_conf.intr_conf.lsc != 0) { 3902aa10990SRohit Raj dpaa_intr_disable(__fif->node_name); 3912aa10990SRohit Raj rte_intr_callback_unregister(intr_handle, 3922aa10990SRohit Raj dpaa_interrupt_handler, 3932aa10990SRohit Raj (void *)dev); 3942aa10990SRohit Raj } 39537f9b54bSShreyansh Jain } 39637f9b54bSShreyansh Jain 397cf0fab1dSHemant Agrawal static int 398cf0fab1dSHemant Agrawal dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused, 399cf0fab1dSHemant Agrawal char *fw_version, 400cf0fab1dSHemant Agrawal size_t fw_size) 401cf0fab1dSHemant Agrawal { 402cf0fab1dSHemant Agrawal int ret; 403cf0fab1dSHemant Agrawal FILE *svr_file = NULL; 404cf0fab1dSHemant Agrawal unsigned int svr_ver = 0; 405cf0fab1dSHemant Agrawal 406cf0fab1dSHemant Agrawal PMD_INIT_FUNC_TRACE(); 407cf0fab1dSHemant Agrawal 408cf0fab1dSHemant Agrawal svr_file = fopen(DPAA_SOC_ID_FILE, "r"); 409cf0fab1dSHemant Agrawal if (!svr_file) { 410cf0fab1dSHemant Agrawal DPAA_PMD_ERR("Unable to open SoC device"); 411cf0fab1dSHemant Agrawal return -ENOTSUP; /* Not supported on this infra */ 412cf0fab1dSHemant Agrawal } 4133b59b73dSHemant Agrawal if (fscanf(svr_file, "svr:%x", &svr_ver) > 0) 4143b59b73dSHemant Agrawal dpaa_svr_family = svr_ver & SVR_MASK; 4153b59b73dSHemant Agrawal else 416cf0fab1dSHemant Agrawal DPAA_PMD_ERR("Unable to read SoC device"); 417cf0fab1dSHemant Agrawal 418a8e78906SHemant Agrawal fclose(svr_file); 419cf0fab1dSHemant Agrawal 420a8e78906SHemant Agrawal ret = snprintf(fw_version, fw_size, "SVR:%x-fman-v%x", 421a8e78906SHemant Agrawal svr_ver, fman_ip_rev); 422cf0fab1dSHemant Agrawal ret += 1; /* add the size of '\0' */ 423a8e78906SHemant Agrawal 424cf0fab1dSHemant Agrawal if (fw_size < (uint32_t)ret) 425cf0fab1dSHemant Agrawal return ret; 426cf0fab1dSHemant Agrawal else 427cf0fab1dSHemant Agrawal return 0; 428cf0fab1dSHemant Agrawal } 429cf0fab1dSHemant Agrawal 430bdad90d1SIvan Ilchenko static int dpaa_eth_dev_info(struct rte_eth_dev *dev, 431799db456SShreyansh Jain struct rte_eth_dev_info *dev_info) 432799db456SShreyansh Jain { 433799db456SShreyansh Jain struct dpaa_if *dpaa_intf = dev->data->dev_private; 4346b10d1f7SNipun Gupta struct fman_if *fif = dev->process_private; 435799db456SShreyansh Jain 43636528452SHemant Agrawal DPAA_PMD_DEBUG(": %s", dpaa_intf->name); 437799db456SShreyansh Jain 438799db456SShreyansh Jain dev_info->max_rx_queues = dpaa_intf->nb_rx_queues; 439799db456SShreyansh Jain dev_info->max_tx_queues = dpaa_intf->nb_tx_queues; 440799db456SShreyansh Jain dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN; 441799db456SShreyansh Jain dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER; 442799db456SShreyansh Jain dev_info->max_hash_mac_addrs = 0; 443799db456SShreyansh Jain dev_info->max_vfs = 0; 444799db456SShreyansh Jain dev_info->max_vmdq_pools = ETH_16_POOLS; 4454fa5e0bbSShreyansh Jain dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL; 446c1752a36SSachin Saxena 4476b10d1f7SNipun Gupta if (fif->mac_type == fman_mac_1g) { 448c1752a36SSachin Saxena dev_info->speed_capa = ETH_LINK_SPEED_1G; 4496b10d1f7SNipun Gupta } else if (fif->mac_type == fman_mac_2_5g) { 450eac3c7b9SSachin Saxena dev_info->speed_capa = ETH_LINK_SPEED_1G 451eac3c7b9SSachin Saxena | ETH_LINK_SPEED_2_5G; 4526b10d1f7SNipun Gupta } else if (fif->mac_type == fman_mac_10g) { 453eac3c7b9SSachin Saxena dev_info->speed_capa = ETH_LINK_SPEED_1G 454eac3c7b9SSachin Saxena | ETH_LINK_SPEED_2_5G 455eac3c7b9SSachin Saxena | ETH_LINK_SPEED_10G; 456bdad90d1SIvan Ilchenko } else { 457c1752a36SSachin Saxena DPAA_PMD_ERR("invalid link_speed: %s, %d", 4586b10d1f7SNipun Gupta dpaa_intf->name, fif->mac_type); 459bdad90d1SIvan Ilchenko return -EINVAL; 460bdad90d1SIvan Ilchenko } 461c1752a36SSachin Saxena 462c5836218SSunil Kumar Kori dev_info->rx_offload_capa = dev_rx_offloads_sup | 463c5836218SSunil Kumar Kori dev_rx_offloads_nodis; 464c5836218SSunil Kumar Kori dev_info->tx_offload_capa = dev_tx_offloads_sup | 465c5836218SSunil Kumar Kori dev_tx_offloads_nodis; 4662c01a48aSShreyansh Jain dev_info->default_rxportconf.burst_size = DPAA_DEF_RX_BURST_SIZE; 4672c01a48aSShreyansh Jain dev_info->default_txportconf.burst_size = DPAA_DEF_TX_BURST_SIZE; 468e35ead33SHemant Agrawal dev_info->default_rxportconf.nb_queues = 1; 469e35ead33SHemant Agrawal dev_info->default_txportconf.nb_queues = 1; 470e35ead33SHemant Agrawal dev_info->default_txportconf.ring_size = CGR_TX_CGR_THRESH; 471e35ead33SHemant Agrawal dev_info->default_rxportconf.ring_size = CGR_RX_PERFQ_THRESH; 472bdad90d1SIvan Ilchenko 473bdad90d1SIvan Ilchenko return 0; 474799db456SShreyansh Jain } 475799db456SShreyansh Jain 4762e6f5657SApeksha Gupta static int 4772e6f5657SApeksha Gupta dpaa_dev_rx_burst_mode_get(struct rte_eth_dev *dev, 4782e6f5657SApeksha Gupta __rte_unused uint16_t queue_id, 4792e6f5657SApeksha Gupta struct rte_eth_burst_mode *mode) 4802e6f5657SApeksha Gupta { 4812e6f5657SApeksha Gupta struct rte_eth_conf *eth_conf = &dev->data->dev_conf; 4822e6f5657SApeksha Gupta int ret = -EINVAL; 4832e6f5657SApeksha Gupta unsigned int i; 4842e6f5657SApeksha Gupta const struct burst_info { 4852e6f5657SApeksha Gupta uint64_t flags; 4862e6f5657SApeksha Gupta const char *output; 4872e6f5657SApeksha Gupta } rx_offload_map[] = { 4882e6f5657SApeksha Gupta {DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"}, 4892e6f5657SApeksha Gupta {DEV_RX_OFFLOAD_SCATTER, " Scattered,"}, 4902e6f5657SApeksha Gupta {DEV_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"}, 4912e6f5657SApeksha Gupta {DEV_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"}, 4922e6f5657SApeksha Gupta {DEV_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"}, 4932e6f5657SApeksha Gupta {DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"}, 4942e6f5657SApeksha Gupta {DEV_RX_OFFLOAD_RSS_HASH, " RSS,"} 4952e6f5657SApeksha Gupta }; 4962e6f5657SApeksha Gupta 4972e6f5657SApeksha Gupta /* Update Rx offload info */ 4982e6f5657SApeksha Gupta for (i = 0; i < RTE_DIM(rx_offload_map); i++) { 4992e6f5657SApeksha Gupta if (eth_conf->rxmode.offloads & rx_offload_map[i].flags) { 5002e6f5657SApeksha Gupta snprintf(mode->info, sizeof(mode->info), "%s", 5012e6f5657SApeksha Gupta rx_offload_map[i].output); 5022e6f5657SApeksha Gupta ret = 0; 5032e6f5657SApeksha Gupta break; 5042e6f5657SApeksha Gupta } 5052e6f5657SApeksha Gupta } 5062e6f5657SApeksha Gupta return ret; 5072e6f5657SApeksha Gupta } 5082e6f5657SApeksha Gupta 5092e6f5657SApeksha Gupta static int 5102e6f5657SApeksha Gupta dpaa_dev_tx_burst_mode_get(struct rte_eth_dev *dev, 5112e6f5657SApeksha Gupta __rte_unused uint16_t queue_id, 5122e6f5657SApeksha Gupta struct rte_eth_burst_mode *mode) 5132e6f5657SApeksha Gupta { 5142e6f5657SApeksha Gupta struct rte_eth_conf *eth_conf = &dev->data->dev_conf; 5152e6f5657SApeksha Gupta int ret = -EINVAL; 5162e6f5657SApeksha Gupta unsigned int i; 5172e6f5657SApeksha Gupta const struct burst_info { 5182e6f5657SApeksha Gupta uint64_t flags; 5192e6f5657SApeksha Gupta const char *output; 5202e6f5657SApeksha Gupta } tx_offload_map[] = { 5212e6f5657SApeksha Gupta {DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"}, 5222e6f5657SApeksha Gupta {DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"}, 5232e6f5657SApeksha Gupta {DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"}, 5242e6f5657SApeksha Gupta {DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"}, 5252e6f5657SApeksha Gupta {DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"}, 5262e6f5657SApeksha Gupta {DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"}, 5272e6f5657SApeksha Gupta {DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"}, 5282e6f5657SApeksha Gupta {DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"} 5292e6f5657SApeksha Gupta }; 5302e6f5657SApeksha Gupta 5312e6f5657SApeksha Gupta /* Update Tx offload info */ 5322e6f5657SApeksha Gupta for (i = 0; i < RTE_DIM(tx_offload_map); i++) { 5332e6f5657SApeksha Gupta if (eth_conf->txmode.offloads & tx_offload_map[i].flags) { 5342e6f5657SApeksha Gupta snprintf(mode->info, sizeof(mode->info), "%s", 5352e6f5657SApeksha Gupta tx_offload_map[i].output); 5362e6f5657SApeksha Gupta ret = 0; 5372e6f5657SApeksha Gupta break; 5382e6f5657SApeksha Gupta } 5392e6f5657SApeksha Gupta } 5402e6f5657SApeksha Gupta return ret; 5412e6f5657SApeksha Gupta } 5422e6f5657SApeksha Gupta 543e124a69fSShreyansh Jain static int dpaa_eth_link_update(struct rte_eth_dev *dev, 544e124a69fSShreyansh Jain int wait_to_complete __rte_unused) 545e124a69fSShreyansh Jain { 546e124a69fSShreyansh Jain struct dpaa_if *dpaa_intf = dev->data->dev_private; 547e124a69fSShreyansh Jain struct rte_eth_link *link = &dev->data->dev_link; 5486b10d1f7SNipun Gupta struct fman_if *fif = dev->process_private; 5492aa10990SRohit Raj struct __fman_if *__fif = container_of(fif, struct __fman_if, __if); 5502aa10990SRohit Raj int ret; 551e124a69fSShreyansh Jain 552e124a69fSShreyansh Jain PMD_INIT_FUNC_TRACE(); 553e124a69fSShreyansh Jain 5546b10d1f7SNipun Gupta if (fif->mac_type == fman_mac_1g) 5551633d3c4SFerruh Yigit link->link_speed = ETH_SPEED_NUM_1G; 5566b10d1f7SNipun Gupta else if (fif->mac_type == fman_mac_2_5g) 557eac3c7b9SSachin Saxena link->link_speed = ETH_SPEED_NUM_2_5G; 5586b10d1f7SNipun Gupta else if (fif->mac_type == fman_mac_10g) 5591633d3c4SFerruh Yigit link->link_speed = ETH_SPEED_NUM_10G; 560e124a69fSShreyansh Jain else 561e124a69fSShreyansh Jain DPAA_PMD_ERR("invalid link_speed: %s, %d", 5626b10d1f7SNipun Gupta dpaa_intf->name, fif->mac_type); 563e124a69fSShreyansh Jain 564f231d48dSRohit Raj if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) { 5652aa10990SRohit Raj ret = dpaa_get_link_status(__fif->node_name); 566f231d48dSRohit Raj if (ret < 0) 5672aa10990SRohit Raj return ret; 568f231d48dSRohit Raj link->link_status = ret; 569f231d48dSRohit Raj } else { 570f231d48dSRohit Raj link->link_status = dpaa_intf->valid; 5712aa10990SRohit Raj } 5722aa10990SRohit Raj 573e124a69fSShreyansh Jain link->link_duplex = ETH_LINK_FULL_DUPLEX; 574e124a69fSShreyansh Jain link->link_autoneg = ETH_LINK_AUTONEG; 5752aa10990SRohit Raj 5762aa10990SRohit Raj DPAA_PMD_INFO("Port %d Link is %s\n", dev->data->port_id, 5772aa10990SRohit Raj link->link_status ? "Up" : "Down"); 578e124a69fSShreyansh Jain return 0; 579e124a69fSShreyansh Jain } 580e124a69fSShreyansh Jain 581d5b0924bSMatan Azrad static int dpaa_eth_stats_get(struct rte_eth_dev *dev, 582e1ad3a05SShreyansh Jain struct rte_eth_stats *stats) 583e1ad3a05SShreyansh Jain { 584e1ad3a05SShreyansh Jain PMD_INIT_FUNC_TRACE(); 585e1ad3a05SShreyansh Jain 5866b10d1f7SNipun Gupta fman_if_stats_get(dev->process_private, stats); 587d5b0924bSMatan Azrad return 0; 588e1ad3a05SShreyansh Jain } 589e1ad3a05SShreyansh Jain 5909970a9adSIgor Romanov static int dpaa_eth_stats_reset(struct rte_eth_dev *dev) 591e1ad3a05SShreyansh Jain { 592e1ad3a05SShreyansh Jain PMD_INIT_FUNC_TRACE(); 593e1ad3a05SShreyansh Jain 5946b10d1f7SNipun Gupta fman_if_stats_reset(dev->process_private); 5959970a9adSIgor Romanov 5969970a9adSIgor Romanov return 0; 597e1ad3a05SShreyansh Jain } 59895ef603dSShreyansh Jain 599b21ed3e2SHemant Agrawal static int 600b21ed3e2SHemant Agrawal dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 601b21ed3e2SHemant Agrawal unsigned int n) 602b21ed3e2SHemant Agrawal { 603b21ed3e2SHemant Agrawal unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings); 604b21ed3e2SHemant Agrawal uint64_t values[sizeof(struct dpaa_if_stats) / 8]; 605b21ed3e2SHemant Agrawal 606b21ed3e2SHemant Agrawal if (n < num) 607b21ed3e2SHemant Agrawal return num; 608b21ed3e2SHemant Agrawal 609339c1025SHemant Agrawal if (xstats == NULL) 610339c1025SHemant Agrawal return 0; 611339c1025SHemant Agrawal 6126b10d1f7SNipun Gupta fman_if_stats_get_all(dev->process_private, values, 613b21ed3e2SHemant Agrawal sizeof(struct dpaa_if_stats) / 8); 614b21ed3e2SHemant Agrawal 615b21ed3e2SHemant Agrawal for (i = 0; i < num; i++) { 616b21ed3e2SHemant Agrawal xstats[i].id = i; 617b21ed3e2SHemant Agrawal xstats[i].value = values[dpaa_xstats_strings[i].offset / 8]; 618b21ed3e2SHemant Agrawal } 619b21ed3e2SHemant Agrawal return i; 620b21ed3e2SHemant Agrawal } 621b21ed3e2SHemant Agrawal 622b21ed3e2SHemant Agrawal static int 623b21ed3e2SHemant Agrawal dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 624b21ed3e2SHemant Agrawal struct rte_eth_xstat_name *xstats_names, 6255c3fc73eSHemant Agrawal unsigned int limit) 626b21ed3e2SHemant Agrawal { 627b21ed3e2SHemant Agrawal unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings); 628b21ed3e2SHemant Agrawal 6295c3fc73eSHemant Agrawal if (limit < stat_cnt) 6305c3fc73eSHemant Agrawal return stat_cnt; 6315c3fc73eSHemant Agrawal 632b21ed3e2SHemant Agrawal if (xstats_names != NULL) 633b21ed3e2SHemant Agrawal for (i = 0; i < stat_cnt; i++) 6346723c0fcSBruce Richardson strlcpy(xstats_names[i].name, 6356723c0fcSBruce Richardson dpaa_xstats_strings[i].name, 6366723c0fcSBruce Richardson sizeof(xstats_names[i].name)); 637b21ed3e2SHemant Agrawal 638b21ed3e2SHemant Agrawal return stat_cnt; 639b21ed3e2SHemant Agrawal } 640b21ed3e2SHemant Agrawal 641b21ed3e2SHemant Agrawal static int 642b21ed3e2SHemant Agrawal dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 643b21ed3e2SHemant Agrawal uint64_t *values, unsigned int n) 644b21ed3e2SHemant Agrawal { 645b21ed3e2SHemant Agrawal unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings); 646b21ed3e2SHemant Agrawal uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8]; 647b21ed3e2SHemant Agrawal 648b21ed3e2SHemant Agrawal if (!ids) { 649b21ed3e2SHemant Agrawal if (n < stat_cnt) 650b21ed3e2SHemant Agrawal return stat_cnt; 651b21ed3e2SHemant Agrawal 652b21ed3e2SHemant Agrawal if (!values) 653b21ed3e2SHemant Agrawal return 0; 654b21ed3e2SHemant Agrawal 6556b10d1f7SNipun Gupta fman_if_stats_get_all(dev->process_private, values_copy, 6565c3fc73eSHemant Agrawal sizeof(struct dpaa_if_stats) / 8); 657b21ed3e2SHemant Agrawal 658b21ed3e2SHemant Agrawal for (i = 0; i < stat_cnt; i++) 659b21ed3e2SHemant Agrawal values[i] = 660b21ed3e2SHemant Agrawal values_copy[dpaa_xstats_strings[i].offset / 8]; 661b21ed3e2SHemant Agrawal 662b21ed3e2SHemant Agrawal return stat_cnt; 663b21ed3e2SHemant Agrawal } 664b21ed3e2SHemant Agrawal 665b21ed3e2SHemant Agrawal dpaa_xstats_get_by_id(dev, NULL, values_copy, stat_cnt); 666b21ed3e2SHemant Agrawal 667b21ed3e2SHemant Agrawal for (i = 0; i < n; i++) { 668b21ed3e2SHemant Agrawal if (ids[i] >= stat_cnt) { 669b21ed3e2SHemant Agrawal DPAA_PMD_ERR("id value isn't valid"); 670b21ed3e2SHemant Agrawal return -1; 671b21ed3e2SHemant Agrawal } 672b21ed3e2SHemant Agrawal values[i] = values_copy[ids[i]]; 673b21ed3e2SHemant Agrawal } 674b21ed3e2SHemant Agrawal return n; 675b21ed3e2SHemant Agrawal } 676b21ed3e2SHemant Agrawal 677b21ed3e2SHemant Agrawal static int 678b21ed3e2SHemant Agrawal dpaa_xstats_get_names_by_id( 679b21ed3e2SHemant Agrawal struct rte_eth_dev *dev, 680b21ed3e2SHemant Agrawal struct rte_eth_xstat_name *xstats_names, 681b21ed3e2SHemant Agrawal const uint64_t *ids, 682b21ed3e2SHemant Agrawal unsigned int limit) 683b21ed3e2SHemant Agrawal { 684b21ed3e2SHemant Agrawal unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings); 685b21ed3e2SHemant Agrawal struct rte_eth_xstat_name xstats_names_copy[stat_cnt]; 686b21ed3e2SHemant Agrawal 687b21ed3e2SHemant Agrawal if (!ids) 688b21ed3e2SHemant Agrawal return dpaa_xstats_get_names(dev, xstats_names, limit); 689b21ed3e2SHemant Agrawal 690b21ed3e2SHemant Agrawal dpaa_xstats_get_names(dev, xstats_names_copy, limit); 691b21ed3e2SHemant Agrawal 692b21ed3e2SHemant Agrawal for (i = 0; i < limit; i++) { 693b21ed3e2SHemant Agrawal if (ids[i] >= stat_cnt) { 694b21ed3e2SHemant Agrawal DPAA_PMD_ERR("id value isn't valid"); 695b21ed3e2SHemant Agrawal return -1; 696b21ed3e2SHemant Agrawal } 697b21ed3e2SHemant Agrawal strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name); 698b21ed3e2SHemant Agrawal } 699b21ed3e2SHemant Agrawal return limit; 700b21ed3e2SHemant Agrawal } 701b21ed3e2SHemant Agrawal 7029039c812SAndrew Rybchenko static int dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev) 70395ef603dSShreyansh Jain { 70495ef603dSShreyansh Jain PMD_INIT_FUNC_TRACE(); 70595ef603dSShreyansh Jain 7066b10d1f7SNipun Gupta fman_if_promiscuous_enable(dev->process_private); 7079039c812SAndrew Rybchenko 7089039c812SAndrew Rybchenko return 0; 70995ef603dSShreyansh Jain } 71095ef603dSShreyansh Jain 7119039c812SAndrew Rybchenko static int dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev) 71295ef603dSShreyansh Jain { 71395ef603dSShreyansh Jain PMD_INIT_FUNC_TRACE(); 71495ef603dSShreyansh Jain 7156b10d1f7SNipun Gupta fman_if_promiscuous_disable(dev->process_private); 7169039c812SAndrew Rybchenko 7179039c812SAndrew Rybchenko return 0; 71895ef603dSShreyansh Jain } 71995ef603dSShreyansh Jain 720ca041cd4SIvan Ilchenko static int dpaa_eth_multicast_enable(struct rte_eth_dev *dev) 72144dd70a3SShreyansh Jain { 72244dd70a3SShreyansh Jain PMD_INIT_FUNC_TRACE(); 72344dd70a3SShreyansh Jain 7246b10d1f7SNipun Gupta fman_if_set_mcast_filter_table(dev->process_private); 725ca041cd4SIvan Ilchenko 726ca041cd4SIvan Ilchenko return 0; 72744dd70a3SShreyansh Jain } 72844dd70a3SShreyansh Jain 729ca041cd4SIvan Ilchenko static int dpaa_eth_multicast_disable(struct rte_eth_dev *dev) 73044dd70a3SShreyansh Jain { 73144dd70a3SShreyansh Jain PMD_INIT_FUNC_TRACE(); 73244dd70a3SShreyansh Jain 7336b10d1f7SNipun Gupta fman_if_reset_mcast_filter_table(dev->process_private); 734ca041cd4SIvan Ilchenko 735ca041cd4SIvan Ilchenko return 0; 73644dd70a3SShreyansh Jain } 73744dd70a3SShreyansh Jain 738e4abd4ffSJun Yang static void dpaa_fman_if_pool_setup(struct rte_eth_dev *dev) 739e4abd4ffSJun Yang { 740e4abd4ffSJun Yang struct dpaa_if *dpaa_intf = dev->data->dev_private; 741e4abd4ffSJun Yang struct fman_if_ic_params icp; 742e4abd4ffSJun Yang uint32_t fd_offset; 743e4abd4ffSJun Yang uint32_t bp_size; 744e4abd4ffSJun Yang 745e4abd4ffSJun Yang memset(&icp, 0, sizeof(icp)); 746e4abd4ffSJun Yang /* set ICEOF for to the default value , which is 0*/ 747e4abd4ffSJun Yang icp.iciof = DEFAULT_ICIOF; 748e4abd4ffSJun Yang icp.iceof = DEFAULT_RX_ICEOF; 749e4abd4ffSJun Yang icp.icsz = DEFAULT_ICSZ; 750e4abd4ffSJun Yang fman_if_set_ic_params(dev->process_private, &icp); 751e4abd4ffSJun Yang 752e4abd4ffSJun Yang fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE; 753e4abd4ffSJun Yang fman_if_set_fdoff(dev->process_private, fd_offset); 754e4abd4ffSJun Yang 755e4abd4ffSJun Yang /* Buffer pool size should be equal to Dataroom Size*/ 756e4abd4ffSJun Yang bp_size = rte_pktmbuf_data_room_size(dpaa_intf->bp_info->mp); 757e4abd4ffSJun Yang 758e4abd4ffSJun Yang fman_if_set_bp(dev->process_private, 759e4abd4ffSJun Yang dpaa_intf->bp_info->mp->size, 760e4abd4ffSJun Yang dpaa_intf->bp_info->bpid, bp_size); 761e4abd4ffSJun Yang } 762e4abd4ffSJun Yang 763e4abd4ffSJun Yang static inline int dpaa_eth_rx_queue_bp_check(struct rte_eth_dev *dev, 764e4abd4ffSJun Yang int8_t vsp_id, uint32_t bpid) 765e4abd4ffSJun Yang { 766e4abd4ffSJun Yang struct dpaa_if *dpaa_intf = dev->data->dev_private; 767e4abd4ffSJun Yang struct fman_if *fif = dev->process_private; 768e4abd4ffSJun Yang 769e4abd4ffSJun Yang if (fif->num_profiles) { 770e4abd4ffSJun Yang if (vsp_id < 0) 771e4abd4ffSJun Yang vsp_id = fif->base_profile_id; 772e4abd4ffSJun Yang } else { 773e4abd4ffSJun Yang if (vsp_id < 0) 774e4abd4ffSJun Yang vsp_id = 0; 775e4abd4ffSJun Yang } 776e4abd4ffSJun Yang 777e4abd4ffSJun Yang if (dpaa_intf->vsp_bpid[vsp_id] && 778e4abd4ffSJun Yang bpid != dpaa_intf->vsp_bpid[vsp_id]) { 779e4abd4ffSJun Yang DPAA_PMD_ERR("Various MPs are assigned to RXQs with same VSP"); 780e4abd4ffSJun Yang 781e4abd4ffSJun Yang return -1; 782e4abd4ffSJun Yang } 783e4abd4ffSJun Yang 784e4abd4ffSJun Yang return 0; 785e4abd4ffSJun Yang } 786e4abd4ffSJun Yang 78737f9b54bSShreyansh Jain static 78837f9b54bSShreyansh Jain int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, 78962f53995SHemant Agrawal uint16_t nb_desc, 79037f9b54bSShreyansh Jain unsigned int socket_id __rte_unused, 791e335cce4SHemant Agrawal const struct rte_eth_rxconf *rx_conf, 79237f9b54bSShreyansh Jain struct rte_mempool *mp) 79337f9b54bSShreyansh Jain { 79437f9b54bSShreyansh Jain struct dpaa_if *dpaa_intf = dev->data->dev_private; 7956b10d1f7SNipun Gupta struct fman_if *fif = dev->process_private; 79662f53995SHemant Agrawal struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx]; 7970c504f69SHemant Agrawal struct qm_mcc_initfq opts = {0}; 7980c504f69SHemant Agrawal u32 flags = 0; 7990c504f69SHemant Agrawal int ret; 80055576ac2SHemant Agrawal u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM; 80137f9b54bSShreyansh Jain 80237f9b54bSShreyansh Jain PMD_INIT_FUNC_TRACE(); 80337f9b54bSShreyansh Jain 8046fd3639aSHemant Agrawal if (queue_idx >= dev->data->nb_rx_queues) { 8056fd3639aSHemant Agrawal rte_errno = EOVERFLOW; 8066fd3639aSHemant Agrawal DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)", 8076fd3639aSHemant Agrawal (void *)dev, queue_idx, dev->data->nb_rx_queues); 8086fd3639aSHemant Agrawal return -rte_errno; 8096fd3639aSHemant Agrawal } 8106fd3639aSHemant Agrawal 811e335cce4SHemant Agrawal /* Rx deferred start is not supported */ 812e335cce4SHemant Agrawal if (rx_conf->rx_deferred_start) { 813e335cce4SHemant Agrawal DPAA_PMD_ERR("%p:Rx deferred start not supported", (void *)dev); 814e335cce4SHemant Agrawal return -EINVAL; 815e335cce4SHemant Agrawal } 8162cf9264fSHemant Agrawal rxq->nb_desc = UINT16_MAX; 8172cf9264fSHemant Agrawal rxq->offloads = rx_conf->offloads; 818e335cce4SHemant Agrawal 8196fd3639aSHemant Agrawal DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)", 8206fd3639aSHemant Agrawal queue_idx, rxq->fqid); 82137f9b54bSShreyansh Jain 822e4abd4ffSJun Yang if (!fif->num_profiles) { 823e4abd4ffSJun Yang if (dpaa_intf->bp_info && dpaa_intf->bp_info->bp && 824e4abd4ffSJun Yang dpaa_intf->bp_info->mp != mp) { 825e4abd4ffSJun Yang DPAA_PMD_WARN("Multiple pools on same interface not" 826e4abd4ffSJun Yang " supported"); 827e4abd4ffSJun Yang return -EINVAL; 828e4abd4ffSJun Yang } 829e4abd4ffSJun Yang } else { 830e4abd4ffSJun Yang if (dpaa_eth_rx_queue_bp_check(dev, rxq->vsp_id, 831e4abd4ffSJun Yang DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid)) { 832e4abd4ffSJun Yang return -EINVAL; 833e4abd4ffSJun Yang } 834e4abd4ffSJun Yang } 835e4abd4ffSJun Yang 83655576ac2SHemant Agrawal /* Max packet can fit in single buffer */ 83755576ac2SHemant Agrawal if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= buffsz) { 83855576ac2SHemant Agrawal ; 83955576ac2SHemant Agrawal } else if (dev->data->dev_conf.rxmode.offloads & 84055576ac2SHemant Agrawal DEV_RX_OFFLOAD_SCATTER) { 84155576ac2SHemant Agrawal if (dev->data->dev_conf.rxmode.max_rx_pkt_len > 84255576ac2SHemant Agrawal buffsz * DPAA_SGT_MAX_ENTRIES) { 84355576ac2SHemant Agrawal DPAA_PMD_ERR("max RxPkt size %d too big to fit " 84455576ac2SHemant Agrawal "MaxSGlist %d", 84555576ac2SHemant Agrawal dev->data->dev_conf.rxmode.max_rx_pkt_len, 84655576ac2SHemant Agrawal buffsz * DPAA_SGT_MAX_ENTRIES); 84755576ac2SHemant Agrawal rte_errno = EOVERFLOW; 84855576ac2SHemant Agrawal return -rte_errno; 84955576ac2SHemant Agrawal } 85055576ac2SHemant Agrawal } else { 85155576ac2SHemant Agrawal DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is" 85255576ac2SHemant Agrawal " larger than a single mbuf (%u) and scattered" 85355576ac2SHemant Agrawal " mode has not been requested", 85455576ac2SHemant Agrawal dev->data->dev_conf.rxmode.max_rx_pkt_len, 85555576ac2SHemant Agrawal buffsz - RTE_PKTMBUF_HEADROOM); 85655576ac2SHemant Agrawal } 85755576ac2SHemant Agrawal 85837f9b54bSShreyansh Jain dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp); 85937f9b54bSShreyansh Jain 860e4abd4ffSJun Yang /* For shared interface, it's done in kernel, skip.*/ 861e4abd4ffSJun Yang if (!fif->is_shared_mac) 862e4abd4ffSJun Yang dpaa_fman_if_pool_setup(dev); 86337f9b54bSShreyansh Jain 864e4abd4ffSJun Yang if (fif->num_profiles) { 865e4abd4ffSJun Yang int8_t vsp_id = rxq->vsp_id; 86637f9b54bSShreyansh Jain 867e4abd4ffSJun Yang if (vsp_id >= 0) { 868e4abd4ffSJun Yang ret = dpaa_port_vsp_update(dpaa_intf, fmc_q, vsp_id, 869e4abd4ffSJun Yang DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid, 870e4abd4ffSJun Yang fif); 871e4abd4ffSJun Yang if (ret) { 872e4abd4ffSJun Yang DPAA_PMD_ERR("dpaa_port_vsp_update failed"); 873e4abd4ffSJun Yang return ret; 87437f9b54bSShreyansh Jain } 875e4abd4ffSJun Yang } else { 876e4abd4ffSJun Yang DPAA_PMD_INFO("Base profile is associated to" 877e4abd4ffSJun Yang " RXQ fqid:%d\r\n", rxq->fqid); 878e4abd4ffSJun Yang if (fif->is_shared_mac) { 879e4abd4ffSJun Yang DPAA_PMD_ERR("Fatal: Base profile is associated" 880e4abd4ffSJun Yang " to shared interface on DPDK."); 881e4abd4ffSJun Yang return -EINVAL; 882e4abd4ffSJun Yang } 883e4abd4ffSJun Yang dpaa_intf->vsp_bpid[fif->base_profile_id] = 884e4abd4ffSJun Yang DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid; 885e4abd4ffSJun Yang } 886e4abd4ffSJun Yang } else { 887e4abd4ffSJun Yang dpaa_intf->vsp_bpid[0] = 888e4abd4ffSJun Yang DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid; 889e4abd4ffSJun Yang } 890e4abd4ffSJun Yang 891e4abd4ffSJun Yang dpaa_intf->valid = 1; 89255576ac2SHemant Agrawal DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name, 8936b10d1f7SNipun Gupta fman_if_get_sg_enable(fif), 89455576ac2SHemant Agrawal dev->data->dev_conf.rxmode.max_rx_pkt_len); 8950c504f69SHemant Agrawal /* checking if push mode only, no error check for now */ 896a6a75240SNipun Gupta if (!rxq->is_static && 897a6a75240SNipun Gupta dpaa_push_mode_max_queue > dpaa_push_queue_idx) { 898b9c94167SNipun Gupta struct qman_portal *qp; 899a6a75240SNipun Gupta int q_fd; 900b9c94167SNipun Gupta 9010c504f69SHemant Agrawal dpaa_push_queue_idx++; 9020c504f69SHemant Agrawal opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA; 9030c504f69SHemant Agrawal opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | 9040c504f69SHemant Agrawal QM_FQCTRL_CTXASTASHING | 9050c504f69SHemant Agrawal QM_FQCTRL_PREFERINCACHE; 9060c504f69SHemant Agrawal opts.fqd.context_a.stashing.exclusive = 0; 907b9083ea5SNipun Gupta /* In muticore scenario stashing becomes a bottleneck on LS1046. 908b9083ea5SNipun Gupta * So do not enable stashing in this case 909b9083ea5SNipun Gupta */ 910b9083ea5SNipun Gupta if (dpaa_svr_family != SVR_LS1046A_FAMILY) 9110c504f69SHemant Agrawal opts.fqd.context_a.stashing.annotation_cl = 9120c504f69SHemant Agrawal DPAA_IF_RX_ANNOTATION_STASH; 9130c504f69SHemant Agrawal opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH; 9140c504f69SHemant Agrawal opts.fqd.context_a.stashing.context_cl = 9150c504f69SHemant Agrawal DPAA_IF_RX_CONTEXT_STASH; 91662f53995SHemant Agrawal 9170c504f69SHemant Agrawal /*Create a channel and associate given queue with the channel*/ 9180c504f69SHemant Agrawal qman_alloc_pool_range((u32 *)&rxq->ch_id, 1, 1, 0); 9190c504f69SHemant Agrawal opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ; 9200c504f69SHemant Agrawal opts.fqd.dest.channel = rxq->ch_id; 9210c504f69SHemant Agrawal opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY; 9220c504f69SHemant Agrawal flags = QMAN_INITFQ_FLAG_SCHED; 9230c504f69SHemant Agrawal 9240c504f69SHemant Agrawal /* Configure tail drop */ 9250c504f69SHemant Agrawal if (dpaa_intf->cgr_rx) { 9260c504f69SHemant Agrawal opts.we_mask |= QM_INITFQ_WE_CGID; 9270c504f69SHemant Agrawal opts.fqd.cgid = dpaa_intf->cgr_rx[queue_idx].cgrid; 9280c504f69SHemant Agrawal opts.fqd.fq_ctrl |= QM_FQCTRL_CGE; 9290c504f69SHemant Agrawal } 9300c504f69SHemant Agrawal ret = qman_init_fq(rxq, flags, &opts); 9316fd3639aSHemant Agrawal if (ret) { 9326fd3639aSHemant Agrawal DPAA_PMD_ERR("Channel/Q association failed. fqid 0x%x " 9336fd3639aSHemant Agrawal "ret:%d(%s)", rxq->fqid, ret, strerror(ret)); 9346fd3639aSHemant Agrawal return ret; 9356fd3639aSHemant Agrawal } 93619b4aba2SHemant Agrawal if (dpaa_svr_family == SVR_LS1043A_FAMILY) { 93719b4aba2SHemant Agrawal rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb_no_prefetch; 93819b4aba2SHemant Agrawal } else { 939b9083ea5SNipun Gupta rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb; 940b9083ea5SNipun Gupta rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare; 94119b4aba2SHemant Agrawal } 94219b4aba2SHemant Agrawal 9430c504f69SHemant Agrawal rxq->is_static = true; 944b9c94167SNipun Gupta 945b9c94167SNipun Gupta /* Allocate qman specific portals */ 946a6a75240SNipun Gupta qp = fsl_qman_fq_portal_create(&q_fd); 947b9c94167SNipun Gupta if (!qp) { 948b9c94167SNipun Gupta DPAA_PMD_ERR("Unable to alloc fq portal"); 949b9c94167SNipun Gupta return -1; 950b9c94167SNipun Gupta } 951b9c94167SNipun Gupta rxq->qp = qp; 952a6a75240SNipun Gupta 953a6a75240SNipun Gupta /* Set up the device interrupt handler */ 954a6a75240SNipun Gupta if (!dev->intr_handle) { 955a6a75240SNipun Gupta struct rte_dpaa_device *dpaa_dev; 956a6a75240SNipun Gupta struct rte_device *rdev = dev->device; 957a6a75240SNipun Gupta 958a6a75240SNipun Gupta dpaa_dev = container_of(rdev, struct rte_dpaa_device, 959a6a75240SNipun Gupta device); 960a6a75240SNipun Gupta dev->intr_handle = &dpaa_dev->intr_handle; 961a6a75240SNipun Gupta dev->intr_handle->intr_vec = rte_zmalloc(NULL, 962a6a75240SNipun Gupta dpaa_push_mode_max_queue, 0); 963a6a75240SNipun Gupta if (!dev->intr_handle->intr_vec) { 964a6a75240SNipun Gupta DPAA_PMD_ERR("intr_vec alloc failed"); 965a6a75240SNipun Gupta return -ENOMEM; 966a6a75240SNipun Gupta } 967a6a75240SNipun Gupta dev->intr_handle->nb_efd = dpaa_push_mode_max_queue; 968a6a75240SNipun Gupta dev->intr_handle->max_intr = dpaa_push_mode_max_queue; 969a6a75240SNipun Gupta } 970a6a75240SNipun Gupta 971a6a75240SNipun Gupta dev->intr_handle->type = RTE_INTR_HANDLE_EXT; 972a6a75240SNipun Gupta dev->intr_handle->intr_vec[queue_idx] = queue_idx + 1; 973a6a75240SNipun Gupta dev->intr_handle->efds[queue_idx] = q_fd; 974a6a75240SNipun Gupta rxq->q_fd = q_fd; 9750c504f69SHemant Agrawal } 976e1797f4bSAkhil Goyal rxq->bp_array = rte_dpaa_bpid_info; 97762f53995SHemant Agrawal dev->data->rx_queues[queue_idx] = rxq; 97862f53995SHemant Agrawal 97962f53995SHemant Agrawal /* configure the CGR size as per the desc size */ 98062f53995SHemant Agrawal if (dpaa_intf->cgr_rx) { 98162f53995SHemant Agrawal struct qm_mcc_initcgr cgr_opts = {0}; 98262f53995SHemant Agrawal 9832cf9264fSHemant Agrawal rxq->nb_desc = nb_desc; 98462f53995SHemant Agrawal /* Enable tail drop with cgr on this queue */ 98562f53995SHemant Agrawal qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, nb_desc, 0); 98662f53995SHemant Agrawal ret = qman_modify_cgr(dpaa_intf->cgr_rx, 0, &cgr_opts); 98762f53995SHemant Agrawal if (ret) { 98862f53995SHemant Agrawal DPAA_PMD_WARN( 98962f53995SHemant Agrawal "rx taildrop modify fail on fqid %d (ret=%d)", 99062f53995SHemant Agrawal rxq->fqid, ret); 99162f53995SHemant Agrawal } 99262f53995SHemant Agrawal } 99337f9b54bSShreyansh Jain 99437f9b54bSShreyansh Jain return 0; 99537f9b54bSShreyansh Jain } 99637f9b54bSShreyansh Jain 9971e06b6dcSHemant Agrawal int 99877b7b81eSNeil Horman dpaa_eth_eventq_attach(const struct rte_eth_dev *dev, 9995e745593SSunil Kumar Kori int eth_rx_queue_id, 10005e745593SSunil Kumar Kori u16 ch_id, 10015e745593SSunil Kumar Kori const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) 10025e745593SSunil Kumar Kori { 10035e745593SSunil Kumar Kori int ret; 10045e745593SSunil Kumar Kori u32 flags = 0; 10055e745593SSunil Kumar Kori struct dpaa_if *dpaa_intf = dev->data->dev_private; 10065e745593SSunil Kumar Kori struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id]; 10075e745593SSunil Kumar Kori struct qm_mcc_initfq opts = {0}; 10085e745593SSunil Kumar Kori 10095e745593SSunil Kumar Kori if (dpaa_push_mode_max_queue) 1010079a67c2SHemant Agrawal DPAA_PMD_WARN("PUSH mode q and EVENTDEV are not compatible\n" 1011079a67c2SHemant Agrawal "PUSH mode already enabled for first %d queues.\n" 10125e745593SSunil Kumar Kori "To disable set DPAA_PUSH_QUEUES_NUMBER to 0\n", 10135e745593SSunil Kumar Kori dpaa_push_mode_max_queue); 10145e745593SSunil Kumar Kori 10155e745593SSunil Kumar Kori dpaa_poll_queue_default_config(&opts); 10165e745593SSunil Kumar Kori 10175e745593SSunil Kumar Kori switch (queue_conf->ev.sched_type) { 10185e745593SSunil Kumar Kori case RTE_SCHED_TYPE_ATOMIC: 10195e745593SSunil Kumar Kori opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE; 10205e745593SSunil Kumar Kori /* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary 10215e745593SSunil Kumar Kori * configuration with HOLD_ACTIVE setting 10225e745593SSunil Kumar Kori */ 10235e745593SSunil Kumar Kori opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK); 10245e745593SSunil Kumar Kori rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_atomic; 10255e745593SSunil Kumar Kori break; 10265e745593SSunil Kumar Kori case RTE_SCHED_TYPE_ORDERED: 10275e745593SSunil Kumar Kori DPAA_PMD_ERR("Ordered queue schedule type is not supported\n"); 10285e745593SSunil Kumar Kori return -1; 10295e745593SSunil Kumar Kori default: 10305e745593SSunil Kumar Kori opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK; 10315e745593SSunil Kumar Kori rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_parallel; 10325e745593SSunil Kumar Kori break; 10335e745593SSunil Kumar Kori } 10345e745593SSunil Kumar Kori 10355e745593SSunil Kumar Kori opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ; 10365e745593SSunil Kumar Kori opts.fqd.dest.channel = ch_id; 10375e745593SSunil Kumar Kori opts.fqd.dest.wq = queue_conf->ev.priority; 10385e745593SSunil Kumar Kori 10395e745593SSunil Kumar Kori if (dpaa_intf->cgr_rx) { 10405e745593SSunil Kumar Kori opts.we_mask |= QM_INITFQ_WE_CGID; 10415e745593SSunil Kumar Kori opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid; 10425e745593SSunil Kumar Kori opts.fqd.fq_ctrl |= QM_FQCTRL_CGE; 10435e745593SSunil Kumar Kori } 10445e745593SSunil Kumar Kori 10455e745593SSunil Kumar Kori flags = QMAN_INITFQ_FLAG_SCHED; 10465e745593SSunil Kumar Kori 10475e745593SSunil Kumar Kori ret = qman_init_fq(rxq, flags, &opts); 10485e745593SSunil Kumar Kori if (ret) { 10496fd3639aSHemant Agrawal DPAA_PMD_ERR("Ev-Channel/Q association failed. fqid 0x%x " 10506fd3639aSHemant Agrawal "ret:%d(%s)", rxq->fqid, ret, strerror(ret)); 10515e745593SSunil Kumar Kori return ret; 10525e745593SSunil Kumar Kori } 10535e745593SSunil Kumar Kori 10545e745593SSunil Kumar Kori /* copy configuration which needs to be filled during dequeue */ 10555e745593SSunil Kumar Kori memcpy(&rxq->ev, &queue_conf->ev, sizeof(struct rte_event)); 10565e745593SSunil Kumar Kori dev->data->rx_queues[eth_rx_queue_id] = rxq; 10575e745593SSunil Kumar Kori 10585e745593SSunil Kumar Kori return ret; 10595e745593SSunil Kumar Kori } 10605e745593SSunil Kumar Kori 10611e06b6dcSHemant Agrawal int 106277b7b81eSNeil Horman dpaa_eth_eventq_detach(const struct rte_eth_dev *dev, 10635e745593SSunil Kumar Kori int eth_rx_queue_id) 10645e745593SSunil Kumar Kori { 10655e745593SSunil Kumar Kori struct qm_mcc_initfq opts; 10665e745593SSunil Kumar Kori int ret; 10675e745593SSunil Kumar Kori u32 flags = 0; 10685e745593SSunil Kumar Kori struct dpaa_if *dpaa_intf = dev->data->dev_private; 10695e745593SSunil Kumar Kori struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id]; 10705e745593SSunil Kumar Kori 10715e745593SSunil Kumar Kori dpaa_poll_queue_default_config(&opts); 10725e745593SSunil Kumar Kori 10735e745593SSunil Kumar Kori if (dpaa_intf->cgr_rx) { 10745e745593SSunil Kumar Kori opts.we_mask |= QM_INITFQ_WE_CGID; 10755e745593SSunil Kumar Kori opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid; 10765e745593SSunil Kumar Kori opts.fqd.fq_ctrl |= QM_FQCTRL_CGE; 10775e745593SSunil Kumar Kori } 10785e745593SSunil Kumar Kori 10795e745593SSunil Kumar Kori ret = qman_init_fq(rxq, flags, &opts); 10805e745593SSunil Kumar Kori if (ret) { 10815e745593SSunil Kumar Kori DPAA_PMD_ERR("init rx fqid %d failed with ret: %d", 10825e745593SSunil Kumar Kori rxq->fqid, ret); 10835e745593SSunil Kumar Kori } 10845e745593SSunil Kumar Kori 10855e745593SSunil Kumar Kori rxq->cb.dqrr_dpdk_cb = NULL; 10865e745593SSunil Kumar Kori dev->data->rx_queues[eth_rx_queue_id] = NULL; 10875e745593SSunil Kumar Kori 10885e745593SSunil Kumar Kori return 0; 10895e745593SSunil Kumar Kori } 10905e745593SSunil Kumar Kori 109137f9b54bSShreyansh Jain static 109237f9b54bSShreyansh Jain void dpaa_eth_rx_queue_release(void *rxq __rte_unused) 109337f9b54bSShreyansh Jain { 109437f9b54bSShreyansh Jain PMD_INIT_FUNC_TRACE(); 109537f9b54bSShreyansh Jain } 109637f9b54bSShreyansh Jain 109737f9b54bSShreyansh Jain static 109837f9b54bSShreyansh Jain int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, 109937f9b54bSShreyansh Jain uint16_t nb_desc __rte_unused, 110037f9b54bSShreyansh Jain unsigned int socket_id __rte_unused, 1101e335cce4SHemant Agrawal const struct rte_eth_txconf *tx_conf) 110237f9b54bSShreyansh Jain { 110337f9b54bSShreyansh Jain struct dpaa_if *dpaa_intf = dev->data->dev_private; 11042cf9264fSHemant Agrawal struct qman_fq *txq = &dpaa_intf->tx_queues[queue_idx]; 110537f9b54bSShreyansh Jain 110637f9b54bSShreyansh Jain PMD_INIT_FUNC_TRACE(); 110737f9b54bSShreyansh Jain 1108e335cce4SHemant Agrawal /* Tx deferred start is not supported */ 1109e335cce4SHemant Agrawal if (tx_conf->tx_deferred_start) { 1110e335cce4SHemant Agrawal DPAA_PMD_ERR("%p:Tx deferred start not supported", (void *)dev); 1111e335cce4SHemant Agrawal return -EINVAL; 1112e335cce4SHemant Agrawal } 11132cf9264fSHemant Agrawal txq->nb_desc = UINT16_MAX; 11142cf9264fSHemant Agrawal txq->offloads = tx_conf->offloads; 11152cf9264fSHemant Agrawal 11166fd3639aSHemant Agrawal if (queue_idx >= dev->data->nb_tx_queues) { 11176fd3639aSHemant Agrawal rte_errno = EOVERFLOW; 11186fd3639aSHemant Agrawal DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)", 11196fd3639aSHemant Agrawal (void *)dev, queue_idx, dev->data->nb_tx_queues); 11206fd3639aSHemant Agrawal return -rte_errno; 11216fd3639aSHemant Agrawal } 11226fd3639aSHemant Agrawal 11236fd3639aSHemant Agrawal DPAA_PMD_INFO("Tx queue setup for queue index: %d fq_id (0x%x)", 11242cf9264fSHemant Agrawal queue_idx, txq->fqid); 11252cf9264fSHemant Agrawal dev->data->tx_queues[queue_idx] = txq; 11269124e65dSGagandeep Singh 112737f9b54bSShreyansh Jain return 0; 112837f9b54bSShreyansh Jain } 112937f9b54bSShreyansh Jain 113037f9b54bSShreyansh Jain static void dpaa_eth_tx_queue_release(void *txq __rte_unused) 1131ff9e112dSShreyansh Jain { 1132ff9e112dSShreyansh Jain PMD_INIT_FUNC_TRACE(); 1133ff9e112dSShreyansh Jain } 1134ff9e112dSShreyansh Jain 1135b005d729SHemant Agrawal static uint32_t 1136b005d729SHemant Agrawal dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) 1137b005d729SHemant Agrawal { 1138b005d729SHemant Agrawal struct dpaa_if *dpaa_intf = dev->data->dev_private; 1139b005d729SHemant Agrawal struct qman_fq *rxq = &dpaa_intf->rx_queues[rx_queue_id]; 1140b005d729SHemant Agrawal u32 frm_cnt = 0; 1141b005d729SHemant Agrawal 1142b005d729SHemant Agrawal PMD_INIT_FUNC_TRACE(); 1143b005d729SHemant Agrawal 1144b005d729SHemant Agrawal if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) { 1145b7c7ff6eSStephen Hemminger DPAA_PMD_DEBUG("RX frame count for q(%d) is %u", 1146b005d729SHemant Agrawal rx_queue_id, frm_cnt); 1147b005d729SHemant Agrawal } 1148b005d729SHemant Agrawal return frm_cnt; 1149b005d729SHemant Agrawal } 1150b005d729SHemant Agrawal 1151e124a69fSShreyansh Jain static int dpaa_link_down(struct rte_eth_dev *dev) 1152e124a69fSShreyansh Jain { 1153f231d48dSRohit Raj struct fman_if *fif = dev->process_private; 1154f231d48dSRohit Raj struct __fman_if *__fif; 1155f231d48dSRohit Raj 1156e124a69fSShreyansh Jain PMD_INIT_FUNC_TRACE(); 1157e124a69fSShreyansh Jain 1158f231d48dSRohit Raj __fif = container_of(fif, struct __fman_if, __if); 1159f231d48dSRohit Raj 1160f231d48dSRohit Raj if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) 1161f231d48dSRohit Raj dpaa_update_link_status(__fif->node_name, ETH_LINK_DOWN); 1162f231d48dSRohit Raj else 1163e124a69fSShreyansh Jain dpaa_eth_dev_stop(dev); 1164e124a69fSShreyansh Jain return 0; 1165e124a69fSShreyansh Jain } 1166e124a69fSShreyansh Jain 1167e124a69fSShreyansh Jain static int dpaa_link_up(struct rte_eth_dev *dev) 1168e124a69fSShreyansh Jain { 1169f231d48dSRohit Raj struct fman_if *fif = dev->process_private; 1170f231d48dSRohit Raj struct __fman_if *__fif; 1171f231d48dSRohit Raj 1172e124a69fSShreyansh Jain PMD_INIT_FUNC_TRACE(); 1173e124a69fSShreyansh Jain 1174f231d48dSRohit Raj __fif = container_of(fif, struct __fman_if, __if); 1175f231d48dSRohit Raj 1176f231d48dSRohit Raj if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) 1177f231d48dSRohit Raj dpaa_update_link_status(__fif->node_name, ETH_LINK_UP); 1178f231d48dSRohit Raj else 1179e124a69fSShreyansh Jain dpaa_eth_dev_start(dev); 1180e124a69fSShreyansh Jain return 0; 1181e124a69fSShreyansh Jain } 1182e124a69fSShreyansh Jain 1183fe6c6032SShreyansh Jain static int 118412a4678aSShreyansh Jain dpaa_flow_ctrl_set(struct rte_eth_dev *dev, 118512a4678aSShreyansh Jain struct rte_eth_fc_conf *fc_conf) 118612a4678aSShreyansh Jain { 118712a4678aSShreyansh Jain struct dpaa_if *dpaa_intf = dev->data->dev_private; 118812a4678aSShreyansh Jain struct rte_eth_fc_conf *net_fc; 118912a4678aSShreyansh Jain 119012a4678aSShreyansh Jain PMD_INIT_FUNC_TRACE(); 119112a4678aSShreyansh Jain 119212a4678aSShreyansh Jain if (!(dpaa_intf->fc_conf)) { 119312a4678aSShreyansh Jain dpaa_intf->fc_conf = rte_zmalloc(NULL, 119412a4678aSShreyansh Jain sizeof(struct rte_eth_fc_conf), MAX_CACHELINE); 119512a4678aSShreyansh Jain if (!dpaa_intf->fc_conf) { 119612a4678aSShreyansh Jain DPAA_PMD_ERR("unable to save flow control info"); 119712a4678aSShreyansh Jain return -ENOMEM; 119812a4678aSShreyansh Jain } 119912a4678aSShreyansh Jain } 120012a4678aSShreyansh Jain net_fc = dpaa_intf->fc_conf; 120112a4678aSShreyansh Jain 120212a4678aSShreyansh Jain if (fc_conf->high_water < fc_conf->low_water) { 120312a4678aSShreyansh Jain DPAA_PMD_ERR("Incorrect Flow Control Configuration"); 120412a4678aSShreyansh Jain return -EINVAL; 120512a4678aSShreyansh Jain } 120612a4678aSShreyansh Jain 120712a4678aSShreyansh Jain if (fc_conf->mode == RTE_FC_NONE) { 120812a4678aSShreyansh Jain return 0; 120912a4678aSShreyansh Jain } else if (fc_conf->mode == RTE_FC_TX_PAUSE || 121012a4678aSShreyansh Jain fc_conf->mode == RTE_FC_FULL) { 12116b10d1f7SNipun Gupta fman_if_set_fc_threshold(dev->process_private, 12126b10d1f7SNipun Gupta fc_conf->high_water, 121312a4678aSShreyansh Jain fc_conf->low_water, 121412a4678aSShreyansh Jain dpaa_intf->bp_info->bpid); 121512a4678aSShreyansh Jain if (fc_conf->pause_time) 12166b10d1f7SNipun Gupta fman_if_set_fc_quanta(dev->process_private, 121712a4678aSShreyansh Jain fc_conf->pause_time); 121812a4678aSShreyansh Jain } 121912a4678aSShreyansh Jain 122012a4678aSShreyansh Jain /* Save the information in dpaa device */ 122112a4678aSShreyansh Jain net_fc->pause_time = fc_conf->pause_time; 122212a4678aSShreyansh Jain net_fc->high_water = fc_conf->high_water; 122312a4678aSShreyansh Jain net_fc->low_water = fc_conf->low_water; 122412a4678aSShreyansh Jain net_fc->send_xon = fc_conf->send_xon; 122512a4678aSShreyansh Jain net_fc->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd; 122612a4678aSShreyansh Jain net_fc->mode = fc_conf->mode; 122712a4678aSShreyansh Jain net_fc->autoneg = fc_conf->autoneg; 122812a4678aSShreyansh Jain 122912a4678aSShreyansh Jain return 0; 123012a4678aSShreyansh Jain } 123112a4678aSShreyansh Jain 123212a4678aSShreyansh Jain static int 123312a4678aSShreyansh Jain dpaa_flow_ctrl_get(struct rte_eth_dev *dev, 123412a4678aSShreyansh Jain struct rte_eth_fc_conf *fc_conf) 123512a4678aSShreyansh Jain { 123612a4678aSShreyansh Jain struct dpaa_if *dpaa_intf = dev->data->dev_private; 123712a4678aSShreyansh Jain struct rte_eth_fc_conf *net_fc = dpaa_intf->fc_conf; 123812a4678aSShreyansh Jain int ret; 123912a4678aSShreyansh Jain 124012a4678aSShreyansh Jain PMD_INIT_FUNC_TRACE(); 124112a4678aSShreyansh Jain 124212a4678aSShreyansh Jain if (net_fc) { 124312a4678aSShreyansh Jain fc_conf->pause_time = net_fc->pause_time; 124412a4678aSShreyansh Jain fc_conf->high_water = net_fc->high_water; 124512a4678aSShreyansh Jain fc_conf->low_water = net_fc->low_water; 124612a4678aSShreyansh Jain fc_conf->send_xon = net_fc->send_xon; 124712a4678aSShreyansh Jain fc_conf->mac_ctrl_frame_fwd = net_fc->mac_ctrl_frame_fwd; 124812a4678aSShreyansh Jain fc_conf->mode = net_fc->mode; 124912a4678aSShreyansh Jain fc_conf->autoneg = net_fc->autoneg; 125012a4678aSShreyansh Jain return 0; 125112a4678aSShreyansh Jain } 12526b10d1f7SNipun Gupta ret = fman_if_get_fc_threshold(dev->process_private); 125312a4678aSShreyansh Jain if (ret) { 125412a4678aSShreyansh Jain fc_conf->mode = RTE_FC_TX_PAUSE; 12556b10d1f7SNipun Gupta fc_conf->pause_time = 12566b10d1f7SNipun Gupta fman_if_get_fc_quanta(dev->process_private); 125712a4678aSShreyansh Jain } else { 125812a4678aSShreyansh Jain fc_conf->mode = RTE_FC_NONE; 125912a4678aSShreyansh Jain } 126012a4678aSShreyansh Jain 126112a4678aSShreyansh Jain return 0; 126212a4678aSShreyansh Jain } 126312a4678aSShreyansh Jain 126412a4678aSShreyansh Jain static int 1265fe6c6032SShreyansh Jain dpaa_dev_add_mac_addr(struct rte_eth_dev *dev, 12666d13ea8eSOlivier Matz struct rte_ether_addr *addr, 1267fe6c6032SShreyansh Jain uint32_t index, 1268fe6c6032SShreyansh Jain __rte_unused uint32_t pool) 1269fe6c6032SShreyansh Jain { 1270fe6c6032SShreyansh Jain int ret; 1271fe6c6032SShreyansh Jain 1272fe6c6032SShreyansh Jain PMD_INIT_FUNC_TRACE(); 1273fe6c6032SShreyansh Jain 12746b10d1f7SNipun Gupta ret = fman_if_add_mac_addr(dev->process_private, 12756b10d1f7SNipun Gupta addr->addr_bytes, index); 1276fe6c6032SShreyansh Jain 1277fe6c6032SShreyansh Jain if (ret) 1278b7c7ff6eSStephen Hemminger DPAA_PMD_ERR("Adding the MAC ADDR failed: err = %d", ret); 1279fe6c6032SShreyansh Jain return 0; 1280fe6c6032SShreyansh Jain } 1281fe6c6032SShreyansh Jain 1282fe6c6032SShreyansh Jain static void 1283fe6c6032SShreyansh Jain dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev, 1284fe6c6032SShreyansh Jain uint32_t index) 1285fe6c6032SShreyansh Jain { 1286fe6c6032SShreyansh Jain PMD_INIT_FUNC_TRACE(); 1287fe6c6032SShreyansh Jain 12886b10d1f7SNipun Gupta fman_if_clear_mac_addr(dev->process_private, index); 1289fe6c6032SShreyansh Jain } 1290fe6c6032SShreyansh Jain 1291caccf8b3SOlivier Matz static int 1292fe6c6032SShreyansh Jain dpaa_dev_set_mac_addr(struct rte_eth_dev *dev, 12936d13ea8eSOlivier Matz struct rte_ether_addr *addr) 1294fe6c6032SShreyansh Jain { 1295fe6c6032SShreyansh Jain int ret; 1296fe6c6032SShreyansh Jain 1297fe6c6032SShreyansh Jain PMD_INIT_FUNC_TRACE(); 1298fe6c6032SShreyansh Jain 12996b10d1f7SNipun Gupta ret = fman_if_add_mac_addr(dev->process_private, addr->addr_bytes, 0); 1300fe6c6032SShreyansh Jain if (ret) 1301b7c7ff6eSStephen Hemminger DPAA_PMD_ERR("Setting the MAC ADDR failed %d", ret); 1302caccf8b3SOlivier Matz 1303caccf8b3SOlivier Matz return ret; 1304fe6c6032SShreyansh Jain } 1305fe6c6032SShreyansh Jain 1306b1b5d6c9SNipun Gupta static int dpaa_dev_queue_intr_enable(struct rte_eth_dev *dev, 1307b1b5d6c9SNipun Gupta uint16_t queue_id) 1308b1b5d6c9SNipun Gupta { 1309b1b5d6c9SNipun Gupta struct dpaa_if *dpaa_intf = dev->data->dev_private; 1310b1b5d6c9SNipun Gupta struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id]; 1311b1b5d6c9SNipun Gupta 1312b1b5d6c9SNipun Gupta if (!rxq->is_static) 1313b1b5d6c9SNipun Gupta return -EINVAL; 1314b1b5d6c9SNipun Gupta 1315b1b5d6c9SNipun Gupta return qman_fq_portal_irqsource_add(rxq->qp, QM_PIRQ_DQRI); 1316b1b5d6c9SNipun Gupta } 1317b1b5d6c9SNipun Gupta 1318b1b5d6c9SNipun Gupta static int dpaa_dev_queue_intr_disable(struct rte_eth_dev *dev, 1319b1b5d6c9SNipun Gupta uint16_t queue_id) 1320b1b5d6c9SNipun Gupta { 1321b1b5d6c9SNipun Gupta struct dpaa_if *dpaa_intf = dev->data->dev_private; 1322b1b5d6c9SNipun Gupta struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id]; 1323b1b5d6c9SNipun Gupta uint32_t temp; 1324b1b5d6c9SNipun Gupta ssize_t temp1; 1325b1b5d6c9SNipun Gupta 1326b1b5d6c9SNipun Gupta if (!rxq->is_static) 1327b1b5d6c9SNipun Gupta return -EINVAL; 1328b1b5d6c9SNipun Gupta 1329b1b5d6c9SNipun Gupta qman_fq_portal_irqsource_remove(rxq->qp, ~0); 1330b1b5d6c9SNipun Gupta 1331b1b5d6c9SNipun Gupta temp1 = read(rxq->q_fd, &temp, sizeof(temp)); 1332b1b5d6c9SNipun Gupta if (temp1 != sizeof(temp)) 1333df80d4f8SHemant Agrawal DPAA_PMD_ERR("irq read error"); 1334b1b5d6c9SNipun Gupta 1335b1b5d6c9SNipun Gupta qman_fq_portal_thread_irq(rxq->qp); 1336b1b5d6c9SNipun Gupta 1337b1b5d6c9SNipun Gupta return 0; 1338b1b5d6c9SNipun Gupta } 1339b1b5d6c9SNipun Gupta 13402cf9264fSHemant Agrawal static void 13412cf9264fSHemant Agrawal dpaa_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 13422cf9264fSHemant Agrawal struct rte_eth_rxq_info *qinfo) 13432cf9264fSHemant Agrawal { 13442cf9264fSHemant Agrawal struct dpaa_if *dpaa_intf = dev->data->dev_private; 13452cf9264fSHemant Agrawal struct qman_fq *rxq; 13462cf9264fSHemant Agrawal 13472cf9264fSHemant Agrawal rxq = dev->data->rx_queues[queue_id]; 13482cf9264fSHemant Agrawal 13492cf9264fSHemant Agrawal qinfo->mp = dpaa_intf->bp_info->mp; 13502cf9264fSHemant Agrawal qinfo->scattered_rx = dev->data->scattered_rx; 13512cf9264fSHemant Agrawal qinfo->nb_desc = rxq->nb_desc; 13522cf9264fSHemant Agrawal qinfo->conf.rx_free_thresh = 1; 13532cf9264fSHemant Agrawal qinfo->conf.rx_drop_en = 1; 13542cf9264fSHemant Agrawal qinfo->conf.rx_deferred_start = 0; 13552cf9264fSHemant Agrawal qinfo->conf.offloads = rxq->offloads; 13562cf9264fSHemant Agrawal } 13572cf9264fSHemant Agrawal 13582cf9264fSHemant Agrawal static void 13592cf9264fSHemant Agrawal dpaa_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 13602cf9264fSHemant Agrawal struct rte_eth_txq_info *qinfo) 13612cf9264fSHemant Agrawal { 13622cf9264fSHemant Agrawal struct qman_fq *txq; 13632cf9264fSHemant Agrawal 13642cf9264fSHemant Agrawal txq = dev->data->tx_queues[queue_id]; 13652cf9264fSHemant Agrawal 13662cf9264fSHemant Agrawal qinfo->nb_desc = txq->nb_desc; 13672cf9264fSHemant Agrawal qinfo->conf.tx_thresh.pthresh = 0; 13682cf9264fSHemant Agrawal qinfo->conf.tx_thresh.hthresh = 0; 13692cf9264fSHemant Agrawal qinfo->conf.tx_thresh.wthresh = 0; 13702cf9264fSHemant Agrawal 13712cf9264fSHemant Agrawal qinfo->conf.tx_free_thresh = 0; 13722cf9264fSHemant Agrawal qinfo->conf.tx_rs_thresh = 0; 13732cf9264fSHemant Agrawal qinfo->conf.offloads = txq->offloads; 13742cf9264fSHemant Agrawal qinfo->conf.tx_deferred_start = 0; 13752cf9264fSHemant Agrawal } 13762cf9264fSHemant Agrawal 1377ff9e112dSShreyansh Jain static struct eth_dev_ops dpaa_devops = { 1378ff9e112dSShreyansh Jain .dev_configure = dpaa_eth_dev_configure, 1379ff9e112dSShreyansh Jain .dev_start = dpaa_eth_dev_start, 1380ff9e112dSShreyansh Jain .dev_stop = dpaa_eth_dev_stop, 1381ff9e112dSShreyansh Jain .dev_close = dpaa_eth_dev_close, 1382799db456SShreyansh Jain .dev_infos_get = dpaa_eth_dev_info, 1383a7bdc3bdSShreyansh Jain .dev_supported_ptypes_get = dpaa_supported_ptypes_get, 138437f9b54bSShreyansh Jain 138537f9b54bSShreyansh Jain .rx_queue_setup = dpaa_eth_rx_queue_setup, 138637f9b54bSShreyansh Jain .tx_queue_setup = dpaa_eth_tx_queue_setup, 138737f9b54bSShreyansh Jain .rx_queue_release = dpaa_eth_rx_queue_release, 138837f9b54bSShreyansh Jain .tx_queue_release = dpaa_eth_tx_queue_release, 1389b005d729SHemant Agrawal .rx_queue_count = dpaa_dev_rx_queue_count, 13902e6f5657SApeksha Gupta .rx_burst_mode_get = dpaa_dev_rx_burst_mode_get, 13912e6f5657SApeksha Gupta .tx_burst_mode_get = dpaa_dev_tx_burst_mode_get, 13922cf9264fSHemant Agrawal .rxq_info_get = dpaa_rxq_info_get, 13932cf9264fSHemant Agrawal .txq_info_get = dpaa_txq_info_get, 13942cf9264fSHemant Agrawal 139512a4678aSShreyansh Jain .flow_ctrl_get = dpaa_flow_ctrl_get, 139612a4678aSShreyansh Jain .flow_ctrl_set = dpaa_flow_ctrl_set, 139712a4678aSShreyansh Jain 1398e124a69fSShreyansh Jain .link_update = dpaa_eth_link_update, 1399e1ad3a05SShreyansh Jain .stats_get = dpaa_eth_stats_get, 1400b21ed3e2SHemant Agrawal .xstats_get = dpaa_dev_xstats_get, 1401b21ed3e2SHemant Agrawal .xstats_get_by_id = dpaa_xstats_get_by_id, 1402b21ed3e2SHemant Agrawal .xstats_get_names_by_id = dpaa_xstats_get_names_by_id, 1403b21ed3e2SHemant Agrawal .xstats_get_names = dpaa_xstats_get_names, 1404b21ed3e2SHemant Agrawal .xstats_reset = dpaa_eth_stats_reset, 1405e1ad3a05SShreyansh Jain .stats_reset = dpaa_eth_stats_reset, 140695ef603dSShreyansh Jain .promiscuous_enable = dpaa_eth_promiscuous_enable, 140795ef603dSShreyansh Jain .promiscuous_disable = dpaa_eth_promiscuous_disable, 140844dd70a3SShreyansh Jain .allmulticast_enable = dpaa_eth_multicast_enable, 140944dd70a3SShreyansh Jain .allmulticast_disable = dpaa_eth_multicast_disable, 14100cbec027SShreyansh Jain .mtu_set = dpaa_mtu_set, 1411e124a69fSShreyansh Jain .dev_set_link_down = dpaa_link_down, 1412e124a69fSShreyansh Jain .dev_set_link_up = dpaa_link_up, 1413fe6c6032SShreyansh Jain .mac_addr_add = dpaa_dev_add_mac_addr, 1414fe6c6032SShreyansh Jain .mac_addr_remove = dpaa_dev_remove_mac_addr, 1415fe6c6032SShreyansh Jain .mac_addr_set = dpaa_dev_set_mac_addr, 1416fe6c6032SShreyansh Jain 1417cf0fab1dSHemant Agrawal .fw_version_get = dpaa_fw_version_get, 1418b1b5d6c9SNipun Gupta 1419b1b5d6c9SNipun Gupta .rx_queue_intr_enable = dpaa_dev_queue_intr_enable, 1420b1b5d6c9SNipun Gupta .rx_queue_intr_disable = dpaa_dev_queue_intr_disable, 1421ff9e112dSShreyansh Jain }; 1422ff9e112dSShreyansh Jain 14238c3495f5SHemant Agrawal static bool 14248c3495f5SHemant Agrawal is_device_supported(struct rte_eth_dev *dev, struct rte_dpaa_driver *drv) 14258c3495f5SHemant Agrawal { 14268c3495f5SHemant Agrawal if (strcmp(dev->device->driver->name, 14278c3495f5SHemant Agrawal drv->driver.name)) 14288c3495f5SHemant Agrawal return false; 14298c3495f5SHemant Agrawal 14308c3495f5SHemant Agrawal return true; 14318c3495f5SHemant Agrawal } 14328c3495f5SHemant Agrawal 14338c3495f5SHemant Agrawal static bool 14348c3495f5SHemant Agrawal is_dpaa_supported(struct rte_eth_dev *dev) 14358c3495f5SHemant Agrawal { 14368c3495f5SHemant Agrawal return is_device_supported(dev, &rte_dpaa_pmd); 14378c3495f5SHemant Agrawal } 14388c3495f5SHemant Agrawal 14391e06b6dcSHemant Agrawal int 14408c3495f5SHemant Agrawal rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on) 14418c3495f5SHemant Agrawal { 14428c3495f5SHemant Agrawal struct rte_eth_dev *dev; 14438c3495f5SHemant Agrawal 14448c3495f5SHemant Agrawal RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); 14458c3495f5SHemant Agrawal 14468c3495f5SHemant Agrawal dev = &rte_eth_devices[port]; 14478c3495f5SHemant Agrawal 14488c3495f5SHemant Agrawal if (!is_dpaa_supported(dev)) 14498c3495f5SHemant Agrawal return -ENOTSUP; 14508c3495f5SHemant Agrawal 14518c3495f5SHemant Agrawal if (on) 14526b10d1f7SNipun Gupta fman_if_loopback_enable(dev->process_private); 14538c3495f5SHemant Agrawal else 14546b10d1f7SNipun Gupta fman_if_loopback_disable(dev->process_private); 14558c3495f5SHemant Agrawal 14568c3495f5SHemant Agrawal return 0; 14578c3495f5SHemant Agrawal } 14588c3495f5SHemant Agrawal 14596b10d1f7SNipun Gupta static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf, 14606b10d1f7SNipun Gupta struct fman_if *fman_intf) 146112a4678aSShreyansh Jain { 146212a4678aSShreyansh Jain struct rte_eth_fc_conf *fc_conf; 146312a4678aSShreyansh Jain int ret; 146412a4678aSShreyansh Jain 146512a4678aSShreyansh Jain PMD_INIT_FUNC_TRACE(); 146612a4678aSShreyansh Jain 146712a4678aSShreyansh Jain if (!(dpaa_intf->fc_conf)) { 146812a4678aSShreyansh Jain dpaa_intf->fc_conf = rte_zmalloc(NULL, 146912a4678aSShreyansh Jain sizeof(struct rte_eth_fc_conf), MAX_CACHELINE); 147012a4678aSShreyansh Jain if (!dpaa_intf->fc_conf) { 147112a4678aSShreyansh Jain DPAA_PMD_ERR("unable to save flow control info"); 147212a4678aSShreyansh Jain return -ENOMEM; 147312a4678aSShreyansh Jain } 147412a4678aSShreyansh Jain } 147512a4678aSShreyansh Jain fc_conf = dpaa_intf->fc_conf; 14766b10d1f7SNipun Gupta ret = fman_if_get_fc_threshold(fman_intf); 147712a4678aSShreyansh Jain if (ret) { 147812a4678aSShreyansh Jain fc_conf->mode = RTE_FC_TX_PAUSE; 14796b10d1f7SNipun Gupta fc_conf->pause_time = fman_if_get_fc_quanta(fman_intf); 148012a4678aSShreyansh Jain } else { 148112a4678aSShreyansh Jain fc_conf->mode = RTE_FC_NONE; 148212a4678aSShreyansh Jain } 148312a4678aSShreyansh Jain 148412a4678aSShreyansh Jain return 0; 148512a4678aSShreyansh Jain } 148612a4678aSShreyansh Jain 148737f9b54bSShreyansh Jain /* Initialise an Rx FQ */ 148862f53995SHemant Agrawal static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx, 148937f9b54bSShreyansh Jain uint32_t fqid) 149037f9b54bSShreyansh Jain { 14918d804cf1SHemant Agrawal struct qm_mcc_initfq opts = {0}; 149237f9b54bSShreyansh Jain int ret; 1493f04e7139SHemant Agrawal u32 flags = QMAN_FQ_FLAG_NO_ENQUEUE; 149462f53995SHemant Agrawal struct qm_mcc_initcgr cgr_opts = { 149562f53995SHemant Agrawal .we_mask = QM_CGR_WE_CS_THRES | 149662f53995SHemant Agrawal QM_CGR_WE_CSTD_EN | 149762f53995SHemant Agrawal QM_CGR_WE_MODE, 149862f53995SHemant Agrawal .cgr = { 149962f53995SHemant Agrawal .cstd_en = QM_CGR_EN, 150062f53995SHemant Agrawal .mode = QMAN_CGR_MODE_FRAME 150162f53995SHemant Agrawal } 150262f53995SHemant Agrawal }; 150337f9b54bSShreyansh Jain 15044defbc8cSSachin Saxena if (fmc_q || default_q) { 150537f9b54bSShreyansh Jain ret = qman_reserve_fqid(fqid); 150637f9b54bSShreyansh Jain if (ret) { 15074defbc8cSSachin Saxena DPAA_PMD_ERR("reserve rx fqid 0x%x failed, ret: %d", 150837f9b54bSShreyansh Jain fqid, ret); 150937f9b54bSShreyansh Jain return -EINVAL; 151037f9b54bSShreyansh Jain } 1511f04e7139SHemant Agrawal } 15124defbc8cSSachin Saxena 15138d6fc8b6SHemant Agrawal DPAA_PMD_DEBUG("creating rx fq %p, fqid 0x%x", fq, fqid); 1514f04e7139SHemant Agrawal ret = qman_create_fq(fqid, flags, fq); 151537f9b54bSShreyansh Jain if (ret) { 15166fd3639aSHemant Agrawal DPAA_PMD_ERR("create rx fqid 0x%x failed with ret: %d", 151737f9b54bSShreyansh Jain fqid, ret); 151837f9b54bSShreyansh Jain return ret; 151937f9b54bSShreyansh Jain } 15200c504f69SHemant Agrawal fq->is_static = false; 15215e745593SSunil Kumar Kori 15225e745593SSunil Kumar Kori dpaa_poll_queue_default_config(&opts); 152337f9b54bSShreyansh Jain 152462f53995SHemant Agrawal if (cgr_rx) { 152562f53995SHemant Agrawal /* Enable tail drop with cgr on this queue */ 152662f53995SHemant Agrawal qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, td_threshold, 0); 152762f53995SHemant Agrawal cgr_rx->cb = NULL; 152862f53995SHemant Agrawal ret = qman_create_cgr(cgr_rx, QMAN_CGR_FLAG_USE_INIT, 152962f53995SHemant Agrawal &cgr_opts); 153062f53995SHemant Agrawal if (ret) { 153162f53995SHemant Agrawal DPAA_PMD_WARN( 15328d6fc8b6SHemant Agrawal "rx taildrop init fail on rx fqid 0x%x(ret=%d)", 1533f04e7139SHemant Agrawal fq->fqid, ret); 153462f53995SHemant Agrawal goto without_cgr; 153562f53995SHemant Agrawal } 153662f53995SHemant Agrawal opts.we_mask |= QM_INITFQ_WE_CGID; 153762f53995SHemant Agrawal opts.fqd.cgid = cgr_rx->cgrid; 153862f53995SHemant Agrawal opts.fqd.fq_ctrl |= QM_FQCTRL_CGE; 153962f53995SHemant Agrawal } 154062f53995SHemant Agrawal without_cgr: 1541f04e7139SHemant Agrawal ret = qman_init_fq(fq, 0, &opts); 154237f9b54bSShreyansh Jain if (ret) 15438d6fc8b6SHemant Agrawal DPAA_PMD_ERR("init rx fqid 0x%x failed with ret:%d", fqid, ret); 154437f9b54bSShreyansh Jain return ret; 154537f9b54bSShreyansh Jain } 154637f9b54bSShreyansh Jain 154737f9b54bSShreyansh Jain /* Initialise a Tx FQ */ 154837f9b54bSShreyansh Jain static int dpaa_tx_queue_init(struct qman_fq *fq, 15499124e65dSGagandeep Singh struct fman_if *fman_intf, 15509124e65dSGagandeep Singh struct qman_cgr *cgr_tx) 155137f9b54bSShreyansh Jain { 15528d804cf1SHemant Agrawal struct qm_mcc_initfq opts = {0}; 15539124e65dSGagandeep Singh struct qm_mcc_initcgr cgr_opts = { 15549124e65dSGagandeep Singh .we_mask = QM_CGR_WE_CS_THRES | 15559124e65dSGagandeep Singh QM_CGR_WE_CSTD_EN | 15569124e65dSGagandeep Singh QM_CGR_WE_MODE, 15579124e65dSGagandeep Singh .cgr = { 15589124e65dSGagandeep Singh .cstd_en = QM_CGR_EN, 15599124e65dSGagandeep Singh .mode = QMAN_CGR_MODE_FRAME 15609124e65dSGagandeep Singh } 15619124e65dSGagandeep Singh }; 156237f9b54bSShreyansh Jain int ret; 156337f9b54bSShreyansh Jain 156437f9b54bSShreyansh Jain ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID | 156537f9b54bSShreyansh Jain QMAN_FQ_FLAG_TO_DCPORTAL, fq); 156637f9b54bSShreyansh Jain if (ret) { 156737f9b54bSShreyansh Jain DPAA_PMD_ERR("create tx fq failed with ret: %d", ret); 156837f9b54bSShreyansh Jain return ret; 156937f9b54bSShreyansh Jain } 157037f9b54bSShreyansh Jain opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL | 157137f9b54bSShreyansh Jain QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA; 157237f9b54bSShreyansh Jain opts.fqd.dest.channel = fman_intf->tx_channel_id; 157337f9b54bSShreyansh Jain opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY; 157437f9b54bSShreyansh Jain opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE; 157537f9b54bSShreyansh Jain opts.fqd.context_b = 0; 157637f9b54bSShreyansh Jain /* no tx-confirmation */ 157737f9b54bSShreyansh Jain opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi; 157837f9b54bSShreyansh Jain opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo; 15798d6fc8b6SHemant Agrawal DPAA_PMD_DEBUG("init tx fq %p, fqid 0x%x", fq, fq->fqid); 15809124e65dSGagandeep Singh 15819124e65dSGagandeep Singh if (cgr_tx) { 15829124e65dSGagandeep Singh /* Enable tail drop with cgr on this queue */ 15839124e65dSGagandeep Singh qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, 15849124e65dSGagandeep Singh td_tx_threshold, 0); 15859124e65dSGagandeep Singh cgr_tx->cb = NULL; 15869124e65dSGagandeep Singh ret = qman_create_cgr(cgr_tx, QMAN_CGR_FLAG_USE_INIT, 15879124e65dSGagandeep Singh &cgr_opts); 15889124e65dSGagandeep Singh if (ret) { 15899124e65dSGagandeep Singh DPAA_PMD_WARN( 15909124e65dSGagandeep Singh "rx taildrop init fail on rx fqid 0x%x(ret=%d)", 15919124e65dSGagandeep Singh fq->fqid, ret); 15929124e65dSGagandeep Singh goto without_cgr; 15939124e65dSGagandeep Singh } 15949124e65dSGagandeep Singh opts.we_mask |= QM_INITFQ_WE_CGID; 15959124e65dSGagandeep Singh opts.fqd.cgid = cgr_tx->cgrid; 15969124e65dSGagandeep Singh opts.fqd.fq_ctrl |= QM_FQCTRL_CGE; 15979124e65dSGagandeep Singh DPAA_PMD_DEBUG("Tx FQ tail drop enabled, threshold = %d\n", 15989124e65dSGagandeep Singh td_tx_threshold); 15999124e65dSGagandeep Singh } 16009124e65dSGagandeep Singh without_cgr: 160137f9b54bSShreyansh Jain ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts); 160237f9b54bSShreyansh Jain if (ret) 16038d6fc8b6SHemant Agrawal DPAA_PMD_ERR("init tx fqid 0x%x failed %d", fq->fqid, ret); 160437f9b54bSShreyansh Jain return ret; 160537f9b54bSShreyansh Jain } 160637f9b54bSShreyansh Jain 160705ba55bcSShreyansh Jain #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER 160805ba55bcSShreyansh Jain /* Initialise a DEBUG FQ ([rt]x_error, rx_default). */ 160905ba55bcSShreyansh Jain static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid) 161005ba55bcSShreyansh Jain { 16118d804cf1SHemant Agrawal struct qm_mcc_initfq opts = {0}; 161205ba55bcSShreyansh Jain int ret; 161305ba55bcSShreyansh Jain 161405ba55bcSShreyansh Jain PMD_INIT_FUNC_TRACE(); 161505ba55bcSShreyansh Jain 161605ba55bcSShreyansh Jain ret = qman_reserve_fqid(fqid); 161705ba55bcSShreyansh Jain if (ret) { 161805ba55bcSShreyansh Jain DPAA_PMD_ERR("Reserve debug fqid %d failed with ret: %d", 161905ba55bcSShreyansh Jain fqid, ret); 162005ba55bcSShreyansh Jain return -EINVAL; 162105ba55bcSShreyansh Jain } 162205ba55bcSShreyansh Jain /* "map" this Rx FQ to one of the interfaces Tx FQID */ 162305ba55bcSShreyansh Jain DPAA_PMD_DEBUG("Creating debug fq %p, fqid %d", fq, fqid); 162405ba55bcSShreyansh Jain ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq); 162505ba55bcSShreyansh Jain if (ret) { 162605ba55bcSShreyansh Jain DPAA_PMD_ERR("create debug fqid %d failed with ret: %d", 162705ba55bcSShreyansh Jain fqid, ret); 162805ba55bcSShreyansh Jain return ret; 162905ba55bcSShreyansh Jain } 163005ba55bcSShreyansh Jain opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL; 163105ba55bcSShreyansh Jain opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY; 163205ba55bcSShreyansh Jain ret = qman_init_fq(fq, 0, &opts); 163305ba55bcSShreyansh Jain if (ret) 163405ba55bcSShreyansh Jain DPAA_PMD_ERR("init debug fqid %d failed with ret: %d", 163505ba55bcSShreyansh Jain fqid, ret); 163605ba55bcSShreyansh Jain return ret; 163705ba55bcSShreyansh Jain } 163805ba55bcSShreyansh Jain #endif 163905ba55bcSShreyansh Jain 1640ff9e112dSShreyansh Jain /* Initialise a network interface */ 1641ff9e112dSShreyansh Jain static int 16426b10d1f7SNipun Gupta dpaa_dev_init_secondary(struct rte_eth_dev *eth_dev) 16436b10d1f7SNipun Gupta { 16446b10d1f7SNipun Gupta struct rte_dpaa_device *dpaa_device; 16456b10d1f7SNipun Gupta struct fm_eth_port_cfg *cfg; 16466b10d1f7SNipun Gupta struct dpaa_if *dpaa_intf; 16476b10d1f7SNipun Gupta struct fman_if *fman_intf; 16486b10d1f7SNipun Gupta int dev_id; 16496b10d1f7SNipun Gupta 16506b10d1f7SNipun Gupta PMD_INIT_FUNC_TRACE(); 16516b10d1f7SNipun Gupta 16526b10d1f7SNipun Gupta dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device); 16536b10d1f7SNipun Gupta dev_id = dpaa_device->id.dev_id; 16546b10d1f7SNipun Gupta cfg = dpaa_get_eth_port_cfg(dev_id); 16556b10d1f7SNipun Gupta fman_intf = cfg->fman_if; 16566b10d1f7SNipun Gupta eth_dev->process_private = fman_intf; 16576b10d1f7SNipun Gupta 16586b10d1f7SNipun Gupta /* Plugging of UCODE burst API not supported in Secondary */ 16596b10d1f7SNipun Gupta dpaa_intf = eth_dev->data->dev_private; 16606b10d1f7SNipun Gupta eth_dev->rx_pkt_burst = dpaa_eth_queue_rx; 16616b10d1f7SNipun Gupta if (dpaa_intf->cgr_tx) 16626b10d1f7SNipun Gupta eth_dev->tx_pkt_burst = dpaa_eth_queue_tx_slow; 16636b10d1f7SNipun Gupta else 16646b10d1f7SNipun Gupta eth_dev->tx_pkt_burst = dpaa_eth_queue_tx; 16656b10d1f7SNipun Gupta #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP 16666b10d1f7SNipun Gupta qman_set_fq_lookup_table( 16676b10d1f7SNipun Gupta dpaa_intf->rx_queues->qman_fq_lookup_table); 16686b10d1f7SNipun Gupta #endif 16696b10d1f7SNipun Gupta 16706b10d1f7SNipun Gupta return 0; 16716b10d1f7SNipun Gupta } 16726b10d1f7SNipun Gupta 16736b10d1f7SNipun Gupta /* Initialise a network interface */ 16746b10d1f7SNipun Gupta static int 1675ff9e112dSShreyansh Jain dpaa_dev_init(struct rte_eth_dev *eth_dev) 1676ff9e112dSShreyansh Jain { 1677af2828cfSAkhil Goyal int num_rx_fqs, fqid; 167837f9b54bSShreyansh Jain int loop, ret = 0; 1679ff9e112dSShreyansh Jain int dev_id; 1680ff9e112dSShreyansh Jain struct rte_dpaa_device *dpaa_device; 1681ff9e112dSShreyansh Jain struct dpaa_if *dpaa_intf; 168237f9b54bSShreyansh Jain struct fm_eth_port_cfg *cfg; 168337f9b54bSShreyansh Jain struct fman_if *fman_intf; 168437f9b54bSShreyansh Jain struct fman_if_bpool *bp, *tmp_bp; 168562f53995SHemant Agrawal uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES]; 16869124e65dSGagandeep Singh uint32_t cgrid_tx[MAX_DPAA_CORES]; 16874defbc8cSSachin Saxena uint32_t dev_rx_fqids[DPAA_MAX_NUM_PCD_QUEUES]; 1688e4abd4ffSJun Yang int8_t dev_vspids[DPAA_MAX_NUM_PCD_QUEUES]; 1689e4abd4ffSJun Yang int8_t vsp_id = -1; 1690ff9e112dSShreyansh Jain 1691ff9e112dSShreyansh Jain PMD_INIT_FUNC_TRACE(); 1692ff9e112dSShreyansh Jain 1693ff9e112dSShreyansh Jain dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device); 1694ff9e112dSShreyansh Jain dev_id = dpaa_device->id.dev_id; 1695ff9e112dSShreyansh Jain dpaa_intf = eth_dev->data->dev_private; 1696051ae3afSHemant Agrawal cfg = dpaa_get_eth_port_cfg(dev_id); 169737f9b54bSShreyansh Jain fman_intf = cfg->fman_if; 1698ff9e112dSShreyansh Jain 1699ff9e112dSShreyansh Jain dpaa_intf->name = dpaa_device->name; 1700ff9e112dSShreyansh Jain 170137f9b54bSShreyansh Jain /* save fman_if & cfg in the interface struture */ 17026b10d1f7SNipun Gupta eth_dev->process_private = fman_intf; 1703ff9e112dSShreyansh Jain dpaa_intf->ifid = dev_id; 170437f9b54bSShreyansh Jain dpaa_intf->cfg = cfg; 1705ff9e112dSShreyansh Jain 17064defbc8cSSachin Saxena memset((char *)dev_rx_fqids, 0, 17074defbc8cSSachin Saxena sizeof(uint32_t) * DPAA_MAX_NUM_PCD_QUEUES); 17084defbc8cSSachin Saxena 1709e4abd4ffSJun Yang memset(dev_vspids, -1, DPAA_MAX_NUM_PCD_QUEUES); 1710e4abd4ffSJun Yang 171137f9b54bSShreyansh Jain /* Initialize Rx FQ's */ 17128d6fc8b6SHemant Agrawal if (default_q) { 17138d6fc8b6SHemant Agrawal num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES; 17144defbc8cSSachin Saxena } else if (fmc_q) { 1715*f5fe3eedSJun Yang num_rx_fqs = dpaa_port_fmc_init(fman_intf, dev_rx_fqids, 1716*f5fe3eedSJun Yang dev_vspids, 1717*f5fe3eedSJun Yang DPAA_MAX_NUM_PCD_QUEUES); 1718*f5fe3eedSJun Yang if (num_rx_fqs < 0) { 1719*f5fe3eedSJun Yang DPAA_PMD_ERR("%s FMC initializes failed!", 1720*f5fe3eedSJun Yang dpaa_intf->name); 1721*f5fe3eedSJun Yang goto free_rx; 1722*f5fe3eedSJun Yang } 1723*f5fe3eedSJun Yang if (!num_rx_fqs) { 1724*f5fe3eedSJun Yang DPAA_PMD_WARN("%s is not configured by FMC.", 1725*f5fe3eedSJun Yang dpaa_intf->name); 1726*f5fe3eedSJun Yang } 17278d6fc8b6SHemant Agrawal } else { 17284defbc8cSSachin Saxena /* FMCLESS mode, load balance to multiple cores.*/ 17294defbc8cSSachin Saxena num_rx_fqs = rte_lcore_count(); 17308d6fc8b6SHemant Agrawal } 17318d6fc8b6SHemant Agrawal 1732e4f931ccSHemant Agrawal /* Each device can not have more than DPAA_MAX_NUM_PCD_QUEUES RX 173337f9b54bSShreyansh Jain * queues. 173437f9b54bSShreyansh Jain */ 17354defbc8cSSachin Saxena if (num_rx_fqs < 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) { 173637f9b54bSShreyansh Jain DPAA_PMD_ERR("Invalid number of RX queues\n"); 173737f9b54bSShreyansh Jain return -EINVAL; 173837f9b54bSShreyansh Jain } 173937f9b54bSShreyansh Jain 17404defbc8cSSachin Saxena if (num_rx_fqs > 0) { 174137f9b54bSShreyansh Jain dpaa_intf->rx_queues = rte_zmalloc(NULL, 174237f9b54bSShreyansh Jain sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE); 17430ff76833SYong Wang if (!dpaa_intf->rx_queues) { 17440ff76833SYong Wang DPAA_PMD_ERR("Failed to alloc mem for RX queues\n"); 17450ff76833SYong Wang return -ENOMEM; 17460ff76833SYong Wang } 17474defbc8cSSachin Saxena } else { 17484defbc8cSSachin Saxena dpaa_intf->rx_queues = NULL; 17494defbc8cSSachin Saxena } 175062f53995SHemant Agrawal 17519124e65dSGagandeep Singh memset(cgrid, 0, sizeof(cgrid)); 17529124e65dSGagandeep Singh memset(cgrid_tx, 0, sizeof(cgrid_tx)); 17539124e65dSGagandeep Singh 17549124e65dSGagandeep Singh /* if DPAA_TX_TAILDROP_THRESHOLD is set, use that value; if 0, it means 17559124e65dSGagandeep Singh * Tx tail drop is disabled. 17569124e65dSGagandeep Singh */ 17579124e65dSGagandeep Singh if (getenv("DPAA_TX_TAILDROP_THRESHOLD")) { 17589124e65dSGagandeep Singh td_tx_threshold = atoi(getenv("DPAA_TX_TAILDROP_THRESHOLD")); 17599124e65dSGagandeep Singh DPAA_PMD_DEBUG("Tail drop threshold env configured: %u", 17609124e65dSGagandeep Singh td_tx_threshold); 17619124e65dSGagandeep Singh /* if a very large value is being configured */ 17629124e65dSGagandeep Singh if (td_tx_threshold > UINT16_MAX) 17639124e65dSGagandeep Singh td_tx_threshold = CGR_RX_PERFQ_THRESH; 17649124e65dSGagandeep Singh } 17659124e65dSGagandeep Singh 176662f53995SHemant Agrawal /* If congestion control is enabled globally*/ 17674defbc8cSSachin Saxena if (num_rx_fqs > 0 && td_threshold) { 176862f53995SHemant Agrawal dpaa_intf->cgr_rx = rte_zmalloc(NULL, 176962f53995SHemant Agrawal sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE); 17700ff76833SYong Wang if (!dpaa_intf->cgr_rx) { 17710ff76833SYong Wang DPAA_PMD_ERR("Failed to alloc mem for cgr_rx\n"); 17720ff76833SYong Wang ret = -ENOMEM; 17730ff76833SYong Wang goto free_rx; 17740ff76833SYong Wang } 177562f53995SHemant Agrawal 177662f53995SHemant Agrawal ret = qman_alloc_cgrid_range(&cgrid[0], num_rx_fqs, 1, 0); 177762f53995SHemant Agrawal if (ret != num_rx_fqs) { 177862f53995SHemant Agrawal DPAA_PMD_WARN("insufficient CGRIDs available"); 17790ff76833SYong Wang ret = -EINVAL; 17800ff76833SYong Wang goto free_rx; 178162f53995SHemant Agrawal } 178262f53995SHemant Agrawal } else { 178362f53995SHemant Agrawal dpaa_intf->cgr_rx = NULL; 178462f53995SHemant Agrawal } 178562f53995SHemant Agrawal 17864defbc8cSSachin Saxena if (!fmc_q && !default_q) { 17874defbc8cSSachin Saxena ret = qman_alloc_fqid_range(dev_rx_fqids, num_rx_fqs, 17884defbc8cSSachin Saxena num_rx_fqs, 0); 17894defbc8cSSachin Saxena if (ret < 0) { 17904defbc8cSSachin Saxena DPAA_PMD_ERR("Failed to alloc rx fqid's\n"); 17914defbc8cSSachin Saxena goto free_rx; 17924defbc8cSSachin Saxena } 17934defbc8cSSachin Saxena } 17944defbc8cSSachin Saxena 179537f9b54bSShreyansh Jain for (loop = 0; loop < num_rx_fqs; loop++) { 17968d6fc8b6SHemant Agrawal if (default_q) 17978d6fc8b6SHemant Agrawal fqid = cfg->rx_def; 17988d6fc8b6SHemant Agrawal else 17994defbc8cSSachin Saxena fqid = dev_rx_fqids[loop]; 180062f53995SHemant Agrawal 1801e4abd4ffSJun Yang vsp_id = dev_vspids[loop]; 1802e4abd4ffSJun Yang 180362f53995SHemant Agrawal if (dpaa_intf->cgr_rx) 180462f53995SHemant Agrawal dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop]; 180562f53995SHemant Agrawal 180662f53995SHemant Agrawal ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop], 180762f53995SHemant Agrawal dpaa_intf->cgr_rx ? &dpaa_intf->cgr_rx[loop] : NULL, 180862f53995SHemant Agrawal fqid); 180937f9b54bSShreyansh Jain if (ret) 18100ff76833SYong Wang goto free_rx; 1811e4abd4ffSJun Yang dpaa_intf->rx_queues[loop].vsp_id = vsp_id; 181237f9b54bSShreyansh Jain dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf; 181337f9b54bSShreyansh Jain } 181437f9b54bSShreyansh Jain dpaa_intf->nb_rx_queues = num_rx_fqs; 181537f9b54bSShreyansh Jain 18160ff76833SYong Wang /* Initialise Tx FQs.free_rx Have as many Tx FQ's as number of cores */ 181737f9b54bSShreyansh Jain dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) * 1818af2828cfSAkhil Goyal MAX_DPAA_CORES, MAX_CACHELINE); 18190ff76833SYong Wang if (!dpaa_intf->tx_queues) { 18200ff76833SYong Wang DPAA_PMD_ERR("Failed to alloc mem for TX queues\n"); 18210ff76833SYong Wang ret = -ENOMEM; 18220ff76833SYong Wang goto free_rx; 18230ff76833SYong Wang } 182437f9b54bSShreyansh Jain 18259124e65dSGagandeep Singh /* If congestion control is enabled globally*/ 18269124e65dSGagandeep Singh if (td_tx_threshold) { 18279124e65dSGagandeep Singh dpaa_intf->cgr_tx = rte_zmalloc(NULL, 18289124e65dSGagandeep Singh sizeof(struct qman_cgr) * MAX_DPAA_CORES, 18299124e65dSGagandeep Singh MAX_CACHELINE); 18309124e65dSGagandeep Singh if (!dpaa_intf->cgr_tx) { 18319124e65dSGagandeep Singh DPAA_PMD_ERR("Failed to alloc mem for cgr_tx\n"); 18329124e65dSGagandeep Singh ret = -ENOMEM; 18339124e65dSGagandeep Singh goto free_rx; 18349124e65dSGagandeep Singh } 18359124e65dSGagandeep Singh 18369124e65dSGagandeep Singh ret = qman_alloc_cgrid_range(&cgrid_tx[0], MAX_DPAA_CORES, 18379124e65dSGagandeep Singh 1, 0); 18389124e65dSGagandeep Singh if (ret != MAX_DPAA_CORES) { 18399124e65dSGagandeep Singh DPAA_PMD_WARN("insufficient CGRIDs available"); 18409124e65dSGagandeep Singh ret = -EINVAL; 18419124e65dSGagandeep Singh goto free_rx; 18429124e65dSGagandeep Singh } 18439124e65dSGagandeep Singh } else { 18449124e65dSGagandeep Singh dpaa_intf->cgr_tx = NULL; 18459124e65dSGagandeep Singh } 18469124e65dSGagandeep Singh 18479124e65dSGagandeep Singh 1848af2828cfSAkhil Goyal for (loop = 0; loop < MAX_DPAA_CORES; loop++) { 18499124e65dSGagandeep Singh if (dpaa_intf->cgr_tx) 18509124e65dSGagandeep Singh dpaa_intf->cgr_tx[loop].cgrid = cgrid_tx[loop]; 18519124e65dSGagandeep Singh 185237f9b54bSShreyansh Jain ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop], 18539124e65dSGagandeep Singh fman_intf, 18549124e65dSGagandeep Singh dpaa_intf->cgr_tx ? &dpaa_intf->cgr_tx[loop] : NULL); 185537f9b54bSShreyansh Jain if (ret) 18560ff76833SYong Wang goto free_tx; 185737f9b54bSShreyansh Jain dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf; 185837f9b54bSShreyansh Jain } 1859af2828cfSAkhil Goyal dpaa_intf->nb_tx_queues = MAX_DPAA_CORES; 186037f9b54bSShreyansh Jain 186105ba55bcSShreyansh Jain #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER 186205ba55bcSShreyansh Jain dpaa_debug_queue_init(&dpaa_intf->debug_queues[ 186305ba55bcSShreyansh Jain DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err); 186405ba55bcSShreyansh Jain dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf; 186505ba55bcSShreyansh Jain dpaa_debug_queue_init(&dpaa_intf->debug_queues[ 186605ba55bcSShreyansh Jain DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err); 186705ba55bcSShreyansh Jain dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf; 186805ba55bcSShreyansh Jain #endif 186905ba55bcSShreyansh Jain 187037f9b54bSShreyansh Jain DPAA_PMD_DEBUG("All frame queues created"); 187137f9b54bSShreyansh Jain 187212a4678aSShreyansh Jain /* Get the initial configuration for flow control */ 18736b10d1f7SNipun Gupta dpaa_fc_set_default(dpaa_intf, fman_intf); 187412a4678aSShreyansh Jain 187537f9b54bSShreyansh Jain /* reset bpool list, initialize bpool dynamically */ 187637f9b54bSShreyansh Jain list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) { 187737f9b54bSShreyansh Jain list_del(&bp->node); 18784762b3d4SHemant Agrawal rte_free(bp); 187937f9b54bSShreyansh Jain } 188037f9b54bSShreyansh Jain 188137f9b54bSShreyansh Jain /* Populate ethdev structure */ 1882ff9e112dSShreyansh Jain eth_dev->dev_ops = &dpaa_devops; 188337f9b54bSShreyansh Jain eth_dev->rx_pkt_burst = dpaa_eth_queue_rx; 188437f9b54bSShreyansh Jain eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all; 188537f9b54bSShreyansh Jain 188637f9b54bSShreyansh Jain /* Allocate memory for storing MAC addresses */ 188737f9b54bSShreyansh Jain eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", 188835b2d13fSOlivier Matz RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0); 188937f9b54bSShreyansh Jain if (eth_dev->data->mac_addrs == NULL) { 189037f9b54bSShreyansh Jain DPAA_PMD_ERR("Failed to allocate %d bytes needed to " 189137f9b54bSShreyansh Jain "store MAC addresses", 189235b2d13fSOlivier Matz RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER); 18930ff76833SYong Wang ret = -ENOMEM; 18940ff76833SYong Wang goto free_tx; 189537f9b54bSShreyansh Jain } 189637f9b54bSShreyansh Jain 189737f9b54bSShreyansh Jain /* copy the primary mac address */ 1898538da7a1SOlivier Matz rte_ether_addr_copy(&fman_intf->mac_addr, ð_dev->data->mac_addrs[0]); 189937f9b54bSShreyansh Jain 19004defbc8cSSachin Saxena RTE_LOG(INFO, PMD, "net: dpaa: %s: %02x:%02x:%02x:%02x:%02x:%02x\n", 19014defbc8cSSachin Saxena dpaa_device->name, 19024defbc8cSSachin Saxena fman_intf->mac_addr.addr_bytes[0], 19034defbc8cSSachin Saxena fman_intf->mac_addr.addr_bytes[1], 19044defbc8cSSachin Saxena fman_intf->mac_addr.addr_bytes[2], 19054defbc8cSSachin Saxena fman_intf->mac_addr.addr_bytes[3], 19064defbc8cSSachin Saxena fman_intf->mac_addr.addr_bytes[4], 19074defbc8cSSachin Saxena fman_intf->mac_addr.addr_bytes[5]); 19084defbc8cSSachin Saxena 1909133332f0SRadu Bulie if (!fman_intf->is_shared_mac) { 191037f9b54bSShreyansh Jain /* Disable RX mode */ 191137f9b54bSShreyansh Jain fman_if_discard_rx_errors(fman_intf); 191237f9b54bSShreyansh Jain fman_if_disable_rx(fman_intf); 191337f9b54bSShreyansh Jain /* Disable promiscuous mode */ 191437f9b54bSShreyansh Jain fman_if_promiscuous_disable(fman_intf); 191537f9b54bSShreyansh Jain /* Disable multicast */ 191637f9b54bSShreyansh Jain fman_if_reset_mcast_filter_table(fman_intf); 191737f9b54bSShreyansh Jain /* Reset interface statistics */ 191837f9b54bSShreyansh Jain fman_if_stats_reset(fman_intf); 191955576ac2SHemant Agrawal /* Disable SG by default */ 192055576ac2SHemant Agrawal fman_if_set_sg(fman_intf, 0); 1921133332f0SRadu Bulie fman_if_set_maxfrm(fman_intf, 1922133332f0SRadu Bulie RTE_ETHER_MAX_LEN + VLAN_TAG_SIZE); 1923133332f0SRadu Bulie } 1924ff9e112dSShreyansh Jain 1925ff9e112dSShreyansh Jain return 0; 19260ff76833SYong Wang 19270ff76833SYong Wang free_tx: 19280ff76833SYong Wang rte_free(dpaa_intf->tx_queues); 19290ff76833SYong Wang dpaa_intf->tx_queues = NULL; 19300ff76833SYong Wang dpaa_intf->nb_tx_queues = 0; 19310ff76833SYong Wang 19320ff76833SYong Wang free_rx: 19330ff76833SYong Wang rte_free(dpaa_intf->cgr_rx); 19349124e65dSGagandeep Singh rte_free(dpaa_intf->cgr_tx); 19350ff76833SYong Wang rte_free(dpaa_intf->rx_queues); 19360ff76833SYong Wang dpaa_intf->rx_queues = NULL; 19370ff76833SYong Wang dpaa_intf->nb_rx_queues = 0; 19380ff76833SYong Wang return ret; 1939ff9e112dSShreyansh Jain } 1940ff9e112dSShreyansh Jain 1941ff9e112dSShreyansh Jain static int 1942ff9e112dSShreyansh Jain dpaa_dev_uninit(struct rte_eth_dev *dev) 1943ff9e112dSShreyansh Jain { 1944ff9e112dSShreyansh Jain struct dpaa_if *dpaa_intf = dev->data->dev_private; 194562f53995SHemant Agrawal int loop; 1946ff9e112dSShreyansh Jain 1947ff9e112dSShreyansh Jain PMD_INIT_FUNC_TRACE(); 1948ff9e112dSShreyansh Jain 1949ff9e112dSShreyansh Jain if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1950ff9e112dSShreyansh Jain return -EPERM; 1951ff9e112dSShreyansh Jain 1952ff9e112dSShreyansh Jain if (!dpaa_intf) { 1953ff9e112dSShreyansh Jain DPAA_PMD_WARN("Already closed or not started"); 1954ff9e112dSShreyansh Jain return -1; 1955ff9e112dSShreyansh Jain } 1956ff9e112dSShreyansh Jain 19574defbc8cSSachin Saxena /* DPAA FM deconfig */ 19584defbc8cSSachin Saxena if (!(default_q || fmc_q)) { 19594defbc8cSSachin Saxena if (dpaa_fm_deconfig(dpaa_intf, dev->process_private)) 19604defbc8cSSachin Saxena DPAA_PMD_WARN("DPAA FM deconfig failed\n"); 19614defbc8cSSachin Saxena } 19624defbc8cSSachin Saxena 1963ff9e112dSShreyansh Jain dpaa_eth_dev_close(dev); 1964ff9e112dSShreyansh Jain 196537f9b54bSShreyansh Jain /* release configuration memory */ 196637f9b54bSShreyansh Jain if (dpaa_intf->fc_conf) 196737f9b54bSShreyansh Jain rte_free(dpaa_intf->fc_conf); 196837f9b54bSShreyansh Jain 196962f53995SHemant Agrawal /* Release RX congestion Groups */ 197062f53995SHemant Agrawal if (dpaa_intf->cgr_rx) { 197162f53995SHemant Agrawal for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++) 197262f53995SHemant Agrawal qman_delete_cgr(&dpaa_intf->cgr_rx[loop]); 197362f53995SHemant Agrawal 197462f53995SHemant Agrawal qman_release_cgrid_range(dpaa_intf->cgr_rx[loop].cgrid, 197562f53995SHemant Agrawal dpaa_intf->nb_rx_queues); 197662f53995SHemant Agrawal } 197762f53995SHemant Agrawal 197862f53995SHemant Agrawal rte_free(dpaa_intf->cgr_rx); 197962f53995SHemant Agrawal dpaa_intf->cgr_rx = NULL; 198062f53995SHemant Agrawal 19819124e65dSGagandeep Singh /* Release TX congestion Groups */ 19829124e65dSGagandeep Singh if (dpaa_intf->cgr_tx) { 19839124e65dSGagandeep Singh for (loop = 0; loop < MAX_DPAA_CORES; loop++) 19849124e65dSGagandeep Singh qman_delete_cgr(&dpaa_intf->cgr_tx[loop]); 19859124e65dSGagandeep Singh 19869124e65dSGagandeep Singh qman_release_cgrid_range(dpaa_intf->cgr_tx[loop].cgrid, 19879124e65dSGagandeep Singh MAX_DPAA_CORES); 19889124e65dSGagandeep Singh rte_free(dpaa_intf->cgr_tx); 19899124e65dSGagandeep Singh dpaa_intf->cgr_tx = NULL; 19909124e65dSGagandeep Singh } 19919124e65dSGagandeep Singh 199237f9b54bSShreyansh Jain rte_free(dpaa_intf->rx_queues); 199337f9b54bSShreyansh Jain dpaa_intf->rx_queues = NULL; 199437f9b54bSShreyansh Jain 199537f9b54bSShreyansh Jain rte_free(dpaa_intf->tx_queues); 199637f9b54bSShreyansh Jain dpaa_intf->tx_queues = NULL; 199737f9b54bSShreyansh Jain 1998ff9e112dSShreyansh Jain dev->dev_ops = NULL; 1999ff9e112dSShreyansh Jain dev->rx_pkt_burst = NULL; 2000ff9e112dSShreyansh Jain dev->tx_pkt_burst = NULL; 2001ff9e112dSShreyansh Jain 2002ff9e112dSShreyansh Jain return 0; 2003ff9e112dSShreyansh Jain } 2004ff9e112dSShreyansh Jain 2005ff9e112dSShreyansh Jain static int 20064defbc8cSSachin Saxena rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv, 2007ff9e112dSShreyansh Jain struct rte_dpaa_device *dpaa_dev) 2008ff9e112dSShreyansh Jain { 2009ff9e112dSShreyansh Jain int diag; 2010ff9e112dSShreyansh Jain int ret; 2011ff9e112dSShreyansh Jain struct rte_eth_dev *eth_dev; 2012ff9e112dSShreyansh Jain 2013ff9e112dSShreyansh Jain PMD_INIT_FUNC_TRACE(); 2014ff9e112dSShreyansh Jain 201547854c18SHemant Agrawal if ((DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE) > 201647854c18SHemant Agrawal RTE_PKTMBUF_HEADROOM) { 201747854c18SHemant Agrawal DPAA_PMD_ERR( 201847854c18SHemant Agrawal "RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA Annotation req(%d)", 201947854c18SHemant Agrawal RTE_PKTMBUF_HEADROOM, 202047854c18SHemant Agrawal DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE); 202147854c18SHemant Agrawal 202247854c18SHemant Agrawal return -1; 202347854c18SHemant Agrawal } 202447854c18SHemant Agrawal 2025ff9e112dSShreyansh Jain /* In case of secondary process, the device is already configured 2026ff9e112dSShreyansh Jain * and no further action is required, except portal initialization 2027ff9e112dSShreyansh Jain * and verifying secondary attachment to port name. 2028ff9e112dSShreyansh Jain */ 2029ff9e112dSShreyansh Jain if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2030ff9e112dSShreyansh Jain eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name); 2031ff9e112dSShreyansh Jain if (!eth_dev) 2032ff9e112dSShreyansh Jain return -ENOMEM; 2033d1c3ab22SFerruh Yigit eth_dev->device = &dpaa_dev->device; 2034d1c3ab22SFerruh Yigit eth_dev->dev_ops = &dpaa_devops; 20356b10d1f7SNipun Gupta 20366b10d1f7SNipun Gupta ret = dpaa_dev_init_secondary(eth_dev); 20376b10d1f7SNipun Gupta if (ret != 0) { 20386b10d1f7SNipun Gupta RTE_LOG(ERR, PMD, "secondary dev init failed\n"); 20396b10d1f7SNipun Gupta return ret; 20406b10d1f7SNipun Gupta } 20416b10d1f7SNipun Gupta 2042fbe90cddSThomas Monjalon rte_eth_dev_probing_finish(eth_dev); 2043ff9e112dSShreyansh Jain return 0; 2044ff9e112dSShreyansh Jain } 2045ff9e112dSShreyansh Jain 2046af2828cfSAkhil Goyal if (!is_global_init && (rte_eal_process_type() == RTE_PROC_PRIMARY)) { 20478d6fc8b6SHemant Agrawal if (access("/tmp/fmc.bin", F_OK) == -1) { 2048b7c7ff6eSStephen Hemminger DPAA_PMD_INFO("* FMC not configured.Enabling default mode"); 20498d6fc8b6SHemant Agrawal default_q = 1; 20508d6fc8b6SHemant Agrawal } 20518d6fc8b6SHemant Agrawal 20524defbc8cSSachin Saxena if (!(default_q || fmc_q)) { 20534defbc8cSSachin Saxena if (dpaa_fm_init()) { 20544defbc8cSSachin Saxena DPAA_PMD_ERR("FM init failed\n"); 20554defbc8cSSachin Saxena return -1; 20564defbc8cSSachin Saxena } 20574defbc8cSSachin Saxena } 20584defbc8cSSachin Saxena 2059e507498dSHemant Agrawal /* disabling the default push mode for LS1043 */ 2060e507498dSHemant Agrawal if (dpaa_svr_family == SVR_LS1043A_FAMILY) 2061e507498dSHemant Agrawal dpaa_push_mode_max_queue = 0; 2062e507498dSHemant Agrawal 2063e507498dSHemant Agrawal /* if push mode queues to be enabled. Currenly we are allowing 2064e507498dSHemant Agrawal * only one queue per thread. 2065e507498dSHemant Agrawal */ 2066e507498dSHemant Agrawal if (getenv("DPAA_PUSH_QUEUES_NUMBER")) { 2067e507498dSHemant Agrawal dpaa_push_mode_max_queue = 2068e507498dSHemant Agrawal atoi(getenv("DPAA_PUSH_QUEUES_NUMBER")); 2069e507498dSHemant Agrawal if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE) 2070e507498dSHemant Agrawal dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE; 2071e507498dSHemant Agrawal } 2072e507498dSHemant Agrawal 2073ff9e112dSShreyansh Jain is_global_init = 1; 2074ff9e112dSShreyansh Jain } 2075ff9e112dSShreyansh Jain 2076e5872221SRohit Raj if (unlikely(!DPAA_PER_LCORE_PORTAL)) { 2077ff9e112dSShreyansh Jain ret = rte_dpaa_portal_init((void *)1); 2078ff9e112dSShreyansh Jain if (ret) { 2079ff9e112dSShreyansh Jain DPAA_PMD_ERR("Unable to initialize portal"); 2080ff9e112dSShreyansh Jain return ret; 2081ff9e112dSShreyansh Jain } 20825d944582SNipun Gupta } 2083ff9e112dSShreyansh Jain 20846b10d1f7SNipun Gupta eth_dev = rte_eth_dev_allocate(dpaa_dev->name); 2085af2828cfSAkhil Goyal if (!eth_dev) 2086af2828cfSAkhil Goyal return -ENOMEM; 2087ff9e112dSShreyansh Jain 20886b10d1f7SNipun Gupta eth_dev->data->dev_private = 20896b10d1f7SNipun Gupta rte_zmalloc("ethdev private structure", 2090ff9e112dSShreyansh Jain sizeof(struct dpaa_if), 2091ff9e112dSShreyansh Jain RTE_CACHE_LINE_SIZE); 2092ff9e112dSShreyansh Jain if (!eth_dev->data->dev_private) { 2093ff9e112dSShreyansh Jain DPAA_PMD_ERR("Cannot allocate memzone for port data"); 2094ff9e112dSShreyansh Jain rte_eth_dev_release_port(eth_dev); 2095ff9e112dSShreyansh Jain return -ENOMEM; 2096ff9e112dSShreyansh Jain } 20976b10d1f7SNipun Gupta 2098ff9e112dSShreyansh Jain eth_dev->device = &dpaa_dev->device; 2099ff9e112dSShreyansh Jain dpaa_dev->eth_dev = eth_dev; 2100ff9e112dSShreyansh Jain 21019124e65dSGagandeep Singh qman_ern_register_cb(dpaa_free_mbuf); 21029124e65dSGagandeep Singh 21032aa10990SRohit Raj if (dpaa_drv->drv_flags & RTE_DPAA_DRV_INTR_LSC) 21042aa10990SRohit Raj eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC; 21052aa10990SRohit Raj 2106ff9e112dSShreyansh Jain /* Invoke PMD device initialization function */ 2107ff9e112dSShreyansh Jain diag = dpaa_dev_init(eth_dev); 2108fbe90cddSThomas Monjalon if (diag == 0) { 2109fbe90cddSThomas Monjalon rte_eth_dev_probing_finish(eth_dev); 2110ff9e112dSShreyansh Jain return 0; 2111fbe90cddSThomas Monjalon } 2112ff9e112dSShreyansh Jain 2113ff9e112dSShreyansh Jain rte_eth_dev_release_port(eth_dev); 2114ff9e112dSShreyansh Jain return diag; 2115ff9e112dSShreyansh Jain } 2116ff9e112dSShreyansh Jain 2117ff9e112dSShreyansh Jain static int 2118ff9e112dSShreyansh Jain rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev) 2119ff9e112dSShreyansh Jain { 2120ff9e112dSShreyansh Jain struct rte_eth_dev *eth_dev; 2121ff9e112dSShreyansh Jain 2122ff9e112dSShreyansh Jain PMD_INIT_FUNC_TRACE(); 2123ff9e112dSShreyansh Jain 2124ff9e112dSShreyansh Jain eth_dev = dpaa_dev->eth_dev; 2125ff9e112dSShreyansh Jain dpaa_dev_uninit(eth_dev); 2126ff9e112dSShreyansh Jain 2127ff9e112dSShreyansh Jain rte_eth_dev_release_port(eth_dev); 2128ff9e112dSShreyansh Jain 2129ff9e112dSShreyansh Jain return 0; 2130ff9e112dSShreyansh Jain } 2131ff9e112dSShreyansh Jain 21324defbc8cSSachin Saxena static void __attribute__((destructor(102))) dpaa_finish(void) 21334defbc8cSSachin Saxena { 21344defbc8cSSachin Saxena /* For secondary, primary will do all the cleanup */ 21354defbc8cSSachin Saxena if (rte_eal_process_type() != RTE_PROC_PRIMARY) 21364defbc8cSSachin Saxena return; 21374defbc8cSSachin Saxena 21384defbc8cSSachin Saxena if (!(default_q || fmc_q)) { 21394defbc8cSSachin Saxena unsigned int i; 21404defbc8cSSachin Saxena 21414defbc8cSSachin Saxena for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 21424defbc8cSSachin Saxena if (rte_eth_devices[i].dev_ops == &dpaa_devops) { 21434defbc8cSSachin Saxena struct rte_eth_dev *dev = &rte_eth_devices[i]; 21444defbc8cSSachin Saxena struct dpaa_if *dpaa_intf = 21454defbc8cSSachin Saxena dev->data->dev_private; 21464defbc8cSSachin Saxena struct fman_if *fif = 21474defbc8cSSachin Saxena dev->process_private; 21484defbc8cSSachin Saxena if (dpaa_intf->port_handle) 21494defbc8cSSachin Saxena if (dpaa_fm_deconfig(dpaa_intf, fif)) 21504defbc8cSSachin Saxena DPAA_PMD_WARN("DPAA FM " 21514defbc8cSSachin Saxena "deconfig failed\n"); 2152e4abd4ffSJun Yang if (fif->num_profiles) { 2153e4abd4ffSJun Yang if (dpaa_port_vsp_cleanup(dpaa_intf, 2154e4abd4ffSJun Yang fif)) 2155e4abd4ffSJun Yang DPAA_PMD_WARN("DPAA FM vsp cleanup failed\n"); 2156e4abd4ffSJun Yang } 21574defbc8cSSachin Saxena } 21584defbc8cSSachin Saxena } 21594defbc8cSSachin Saxena if (is_global_init) 21604defbc8cSSachin Saxena if (dpaa_fm_term()) 21614defbc8cSSachin Saxena DPAA_PMD_WARN("DPAA FM term failed\n"); 21624defbc8cSSachin Saxena 21634defbc8cSSachin Saxena is_global_init = 0; 21644defbc8cSSachin Saxena 21654defbc8cSSachin Saxena DPAA_PMD_INFO("DPAA fman cleaned up"); 21664defbc8cSSachin Saxena } 21674defbc8cSSachin Saxena } 21684defbc8cSSachin Saxena 2169ff9e112dSShreyansh Jain static struct rte_dpaa_driver rte_dpaa_pmd = { 21702aa10990SRohit Raj .drv_flags = RTE_DPAA_DRV_INTR_LSC, 2171ff9e112dSShreyansh Jain .drv_type = FSL_DPAA_ETH, 2172ff9e112dSShreyansh Jain .probe = rte_dpaa_probe, 2173ff9e112dSShreyansh Jain .remove = rte_dpaa_remove, 2174ff9e112dSShreyansh Jain }; 2175ff9e112dSShreyansh Jain 2176ff9e112dSShreyansh Jain RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd); 21779c99878aSJerin Jacob RTE_LOG_REGISTER(dpaa_logtype_pmd, pmd.net.dpaa, NOTICE); 2178