/* * SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016-2024 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <dev_driver.h>
#include <bus_fslmc_driver.h>
#include <rte_flow_driver.h>
#include "rte_dpaa2_mempool.h"

#include "dpaa2_pmd_logs.h"
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <mc/fsl_dpmng.h>
#include "dpaa2_ethdev.h"
#include "dpaa2_sparser.h"
#include <fsl_qbman_debug.h>

#define DRIVER_LOOPBACK_MODE "drv_loopback"
#define DRIVER_NO_PREFETCH_MODE "drv_no_prefetch"
#define DRIVER_TX_CONF "drv_tx_conf"
#define DRIVER_ERROR_QUEUE  "drv_err_queue"
#define CHECK_INTERVAL         100  /* 100ms */
#define MAX_REPEAT_TIME        90   /* 9s (90 * 100ms) in total */

/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
		RTE_ETH_RX_OFFLOAD_CHECKSUM |
		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
		RTE_ETH_RX_OFFLOAD_TIMESTAMP;

/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
		RTE_ETH_RX_OFFLOAD_RSS_HASH |
		RTE_ETH_RX_OFFLOAD_SCATTER;

/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

/* Tx offloads which cannot be disabled */
static uint64_t dev_tx_offloads_nodis =
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;

/* enable timestamp in mbuf */
bool dpaa2_enable_ts[RTE_MAX_ETHPORTS];
uint64_t dpaa2_timestamp_rx_dynflag;
int dpaa2_timestamp_dynfield_offset = -1;

/* Enable error queue */
bool dpaa2_enable_err_queue;

#define MAX_NB_RX_DESC		11264
int total_nb_rx_desc;

int dpaa2_valid_dev;
struct rte_mempool *dpaa2_tx_sg_pool;

struct rte_dpaa2_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint8_t page_id; /* dpni statistics page id */
	uint8_t stats_id; /* stats id in the given page */
};

static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
	{"ingress_multicast_frames", 0, 2},
	{"ingress_multicast_bytes", 0, 3},
	{"ingress_broadcast_frames", 0, 4},
	{"ingress_broadcast_bytes", 0, 5},
	{"egress_multicast_frames", 1, 2},
	{"egress_multicast_bytes", 1, 3},
	{"egress_broadcast_frames", 1, 4},
	{"egress_broadcast_bytes", 1, 5},
	{"ingress_filtered_frames", 2, 0},
	{"ingress_discarded_frames", 2, 1},
	{"ingress_nobuffer_discards", 2, 2},
	{"egress_discarded_frames", 2, 3},
	{"egress_confirmed_frames", 2, 4},
	{"cgr_reject_frames", 4, 0},
	{"cgr_reject_bytes", 4, 1},
};

static struct rte_dpaa2_driver rte_dpaa2_pmd;
static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int
dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = dev->process_private;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -1;
	}

	if (on)
		ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW, priv->token,
				       vlan_id, 0, 0, 0);
	else
		ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
					  priv->token, vlan_id);

	if (ret < 0)
		DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid =%d",
			      ret, vlan_id, priv->hw_id);

	return ret;
}

static int
dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = dev->process_private;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();

	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
		/* VLAN Filter not available */
		if (!priv->max_vlan_filters) {
			DPAA2_PMD_INFO("VLAN filter not available");
			return -ENOTSUP;
		}

		if (dev->data->dev_conf.rxmode.offloads &
			RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, true);
		else
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, false);
		if (ret < 0)
			DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret);
	}

	return ret;
}

static int
dpaa2_vlan_tpid_set(struct rte_eth_dev *dev,
		    enum rte_vlan_type vlan_type __rte_unused,
		    uint16_t tpid)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = dev->process_private;
	int ret = -ENOTSUP;

	PMD_INIT_FUNC_TRACE();

	/* nothing to be done for standard vlan tpids */
	if (tpid == 0x8100 || tpid == 0x88A8)
		return 0;

	ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
				   priv->token, tpid);
	if (ret < 0)
		DPAA2_PMD_INFO("Unable to set vlan tpid = %d", ret);
	/* if already configured tpids, remove them first */
	if (ret == -EBUSY) {
		struct dpni_custom_tpid_cfg tpid_list = {0};

		ret = dpni_get_custom_tpid(dpni, CMD_PRI_LOW,
					   priv->token, &tpid_list);
		if (ret < 0)
			goto fail;
		ret = dpni_remove_custom_tpid(dpni, CMD_PRI_LOW,
					      priv->token, tpid_list.tpid1);
		if (ret < 0)
			goto fail;
		ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
					   priv->token, tpid);
	}
fail:
	return ret;
}

static int
dpaa2_fw_version_get(struct rte_eth_dev *dev,
		     char *fw_version,
		     size_t fw_size)
{
	int ret;
	struct fsl_mc_io *dpni = dev->process_private;
	struct mc_soc_version mc_plat_info = {0};
	struct mc_version mc_ver_info = {0};

	PMD_INIT_FUNC_TRACE();

	if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
		DPAA2_PMD_WARN("\tmc_get_soc_version failed");

	if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
		DPAA2_PMD_WARN("\tmc_get_version failed");

	ret = snprintf(fw_version, fw_size,
		       "%x-%d.%d.%d",
		       mc_plat_info.svr,
		       mc_ver_info.major,
		       mc_ver_info.minor,
		       mc_ver_info.revision);
	if (ret < 0)
		return -EINVAL;

	ret += 1; /* add the size of '\0' */
	if (fw_size < (size_t)ret)
		return ret;
	else
		return 0;
}

static int
dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	dev_info->max_mac_addrs = priv->max_mac_filters;
	dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
	dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
	dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
	dev_info->rx_offload_capa = dev_rx_offloads_sup |
				dev_rx_offloads_nodis;
	dev_info->tx_offload_capa = dev_tx_offloads_sup |
				dev_tx_offloads_nodis;
	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G |
			RTE_ETH_LINK_SPEED_2_5G |
			RTE_ETH_LINK_SPEED_10G;
	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;

	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vfs = 0;
	dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
	dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;

	dev_info->default_rxportconf.burst_size = dpaa2_dqrr_size;
	/* same is rx size for best perf */
	dev_info->default_txportconf.burst_size = dpaa2_dqrr_size;

	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_txportconf.ring_size = CONG_ENTER_TX_THRESHOLD;
	dev_info->default_rxportconf.ring_size = DPAA2_RX_DEFAULT_NBDESC;

	if (dpaa2_svr_family == SVR_LX2160A) {
		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_25G |
				RTE_ETH_LINK_SPEED_40G |
				RTE_ETH_LINK_SPEED_50G |
				RTE_ETH_LINK_SPEED_100G;
	}

	return 0;
}

static int
dpaa2_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
			__rte_unused uint16_t queue_id,
			struct rte_eth_burst_mode *mode)
{
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	int ret = -EINVAL;
	unsigned int i;
	const struct burst_info {
		uint64_t flags;
		const char *output;
	} rx_offload_map[] = {
			{RTE_ETH_RX_OFFLOAD_CHECKSUM, " Checksum,"},
			{RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
			{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
			{RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
			{RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
			{RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
			{RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
			{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"},
			{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"}
	};

	/* Update Rx offload info */
	for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
		if (eth_conf->rxmode.offloads & rx_offload_map[i].flags) {
			snprintf(mode->info, sizeof(mode->info), "%s",
				 rx_offload_map[i].output);
			ret = 0;
			break;
		}
	}
	return ret;
}

static int
dpaa2_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
			__rte_unused uint16_t queue_id,
			struct rte_eth_burst_mode *mode)
{
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	int ret = -EINVAL;
	unsigned int i;
	const struct burst_info {
		uint64_t flags;
		const char *output;
	} tx_offload_map[] = {
			{RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
			{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
			{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
			{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
			{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
			{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
			{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
			{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
			{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
	};

	/* Update Tx offload info */
	for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
		if (eth_conf->txmode.offloads & tx_offload_map[i].flags) {
			snprintf(mode->info, sizeof(mode->info), "%s",
				 tx_offload_map[i].output);
			ret = 0;
			break;
		}
	}
	return ret;
}

static int
dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	uint16_t dist_idx;
	uint32_t vq_id;
	uint8_t num_rxqueue_per_tc;
	struct dpaa2_queue *mc_q, *mcq;
	uint32_t tot_queues;
	int i, ret = 0;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	num_rxqueue_per_tc = (priv->nb_rx_queues / priv->num_rx_tc);
	if (priv->flags & DPAA2_TX_CONF_ENABLE)
		tot_queues = priv->nb_rx_queues + 2 * priv->nb_tx_queues;
	else
		tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
	mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
			  RTE_CACHE_LINE_SIZE);
	if (!mc_q) {
		DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues");
		return -1;
	}

	for (i = 0; i < priv->nb_rx_queues; i++) {
		mc_q->eth_data = dev->data;
		priv->rx_vq[i] = mc_q++;
		dpaa2_q = priv->rx_vq[i];
		ret = dpaa2_queue_storage_alloc(dpaa2_q,
			RTE_MAX_LCORE);
		if (ret)
			goto fail;
	}

	if (dpaa2_enable_err_queue) {
		priv->rx_err_vq = rte_zmalloc("dpni_rx_err",
			sizeof(struct dpaa2_queue), 0);
		if (!priv->rx_err_vq)
			goto fail;

		dpaa2_q = priv->rx_err_vq;
		ret = dpaa2_queue_storage_alloc(dpaa2_q,
			RTE_MAX_LCORE);
		if (ret)
			goto fail;
	}

	for (i = 0; i < priv->nb_tx_queues; i++) {
		mc_q->eth_data = dev->data;
		mc_q->flow_id = 0xffff;
		priv->tx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		dpaa2_q->cscn = rte_malloc(NULL,
					   sizeof(struct qbman_result), 16);
		if (!dpaa2_q->cscn)
			goto fail_tx;
	}

	if (priv->flags & DPAA2_TX_CONF_ENABLE) {
		/*Setup tx confirmation queues*/
		for (i = 0; i < priv->nb_tx_queues; i++) {
			mc_q->eth_data = dev->data;
			mc_q->tc_index = i;
			mc_q->flow_id = 0;
			priv->tx_conf_vq[i] = mc_q++;
			dpaa2_q = priv->tx_conf_vq[i];
			ret = dpaa2_queue_storage_alloc(dpaa2_q,
				RTE_MAX_LCORE);
			if (ret)
				goto fail_tx_conf;
		}
	}

	vq_id = 0;
	for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
		mcq = priv->rx_vq[vq_id];
		mcq->tc_index = dist_idx / num_rxqueue_per_tc;
		mcq->flow_id = dist_idx % num_rxqueue_per_tc;
		vq_id++;
	}

	return 0;
fail_tx_conf:
	i -= 1;
	while (i >= 0) {
		dpaa2_q = priv->tx_conf_vq[i];
		dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
		priv->tx_conf_vq[i--] = NULL;
	}
	i = priv->nb_tx_queues;
fail_tx:
	i -= 1;
	while (i >= 0) {
		dpaa2_q = priv->tx_vq[i];
		rte_free(dpaa2_q->cscn);
		priv->tx_vq[i--] = NULL;
	}
	i = priv->nb_rx_queues;
fail:
	i -= 1;
	mc_q = priv->rx_vq[0];
	while (i >= 0) {
		dpaa2_q = priv->rx_vq[i];
		dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
		priv->rx_vq[i--] = NULL;
	}

	if (dpaa2_enable_err_queue) {
		dpaa2_q = priv->rx_err_vq;
		dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
	}

	rte_free(mc_q);
	return -1;
}

static void
dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q;
	int i;

	PMD_INIT_FUNC_TRACE();

	/* Queue allocation base */
	if (priv->rx_vq[0]) {
		/* cleaning up queue storage */
		for (i = 0; i < priv->nb_rx_queues; i++) {
			dpaa2_q = priv->rx_vq[i];
			dpaa2_queue_storage_free(dpaa2_q,
				RTE_MAX_LCORE);
		}
		/* cleanup tx queue cscn */
		for (i = 0; i < priv->nb_tx_queues; i++) {
			dpaa2_q = priv->tx_vq[i];
			rte_free(dpaa2_q->cscn);
		}
		if (priv->flags & DPAA2_TX_CONF_ENABLE) {
			/* cleanup tx conf queue storage */
			for (i = 0; i < priv->nb_tx_queues; i++) {
				dpaa2_q = priv->tx_conf_vq[i];
				dpaa2_queue_storage_free(dpaa2_q,
					RTE_MAX_LCORE);
			}
		}
		/*free memory for all queues (RX+TX) */
		rte_free(priv->rx_vq[0]);
		priv->rx_vq[0] = NULL;
	}
}

static int
dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = dev->process_private;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	uint64_t tx_offloads = eth_conf->txmode.offloads;
	int rx_l3_csum_offload = false;
	int rx_l4_csum_offload = false;
	int tx_l3_csum_offload = false;
	int tx_l4_csum_offload = false;
	int ret, tc_index;
	uint32_t max_rx_pktlen;

	PMD_INIT_FUNC_TRACE();

	/* Rx offloads which are enabled by default */
	if (dev_rx_offloads_nodis & ~rx_offloads) {
		DPAA2_PMD_INFO(
		"Some of rx offloads enabled by default - requested 0x%" PRIx64
		" fixed are 0x%" PRIx64,
		rx_offloads, dev_rx_offloads_nodis);
	}

	/* Tx offloads which are enabled by default */
	if (dev_tx_offloads_nodis & ~tx_offloads) {
		DPAA2_PMD_INFO(
		"Some of tx offloads enabled by default - requested 0x%" PRIx64
		" fixed are 0x%" PRIx64,
		tx_offloads, dev_tx_offloads_nodis);
	}

	max_rx_pktlen = eth_conf->rxmode.mtu + RTE_ETHER_HDR_LEN +
				RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
	if (max_rx_pktlen <= DPAA2_MAX_RX_PKT_LEN) {
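		/* Note: the maximum frame length programmed into the DPNI
		 * below excludes the Ethernet CRC, which is why
		 * RTE_ETHER_CRC_LEN is subtracted from max_rx_pktlen.
		 */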
		ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
			priv->token, max_rx_pktlen - RTE_ETHER_CRC_LEN);
		if (ret != 0) {
			DPAA2_PMD_ERR("Unable to set mtu. check config");
			return ret;
		}
		DPAA2_PMD_INFO("MTU configured for the device: %d",
				dev->data->mtu);
	} else {
		return -1;
	}

	if (eth_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
		for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
			ret = dpaa2_setup_flow_dist(dev,
					eth_conf->rx_adv_conf.rss_conf.rss_hf,
					tc_index);
			if (ret) {
				DPAA2_PMD_ERR(
					"Unable to set flow distribution on tc%d."
					"Check queue config", tc_index);
				return ret;
			}
		}
	}

	if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
		rx_l3_csum_offload = true;

	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) ||
		(rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) ||
		(rx_offloads & RTE_ETH_RX_OFFLOAD_SCTP_CKSUM))
		rx_l4_csum_offload = true;

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to get RX l4 csum:Error = %d", ret);
		return ret;
	}

#if !defined(RTE_LIBRTE_IEEE1588)
	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
#endif
	{
		ret = rte_mbuf_dyn_rx_timestamp_register(
				&dpaa2_timestamp_dynfield_offset,
				&dpaa2_timestamp_rx_dynflag);
		if (ret != 0) {
			DPAA2_PMD_ERR("Error to register timestamp field/flag");
			return -rte_errno;
		}
		dpaa2_enable_ts[dev->data->port_id] = true;
	}

	if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
		tx_l3_csum_offload = true;

	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) ||
		(tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ||
		(tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM))
		tx_l4_csum_offload = true;

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to get TX l4 csum:Error = %d", ret);
		return ret;
	}

	/* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in
	 * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC]
	 * to 0 for LS2 in the hardware thus disabling data/annotation
	 * stashing. For LX2 this is fixed in hardware and thus hash result and
	 * parse results can be received in FD using this option.
	 */
	if (dpaa2_svr_family == SVR_LX2160A) {
		ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
				       DPNI_FLCTYPE_HASH, true);
		if (ret) {
			DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret);
			return ret;
		}
	}

	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
		dpaa2_vlan_offload_set(dev, RTE_ETH_VLAN_FILTER_MASK);

	if (eth_conf->lpbk_mode) {
		ret = dpaa2_dev_recycle_config(dev);
		if (ret) {
			DPAA2_PMD_ERR("Error to configure %s to recycle port.",
				dev->data->name);

			return ret;
		}
	} else {
		/** User may disable loopback mode by calling
		 * "dev_configure" with lpbk_mode cleared.
		 * No matter the port was configured recycle or not,
		 * recycle de-configure is called here.
		 * If port is not recycled, the de-configure will return directly.
		 */
		ret = dpaa2_dev_recycle_deconfig(dev);
		if (ret) {
			DPAA2_PMD_ERR("Error to de-configure recycle port %s.",
				dev->data->name);

			return ret;
		}
	}

	dpaa2_tm_init(dev);

	return 0;
}

/* Function to setup RX flow information. It contains traffic class ID,
 * flow ID, destination configuration etc.
 */
static int
dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t rx_queue_id,
			 uint16_t nb_rx_desc,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mb_pool)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
	struct dpaa2_queue *dpaa2_q;
	struct dpni_queue cfg;
	uint8_t options = 0;
	uint8_t flow_id;
	uint32_t bpid;
	int i, ret;

	PMD_INIT_FUNC_TRACE();

	DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
			dev, rx_queue_id, mb_pool, rx_conf);

	total_nb_rx_desc += nb_rx_desc;
	if (total_nb_rx_desc > MAX_NB_RX_DESC) {
		DPAA2_PMD_WARN("Total nb_rx_desc exceeds %d limit. Please use Normal buffers",
			       MAX_NB_RX_DESC);
		DPAA2_PMD_WARN("To use Normal buffers, run 'export DPNI_NORMAL_BUF=1' before running dynamic_dpl.sh script");
	}

	/* Rx deferred start is not supported */
	if (rx_conf->rx_deferred_start) {
		DPAA2_PMD_ERR("%p:Rx deferred start not supported",
			      (void *)dev);
		return -EINVAL;
	}

	if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
		if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
			ret = rte_dpaa2_bpid_info_init(mb_pool);
			if (ret)
				return ret;
		}
		bpid = mempool_to_bpid(mb_pool);
		ret = dpaa2_attach_bp_list(priv, dpni,
				rte_dpaa2_bpid_info[bpid].bp_list);
		if (ret)
			return ret;
	}
	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
	dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
	dpaa2_q->bp_array = rte_dpaa2_bpid_info;
	dpaa2_q->nb_desc = UINT16_MAX;
	dpaa2_q->offloads = rx_conf->offloads;

	/*Get the flow id from given VQ id*/
	flow_id = dpaa2_q->flow_id;
	memset(&cfg, 0, sizeof(struct dpni_queue));

	options = options | DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (size_t)(dpaa2_q);
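	/* The queue pointer is stored as the FQ user context so the Rx
	 * path can recover this dpaa2_queue from the dequeue result.
	 */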
	/* check if a private cgr available. */
	for (i = 0; i < priv->max_cgs; i++) {
		if (!priv->cgid_in_use[i]) {
			priv->cgid_in_use[i] = 1;
			break;
		}
	}

	if (i < priv->max_cgs) {
		options |= DPNI_QUEUE_OPT_SET_CGID;
		cfg.cgid = i;
		dpaa2_q->cgid = cfg.cgid;
	} else {
		dpaa2_q->cgid = 0xff;
	}

	/*if ls2088 or rev2 device, enable the stashing */

	if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) {
		options |= DPNI_QUEUE_OPT_FLC;
		cfg.flc.stash_control = true;
		dpaa2_flc_stashing_clear_all(&cfg.flc.value);
		if (getenv("DPAA2_DATA_STASHING_OFF")) {
			dpaa2_flc_stashing_set(DPAA2_FLC_DATA_STASHING, 0,
				&cfg.flc.value);
			dpaa2_q->data_stashing_off = 1;
		} else {
			dpaa2_flc_stashing_set(DPAA2_FLC_DATA_STASHING, 1,
				&cfg.flc.value);
			dpaa2_q->data_stashing_off = 0;
		}
		if ((dpaa2_svr_family & 0xffff0000) != SVR_LX2160A) {
			dpaa2_flc_stashing_set(DPAA2_FLC_ANNO_STASHING, 1,
				&cfg.flc.value);
		}
	}
	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
			     dpaa2_q->tc_index, flow_id, options, &cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret);
		return -1;
	}

	if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
		struct dpni_taildrop taildrop;

		taildrop.enable = 1;
		dpaa2_q->nb_desc = nb_rx_desc;
		/* Private CGR will use tail drop length as nb_rx_desc.
		 * for rest cases we can use standard byte based tail drop.
		 * There is no HW restriction, but number of CGRs are limited,
		 * hence this restriction is placed.
		 */
		if (dpaa2_q->cgid != 0xff) {
			/*enabling per rx queue congestion control */
			taildrop.threshold = nb_rx_desc;
			taildrop.units = DPNI_CONGESTION_UNIT_FRAMES;
			taildrop.oal = 0;
			DPAA2_PMD_DEBUG("Enabling CG Tail Drop on queue = %d",
					rx_queue_id);
			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
						DPNI_CP_CONGESTION_GROUP,
						DPNI_QUEUE_RX,
						dpaa2_q->tc_index,
						dpaa2_q->cgid, &taildrop);
		} else {
			/*enabling per rx queue congestion control */
			taildrop.threshold = CONG_THRESHOLD_RX_BYTES_Q;
			taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
			taildrop.oal = CONG_RX_OAL;
			DPAA2_PMD_DEBUG("Enabling Byte based Drop on queue= %d",
					rx_queue_id);
			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
						DPNI_CP_QUEUE, DPNI_QUEUE_RX,
						dpaa2_q->tc_index, flow_id,
						&taildrop);
		}
		if (ret) {
			DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
				      ret);
			return -1;
		}
	} else { /* Disable tail Drop */
		struct dpni_taildrop taildrop = {0};
		DPAA2_PMD_INFO("Tail drop is disabled on queue");

		taildrop.enable = 0;
		if (dpaa2_q->cgid != 0xff) {
			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
					DPNI_CP_CONGESTION_GROUP, DPNI_QUEUE_RX,
					dpaa2_q->tc_index,
					dpaa2_q->cgid, &taildrop);
		} else {
			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
					dpaa2_q->tc_index, flow_id, &taildrop);
		}
		if (ret) {
			DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
err=(%d)", 844a10a988aSShreyansh Jain ret); 84523d6a87eSHemant Agrawal return -1; 84623d6a87eSHemant Agrawal } 84723d6a87eSHemant Agrawal } 84823d6a87eSHemant Agrawal 8493e5a335dSHemant Agrawal dev->data->rx_queues[rx_queue_id] = dpaa2_q; 8503e5a335dSHemant Agrawal return 0; 8513e5a335dSHemant Agrawal } 8523e5a335dSHemant Agrawal 8533e5a335dSHemant Agrawal static int 8543e5a335dSHemant Agrawal dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, 8553e5a335dSHemant Agrawal uint16_t tx_queue_id, 856b5869095SHemant Agrawal uint16_t nb_tx_desc, 8573e5a335dSHemant Agrawal unsigned int socket_id __rte_unused, 858988a7c38SHemant Agrawal const struct rte_eth_txconf *tx_conf) 8593e5a335dSHemant Agrawal { 8603e5a335dSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 8613e5a335dSHemant Agrawal struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *) 8623e5a335dSHemant Agrawal priv->tx_vq[tx_queue_id]; 8639ceacab7SPriyanka Jain struct dpaa2_queue *dpaa2_tx_conf_q = (struct dpaa2_queue *) 8649ceacab7SPriyanka Jain priv->tx_conf_vq[tx_queue_id]; 86581c42c84SShreyansh Jain struct fsl_mc_io *dpni = dev->process_private; 8663e5a335dSHemant Agrawal struct dpni_queue tx_conf_cfg; 8673e5a335dSHemant Agrawal struct dpni_queue tx_flow_cfg; 8683e5a335dSHemant Agrawal uint8_t options = 0, flow_id; 86972100f0dSGagandeep Singh uint16_t channel_id; 870e26bf82eSSachin Saxena struct dpni_queue_id qid; 8713e5a335dSHemant Agrawal uint32_t tc_id; 8723e5a335dSHemant Agrawal int ret; 8733e5a335dSHemant Agrawal 8743e5a335dSHemant Agrawal PMD_INIT_FUNC_TRACE(); 8753e5a335dSHemant Agrawal 876988a7c38SHemant Agrawal /* Tx deferred start is not supported */ 877988a7c38SHemant Agrawal if (tx_conf->tx_deferred_start) { 878988a7c38SHemant Agrawal DPAA2_PMD_ERR("%p:Tx deferred start not supported", 879988a7c38SHemant Agrawal (void *)dev); 880988a7c38SHemant Agrawal return -EINVAL; 881988a7c38SHemant Agrawal } 882988a7c38SHemant Agrawal 883de1d70f0SHemant Agrawal dpaa2_q->nb_desc = UINT16_MAX; 884de1d70f0SHemant Agrawal dpaa2_q->offloads = tx_conf->offloads; 885de1d70f0SHemant Agrawal 8863e5a335dSHemant Agrawal /* Return if queue already configured */ 887f9989673SAkhil Goyal if (dpaa2_q->flow_id != 0xffff) { 888f9989673SAkhil Goyal dev->data->tx_queues[tx_queue_id] = dpaa2_q; 8893e5a335dSHemant Agrawal return 0; 890f9989673SAkhil Goyal } 8913e5a335dSHemant Agrawal 8923e5a335dSHemant Agrawal memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue)); 8933e5a335dSHemant Agrawal memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue)); 8943e5a335dSHemant Agrawal 8953e5a335dSHemant Agrawal if (tx_queue_id == 0) { 8963e5a335dSHemant Agrawal /*Set tx-conf and error configuration*/ 8978d21c563SHemant Agrawal if (priv->flags & DPAA2_TX_CONF_ENABLE) 8989ceacab7SPriyanka Jain ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW, 8999ceacab7SPriyanka Jain priv->token, 9009ceacab7SPriyanka Jain DPNI_CONF_AFFINE); 9019ceacab7SPriyanka Jain else 9023e5a335dSHemant Agrawal ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW, 9033e5a335dSHemant Agrawal priv->token, 9043e5a335dSHemant Agrawal DPNI_CONF_DISABLE); 9053e5a335dSHemant Agrawal if (ret) { 906a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error in set tx conf mode settings: " 907a10a988aSShreyansh Jain "err=%d", ret); 9083e5a335dSHemant Agrawal return -1; 9093e5a335dSHemant Agrawal } 9103e5a335dSHemant Agrawal } 91172100f0dSGagandeep Singh 91272100f0dSGagandeep Singh tc_id = tx_queue_id % priv->num_tx_tc; 91372100f0dSGagandeep Singh channel_id = (uint8_t)(tx_queue_id / priv->num_tx_tc) % 
		priv->num_channels;
	flow_id = 0;

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
			((channel_id << 8) | tc_id), flow_id, options, &tx_flow_cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting the tx flow: "
			"tc_id=%d, flow=%d err=%d",
			tc_id, flow_id, ret);
		return -1;
	}

	dpaa2_q->flow_id = flow_id;

	dpaa2_q->tc_index = tc_id;

	ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
			DPNI_QUEUE_TX, ((channel_id << 8) | dpaa2_q->tc_index),
			dpaa2_q->flow_id, &tx_flow_cfg, &qid);
	if (ret) {
		DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
		return -1;
	}
	dpaa2_q->fqid = qid.fqid;

	if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
		struct dpni_congestion_notification_cfg cong_notif_cfg = {0};

		dpaa2_q->nb_desc = nb_tx_desc;

		cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
		cong_notif_cfg.threshold_entry = nb_tx_desc;
		/* Notify that the queue is not congested when the data in
		 * the queue is below this threshold.(90% of value)
		 */
		cong_notif_cfg.threshold_exit = (nb_tx_desc * 9) / 10;
		cong_notif_cfg.message_ctx = 0;
		cong_notif_cfg.message_iova =
			(size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
		cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
		cong_notif_cfg.notification_mode =
					DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
					DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
					DPNI_CONG_OPT_COHERENT_WRITE;
		cong_notif_cfg.cg_point = DPNI_CP_QUEUE;

		ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
						       priv->token,
						       DPNI_QUEUE_TX,
						       ((channel_id << 8) | tc_id),
						       &cong_notif_cfg);
		if (ret) {
			DPAA2_PMD_ERR(
				"Error in setting tx congestion notification: "
				"err=%d", ret);
			return -ret;
		}
	}
	dpaa2_q->cb_eqresp_free = dpaa2_dev_free_eqresp_buf;
	dev->data->tx_queues[tx_queue_id] = dpaa2_q;

	if (priv->flags & DPAA2_TX_CONF_ENABLE) {
		dpaa2_q->tx_conf_queue = dpaa2_tx_conf_q;
		options = options | DPNI_QUEUE_OPT_USER_CTX;
		tx_conf_cfg.user_context = (size_t)(dpaa2_q);
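		/* The Tx confirmation queue carries the Tx queue pointer as
		 * its user context so completed frames can be matched back
		 * to the queue that sent them.
		 */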
		ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
			DPNI_QUEUE_TX_CONFIRM, ((channel_id << 8) | dpaa2_tx_conf_q->tc_index),
			dpaa2_tx_conf_q->flow_id, options, &tx_conf_cfg);
		if (ret) {
			DPAA2_PMD_ERR("Error in setting the tx conf flow: "
			      "tc_index=%d, flow=%d err=%d",
			      dpaa2_tx_conf_q->tc_index,
			      dpaa2_tx_conf_q->flow_id, ret);
			return -1;
		}

		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
			DPNI_QUEUE_TX_CONFIRM, ((channel_id << 8) | dpaa2_tx_conf_q->tc_index),
			dpaa2_tx_conf_q->flow_id, &tx_conf_cfg, &qid);
		if (ret) {
			DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
			return -1;
		}
		dpaa2_tx_conf_q->fqid = qid.fqid;
	}
	return 0;
}

static void
dpaa2_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct dpaa2_queue *dpaa2_q = dev->data->rx_queues[rx_queue_id];
	struct dpaa2_dev_priv *priv = dpaa2_q->eth_data->dev_private;
	struct fsl_mc_io *dpni =
		(struct fsl_mc_io *)priv->eth_dev->process_private;
	uint8_t options = 0;
	int ret;
	struct dpni_queue cfg;

	memset(&cfg, 0, sizeof(struct dpni_queue));
	PMD_INIT_FUNC_TRACE();

	total_nb_rx_desc -= dpaa2_q->nb_desc;

	if (dpaa2_q->cgid != 0xff) {
		options = DPNI_QUEUE_OPT_CLEAR_CGID;
		cfg.cgid = dpaa2_q->cgid;

		ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_RX,
				     dpaa2_q->tc_index, dpaa2_q->flow_id,
				     options, &cfg);
		if (ret)
			DPAA2_PMD_ERR("Unable to clear CGR from q=%u err=%d",
					dpaa2_q->fqid, ret);
		priv->cgid_in_use[dpaa2_q->cgid] = 0;
		dpaa2_q->cgid = 0xff;
	}
}

static uint32_t
dpaa2_dev_rx_queue_count(void *rx_queue)
{
	int32_t ret;
	struct dpaa2_queue *dpaa2_q;
	struct qbman_swp *swp;
	struct qbman_fq_query_np_rslt state;
	uint32_t frame_cnt = 0;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR(
				"Failed to allocate IO portal, tid: %d",
				rte_gettid());
			return -EINVAL;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	dpaa2_q = rx_queue;

	if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
		frame_cnt = qbman_fq_state_frame_count(&state);
		DPAA2_PMD_DP_DEBUG("RX frame count for q(%p) is %u",
				rx_queue, frame_cnt);
	}
	return frame_cnt;
}

static const uint32_t *
dpaa2_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
{
	static const uint32_t ptypes[] = {
		/*todo -= add more types */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
	};

	if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx ||
		dev->rx_pkt_burst == dpaa2_dev_rx ||
		dev->rx_pkt_burst == dpaa2_dev_loopback_rx) {
		*no_of_elements = RTE_DIM(ptypes);
		return ptypes;
	}
	return NULL;
}

/**
 * Dpaa2 link Interrupt handler
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
1093c5acbb5eSHemant Agrawal * 1094c5acbb5eSHemant Agrawal * @return 1095c5acbb5eSHemant Agrawal * void 1096c5acbb5eSHemant Agrawal */ 1097c5acbb5eSHemant Agrawal static void 1098c5acbb5eSHemant Agrawal dpaa2_interrupt_handler(void *param) 1099c5acbb5eSHemant Agrawal { 1100c5acbb5eSHemant Agrawal struct rte_eth_dev *dev = param; 1101c5acbb5eSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 110281c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1103c5acbb5eSHemant Agrawal int ret; 1104c5acbb5eSHemant Agrawal int irq_index = DPNI_IRQ_INDEX; 1105c5acbb5eSHemant Agrawal unsigned int status = 0, clear = 0; 1106c5acbb5eSHemant Agrawal 1107c5acbb5eSHemant Agrawal PMD_INIT_FUNC_TRACE(); 1108c5acbb5eSHemant Agrawal 1109c5acbb5eSHemant Agrawal if (dpni == NULL) { 1110a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 1111c5acbb5eSHemant Agrawal return; 1112c5acbb5eSHemant Agrawal } 1113c5acbb5eSHemant Agrawal 1114c5acbb5eSHemant Agrawal ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token, 1115c5acbb5eSHemant Agrawal irq_index, &status); 1116c5acbb5eSHemant Agrawal if (unlikely(ret)) { 1117a10a988aSShreyansh Jain DPAA2_PMD_ERR("Can't get irq status (err %d)", ret); 1118c5acbb5eSHemant Agrawal clear = 0xffffffff; 1119c5acbb5eSHemant Agrawal goto out; 1120c5acbb5eSHemant Agrawal } 1121c5acbb5eSHemant Agrawal 1122c5acbb5eSHemant Agrawal if (status & DPNI_IRQ_EVENT_LINK_CHANGED) { 1123c5acbb5eSHemant Agrawal clear = DPNI_IRQ_EVENT_LINK_CHANGED; 1124c5acbb5eSHemant Agrawal dpaa2_dev_link_update(dev, 0); 1125c5acbb5eSHemant Agrawal /* calling all the apps registered for link status event */ 11265723fbedSFerruh Yigit rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); 1127c5acbb5eSHemant Agrawal } 1128c5acbb5eSHemant Agrawal out: 1129c5acbb5eSHemant Agrawal ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token, 1130c5acbb5eSHemant Agrawal irq_index, clear); 1131c5acbb5eSHemant Agrawal if (unlikely(ret)) 1132a10a988aSShreyansh Jain DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret); 1133c5acbb5eSHemant Agrawal } 1134c5acbb5eSHemant Agrawal 1135c5acbb5eSHemant Agrawal static int 1136c5acbb5eSHemant Agrawal dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable) 1137c5acbb5eSHemant Agrawal { 1138c5acbb5eSHemant Agrawal int err = 0; 1139c5acbb5eSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 114081c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1141c5acbb5eSHemant Agrawal int irq_index = DPNI_IRQ_INDEX; 1142c5acbb5eSHemant Agrawal unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED; 1143c5acbb5eSHemant Agrawal 1144c5acbb5eSHemant Agrawal PMD_INIT_FUNC_TRACE(); 1145c5acbb5eSHemant Agrawal 1146c5acbb5eSHemant Agrawal err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token, 1147c5acbb5eSHemant Agrawal irq_index, mask); 1148c5acbb5eSHemant Agrawal if (err < 0) { 1149a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err, 1150c5acbb5eSHemant Agrawal strerror(-err)); 1151c5acbb5eSHemant Agrawal return err; 1152c5acbb5eSHemant Agrawal } 1153c5acbb5eSHemant Agrawal 1154c5acbb5eSHemant Agrawal err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token, 1155c5acbb5eSHemant Agrawal irq_index, enable); 1156c5acbb5eSHemant Agrawal if (err < 0) 1157a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err, 1158c5acbb5eSHemant Agrawal strerror(-err)); 1159c5acbb5eSHemant Agrawal 1160c5acbb5eSHemant Agrawal return err; 
1161c5acbb5eSHemant Agrawal } 1162c5acbb5eSHemant Agrawal 11633e5a335dSHemant Agrawal static int 11643e5a335dSHemant Agrawal dpaa2_dev_start(struct rte_eth_dev *dev) 11653e5a335dSHemant Agrawal { 1166c5acbb5eSHemant Agrawal struct rte_device *rdev = dev->device; 1167c5acbb5eSHemant Agrawal struct rte_dpaa2_device *dpaa2_dev; 11683e5a335dSHemant Agrawal struct rte_eth_dev_data *data = dev->data; 11693e5a335dSHemant Agrawal struct dpaa2_dev_priv *priv = data->dev_private; 117081c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 11713e5a335dSHemant Agrawal struct dpni_queue cfg; 1172ef18dafeSHemant Agrawal struct dpni_error_cfg err_cfg; 11733e5a335dSHemant Agrawal struct dpni_queue_id qid; 11743e5a335dSHemant Agrawal struct dpaa2_queue *dpaa2_q; 11753e5a335dSHemant Agrawal int ret, i; 1176c5acbb5eSHemant Agrawal struct rte_intr_handle *intr_handle; 1177c5acbb5eSHemant Agrawal 1178c5acbb5eSHemant Agrawal dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device); 1179d61138d4SHarman Kalra intr_handle = dpaa2_dev->intr_handle; 11803e5a335dSHemant Agrawal 11813e5a335dSHemant Agrawal PMD_INIT_FUNC_TRACE(); 11823e5a335dSHemant Agrawal ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token); 11833e5a335dSHemant Agrawal if (ret) { 1184a10a988aSShreyansh Jain DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d", 1185a10a988aSShreyansh Jain priv->hw_id, ret); 11863e5a335dSHemant Agrawal return ret; 11873e5a335dSHemant Agrawal } 11883e5a335dSHemant Agrawal 1189aa8c595aSHemant Agrawal /* Power up the phy. Needed to make the link go UP */ 1190a1f3a12cSHemant Agrawal dpaa2_dev_set_link_up(dev); 1191a1f3a12cSHemant Agrawal 11923e5a335dSHemant Agrawal for (i = 0; i < data->nb_rx_queues; i++) { 11933e5a335dSHemant Agrawal dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i]; 11943e5a335dSHemant Agrawal ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token, 11953e5a335dSHemant Agrawal DPNI_QUEUE_RX, dpaa2_q->tc_index, 11963e5a335dSHemant Agrawal dpaa2_q->flow_id, &cfg, &qid); 11973e5a335dSHemant Agrawal if (ret) { 1198a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error in getting flow information: " 1199a10a988aSShreyansh Jain "err=%d", ret); 12003e5a335dSHemant Agrawal return ret; 12013e5a335dSHemant Agrawal } 12023e5a335dSHemant Agrawal dpaa2_q->fqid = qid.fqid; 12033e5a335dSHemant Agrawal } 12043e5a335dSHemant Agrawal 12054690a611SNipun Gupta if (dpaa2_enable_err_queue) { 12064690a611SNipun Gupta ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token, 12074690a611SNipun Gupta DPNI_QUEUE_RX_ERR, 0, 0, &cfg, &qid); 12084690a611SNipun Gupta if (ret) { 12094690a611SNipun Gupta DPAA2_PMD_ERR("Error getting rx err flow information: err=%d", 12104690a611SNipun Gupta ret); 12114690a611SNipun Gupta return ret; 12124690a611SNipun Gupta } 12134690a611SNipun Gupta dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq; 12144690a611SNipun Gupta dpaa2_q->fqid = qid.fqid; 12154690a611SNipun Gupta dpaa2_q->eth_data = dev->data; 12164690a611SNipun Gupta 12174690a611SNipun Gupta err_cfg.errors = DPNI_ERROR_DISC; 12184690a611SNipun Gupta err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE; 12194690a611SNipun Gupta } else { 12204690a611SNipun Gupta /* checksum errors, send them to normal path 12214690a611SNipun Gupta * and set it in annotation 12224690a611SNipun Gupta */ 1223ef18dafeSHemant Agrawal err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE; 12244690a611SNipun Gupta 12254690a611SNipun Gupta /* if packet with parse error are not to be dropped */ 122634356a5dSShreyansh Jain err_cfg.errors 
|= DPNI_ERROR_PHE; 1227ef18dafeSHemant Agrawal 1228ef18dafeSHemant Agrawal err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE; 12294690a611SNipun Gupta } 1230ef18dafeSHemant Agrawal err_cfg.set_frame_annotation = true; 1231ef18dafeSHemant Agrawal 1232ef18dafeSHemant Agrawal ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW, 1233ef18dafeSHemant Agrawal priv->token, &err_cfg); 1234ef18dafeSHemant Agrawal if (ret) { 1235a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error to dpni_set_errors_behavior: code = %d", 1236a10a988aSShreyansh Jain ret); 1237ef18dafeSHemant Agrawal return ret; 1238ef18dafeSHemant Agrawal } 1239ef18dafeSHemant Agrawal 1240c5acbb5eSHemant Agrawal /* if the interrupts were configured on this device */ 1241d61138d4SHarman Kalra if (intr_handle && rte_intr_fd_get(intr_handle) && 1242d61138d4SHarman Kalra dev->data->dev_conf.intr_conf.lsc != 0) { 1243c5acbb5eSHemant Agrawal /* Registering LSC interrupt handler */ 1244c5acbb5eSHemant Agrawal rte_intr_callback_register(intr_handle, 1245c5acbb5eSHemant Agrawal dpaa2_interrupt_handler, 1246c5acbb5eSHemant Agrawal (void *)dev); 1247c5acbb5eSHemant Agrawal 1248c5acbb5eSHemant Agrawal /* enable vfio intr/eventfd mapping 1249c5acbb5eSHemant Agrawal * Interrupt index 0 is required, so we cannot use 1250c5acbb5eSHemant Agrawal * rte_intr_enable. 1251c5acbb5eSHemant Agrawal */ 1252c5acbb5eSHemant Agrawal rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX); 1253c5acbb5eSHemant Agrawal 1254c5acbb5eSHemant Agrawal /* enable dpni_irqs */ 1255c5acbb5eSHemant Agrawal dpaa2_eth_setup_irqs(dev, 1); 1256c5acbb5eSHemant Agrawal } 1257c5acbb5eSHemant Agrawal 125816c4a3c4SNipun Gupta /* Change the tx burst function if ordered queues are used */ 125916c4a3c4SNipun Gupta if (priv->en_ordered) 126016c4a3c4SNipun Gupta dev->tx_pkt_burst = dpaa2_dev_tx_ordered; 126116c4a3c4SNipun Gupta 1262f4909c42SJie Hai for (i = 0; i < dev->data->nb_rx_queues; i++) 1263f4909c42SJie Hai dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; 1264f4909c42SJie Hai for (i = 0; i < dev->data->nb_tx_queues; i++) 1265f4909c42SJie Hai dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; 1266f4909c42SJie Hai 12673e5a335dSHemant Agrawal return 0; 12683e5a335dSHemant Agrawal } 12693e5a335dSHemant Agrawal 12703e5a335dSHemant Agrawal /** 12713e5a335dSHemant Agrawal * This routine disables all traffic on the adapter by issuing a 12723e5a335dSHemant Agrawal * global reset on the MAC.
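 *
 * In this PMD that amounts to tearing down the LSC interrupt handling (if
 * it was enabled), bringing the link down, disabling the DPNI object,
 * clearing the recorded link status and marking all Rx/Tx queues stopped.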
12733e5a335dSHemant Agrawal */ 127462024eb8SIvan Ilchenko static int 12753e5a335dSHemant Agrawal dpaa2_dev_stop(struct rte_eth_dev *dev) 12763e5a335dSHemant Agrawal { 12773e5a335dSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 127881c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 12793e5a335dSHemant Agrawal int ret; 1280c56c86ffSHemant Agrawal struct rte_eth_link link; 1281d192fd32SVanshika Shukla struct rte_device *rdev = dev->device; 1282d192fd32SVanshika Shukla struct rte_intr_handle *intr_handle; 1283d192fd32SVanshika Shukla struct rte_dpaa2_device *dpaa2_dev; 1284f4909c42SJie Hai uint16_t i; 1285d192fd32SVanshika Shukla 1286d192fd32SVanshika Shukla dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device); 1287d192fd32SVanshika Shukla intr_handle = dpaa2_dev->intr_handle; 12883e5a335dSHemant Agrawal 12893e5a335dSHemant Agrawal PMD_INIT_FUNC_TRACE(); 12903e5a335dSHemant Agrawal 1291c5acbb5eSHemant Agrawal /* reset interrupt callback */ 1292d61138d4SHarman Kalra if (intr_handle && rte_intr_fd_get(intr_handle) && 1293d61138d4SHarman Kalra dev->data->dev_conf.intr_conf.lsc != 0) { 1294c5acbb5eSHemant Agrawal /*disable dpni irqs */ 1295c5acbb5eSHemant Agrawal dpaa2_eth_setup_irqs(dev, 0); 1296c5acbb5eSHemant Agrawal 1297c5acbb5eSHemant Agrawal /* disable vfio intr before callback unregister */ 1298c5acbb5eSHemant Agrawal rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX); 1299c5acbb5eSHemant Agrawal 1300c5acbb5eSHemant Agrawal /* Unregistering LSC interrupt handler */ 1301c5acbb5eSHemant Agrawal rte_intr_callback_unregister(intr_handle, 1302c5acbb5eSHemant Agrawal dpaa2_interrupt_handler, 1303c5acbb5eSHemant Agrawal (void *)dev); 1304c5acbb5eSHemant Agrawal } 1305c5acbb5eSHemant Agrawal 1306a1f3a12cSHemant Agrawal dpaa2_dev_set_link_down(dev); 1307a1f3a12cSHemant Agrawal 13083e5a335dSHemant Agrawal ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token); 13093e5a335dSHemant Agrawal if (ret) { 1310a10a988aSShreyansh Jain DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev", 13113e5a335dSHemant Agrawal ret, priv->hw_id); 131262024eb8SIvan Ilchenko return ret; 13133e5a335dSHemant Agrawal } 1314c56c86ffSHemant Agrawal 1315c56c86ffSHemant Agrawal /* clear the recorded link status */ 1316c56c86ffSHemant Agrawal memset(&link, 0, sizeof(link)); 13177e2eb5f0SStephen Hemminger rte_eth_linkstatus_set(dev, &link); 131862024eb8SIvan Ilchenko 1319f4909c42SJie Hai for (i = 0; i < dev->data->nb_rx_queues; i++) 1320f4909c42SJie Hai dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; 1321f4909c42SJie Hai for (i = 0; i < dev->data->nb_tx_queues; i++) 1322f4909c42SJie Hai dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; 1323f4909c42SJie Hai 132462024eb8SIvan Ilchenko return 0; 13253e5a335dSHemant Agrawal } 13263e5a335dSHemant Agrawal 1327b142387bSThomas Monjalon static int 13283e5a335dSHemant Agrawal dpaa2_dev_close(struct rte_eth_dev *dev) 13293e5a335dSHemant Agrawal { 13303e5a335dSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 133181c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 13325964d36aSSachin Saxena int i, ret; 1333a1f3a12cSHemant Agrawal struct rte_eth_link link; 13343e5a335dSHemant Agrawal 13353e5a335dSHemant Agrawal PMD_INIT_FUNC_TRACE(); 13363e5a335dSHemant Agrawal 13375964d36aSSachin Saxena if (rte_eal_process_type() != RTE_PROC_PRIMARY) 13385964d36aSSachin Saxena return 0; 13396a556bd6SHemant Agrawal 13405964d36aSSachin Saxena if (!dpni) { 
13415964d36aSSachin Saxena DPAA2_PMD_WARN("Already closed or not started"); 13425964d36aSSachin Saxena return -1; 13435964d36aSSachin Saxena } 13445964d36aSSachin Saxena 1345ac624068SGagandeep Singh dpaa2_tm_deinit(dev); 13465964d36aSSachin Saxena dpaa2_flow_clean(dev); 13473e5a335dSHemant Agrawal /* Clean the device first */ 13483e5a335dSHemant Agrawal ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token); 13493e5a335dSHemant Agrawal if (ret) { 1350a10a988aSShreyansh Jain DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret); 1351b142387bSThomas Monjalon return -1; 13523e5a335dSHemant Agrawal } 1353a1f3a12cSHemant Agrawal 1354a1f3a12cSHemant Agrawal memset(&link, 0, sizeof(link)); 13557e2eb5f0SStephen Hemminger rte_eth_linkstatus_set(dev, &link); 1356b142387bSThomas Monjalon 13575964d36aSSachin Saxena /* Free private queues memory */ 13585964d36aSSachin Saxena dpaa2_free_rx_tx_queues(dev); 13595964d36aSSachin Saxena /* Close the device at underlying layer*/ 13605964d36aSSachin Saxena ret = dpni_close(dpni, CMD_PRI_LOW, priv->token); 13615964d36aSSachin Saxena if (ret) { 13625964d36aSSachin Saxena DPAA2_PMD_ERR("Failure closing dpni device with err code %d", 13635964d36aSSachin Saxena ret); 13645964d36aSSachin Saxena } 13655964d36aSSachin Saxena 13665964d36aSSachin Saxena /* Free the allocated memory for ethernet private data and dpni*/ 13675964d36aSSachin Saxena priv->hw = NULL; 13685964d36aSSachin Saxena dev->process_private = NULL; 13695964d36aSSachin Saxena rte_free(dpni); 13705964d36aSSachin Saxena 13715964d36aSSachin Saxena for (i = 0; i < MAX_TCS; i++) 13725964d36aSSachin Saxena rte_free((void *)(size_t)priv->extract.tc_extract_param[i]); 13735964d36aSSachin Saxena 13745964d36aSSachin Saxena if (priv->extract.qos_extract_param) 13755964d36aSSachin Saxena rte_free((void *)(size_t)priv->extract.qos_extract_param); 13765964d36aSSachin Saxena 13775964d36aSSachin Saxena DPAA2_PMD_INFO("%s: netdev deleted", dev->data->name); 1378b142387bSThomas Monjalon return 0; 13793e5a335dSHemant Agrawal } 13803e5a335dSHemant Agrawal 13819039c812SAndrew Rybchenko static int 1382c0e5c69aSHemant Agrawal dpaa2_dev_promiscuous_enable( 1383c0e5c69aSHemant Agrawal struct rte_eth_dev *dev) 1384c0e5c69aSHemant Agrawal { 1385c0e5c69aSHemant Agrawal int ret; 1386c0e5c69aSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 138781c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1388c0e5c69aSHemant Agrawal 1389c0e5c69aSHemant Agrawal PMD_INIT_FUNC_TRACE(); 1390c0e5c69aSHemant Agrawal 1391c0e5c69aSHemant Agrawal if (dpni == NULL) { 1392a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 13939039c812SAndrew Rybchenko return -ENODEV; 1394c0e5c69aSHemant Agrawal } 1395c0e5c69aSHemant Agrawal 1396c0e5c69aSHemant Agrawal ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); 1397c0e5c69aSHemant Agrawal if (ret < 0) 1398a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret); 13995d5aeeedSHemant Agrawal 14005d5aeeedSHemant Agrawal ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); 14015d5aeeedSHemant Agrawal if (ret < 0) 1402a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret); 14039039c812SAndrew Rybchenko 14049039c812SAndrew Rybchenko return ret; 1405c0e5c69aSHemant Agrawal } 1406c0e5c69aSHemant Agrawal 14079039c812SAndrew Rybchenko static int 1408c0e5c69aSHemant Agrawal dpaa2_dev_promiscuous_disable( 1409c0e5c69aSHemant Agrawal struct rte_eth_dev *dev) 1410c0e5c69aSHemant 
Agrawal { 1411c0e5c69aSHemant Agrawal int ret; 1412c0e5c69aSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 141381c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1414c0e5c69aSHemant Agrawal 1415c0e5c69aSHemant Agrawal PMD_INIT_FUNC_TRACE(); 1416c0e5c69aSHemant Agrawal 1417c0e5c69aSHemant Agrawal if (dpni == NULL) { 1418a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 14199039c812SAndrew Rybchenko return -ENODEV; 1420c0e5c69aSHemant Agrawal } 1421c0e5c69aSHemant Agrawal 1422c0e5c69aSHemant Agrawal ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); 1423c0e5c69aSHemant Agrawal if (ret < 0) 1424a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret); 14255d5aeeedSHemant Agrawal 14265d5aeeedSHemant Agrawal if (dev->data->all_multicast == 0) { 14275d5aeeedSHemant Agrawal ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, 14285d5aeeedSHemant Agrawal priv->token, false); 14295d5aeeedSHemant Agrawal if (ret < 0) 1430a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to disable M promisc mode %d", 14315d5aeeedSHemant Agrawal ret); 14325d5aeeedSHemant Agrawal } 14339039c812SAndrew Rybchenko 14349039c812SAndrew Rybchenko return ret; 14355d5aeeedSHemant Agrawal } 14365d5aeeedSHemant Agrawal 1437ca041cd4SIvan Ilchenko static int 14385d5aeeedSHemant Agrawal dpaa2_dev_allmulticast_enable( 14395d5aeeedSHemant Agrawal struct rte_eth_dev *dev) 14405d5aeeedSHemant Agrawal { 14415d5aeeedSHemant Agrawal int ret; 14425d5aeeedSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 144381c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 14445d5aeeedSHemant Agrawal 14455d5aeeedSHemant Agrawal PMD_INIT_FUNC_TRACE(); 14465d5aeeedSHemant Agrawal 14475d5aeeedSHemant Agrawal if (dpni == NULL) { 1448a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 1449ca041cd4SIvan Ilchenko return -ENODEV; 14505d5aeeedSHemant Agrawal } 14515d5aeeedSHemant Agrawal 14525d5aeeedSHemant Agrawal ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); 14535d5aeeedSHemant Agrawal if (ret < 0) 1454a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret); 1455ca041cd4SIvan Ilchenko 1456ca041cd4SIvan Ilchenko return ret; 14575d5aeeedSHemant Agrawal } 14585d5aeeedSHemant Agrawal 1459ca041cd4SIvan Ilchenko static int 14605d5aeeedSHemant Agrawal dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev) 14615d5aeeedSHemant Agrawal { 14625d5aeeedSHemant Agrawal int ret; 14635d5aeeedSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 146481c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 14655d5aeeedSHemant Agrawal 14665d5aeeedSHemant Agrawal PMD_INIT_FUNC_TRACE(); 14675d5aeeedSHemant Agrawal 14685d5aeeedSHemant Agrawal if (dpni == NULL) { 1469a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 1470ca041cd4SIvan Ilchenko return -ENODEV; 14715d5aeeedSHemant Agrawal } 14725d5aeeedSHemant Agrawal 14735d5aeeedSHemant Agrawal /* must remain on for all promiscuous */ 14745d5aeeedSHemant Agrawal if (dev->data->promiscuous == 1) 1475ca041cd4SIvan Ilchenko return 0; 14765d5aeeedSHemant Agrawal 14775d5aeeedSHemant Agrawal ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); 14785d5aeeedSHemant Agrawal if (ret < 0) 1479a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret); 1480ca041cd4SIvan Ilchenko 1481ca041cd4SIvan Ilchenko return ret; 
1482c0e5c69aSHemant Agrawal } 1483e31d4d21SHemant Agrawal 1484e31d4d21SHemant Agrawal static int 1485e31d4d21SHemant Agrawal dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 1486e31d4d21SHemant Agrawal { 1487e31d4d21SHemant Agrawal int ret; 1488e31d4d21SHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 148981c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 149035b2d13fSOlivier Matz uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN 149144ea7355SAshish Jain + VLAN_TAG_SIZE; 1492e31d4d21SHemant Agrawal 1493e31d4d21SHemant Agrawal PMD_INIT_FUNC_TRACE(); 1494e31d4d21SHemant Agrawal 1495e31d4d21SHemant Agrawal if (dpni == NULL) { 1496a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 1497e31d4d21SHemant Agrawal return -EINVAL; 1498e31d4d21SHemant Agrawal } 1499e31d4d21SHemant Agrawal 1500e31d4d21SHemant Agrawal /* Set the Max Rx frame length as 'mtu' + 1501e31d4d21SHemant Agrawal * Maximum Ethernet header length 1502e31d4d21SHemant Agrawal */ 1503e31d4d21SHemant Agrawal ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token, 15046f8be0fbSHemant Agrawal frame_size - RTE_ETHER_CRC_LEN); 1505e31d4d21SHemant Agrawal if (ret) { 1506a10a988aSShreyansh Jain DPAA2_PMD_ERR("Setting the max frame length failed"); 1507e31d4d21SHemant Agrawal return -1; 1508e31d4d21SHemant Agrawal } 1509a10a988aSShreyansh Jain DPAA2_PMD_INFO("MTU configured for the device: %d", mtu); 1510e31d4d21SHemant Agrawal return 0; 1511e31d4d21SHemant Agrawal } 1512e31d4d21SHemant Agrawal 1513b4d97b7dSHemant Agrawal static int 1514b4d97b7dSHemant Agrawal dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev, 15156d13ea8eSOlivier Matz struct rte_ether_addr *addr, 1516b4d97b7dSHemant Agrawal __rte_unused uint32_t index, 1517b4d97b7dSHemant Agrawal __rte_unused uint32_t pool) 1518b4d97b7dSHemant Agrawal { 1519b4d97b7dSHemant Agrawal int ret; 1520b4d97b7dSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 152181c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1522b4d97b7dSHemant Agrawal 1523b4d97b7dSHemant Agrawal PMD_INIT_FUNC_TRACE(); 1524b4d97b7dSHemant Agrawal 1525b4d97b7dSHemant Agrawal if (dpni == NULL) { 1526a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 1527b4d97b7dSHemant Agrawal return -1; 1528b4d97b7dSHemant Agrawal } 1529b4d97b7dSHemant Agrawal 153096f7bfe8SSachin Saxena ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, priv->token, 153196f7bfe8SSachin Saxena addr->addr_bytes, 0, 0, 0); 1532b4d97b7dSHemant Agrawal if (ret) 1533a10a988aSShreyansh Jain DPAA2_PMD_ERR( 1534a10a988aSShreyansh Jain "error: Adding the MAC ADDR failed: err = %d", ret); 1535b4d97b7dSHemant Agrawal return 0; 1536b4d97b7dSHemant Agrawal } 1537b4d97b7dSHemant Agrawal 1538b4d97b7dSHemant Agrawal static void 1539b4d97b7dSHemant Agrawal dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev, 1540b4d97b7dSHemant Agrawal uint32_t index) 1541b4d97b7dSHemant Agrawal { 1542b4d97b7dSHemant Agrawal int ret; 1543b4d97b7dSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 154481c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1545b4d97b7dSHemant Agrawal struct rte_eth_dev_data *data = dev->data; 15466d13ea8eSOlivier Matz struct rte_ether_addr *macaddr; 1547b4d97b7dSHemant Agrawal 1548b4d97b7dSHemant Agrawal PMD_INIT_FUNC_TRACE(); 1549b4d97b7dSHemant Agrawal 1550b4d97b7dSHemant Agrawal macaddr = &data->mac_addrs[index]; 1551b4d97b7dSHemant Agrawal 1552b4d97b7dSHemant 
Agrawal if (dpni == NULL) { 1553a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 1554b4d97b7dSHemant Agrawal return; 1555b4d97b7dSHemant Agrawal } 1556b4d97b7dSHemant Agrawal 1557b4d97b7dSHemant Agrawal ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW, 1558b4d97b7dSHemant Agrawal priv->token, macaddr->addr_bytes); 1559b4d97b7dSHemant Agrawal if (ret) 1560a10a988aSShreyansh Jain DPAA2_PMD_ERR( 1561a10a988aSShreyansh Jain "error: Removing the MAC ADDR failed: err = %d", ret); 1562b4d97b7dSHemant Agrawal } 1563b4d97b7dSHemant Agrawal 1564caccf8b3SOlivier Matz static int 1565b4d97b7dSHemant Agrawal dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev, 15666d13ea8eSOlivier Matz struct rte_ether_addr *addr) 1567b4d97b7dSHemant Agrawal { 1568b4d97b7dSHemant Agrawal int ret; 1569b4d97b7dSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 157081c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1571b4d97b7dSHemant Agrawal 1572b4d97b7dSHemant Agrawal PMD_INIT_FUNC_TRACE(); 1573b4d97b7dSHemant Agrawal 1574b4d97b7dSHemant Agrawal if (dpni == NULL) { 1575a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 1576caccf8b3SOlivier Matz return -EINVAL; 1577b4d97b7dSHemant Agrawal } 1578b4d97b7dSHemant Agrawal 1579b4d97b7dSHemant Agrawal ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW, 1580b4d97b7dSHemant Agrawal priv->token, addr->addr_bytes); 1581b4d97b7dSHemant Agrawal 1582b4d97b7dSHemant Agrawal if (ret) 1583a10a988aSShreyansh Jain DPAA2_PMD_ERR( 1584a10a988aSShreyansh Jain "error: Setting the MAC ADDR failed %d", ret); 1585caccf8b3SOlivier Matz 1586caccf8b3SOlivier Matz return ret; 1587b4d97b7dSHemant Agrawal } 1588a10a988aSShreyansh Jain 1589b0aa5459SHemant Agrawal static 1590d5b0924bSMatan Azrad int dpaa2_dev_stats_get(struct rte_eth_dev *dev, 1591b0aa5459SHemant Agrawal struct rte_eth_stats *stats) 1592b0aa5459SHemant Agrawal { 1593b0aa5459SHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 159481c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1595b0aa5459SHemant Agrawal int32_t retcode; 1596b0aa5459SHemant Agrawal uint8_t page0 = 0, page1 = 1, page2 = 2; 1597b0aa5459SHemant Agrawal union dpni_statistics value; 1598e43f2521SShreyansh Jain int i; 1599e43f2521SShreyansh Jain struct dpaa2_queue *dpaa2_rxq, *dpaa2_txq; 1600b0aa5459SHemant Agrawal 1601b0aa5459SHemant Agrawal memset(&value, 0, sizeof(union dpni_statistics)); 1602b0aa5459SHemant Agrawal 1603b0aa5459SHemant Agrawal PMD_INIT_FUNC_TRACE(); 1604b0aa5459SHemant Agrawal 1605b0aa5459SHemant Agrawal if (!dpni) { 1606a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 1607d5b0924bSMatan Azrad return -EINVAL; 1608b0aa5459SHemant Agrawal } 1609b0aa5459SHemant Agrawal 1610b0aa5459SHemant Agrawal if (!stats) { 1611a10a988aSShreyansh Jain DPAA2_PMD_ERR("stats is NULL"); 1612d5b0924bSMatan Azrad return -EINVAL; 1613b0aa5459SHemant Agrawal } 1614b0aa5459SHemant Agrawal 1615b0aa5459SHemant Agrawal /*Get Counters from page_0*/ 1616b0aa5459SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 161716bbc98aSShreyansh Jain page0, 0, &value); 1618b0aa5459SHemant Agrawal if (retcode) 1619b0aa5459SHemant Agrawal goto err; 1620b0aa5459SHemant Agrawal 1621b0aa5459SHemant Agrawal stats->ipackets = value.page_0.ingress_all_frames; 1622b0aa5459SHemant Agrawal stats->ibytes = value.page_0.ingress_all_bytes; 1623b0aa5459SHemant Agrawal 1624b0aa5459SHemant Agrawal /*Get Counters from page_1*/ 1625b0aa5459SHemant Agrawal retcode = 
dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 162616bbc98aSShreyansh Jain page1, 0, &value); 1627b0aa5459SHemant Agrawal if (retcode) 1628b0aa5459SHemant Agrawal goto err; 1629b0aa5459SHemant Agrawal 1630b0aa5459SHemant Agrawal stats->opackets = value.page_1.egress_all_frames; 1631b0aa5459SHemant Agrawal stats->obytes = value.page_1.egress_all_bytes; 1632b0aa5459SHemant Agrawal 1633b0aa5459SHemant Agrawal /*Get Counters from page_2*/ 1634b0aa5459SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 163516bbc98aSShreyansh Jain page2, 0, &value); 1636b0aa5459SHemant Agrawal if (retcode) 1637b0aa5459SHemant Agrawal goto err; 1638b0aa5459SHemant Agrawal 1639b4d97b7dSHemant Agrawal /* Ingress drop frame count due to configured rules */ 1640b4d97b7dSHemant Agrawal stats->ierrors = value.page_2.ingress_filtered_frames; 1641b4d97b7dSHemant Agrawal /* Ingress drop frame count due to error */ 1642b4d97b7dSHemant Agrawal stats->ierrors += value.page_2.ingress_discarded_frames; 1643b4d97b7dSHemant Agrawal 1644b0aa5459SHemant Agrawal stats->oerrors = value.page_2.egress_discarded_frames; 1645b0aa5459SHemant Agrawal stats->imissed = value.page_2.ingress_nobuffer_discards; 1646b0aa5459SHemant Agrawal 1647e43f2521SShreyansh Jain /* Fill in per queue stats */ 1648e43f2521SShreyansh Jain for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) && 1649e43f2521SShreyansh Jain (i < priv->nb_rx_queues || i < priv->nb_tx_queues); ++i) { 1650e43f2521SShreyansh Jain dpaa2_rxq = (struct dpaa2_queue *)priv->rx_vq[i]; 1651e43f2521SShreyansh Jain dpaa2_txq = (struct dpaa2_queue *)priv->tx_vq[i]; 1652e43f2521SShreyansh Jain if (dpaa2_rxq) 1653e43f2521SShreyansh Jain stats->q_ipackets[i] = dpaa2_rxq->rx_pkts; 1654e43f2521SShreyansh Jain if (dpaa2_txq) 1655e43f2521SShreyansh Jain stats->q_opackets[i] = dpaa2_txq->tx_pkts; 1656e43f2521SShreyansh Jain 1657e43f2521SShreyansh Jain /* Byte counting is not implemented */ 1658e43f2521SShreyansh Jain stats->q_ibytes[i] = 0; 1659e43f2521SShreyansh Jain stats->q_obytes[i] = 0; 1660e43f2521SShreyansh Jain } 1661e43f2521SShreyansh Jain 1662d5b0924bSMatan Azrad return 0; 1663b0aa5459SHemant Agrawal 1664b0aa5459SHemant Agrawal err: 1665a10a988aSShreyansh Jain DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode); 1666d5b0924bSMatan Azrad return retcode; 1667b0aa5459SHemant Agrawal }; 1668b0aa5459SHemant Agrawal 16691d6329b2SHemant Agrawal static int 16701d6329b2SHemant Agrawal dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 16711d6329b2SHemant Agrawal unsigned int n) 16721d6329b2SHemant Agrawal { 16731d6329b2SHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 167481c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 16751d6329b2SHemant Agrawal int32_t retcode; 1676c720c5f6SHemant Agrawal union dpni_statistics value[5] = {}; 16771d6329b2SHemant Agrawal unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings); 16781d6329b2SHemant Agrawal 16791d6329b2SHemant Agrawal if (n < num) 16801d6329b2SHemant Agrawal return num; 16811d6329b2SHemant Agrawal 1682876b2c90SHemant Agrawal if (xstats == NULL) 1683876b2c90SHemant Agrawal return 0; 1684876b2c90SHemant Agrawal 16851d6329b2SHemant Agrawal /* Get Counters from page_0*/ 16861d6329b2SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 16871d6329b2SHemant Agrawal 0, 0, &value[0]); 16881d6329b2SHemant Agrawal if (retcode) 16891d6329b2SHemant Agrawal goto err; 16901d6329b2SHemant Agrawal 16911d6329b2SHemant 
Agrawal /* Get Counters from page_1*/ 16921d6329b2SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 16931d6329b2SHemant Agrawal 1, 0, &value[1]); 16941d6329b2SHemant Agrawal if (retcode) 16951d6329b2SHemant Agrawal goto err; 16961d6329b2SHemant Agrawal 16971d6329b2SHemant Agrawal /* Get Counters from page_2*/ 16981d6329b2SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 16991d6329b2SHemant Agrawal 2, 0, &value[2]); 17001d6329b2SHemant Agrawal if (retcode) 17011d6329b2SHemant Agrawal goto err; 17021d6329b2SHemant Agrawal 1703c720c5f6SHemant Agrawal for (i = 0; i < priv->max_cgs; i++) { 1704c720c5f6SHemant Agrawal if (!priv->cgid_in_use[i]) { 1705c720c5f6SHemant Agrawal /* Get Counters from page_4*/ 1706c720c5f6SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, 1707c720c5f6SHemant Agrawal priv->token, 1708c720c5f6SHemant Agrawal 4, 0, &value[4]); 1709c720c5f6SHemant Agrawal if (retcode) 1710c720c5f6SHemant Agrawal goto err; 1711c720c5f6SHemant Agrawal break; 1712c720c5f6SHemant Agrawal } 1713c720c5f6SHemant Agrawal } 1714c720c5f6SHemant Agrawal 17151d6329b2SHemant Agrawal for (i = 0; i < num; i++) { 17161d6329b2SHemant Agrawal xstats[i].id = i; 17171d6329b2SHemant Agrawal xstats[i].value = value[dpaa2_xstats_strings[i].page_id]. 17181d6329b2SHemant Agrawal raw.counter[dpaa2_xstats_strings[i].stats_id]; 17191d6329b2SHemant Agrawal } 17201d6329b2SHemant Agrawal return i; 17211d6329b2SHemant Agrawal err: 1722a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode); 17231d6329b2SHemant Agrawal return retcode; 17241d6329b2SHemant Agrawal } 17251d6329b2SHemant Agrawal 17261d6329b2SHemant Agrawal static int 17271d6329b2SHemant Agrawal dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 17281d6329b2SHemant Agrawal struct rte_eth_xstat_name *xstats_names, 1729876b2c90SHemant Agrawal unsigned int limit) 17301d6329b2SHemant Agrawal { 17311d6329b2SHemant Agrawal unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); 17321d6329b2SHemant Agrawal 1733876b2c90SHemant Agrawal if (limit < stat_cnt) 1734876b2c90SHemant Agrawal return stat_cnt; 1735876b2c90SHemant Agrawal 17361d6329b2SHemant Agrawal if (xstats_names != NULL) 17371d6329b2SHemant Agrawal for (i = 0; i < stat_cnt; i++) 1738f9acaf84SBruce Richardson strlcpy(xstats_names[i].name, 1739f9acaf84SBruce Richardson dpaa2_xstats_strings[i].name, 1740f9acaf84SBruce Richardson sizeof(xstats_names[i].name)); 17411d6329b2SHemant Agrawal 17421d6329b2SHemant Agrawal return stat_cnt; 17431d6329b2SHemant Agrawal } 17441d6329b2SHemant Agrawal 17451d6329b2SHemant Agrawal static int 17461d6329b2SHemant Agrawal dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 17471d6329b2SHemant Agrawal uint64_t *values, unsigned int n) 17481d6329b2SHemant Agrawal { 17491d6329b2SHemant Agrawal unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); 17501d6329b2SHemant Agrawal uint64_t values_copy[stat_cnt]; 17511d6329b2SHemant Agrawal 17521d6329b2SHemant Agrawal if (!ids) { 17531d6329b2SHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 175481c42c84SShreyansh Jain struct fsl_mc_io *dpni = 175581c42c84SShreyansh Jain (struct fsl_mc_io *)dev->process_private; 17561d6329b2SHemant Agrawal int32_t retcode; 1757c720c5f6SHemant Agrawal union dpni_statistics value[5] = {}; 17581d6329b2SHemant Agrawal 17591d6329b2SHemant Agrawal if (n < stat_cnt) 17601d6329b2SHemant Agrawal return stat_cnt; 17611d6329b2SHemant Agrawal 17621d6329b2SHemant 
Agrawal if (!values) 17631d6329b2SHemant Agrawal return 0; 17641d6329b2SHemant Agrawal 17651d6329b2SHemant Agrawal /* Get Counters from page_0*/ 17661d6329b2SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 17671d6329b2SHemant Agrawal 0, 0, &value[0]); 17681d6329b2SHemant Agrawal if (retcode) 17691d6329b2SHemant Agrawal return 0; 17701d6329b2SHemant Agrawal 17711d6329b2SHemant Agrawal /* Get Counters from page_1*/ 17721d6329b2SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 17731d6329b2SHemant Agrawal 1, 0, &value[1]); 17741d6329b2SHemant Agrawal if (retcode) 17751d6329b2SHemant Agrawal return 0; 17761d6329b2SHemant Agrawal 17771d6329b2SHemant Agrawal /* Get Counters from page_2*/ 17781d6329b2SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 17791d6329b2SHemant Agrawal 2, 0, &value[2]); 17801d6329b2SHemant Agrawal if (retcode) 17811d6329b2SHemant Agrawal return 0; 17821d6329b2SHemant Agrawal 1783c720c5f6SHemant Agrawal /* Get Counters from page_4*/ 1784c720c5f6SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1785c720c5f6SHemant Agrawal 4, 0, &value[4]); 1786c720c5f6SHemant Agrawal if (retcode) 1787c720c5f6SHemant Agrawal return 0; 1788c720c5f6SHemant Agrawal 17891d6329b2SHemant Agrawal for (i = 0; i < stat_cnt; i++) { 17901d6329b2SHemant Agrawal values[i] = value[dpaa2_xstats_strings[i].page_id]. 17911d6329b2SHemant Agrawal raw.counter[dpaa2_xstats_strings[i].stats_id]; 17921d6329b2SHemant Agrawal } 17931d6329b2SHemant Agrawal return stat_cnt; 17941d6329b2SHemant Agrawal } 17951d6329b2SHemant Agrawal 17961d6329b2SHemant Agrawal dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt); 17971d6329b2SHemant Agrawal 17981d6329b2SHemant Agrawal for (i = 0; i < n; i++) { 17991d6329b2SHemant Agrawal if (ids[i] >= stat_cnt) { 1800a10a988aSShreyansh Jain DPAA2_PMD_ERR("xstats id value isn't valid"); 18011d6329b2SHemant Agrawal return -1; 18021d6329b2SHemant Agrawal } 18031d6329b2SHemant Agrawal values[i] = values_copy[ids[i]]; 18041d6329b2SHemant Agrawal } 18051d6329b2SHemant Agrawal return n; 18061d6329b2SHemant Agrawal } 18071d6329b2SHemant Agrawal 18081d6329b2SHemant Agrawal static int 18091d6329b2SHemant Agrawal dpaa2_xstats_get_names_by_id( 18101d6329b2SHemant Agrawal struct rte_eth_dev *dev, 18111d6329b2SHemant Agrawal const uint64_t *ids, 18128c9f976fSAndrew Rybchenko struct rte_eth_xstat_name *xstats_names, 18131d6329b2SHemant Agrawal unsigned int limit) 18141d6329b2SHemant Agrawal { 18151d6329b2SHemant Agrawal unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); 18161d6329b2SHemant Agrawal struct rte_eth_xstat_name xstats_names_copy[stat_cnt]; 18171d6329b2SHemant Agrawal 18181d6329b2SHemant Agrawal if (!ids) 18191d6329b2SHemant Agrawal return dpaa2_xstats_get_names(dev, xstats_names, limit); 18201d6329b2SHemant Agrawal 18211d6329b2SHemant Agrawal dpaa2_xstats_get_names(dev, xstats_names_copy, limit); 18221d6329b2SHemant Agrawal 18231d6329b2SHemant Agrawal for (i = 0; i < limit; i++) { 18241d6329b2SHemant Agrawal if (ids[i] >= stat_cnt) { 1825a10a988aSShreyansh Jain DPAA2_PMD_ERR("xstats id value isn't valid"); 18261d6329b2SHemant Agrawal return -1; 18271d6329b2SHemant Agrawal } 18281d6329b2SHemant Agrawal strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name); 18291d6329b2SHemant Agrawal } 18301d6329b2SHemant Agrawal return limit; 18311d6329b2SHemant Agrawal } 18321d6329b2SHemant Agrawal 18339970a9adSIgor Romanov static int 18341d6329b2SHemant Agrawal 
dpaa2_dev_stats_reset(struct rte_eth_dev *dev) 1835b0aa5459SHemant Agrawal { 1836b0aa5459SHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 183781c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 18389970a9adSIgor Romanov int retcode; 1839e43f2521SShreyansh Jain int i; 1840e43f2521SShreyansh Jain struct dpaa2_queue *dpaa2_q; 1841b0aa5459SHemant Agrawal 1842b0aa5459SHemant Agrawal PMD_INIT_FUNC_TRACE(); 1843b0aa5459SHemant Agrawal 1844b0aa5459SHemant Agrawal if (dpni == NULL) { 1845a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 18469970a9adSIgor Romanov return -EINVAL; 1847b0aa5459SHemant Agrawal } 1848b0aa5459SHemant Agrawal 1849b0aa5459SHemant Agrawal retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token); 1850b0aa5459SHemant Agrawal if (retcode) 1851b0aa5459SHemant Agrawal goto error; 1852b0aa5459SHemant Agrawal 1853e43f2521SShreyansh Jain /* Reset the per queue stats in dpaa2_queue structure */ 1854e43f2521SShreyansh Jain for (i = 0; i < priv->nb_rx_queues; i++) { 1855e43f2521SShreyansh Jain dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; 1856e43f2521SShreyansh Jain if (dpaa2_q) 1857e43f2521SShreyansh Jain dpaa2_q->rx_pkts = 0; 1858e43f2521SShreyansh Jain } 1859e43f2521SShreyansh Jain 1860e43f2521SShreyansh Jain for (i = 0; i < priv->nb_tx_queues; i++) { 1861e43f2521SShreyansh Jain dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; 1862e43f2521SShreyansh Jain if (dpaa2_q) 1863e43f2521SShreyansh Jain dpaa2_q->tx_pkts = 0; 1864e43f2521SShreyansh Jain } 1865e43f2521SShreyansh Jain 18669970a9adSIgor Romanov return 0; 1867b0aa5459SHemant Agrawal 1868b0aa5459SHemant Agrawal error: 1869a10a988aSShreyansh Jain DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode); 18709970a9adSIgor Romanov return retcode; 1871b0aa5459SHemant Agrawal }; 1872b0aa5459SHemant Agrawal 1873c56c86ffSHemant Agrawal /* return 0 means link status changed, -1 means not changed */ 1874c56c86ffSHemant Agrawal static int 1875c56c86ffSHemant Agrawal dpaa2_dev_link_update(struct rte_eth_dev *dev, 1876eadcfd95SRohit Raj int wait_to_complete) 1877c56c86ffSHemant Agrawal { 1878c56c86ffSHemant Agrawal int ret; 1879c56c86ffSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 188081c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 18817e2eb5f0SStephen Hemminger struct rte_eth_link link; 1882c56c86ffSHemant Agrawal struct dpni_link_state state = {0}; 1883eadcfd95SRohit Raj uint8_t count; 1884c56c86ffSHemant Agrawal 1885c56c86ffSHemant Agrawal if (dpni == NULL) { 1886a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 1887c56c86ffSHemant Agrawal return 0; 1888c56c86ffSHemant Agrawal } 1889c56c86ffSHemant Agrawal 1890eadcfd95SRohit Raj for (count = 0; count <= MAX_REPEAT_TIME; count++) { 1891eadcfd95SRohit Raj ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, 1892eadcfd95SRohit Raj &state); 1893c56c86ffSHemant Agrawal if (ret < 0) { 189444e87c27SShreyansh Jain DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret); 1895c56c86ffSHemant Agrawal return -1; 1896c56c86ffSHemant Agrawal } 1897295968d1SFerruh Yigit if (state.up == RTE_ETH_LINK_DOWN && 1898eadcfd95SRohit Raj wait_to_complete) 1899eadcfd95SRohit Raj rte_delay_ms(CHECK_INTERVAL); 1900eadcfd95SRohit Raj else 1901eadcfd95SRohit Raj break; 1902eadcfd95SRohit Raj } 1903c56c86ffSHemant Agrawal 1904c56c86ffSHemant Agrawal memset(&link, 0, sizeof(struct rte_eth_link)); 1905c56c86ffSHemant Agrawal link.link_status = state.up; 
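	/* Assumption: state.rate reported by the MC firmware is in Mbps, the
	 * same unit rte_eth_link.link_speed expects.
	 */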
1906c56c86ffSHemant Agrawal link.link_speed = state.rate; 1907c56c86ffSHemant Agrawal 1908c56c86ffSHemant Agrawal if (state.options & DPNI_LINK_OPT_HALF_DUPLEX) 1909295968d1SFerruh Yigit link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX; 1910c56c86ffSHemant Agrawal else 1911295968d1SFerruh Yigit link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 1912c56c86ffSHemant Agrawal 19137e2eb5f0SStephen Hemminger ret = rte_eth_linkstatus_set(dev, &link); 19147e2eb5f0SStephen Hemminger if (ret == -1) 1915a10a988aSShreyansh Jain DPAA2_PMD_DEBUG("No change in status"); 1916c56c86ffSHemant Agrawal else 1917f665790aSDavid Marchand DPAA2_PMD_INFO("Port %d Link is %s", dev->data->port_id, 19187e2eb5f0SStephen Hemminger link.link_status ? "Up" : "Down"); 19197e2eb5f0SStephen Hemminger 19207e2eb5f0SStephen Hemminger return ret; 1921c56c86ffSHemant Agrawal } 1922c56c86ffSHemant Agrawal 1923a1f3a12cSHemant Agrawal /** 1924a1f3a12cSHemant Agrawal * Toggle the DPNI to enable, if not already enabled. 1925a1f3a12cSHemant Agrawal * This is not strictly PHY up/down - it is more of logical toggling. 1926a1f3a12cSHemant Agrawal */ 1927a1f3a12cSHemant Agrawal static int 1928a1f3a12cSHemant Agrawal dpaa2_dev_set_link_up(struct rte_eth_dev *dev) 1929a1f3a12cSHemant Agrawal { 1930a1f3a12cSHemant Agrawal int ret = -EINVAL; 1931a1f3a12cSHemant Agrawal struct dpaa2_dev_priv *priv; 1932a1f3a12cSHemant Agrawal struct fsl_mc_io *dpni; 1933a1f3a12cSHemant Agrawal int en = 0; 1934aa8c595aSHemant Agrawal struct dpni_link_state state = {0}; 1935a1f3a12cSHemant Agrawal 1936a1f3a12cSHemant Agrawal priv = dev->data->dev_private; 193781c42c84SShreyansh Jain dpni = (struct fsl_mc_io *)dev->process_private; 1938a1f3a12cSHemant Agrawal 1939a1f3a12cSHemant Agrawal if (dpni == NULL) { 1940a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 1941a1f3a12cSHemant Agrawal return ret; 1942a1f3a12cSHemant Agrawal } 1943a1f3a12cSHemant Agrawal 1944a1f3a12cSHemant Agrawal /* Check if DPNI is currently enabled */ 1945a1f3a12cSHemant Agrawal ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en); 1946a1f3a12cSHemant Agrawal if (ret) { 1947a1f3a12cSHemant Agrawal /* Unable to obtain dpni status; Not continuing */ 1948a10a988aSShreyansh Jain DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret); 1949a1f3a12cSHemant Agrawal return -EINVAL; 1950a1f3a12cSHemant Agrawal } 1951a1f3a12cSHemant Agrawal 1952a1f3a12cSHemant Agrawal /* Enable link if not already enabled */ 1953a1f3a12cSHemant Agrawal if (!en) { 1954a1f3a12cSHemant Agrawal ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token); 1955a1f3a12cSHemant Agrawal if (ret) { 1956a10a988aSShreyansh Jain DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret); 1957a1f3a12cSHemant Agrawal return -EINVAL; 1958a1f3a12cSHemant Agrawal } 1959a1f3a12cSHemant Agrawal } 1960aa8c595aSHemant Agrawal ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); 1961aa8c595aSHemant Agrawal if (ret < 0) { 196244e87c27SShreyansh Jain DPAA2_PMD_DEBUG("Unable to get link state (%d)", ret); 1963aa8c595aSHemant Agrawal return -1; 1964aa8c595aSHemant Agrawal } 1965aa8c595aSHemant Agrawal 1966a1f3a12cSHemant Agrawal /* changing tx burst function to start enqueues */ 1967a1f3a12cSHemant Agrawal dev->tx_pkt_burst = dpaa2_dev_tx; 1968aa8c595aSHemant Agrawal dev->data->dev_link.link_status = state.up; 19697e6ecac2SRohit Raj dev->data->dev_link.link_speed = state.rate; 1970a1f3a12cSHemant Agrawal 1971aa8c595aSHemant Agrawal if (state.up) 1972a10a988aSShreyansh Jain DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id); 1973aa8c595aSHemant 
Agrawal else 1974a10a988aSShreyansh Jain DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id); 1975a1f3a12cSHemant Agrawal return ret; 1976a1f3a12cSHemant Agrawal } 1977a1f3a12cSHemant Agrawal 1978a1f3a12cSHemant Agrawal /** 1979a1f3a12cSHemant Agrawal * Toggle the DPNI to disable, if not already disabled. 1980a1f3a12cSHemant Agrawal * This is not strictly PHY up/down - it is more of logical toggling. 1981a1f3a12cSHemant Agrawal */ 1982a1f3a12cSHemant Agrawal static int 1983a1f3a12cSHemant Agrawal dpaa2_dev_set_link_down(struct rte_eth_dev *dev) 1984a1f3a12cSHemant Agrawal { 1985a1f3a12cSHemant Agrawal int ret = -EINVAL; 1986a1f3a12cSHemant Agrawal struct dpaa2_dev_priv *priv; 1987a1f3a12cSHemant Agrawal struct fsl_mc_io *dpni; 1988a1f3a12cSHemant Agrawal int dpni_enabled = 0; 1989a1f3a12cSHemant Agrawal int retries = 10; 1990a1f3a12cSHemant Agrawal 1991a1f3a12cSHemant Agrawal PMD_INIT_FUNC_TRACE(); 1992a1f3a12cSHemant Agrawal 1993a1f3a12cSHemant Agrawal priv = dev->data->dev_private; 199481c42c84SShreyansh Jain dpni = (struct fsl_mc_io *)dev->process_private; 1995a1f3a12cSHemant Agrawal 1996a1f3a12cSHemant Agrawal if (dpni == NULL) { 1997a10a988aSShreyansh Jain DPAA2_PMD_ERR("Device has not yet been configured"); 1998a1f3a12cSHemant Agrawal return ret; 1999a1f3a12cSHemant Agrawal } 2000a1f3a12cSHemant Agrawal 2001a1f3a12cSHemant Agrawal /*changing tx burst function to avoid any more enqueues */ 2002a41f593fSFerruh Yigit dev->tx_pkt_burst = rte_eth_pkt_burst_dummy; 2003a1f3a12cSHemant Agrawal 2004a1f3a12cSHemant Agrawal /* Loop while dpni_disable() attempts to drain the egress FQs 2005a1f3a12cSHemant Agrawal * and confirm them back to us. 2006a1f3a12cSHemant Agrawal */ 2007a1f3a12cSHemant Agrawal do { 2008a1f3a12cSHemant Agrawal ret = dpni_disable(dpni, 0, priv->token); 2009a1f3a12cSHemant Agrawal if (ret) { 2010a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni disable failed (%d)", ret); 2011a1f3a12cSHemant Agrawal return ret; 2012a1f3a12cSHemant Agrawal } 2013a1f3a12cSHemant Agrawal ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled); 2014a1f3a12cSHemant Agrawal if (ret) { 2015a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni enable check failed (%d)", ret); 2016a1f3a12cSHemant Agrawal return ret; 2017a1f3a12cSHemant Agrawal } 2018a1f3a12cSHemant Agrawal if (dpni_enabled) 2019a1f3a12cSHemant Agrawal /* Allow the MC some slack */ 2020a1f3a12cSHemant Agrawal rte_delay_us(100 * 1000); 2021a1f3a12cSHemant Agrawal } while (dpni_enabled && --retries); 2022a1f3a12cSHemant Agrawal 2023a1f3a12cSHemant Agrawal if (!retries) { 2024a10a988aSShreyansh Jain DPAA2_PMD_WARN("Retry count exceeded disabling dpni"); 2025a1f3a12cSHemant Agrawal /* todo- we may have to manually cleanup queues. 
2026a1f3a12cSHemant Agrawal */ 2027a1f3a12cSHemant Agrawal } else { 2028a10a988aSShreyansh Jain DPAA2_PMD_INFO("Port %d Link DOWN successful", 2029a1f3a12cSHemant Agrawal dev->data->port_id); 2030a1f3a12cSHemant Agrawal } 2031a1f3a12cSHemant Agrawal 2032a1f3a12cSHemant Agrawal dev->data->dev_link.link_status = 0; 2033a1f3a12cSHemant Agrawal 2034a1f3a12cSHemant Agrawal return ret; 2035a1f3a12cSHemant Agrawal } 2036a1f3a12cSHemant Agrawal 2037977d0006SHemant Agrawal static int 2038977d0006SHemant Agrawal dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 2039977d0006SHemant Agrawal { 2040977d0006SHemant Agrawal int ret = -EINVAL; 2041977d0006SHemant Agrawal struct dpaa2_dev_priv *priv; 2042977d0006SHemant Agrawal struct fsl_mc_io *dpni; 2043977d0006SHemant Agrawal struct dpni_link_state state = {0}; 2044977d0006SHemant Agrawal 2045977d0006SHemant Agrawal PMD_INIT_FUNC_TRACE(); 2046977d0006SHemant Agrawal 2047977d0006SHemant Agrawal priv = dev->data->dev_private; 204881c42c84SShreyansh Jain dpni = (struct fsl_mc_io *)dev->process_private; 2049977d0006SHemant Agrawal 2050977d0006SHemant Agrawal if (dpni == NULL || fc_conf == NULL) { 2051a10a988aSShreyansh Jain DPAA2_PMD_ERR("device not configured"); 2052977d0006SHemant Agrawal return ret; 2053977d0006SHemant Agrawal } 2054977d0006SHemant Agrawal 2055977d0006SHemant Agrawal ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); 2056977d0006SHemant Agrawal if (ret) { 2057a10a988aSShreyansh Jain DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret); 2058977d0006SHemant Agrawal return ret; 2059977d0006SHemant Agrawal } 2060977d0006SHemant Agrawal 2061977d0006SHemant Agrawal memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf)); 2062977d0006SHemant Agrawal if (state.options & DPNI_LINK_OPT_PAUSE) { 2063977d0006SHemant Agrawal /* DPNI_LINK_OPT_PAUSE set 2064977d0006SHemant Agrawal * if ASYM_PAUSE not set, 2065977d0006SHemant Agrawal * RX Side flow control (handle received Pause frame) 2066977d0006SHemant Agrawal * TX side flow control (send Pause frame) 2067977d0006SHemant Agrawal * if ASYM_PAUSE set, 2068977d0006SHemant Agrawal * RX Side flow control (handle received Pause frame) 2069977d0006SHemant Agrawal * No TX side flow control (send Pause frame disabled) 2070977d0006SHemant Agrawal */ 2071977d0006SHemant Agrawal if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE)) 2072295968d1SFerruh Yigit fc_conf->mode = RTE_ETH_FC_FULL; 2073977d0006SHemant Agrawal else 2074295968d1SFerruh Yigit fc_conf->mode = RTE_ETH_FC_RX_PAUSE; 2075977d0006SHemant Agrawal } else { 2076977d0006SHemant Agrawal /* DPNI_LINK_OPT_PAUSE not set 2077977d0006SHemant Agrawal * if ASYM_PAUSE set, 2078977d0006SHemant Agrawal * TX side flow control (send Pause frame) 2079977d0006SHemant Agrawal * No RX side flow control (No action on pause frame rx) 2080977d0006SHemant Agrawal * if ASYM_PAUSE not set, 2081977d0006SHemant Agrawal * Flow control disabled 2082977d0006SHemant Agrawal */ 2083977d0006SHemant Agrawal if (state.options & DPNI_LINK_OPT_ASYM_PAUSE) 2084295968d1SFerruh Yigit fc_conf->mode = RTE_ETH_FC_TX_PAUSE; 2085977d0006SHemant Agrawal else 2086295968d1SFerruh Yigit fc_conf->mode = RTE_ETH_FC_NONE; 2087977d0006SHemant Agrawal } 2088977d0006SHemant Agrawal 2089977d0006SHemant Agrawal return ret; 2090977d0006SHemant Agrawal } 2091977d0006SHemant Agrawal 2092977d0006SHemant Agrawal static int 2093977d0006SHemant Agrawal dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 2094977d0006SHemant Agrawal { 2095977d0006SHemant 
Agrawal int ret = -EINVAL; 2096977d0006SHemant Agrawal struct dpaa2_dev_priv *priv; 2097977d0006SHemant Agrawal struct fsl_mc_io *dpni; 2098977d0006SHemant Agrawal struct dpni_link_state state = {0}; 2099977d0006SHemant Agrawal struct dpni_link_cfg cfg = {0}; 2100977d0006SHemant Agrawal 2101977d0006SHemant Agrawal PMD_INIT_FUNC_TRACE(); 2102977d0006SHemant Agrawal 2103977d0006SHemant Agrawal priv = dev->data->dev_private; 210481c42c84SShreyansh Jain dpni = (struct fsl_mc_io *)dev->process_private; 2105977d0006SHemant Agrawal 2106977d0006SHemant Agrawal if (dpni == NULL) { 2107a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 2108977d0006SHemant Agrawal return ret; 2109977d0006SHemant Agrawal } 2110977d0006SHemant Agrawal 2111977d0006SHemant Agrawal /* It is necessary to obtain the current state before setting fc_conf 2112977d0006SHemant Agrawal * as MC would return error in case rate, autoneg or duplex values are 2113977d0006SHemant Agrawal * different. 2114977d0006SHemant Agrawal */ 2115977d0006SHemant Agrawal ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); 2116977d0006SHemant Agrawal if (ret) { 2117a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret); 2118977d0006SHemant Agrawal return -1; 2119977d0006SHemant Agrawal } 2120977d0006SHemant Agrawal 2121977d0006SHemant Agrawal /* Disable link before setting configuration */ 2122977d0006SHemant Agrawal dpaa2_dev_set_link_down(dev); 2123977d0006SHemant Agrawal 2124977d0006SHemant Agrawal /* Based on fc_conf, update cfg */ 2125977d0006SHemant Agrawal cfg.rate = state.rate; 2126977d0006SHemant Agrawal cfg.options = state.options; 2127977d0006SHemant Agrawal 2128977d0006SHemant Agrawal /* update cfg with fc_conf */ 2129977d0006SHemant Agrawal switch (fc_conf->mode) { 2130295968d1SFerruh Yigit case RTE_ETH_FC_FULL: 2131977d0006SHemant Agrawal /* Full flow control; 2132977d0006SHemant Agrawal * OPT_PAUSE set, ASYM_PAUSE not set 2133977d0006SHemant Agrawal */ 2134977d0006SHemant Agrawal cfg.options |= DPNI_LINK_OPT_PAUSE; 2135977d0006SHemant Agrawal cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; 2136f090a4c3SHemant Agrawal break; 2137295968d1SFerruh Yigit case RTE_ETH_FC_TX_PAUSE: 2138977d0006SHemant Agrawal /* Enable RX flow control 2139977d0006SHemant Agrawal * OPT_PAUSE not set; 2140977d0006SHemant Agrawal * ASYM_PAUSE set; 2141977d0006SHemant Agrawal */ 2142977d0006SHemant Agrawal cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE; 2143977d0006SHemant Agrawal cfg.options &= ~DPNI_LINK_OPT_PAUSE; 2144977d0006SHemant Agrawal break; 2145295968d1SFerruh Yigit case RTE_ETH_FC_RX_PAUSE: 2146977d0006SHemant Agrawal /* Enable TX Flow control 2147977d0006SHemant Agrawal * OPT_PAUSE set 2148977d0006SHemant Agrawal * ASYM_PAUSE set 2149977d0006SHemant Agrawal */ 2150977d0006SHemant Agrawal cfg.options |= DPNI_LINK_OPT_PAUSE; 2151977d0006SHemant Agrawal cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE; 2152977d0006SHemant Agrawal break; 2153295968d1SFerruh Yigit case RTE_ETH_FC_NONE: 2154977d0006SHemant Agrawal /* Disable Flow control 2155977d0006SHemant Agrawal * OPT_PAUSE not set 2156977d0006SHemant Agrawal * ASYM_PAUSE not set 2157977d0006SHemant Agrawal */ 2158977d0006SHemant Agrawal cfg.options &= ~DPNI_LINK_OPT_PAUSE; 2159977d0006SHemant Agrawal cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; 2160977d0006SHemant Agrawal break; 2161977d0006SHemant Agrawal default: 2162a10a988aSShreyansh Jain DPAA2_PMD_ERR("Incorrect Flow control flag (%d)", 2163977d0006SHemant Agrawal fc_conf->mode); 2164977d0006SHemant Agrawal return -1; 
2165977d0006SHemant Agrawal } 2166977d0006SHemant Agrawal 2167977d0006SHemant Agrawal ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg); 2168977d0006SHemant Agrawal if (ret) 2169a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)", 2170977d0006SHemant Agrawal ret); 2171977d0006SHemant Agrawal 2172977d0006SHemant Agrawal /* Enable link */ 2173977d0006SHemant Agrawal dpaa2_dev_set_link_up(dev); 2174977d0006SHemant Agrawal 2175977d0006SHemant Agrawal return ret; 2176977d0006SHemant Agrawal } 2177977d0006SHemant Agrawal 217863d5c3b0SHemant Agrawal static int 217963d5c3b0SHemant Agrawal dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev, 218063d5c3b0SHemant Agrawal struct rte_eth_rss_conf *rss_conf) 218163d5c3b0SHemant Agrawal { 218263d5c3b0SHemant Agrawal struct rte_eth_dev_data *data = dev->data; 2183271f5aeeSJun Yang struct dpaa2_dev_priv *priv = data->dev_private; 218463d5c3b0SHemant Agrawal struct rte_eth_conf *eth_conf = &data->dev_conf; 2185271f5aeeSJun Yang int ret, tc_index; 218663d5c3b0SHemant Agrawal 218763d5c3b0SHemant Agrawal PMD_INIT_FUNC_TRACE(); 218863d5c3b0SHemant Agrawal 218963d5c3b0SHemant Agrawal if (rss_conf->rss_hf) { 2190271f5aeeSJun Yang for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) { 2191271f5aeeSJun Yang ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf, 2192271f5aeeSJun Yang tc_index); 219363d5c3b0SHemant Agrawal if (ret) { 2194271f5aeeSJun Yang DPAA2_PMD_ERR("Unable to set flow dist on tc%d", 2195271f5aeeSJun Yang tc_index); 219663d5c3b0SHemant Agrawal return ret; 219763d5c3b0SHemant Agrawal } 2198271f5aeeSJun Yang } 219963d5c3b0SHemant Agrawal } else { 2200271f5aeeSJun Yang for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) { 2201271f5aeeSJun Yang ret = dpaa2_remove_flow_dist(dev, tc_index); 220263d5c3b0SHemant Agrawal if (ret) { 2203271f5aeeSJun Yang DPAA2_PMD_ERR( 2204271f5aeeSJun Yang "Unable to remove flow dist on tc%d", 2205271f5aeeSJun Yang tc_index); 220663d5c3b0SHemant Agrawal return ret; 220763d5c3b0SHemant Agrawal } 220863d5c3b0SHemant Agrawal } 2209271f5aeeSJun Yang } 221063d5c3b0SHemant Agrawal eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf; 221163d5c3b0SHemant Agrawal return 0; 221263d5c3b0SHemant Agrawal } 221363d5c3b0SHemant Agrawal 221463d5c3b0SHemant Agrawal static int 221563d5c3b0SHemant Agrawal dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev, 221663d5c3b0SHemant Agrawal struct rte_eth_rss_conf *rss_conf) 221763d5c3b0SHemant Agrawal { 221863d5c3b0SHemant Agrawal struct rte_eth_dev_data *data = dev->data; 221963d5c3b0SHemant Agrawal struct rte_eth_conf *eth_conf = &data->dev_conf; 222063d5c3b0SHemant Agrawal 222163d5c3b0SHemant Agrawal /* dpaa2 does not support rss_key, so length should be 0*/ 222263d5c3b0SHemant Agrawal rss_conf->rss_key_len = 0; 222363d5c3b0SHemant Agrawal rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf; 222463d5c3b0SHemant Agrawal return 0; 222563d5c3b0SHemant Agrawal } 222663d5c3b0SHemant Agrawal 2227b677d4c6SNipun Gupta int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev, 2228b677d4c6SNipun Gupta int eth_rx_queue_id, 22293835cc22SNipun Gupta struct dpaa2_dpcon_dev *dpcon, 2230b677d4c6SNipun Gupta const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) 2231b677d4c6SNipun Gupta { 2232b677d4c6SNipun Gupta struct dpaa2_dev_priv *eth_priv = dev->data->dev_private; 223381c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 2234b677d4c6SNipun Gupta struct dpaa2_queue *dpaa2_ethq = 
eth_priv->rx_vq[eth_rx_queue_id]; 2235b677d4c6SNipun Gupta uint8_t flow_id = dpaa2_ethq->flow_id; 2236b677d4c6SNipun Gupta struct dpni_queue cfg; 22373835cc22SNipun Gupta uint8_t options, priority; 2238b677d4c6SNipun Gupta int ret; 2239b677d4c6SNipun Gupta 2240b677d4c6SNipun Gupta if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL) 2241b677d4c6SNipun Gupta dpaa2_ethq->cb = dpaa2_dev_process_parallel_event; 22422d378863SNipun Gupta else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) 22432d378863SNipun Gupta dpaa2_ethq->cb = dpaa2_dev_process_atomic_event; 224416c4a3c4SNipun Gupta else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED) 224516c4a3c4SNipun Gupta dpaa2_ethq->cb = dpaa2_dev_process_ordered_event; 2246b677d4c6SNipun Gupta else 2247b677d4c6SNipun Gupta return -EINVAL; 2248b677d4c6SNipun Gupta 22493835cc22SNipun Gupta priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / queue_conf->ev.priority) * 22503835cc22SNipun Gupta (dpcon->num_priorities - 1); 22513835cc22SNipun Gupta 2252b677d4c6SNipun Gupta memset(&cfg, 0, sizeof(struct dpni_queue)); 2253b677d4c6SNipun Gupta options = DPNI_QUEUE_OPT_DEST; 2254b677d4c6SNipun Gupta cfg.destination.type = DPNI_DEST_DPCON; 22553835cc22SNipun Gupta cfg.destination.id = dpcon->dpcon_id; 22563835cc22SNipun Gupta cfg.destination.priority = priority; 2257b677d4c6SNipun Gupta 22582d378863SNipun Gupta if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) { 22592d378863SNipun Gupta options |= DPNI_QUEUE_OPT_HOLD_ACTIVE; 22602d378863SNipun Gupta cfg.destination.hold_active = 1; 22612d378863SNipun Gupta } 22622d378863SNipun Gupta 226316c4a3c4SNipun Gupta if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED && 226416c4a3c4SNipun Gupta !eth_priv->en_ordered) { 226516c4a3c4SNipun Gupta struct opr_cfg ocfg; 226616c4a3c4SNipun Gupta 226716c4a3c4SNipun Gupta /* Restoration window size = 256 frames */ 226816c4a3c4SNipun Gupta ocfg.oprrws = 3; 226916c4a3c4SNipun Gupta /* Restoration window size = 512 frames for LX2 */ 227016c4a3c4SNipun Gupta if (dpaa2_svr_family == SVR_LX2160A) 227116c4a3c4SNipun Gupta ocfg.oprrws = 4; 227216c4a3c4SNipun Gupta /* Auto advance NESN window enabled */ 227316c4a3c4SNipun Gupta ocfg.oa = 1; 227416c4a3c4SNipun Gupta /* Late arrival window size disabled */ 227516c4a3c4SNipun Gupta ocfg.olws = 0; 22767be78d02SJosh Soref /* ORL resource exhaustion advance NESN disabled */ 227716c4a3c4SNipun Gupta ocfg.oeane = 0; 227816c4a3c4SNipun Gupta /* Loose ordering enabled */ 227916c4a3c4SNipun Gupta ocfg.oloe = 1; 228016c4a3c4SNipun Gupta eth_priv->en_loose_ordered = 1; 228116c4a3c4SNipun Gupta /* Strict ordering enabled if explicitly set */ 228216c4a3c4SNipun Gupta if (getenv("DPAA2_STRICT_ORDERING_ENABLE")) { 228316c4a3c4SNipun Gupta ocfg.oloe = 0; 228416c4a3c4SNipun Gupta eth_priv->en_loose_ordered = 0; 228516c4a3c4SNipun Gupta } 228616c4a3c4SNipun Gupta 228716c4a3c4SNipun Gupta ret = dpni_set_opr(dpni, CMD_PRI_LOW, eth_priv->token, 228816c4a3c4SNipun Gupta dpaa2_ethq->tc_index, flow_id, 22892cb2abf3SHemant Agrawal OPR_OPT_CREATE, &ocfg, 0); 229016c4a3c4SNipun Gupta if (ret) { 2291f665790aSDavid Marchand DPAA2_PMD_ERR("Error setting opr: ret: %d", ret); 229216c4a3c4SNipun Gupta return ret; 229316c4a3c4SNipun Gupta } 229416c4a3c4SNipun Gupta 229516c4a3c4SNipun Gupta eth_priv->en_ordered = 1; 229616c4a3c4SNipun Gupta } 229716c4a3c4SNipun Gupta 2298b677d4c6SNipun Gupta options |= DPNI_QUEUE_OPT_USER_CTX; 22995ae1edffSHemant Agrawal cfg.user_context = (size_t)(dpaa2_ethq); 2300b677d4c6SNipun Gupta 2301b677d4c6SNipun Gupta ret = 
dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX, 2302b677d4c6SNipun Gupta dpaa2_ethq->tc_index, flow_id, options, &cfg); 2303b677d4c6SNipun Gupta if (ret) { 2304a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret); 2305b677d4c6SNipun Gupta return ret; 2306b677d4c6SNipun Gupta } 2307b677d4c6SNipun Gupta 2308b677d4c6SNipun Gupta memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event)); 2309b677d4c6SNipun Gupta 2310b677d4c6SNipun Gupta return 0; 2311b677d4c6SNipun Gupta } 2312b677d4c6SNipun Gupta 2313b677d4c6SNipun Gupta int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev, 2314b677d4c6SNipun Gupta int eth_rx_queue_id) 2315b677d4c6SNipun Gupta { 2316b677d4c6SNipun Gupta struct dpaa2_dev_priv *eth_priv = dev->data->dev_private; 231781c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 2318b677d4c6SNipun Gupta struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id]; 2319b677d4c6SNipun Gupta uint8_t flow_id = dpaa2_ethq->flow_id; 2320b677d4c6SNipun Gupta struct dpni_queue cfg; 2321b677d4c6SNipun Gupta uint8_t options; 2322b677d4c6SNipun Gupta int ret; 2323b677d4c6SNipun Gupta 2324b677d4c6SNipun Gupta memset(&cfg, 0, sizeof(struct dpni_queue)); 2325b677d4c6SNipun Gupta options = DPNI_QUEUE_OPT_DEST; 2326b677d4c6SNipun Gupta cfg.destination.type = DPNI_DEST_NONE; 2327b677d4c6SNipun Gupta 2328b677d4c6SNipun Gupta ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX, 2329b677d4c6SNipun Gupta dpaa2_ethq->tc_index, flow_id, options, &cfg); 2330b677d4c6SNipun Gupta if (ret) 2331a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret); 2332b677d4c6SNipun Gupta 2333b677d4c6SNipun Gupta return ret; 2334b677d4c6SNipun Gupta } 2335b677d4c6SNipun Gupta 2336fe2b986aSSunil Kumar Kori static int 2337fb7ad441SThomas Monjalon dpaa2_dev_flow_ops_get(struct rte_eth_dev *dev, 2338fb7ad441SThomas Monjalon const struct rte_flow_ops **ops) 2339fe2b986aSSunil Kumar Kori { 2340fe2b986aSSunil Kumar Kori if (!dev) 2341fe2b986aSSunil Kumar Kori return -ENODEV; 2342fe2b986aSSunil Kumar Kori 2343fb7ad441SThomas Monjalon *ops = &dpaa2_flow_ops; 2344fb7ad441SThomas Monjalon return 0; 2345fe2b986aSSunil Kumar Kori } 2346fe2b986aSSunil Kumar Kori 2347de1d70f0SHemant Agrawal static void 2348de1d70f0SHemant Agrawal dpaa2_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 2349de1d70f0SHemant Agrawal struct rte_eth_rxq_info *qinfo) 2350de1d70f0SHemant Agrawal { 2351de1d70f0SHemant Agrawal struct dpaa2_queue *rxq; 2352731fa400SHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 2353731fa400SHemant Agrawal struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 2354731fa400SHemant Agrawal uint16_t max_frame_length; 2355de1d70f0SHemant Agrawal 2356de1d70f0SHemant Agrawal rxq = (struct dpaa2_queue *)dev->data->rx_queues[queue_id]; 2357de1d70f0SHemant Agrawal 2358de1d70f0SHemant Agrawal qinfo->mp = rxq->mb_pool; 2359de1d70f0SHemant Agrawal qinfo->scattered_rx = dev->data->scattered_rx; 2360de1d70f0SHemant Agrawal qinfo->nb_desc = rxq->nb_desc; 2361731fa400SHemant Agrawal if (dpni_get_max_frame_length(dpni, CMD_PRI_LOW, priv->token, 2362731fa400SHemant Agrawal &max_frame_length) == 0) 2363731fa400SHemant Agrawal qinfo->rx_buf_size = max_frame_length; 2364de1d70f0SHemant Agrawal 2365de1d70f0SHemant Agrawal qinfo->conf.rx_free_thresh = 1; 2366de1d70f0SHemant Agrawal qinfo->conf.rx_drop_en = 1; 2367de1d70f0SHemant Agrawal qinfo->conf.rx_deferred_start = 0; 
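	/* Report the offload flags recorded for this Rx queue. */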
2368de1d70f0SHemant Agrawal qinfo->conf.offloads = rxq->offloads; 2369de1d70f0SHemant Agrawal } 2370de1d70f0SHemant Agrawal 2371de1d70f0SHemant Agrawal static void 2372de1d70f0SHemant Agrawal dpaa2_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 2373de1d70f0SHemant Agrawal struct rte_eth_txq_info *qinfo) 2374de1d70f0SHemant Agrawal { 2375de1d70f0SHemant Agrawal struct dpaa2_queue *txq; 2376de1d70f0SHemant Agrawal 2377de1d70f0SHemant Agrawal txq = dev->data->tx_queues[queue_id]; 2378de1d70f0SHemant Agrawal 2379de1d70f0SHemant Agrawal qinfo->nb_desc = txq->nb_desc; 2380de1d70f0SHemant Agrawal qinfo->conf.tx_thresh.pthresh = 0; 2381de1d70f0SHemant Agrawal qinfo->conf.tx_thresh.hthresh = 0; 2382de1d70f0SHemant Agrawal qinfo->conf.tx_thresh.wthresh = 0; 2383de1d70f0SHemant Agrawal 2384de1d70f0SHemant Agrawal qinfo->conf.tx_free_thresh = 0; 2385de1d70f0SHemant Agrawal qinfo->conf.tx_rs_thresh = 0; 2386de1d70f0SHemant Agrawal qinfo->conf.offloads = txq->offloads; 2387de1d70f0SHemant Agrawal qinfo->conf.tx_deferred_start = 0; 2388de1d70f0SHemant Agrawal } 2389de1d70f0SHemant Agrawal 2390ac624068SGagandeep Singh static int 2391ac624068SGagandeep Singh dpaa2_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops) 2392ac624068SGagandeep Singh { 2393ac624068SGagandeep Singh *(const void **)ops = &dpaa2_tm_ops; 2394ac624068SGagandeep Singh 2395ac624068SGagandeep Singh return 0; 2396ac624068SGagandeep Singh } 2397ac624068SGagandeep Singh 2398a5b375edSNipun Gupta void 2399a5b375edSNipun Gupta rte_pmd_dpaa2_thread_init(void) 2400a5b375edSNipun Gupta { 2401a5b375edSNipun Gupta int ret; 2402a5b375edSNipun Gupta 2403a5b375edSNipun Gupta if (unlikely(!DPAA2_PER_LCORE_DPIO)) { 2404a5b375edSNipun Gupta ret = dpaa2_affine_qbman_swp(); 2405a5b375edSNipun Gupta if (ret) { 2406a5b375edSNipun Gupta DPAA2_PMD_ERR( 2407f665790aSDavid Marchand "Failed to allocate IO portal, tid: %d", 2408a5b375edSNipun Gupta rte_gettid()); 2409a5b375edSNipun Gupta return; 2410a5b375edSNipun Gupta } 2411a5b375edSNipun Gupta } 2412a5b375edSNipun Gupta } 2413a5b375edSNipun Gupta 24143e5a335dSHemant Agrawal static struct eth_dev_ops dpaa2_ethdev_ops = { 24153e5a335dSHemant Agrawal .dev_configure = dpaa2_eth_dev_configure, 24163e5a335dSHemant Agrawal .dev_start = dpaa2_dev_start, 24173e5a335dSHemant Agrawal .dev_stop = dpaa2_dev_stop, 24183e5a335dSHemant Agrawal .dev_close = dpaa2_dev_close, 2419c0e5c69aSHemant Agrawal .promiscuous_enable = dpaa2_dev_promiscuous_enable, 2420c0e5c69aSHemant Agrawal .promiscuous_disable = dpaa2_dev_promiscuous_disable, 24215d5aeeedSHemant Agrawal .allmulticast_enable = dpaa2_dev_allmulticast_enable, 24225d5aeeedSHemant Agrawal .allmulticast_disable = dpaa2_dev_allmulticast_disable, 2423a1f3a12cSHemant Agrawal .dev_set_link_up = dpaa2_dev_set_link_up, 2424a1f3a12cSHemant Agrawal .dev_set_link_down = dpaa2_dev_set_link_down, 2425c56c86ffSHemant Agrawal .link_update = dpaa2_dev_link_update, 2426b0aa5459SHemant Agrawal .stats_get = dpaa2_dev_stats_get, 24271d6329b2SHemant Agrawal .xstats_get = dpaa2_dev_xstats_get, 24281d6329b2SHemant Agrawal .xstats_get_by_id = dpaa2_xstats_get_by_id, 24291d6329b2SHemant Agrawal .xstats_get_names_by_id = dpaa2_xstats_get_names_by_id, 24301d6329b2SHemant Agrawal .xstats_get_names = dpaa2_xstats_get_names, 2431b0aa5459SHemant Agrawal .stats_reset = dpaa2_dev_stats_reset, 24321d6329b2SHemant Agrawal .xstats_reset = dpaa2_dev_stats_reset, 2433748eccb9SHemant Agrawal .fw_version_get = dpaa2_fw_version_get, 24343e5a335dSHemant Agrawal .dev_infos_get = dpaa2_dev_info_get, 
2435a5fc38d4SHemant Agrawal .dev_supported_ptypes_get = dpaa2_supported_ptypes_get, 2436e31d4d21SHemant Agrawal .mtu_set = dpaa2_dev_mtu_set, 24373ce294f2SHemant Agrawal .vlan_filter_set = dpaa2_vlan_filter_set, 24383ce294f2SHemant Agrawal .vlan_offload_set = dpaa2_vlan_offload_set, 2439e59b75ffSHemant Agrawal .vlan_tpid_set = dpaa2_vlan_tpid_set, 24403e5a335dSHemant Agrawal .rx_queue_setup = dpaa2_dev_rx_queue_setup, 24413e5a335dSHemant Agrawal .rx_queue_release = dpaa2_dev_rx_queue_release, 24423e5a335dSHemant Agrawal .tx_queue_setup = dpaa2_dev_tx_queue_setup, 2443ddbc2b66SApeksha Gupta .rx_burst_mode_get = dpaa2_dev_rx_burst_mode_get, 2444ddbc2b66SApeksha Gupta .tx_burst_mode_get = dpaa2_dev_tx_burst_mode_get, 2445977d0006SHemant Agrawal .flow_ctrl_get = dpaa2_flow_ctrl_get, 2446977d0006SHemant Agrawal .flow_ctrl_set = dpaa2_flow_ctrl_set, 2447b4d97b7dSHemant Agrawal .mac_addr_add = dpaa2_dev_add_mac_addr, 2448b4d97b7dSHemant Agrawal .mac_addr_remove = dpaa2_dev_remove_mac_addr, 2449b4d97b7dSHemant Agrawal .mac_addr_set = dpaa2_dev_set_mac_addr, 245063d5c3b0SHemant Agrawal .rss_hash_update = dpaa2_dev_rss_hash_update, 245163d5c3b0SHemant Agrawal .rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get, 2452fb7ad441SThomas Monjalon .flow_ops_get = dpaa2_dev_flow_ops_get, 2453de1d70f0SHemant Agrawal .rxq_info_get = dpaa2_rxq_info_get, 2454de1d70f0SHemant Agrawal .txq_info_get = dpaa2_txq_info_get, 2455ac624068SGagandeep Singh .tm_ops_get = dpaa2_tm_ops_get, 2456bc767866SPriyanka Jain #if defined(RTE_LIBRTE_IEEE1588) 2457bc767866SPriyanka Jain .timesync_enable = dpaa2_timesync_enable, 2458bc767866SPriyanka Jain .timesync_disable = dpaa2_timesync_disable, 2459bc767866SPriyanka Jain .timesync_read_time = dpaa2_timesync_read_time, 2460bc767866SPriyanka Jain .timesync_write_time = dpaa2_timesync_write_time, 2461bc767866SPriyanka Jain .timesync_adjust_time = dpaa2_timesync_adjust_time, 2462bc767866SPriyanka Jain .timesync_read_rx_timestamp = dpaa2_timesync_read_rx_timestamp, 2463bc767866SPriyanka Jain .timesync_read_tx_timestamp = dpaa2_timesync_read_tx_timestamp, 2464bc767866SPriyanka Jain #endif 24653e5a335dSHemant Agrawal }; 24663e5a335dSHemant Agrawal 2467c3e0a706SShreyansh Jain /* Populate the mac address from physically available (u-boot/firmware) and/or 2468c3e0a706SShreyansh Jain * one set by higher layers like MC (restool) etc. 
2469c3e0a706SShreyansh Jain * Returns the table of MAC entries (multiple entries) 2470c3e0a706SShreyansh Jain */ 2471c3e0a706SShreyansh Jain static int 2472c3e0a706SShreyansh Jain populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv, 24736d13ea8eSOlivier Matz struct rte_ether_addr *mac_entry) 2474c3e0a706SShreyansh Jain { 2475c3e0a706SShreyansh Jain int ret; 24766d13ea8eSOlivier Matz struct rte_ether_addr phy_mac, prime_mac; 247741c24ea2SShreyansh Jain 24786d13ea8eSOlivier Matz memset(&phy_mac, 0, sizeof(struct rte_ether_addr)); 24796d13ea8eSOlivier Matz memset(&prime_mac, 0, sizeof(struct rte_ether_addr)); 2480c3e0a706SShreyansh Jain 2481c3e0a706SShreyansh Jain /* Get the physical device MAC address */ 2482c3e0a706SShreyansh Jain ret = dpni_get_port_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token, 2483c3e0a706SShreyansh Jain phy_mac.addr_bytes); 2484c3e0a706SShreyansh Jain if (ret) { 2485c3e0a706SShreyansh Jain DPAA2_PMD_ERR("DPNI get physical port MAC failed: %d", ret); 2486c3e0a706SShreyansh Jain goto cleanup; 2487c3e0a706SShreyansh Jain } 2488c3e0a706SShreyansh Jain 2489c3e0a706SShreyansh Jain ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token, 2490c3e0a706SShreyansh Jain prime_mac.addr_bytes); 2491c3e0a706SShreyansh Jain if (ret) { 2492c3e0a706SShreyansh Jain DPAA2_PMD_ERR("DPNI get Prime port MAC failed: %d", ret); 2493c3e0a706SShreyansh Jain goto cleanup; 2494c3e0a706SShreyansh Jain } 2495c3e0a706SShreyansh Jain 2496c3e0a706SShreyansh Jain /* Now that both MAC have been obtained, do: 2497c3e0a706SShreyansh Jain * if not_empty_mac(phy) && phy != Prime, overwrite prime with Phy 2498c3e0a706SShreyansh Jain * and return phy 2499c3e0a706SShreyansh Jain * If empty_mac(phy), return prime. 2500c3e0a706SShreyansh Jain * if both are empty, create random MAC, set as prime and return 2501c3e0a706SShreyansh Jain */ 2502538da7a1SOlivier Matz if (!rte_is_zero_ether_addr(&phy_mac)) { 2503c3e0a706SShreyansh Jain /* If the addresses are not same, overwrite prime */ 2504538da7a1SOlivier Matz if (!rte_is_same_ether_addr(&phy_mac, &prime_mac)) { 2505c3e0a706SShreyansh Jain ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW, 2506c3e0a706SShreyansh Jain priv->token, 2507c3e0a706SShreyansh Jain phy_mac.addr_bytes); 2508c3e0a706SShreyansh Jain if (ret) { 2509c3e0a706SShreyansh Jain DPAA2_PMD_ERR("Unable to set MAC Address: %d", 2510c3e0a706SShreyansh Jain ret); 2511c3e0a706SShreyansh Jain goto cleanup; 2512c3e0a706SShreyansh Jain } 25136d13ea8eSOlivier Matz memcpy(&prime_mac, &phy_mac, 25146d13ea8eSOlivier Matz sizeof(struct rte_ether_addr)); 2515c3e0a706SShreyansh Jain } 2516538da7a1SOlivier Matz } else if (rte_is_zero_ether_addr(&prime_mac)) { 2517c3e0a706SShreyansh Jain /* In case phys and prime, both are zero, create random MAC */ 2518538da7a1SOlivier Matz rte_eth_random_addr(prime_mac.addr_bytes); 2519c3e0a706SShreyansh Jain ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW, 2520c3e0a706SShreyansh Jain priv->token, 2521c3e0a706SShreyansh Jain prime_mac.addr_bytes); 2522c3e0a706SShreyansh Jain if (ret) { 2523c3e0a706SShreyansh Jain DPAA2_PMD_ERR("Unable to set MAC Address: %d", ret); 2524c3e0a706SShreyansh Jain goto cleanup; 2525c3e0a706SShreyansh Jain } 2526c3e0a706SShreyansh Jain } 2527c3e0a706SShreyansh Jain 2528c3e0a706SShreyansh Jain /* prime_mac the final MAC address */ 25296d13ea8eSOlivier Matz memcpy(mac_entry, &prime_mac, sizeof(struct rte_ether_addr)); 2530c3e0a706SShreyansh Jain return 0; 2531c3e0a706SShreyansh Jain 2532c3e0a706SShreyansh Jain cleanup: 
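	/* No resources are held at this point; simply report the failure. */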
2533c3e0a706SShreyansh Jain return -1; 2534c3e0a706SShreyansh Jain } 2535c3e0a706SShreyansh Jain 2536c147eae0SHemant Agrawal static int 2537a3a997f0SHemant Agrawal check_devargs_handler(__rte_unused const char *key, const char *value, 2538a3a997f0SHemant Agrawal __rte_unused void *opaque) 2539a3a997f0SHemant Agrawal { 2540a3a997f0SHemant Agrawal if (strcmp(value, "1")) 2541a3a997f0SHemant Agrawal return -1; 2542a3a997f0SHemant Agrawal 2543a3a997f0SHemant Agrawal return 0; 2544a3a997f0SHemant Agrawal } 2545a3a997f0SHemant Agrawal 2546a3a997f0SHemant Agrawal static int 2547a3a997f0SHemant Agrawal dpaa2_get_devargs(struct rte_devargs *devargs, const char *key) 2548a3a997f0SHemant Agrawal { 2549a3a997f0SHemant Agrawal struct rte_kvargs *kvlist; 2550a3a997f0SHemant Agrawal 2551a3a997f0SHemant Agrawal if (!devargs) 2552a3a997f0SHemant Agrawal return 0; 2553a3a997f0SHemant Agrawal 2554a3a997f0SHemant Agrawal kvlist = rte_kvargs_parse(devargs->args, NULL); 2555a3a997f0SHemant Agrawal if (!kvlist) 2556a3a997f0SHemant Agrawal return 0; 2557a3a997f0SHemant Agrawal 2558a3a997f0SHemant Agrawal if (!rte_kvargs_count(kvlist, key)) { 2559a3a997f0SHemant Agrawal rte_kvargs_free(kvlist); 2560a3a997f0SHemant Agrawal return 0; 2561a3a997f0SHemant Agrawal } 2562a3a997f0SHemant Agrawal 2563a3a997f0SHemant Agrawal if (rte_kvargs_process(kvlist, key, 2564a3a997f0SHemant Agrawal check_devargs_handler, NULL) < 0) { 2565a3a997f0SHemant Agrawal rte_kvargs_free(kvlist); 2566a3a997f0SHemant Agrawal return 0; 2567a3a997f0SHemant Agrawal } 2568a3a997f0SHemant Agrawal rte_kvargs_free(kvlist); 2569a3a997f0SHemant Agrawal 2570a3a997f0SHemant Agrawal return 1; 2571a3a997f0SHemant Agrawal } 2572a3a997f0SHemant Agrawal 2573a3a997f0SHemant Agrawal static int 2574c147eae0SHemant Agrawal dpaa2_dev_init(struct rte_eth_dev *eth_dev) 2575c147eae0SHemant Agrawal { 25763e5a335dSHemant Agrawal struct rte_device *dev = eth_dev->device; 25773e5a335dSHemant Agrawal struct rte_dpaa2_device *dpaa2_dev; 25783e5a335dSHemant Agrawal struct fsl_mc_io *dpni_dev; 25793e5a335dSHemant Agrawal struct dpni_attr attr; 25803e5a335dSHemant Agrawal struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; 2581bee61d86SHemant Agrawal struct dpni_buffer_layout layout; 2582fe2b986aSSunil Kumar Kori int ret, hw_id, i; 25833e5a335dSHemant Agrawal 2584d401ead1SHemant Agrawal PMD_INIT_FUNC_TRACE(); 2585d401ead1SHemant Agrawal 258681c42c84SShreyansh Jain dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0); 258781c42c84SShreyansh Jain if (!dpni_dev) { 258881c42c84SShreyansh Jain DPAA2_PMD_ERR("Memory allocation failed for dpni device"); 258981c42c84SShreyansh Jain return -1; 259081c42c84SShreyansh Jain } 2591a6a5f4b4SHemant Agrawal dpni_dev->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX); 259281c42c84SShreyansh Jain eth_dev->process_private = (void *)dpni_dev; 259381c42c84SShreyansh Jain 2594c147eae0SHemant Agrawal /* For secondary processes, the primary has done all the work */ 2595e7b187dbSShreyansh Jain if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2596e7b187dbSShreyansh Jain /* In case of secondary, only burst and ops API need to be 2597e7b187dbSShreyansh Jain * plugged. 
2598e7b187dbSShreyansh Jain */ 2599e7b187dbSShreyansh Jain eth_dev->dev_ops = &dpaa2_ethdev_ops; 2600cbfc6111SFerruh Yigit eth_dev->rx_queue_count = dpaa2_dev_rx_queue_count; 2601a3a997f0SHemant Agrawal if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) 2602a3a997f0SHemant Agrawal eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx; 260320191ab3SNipun Gupta else if (dpaa2_get_devargs(dev->devargs, 260420191ab3SNipun Gupta DRIVER_NO_PREFETCH_MODE)) 260520191ab3SNipun Gupta eth_dev->rx_pkt_burst = dpaa2_dev_rx; 2606a3a997f0SHemant Agrawal else 2607e7b187dbSShreyansh Jain eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx; 2608e7b187dbSShreyansh Jain eth_dev->tx_pkt_burst = dpaa2_dev_tx; 2609c147eae0SHemant Agrawal return 0; 2610e7b187dbSShreyansh Jain } 2611c147eae0SHemant Agrawal 26123e5a335dSHemant Agrawal dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device); 26133e5a335dSHemant Agrawal 26143e5a335dSHemant Agrawal hw_id = dpaa2_dev->object_id; 26153e5a335dSHemant Agrawal ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token); 26163e5a335dSHemant Agrawal if (ret) { 2617a10a988aSShreyansh Jain DPAA2_PMD_ERR( 2618a10a988aSShreyansh Jain "Failure in opening dpni@%d with err code %d", 2619d4984046SHemant Agrawal hw_id, ret); 2620d4984046SHemant Agrawal rte_free(dpni_dev); 26213e5a335dSHemant Agrawal return -1; 26223e5a335dSHemant Agrawal } 26233e5a335dSHemant Agrawal 2624f023d059SJun Yang if (eth_dev->data->dev_conf.lpbk_mode) 2625f023d059SJun Yang dpaa2_dev_recycle_deconfig(eth_dev); 2626f023d059SJun Yang 26273e5a335dSHemant Agrawal /* Clean the device first */ 26283e5a335dSHemant Agrawal ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token); 26293e5a335dSHemant Agrawal if (ret) { 2630a10a988aSShreyansh Jain DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d", 2631d4984046SHemant Agrawal hw_id, ret); 2632d4984046SHemant Agrawal goto init_err; 26333e5a335dSHemant Agrawal } 26343e5a335dSHemant Agrawal 26353e5a335dSHemant Agrawal ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr); 26363e5a335dSHemant Agrawal if (ret) { 2637a10a988aSShreyansh Jain DPAA2_PMD_ERR( 2638a10a988aSShreyansh Jain "Failure in get dpni@%d attribute, err code %d", 2639d4984046SHemant Agrawal hw_id, ret); 2640d4984046SHemant Agrawal goto init_err; 26413e5a335dSHemant Agrawal } 26423e5a335dSHemant Agrawal 264316bbc98aSShreyansh Jain priv->num_rx_tc = attr.num_rx_tcs; 264472100f0dSGagandeep Singh priv->num_tx_tc = attr.num_tx_tcs; 26454ce58f8aSJun Yang priv->qos_entries = attr.qos_entries; 26464ce58f8aSJun Yang priv->fs_entries = attr.fs_entries; 26474ce58f8aSJun Yang priv->dist_queues = attr.num_queues; 264872100f0dSGagandeep Singh priv->num_channels = attr.num_channels; 264972100f0dSGagandeep Singh priv->channel_inuse = 0; 2650f023d059SJun Yang rte_spinlock_init(&priv->lpbk_qp_lock); 26514ce58f8aSJun Yang 265213b856acSHemant Agrawal /* only if the custom CG is enabled */ 265313b856acSHemant Agrawal if (attr.options & DPNI_OPT_CUSTOM_CG) 265413b856acSHemant Agrawal priv->max_cgs = attr.num_cgs; 265513b856acSHemant Agrawal else 265613b856acSHemant Agrawal priv->max_cgs = 0; 265713b856acSHemant Agrawal 265813b856acSHemant Agrawal for (i = 0; i < priv->max_cgs; i++) 265913b856acSHemant Agrawal priv->cgid_in_use[i] = 0; 266089c2ea8fSHemant Agrawal 2661fe2b986aSSunil Kumar Kori for (i = 0; i < attr.num_rx_tcs; i++) 2662fe2b986aSSunil Kumar Kori priv->nb_rx_queues += attr.num_queues; 266389c2ea8fSHemant Agrawal 266472100f0dSGagandeep Singh priv->nb_tx_queues = attr.num_tx_tcs * attr.num_channels; 
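	/* Net result: priv->nb_rx_queues = num_rx_tcs * num_queues and
	 * priv->nb_tx_queues = num_tx_tcs * num_channels.
	 */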
2665ef18dafeSHemant Agrawal 266613b856acSHemant Agrawal DPAA2_PMD_DEBUG("RX-TC= %d, rx_queues= %d, tx_queues=%d, max_cgs=%d", 2667a10a988aSShreyansh Jain priv->num_rx_tc, priv->nb_rx_queues, 266813b856acSHemant Agrawal priv->nb_tx_queues, priv->max_cgs); 26693e5a335dSHemant Agrawal 26703e5a335dSHemant Agrawal priv->hw = dpni_dev; 26713e5a335dSHemant Agrawal priv->hw_id = hw_id; 267233fad432SHemant Agrawal priv->options = attr.options; 267333fad432SHemant Agrawal priv->max_mac_filters = attr.mac_filter_entries; 267433fad432SHemant Agrawal priv->max_vlan_filters = attr.vlan_filter_entries; 26753e5a335dSHemant Agrawal priv->flags = 0; 2676e806bf87SPriyanka Jain #if defined(RTE_LIBRTE_IEEE1588) 26770fcdbde0SHemant Agrawal DPAA2_PMD_INFO("DPDK IEEE1588 is enabled"); 26788d21c563SHemant Agrawal priv->flags |= DPAA2_TX_CONF_ENABLE; 2679e806bf87SPriyanka Jain #endif 26808d21c563SHemant Agrawal /* Used with ``fslmc:dpni.1,drv_tx_conf=1`` */ 26818d21c563SHemant Agrawal if (dpaa2_get_devargs(dev->devargs, DRIVER_TX_CONF)) { 26828d21c563SHemant Agrawal priv->flags |= DPAA2_TX_CONF_ENABLE; 26838d21c563SHemant Agrawal DPAA2_PMD_INFO("TX_CONF Enabled"); 26848d21c563SHemant Agrawal } 26853e5a335dSHemant Agrawal 26864690a611SNipun Gupta if (dpaa2_get_devargs(dev->devargs, DRIVER_ERROR_QUEUE)) { 26874690a611SNipun Gupta dpaa2_enable_err_queue = 1; 26884690a611SNipun Gupta DPAA2_PMD_INFO("Enable error queue"); 26894690a611SNipun Gupta } 26904690a611SNipun Gupta 26913e5a335dSHemant Agrawal /* Allocate memory for hardware structure for queues */ 26923e5a335dSHemant Agrawal ret = dpaa2_alloc_rx_tx_queues(eth_dev); 26933e5a335dSHemant Agrawal if (ret) { 2694a10a988aSShreyansh Jain DPAA2_PMD_ERR("Queue allocation Failed"); 2695d4984046SHemant Agrawal goto init_err; 26963e5a335dSHemant Agrawal } 26973e5a335dSHemant Agrawal 2698c3e0a706SShreyansh Jain /* Allocate memory for storing MAC addresses. 2699c3e0a706SShreyansh Jain * Table of mac_filter_entries size is allocated so that RTE ether lib 2700c3e0a706SShreyansh Jain * can add MAC entries when rte_eth_dev_mac_addr_add is called. 2701c3e0a706SShreyansh Jain */ 270233fad432SHemant Agrawal eth_dev->data->mac_addrs = rte_zmalloc("dpni", 270335b2d13fSOlivier Matz RTE_ETHER_ADDR_LEN * attr.mac_filter_entries, 0); 270433fad432SHemant Agrawal if (eth_dev->data->mac_addrs == NULL) { 2705a10a988aSShreyansh Jain DPAA2_PMD_ERR( 2706d4984046SHemant Agrawal "Failed to allocate %d bytes needed to store MAC addresses", 270735b2d13fSOlivier Matz RTE_ETHER_ADDR_LEN * attr.mac_filter_entries); 2708d4984046SHemant Agrawal ret = -ENOMEM; 2709d4984046SHemant Agrawal goto init_err; 271033fad432SHemant Agrawal } 271133fad432SHemant Agrawal 2712c3e0a706SShreyansh Jain ret = populate_mac_addr(dpni_dev, priv, ð_dev->data->mac_addrs[0]); 271333fad432SHemant Agrawal if (ret) { 2714c3e0a706SShreyansh Jain DPAA2_PMD_ERR("Unable to fetch MAC Address for device"); 2715c3e0a706SShreyansh Jain rte_free(eth_dev->data->mac_addrs); 2716c3e0a706SShreyansh Jain eth_dev->data->mac_addrs = NULL; 2717d4984046SHemant Agrawal goto init_err; 271833fad432SHemant Agrawal } 271933fad432SHemant Agrawal 2720bee61d86SHemant Agrawal /* ... tx buffer layout ... 
*/ 2721bee61d86SHemant Agrawal memset(&layout, 0, sizeof(struct dpni_buffer_layout)); 27228d21c563SHemant Agrawal if (priv->flags & DPAA2_TX_CONF_ENABLE) { 27239ceacab7SPriyanka Jain layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | 27249ceacab7SPriyanka Jain DPNI_BUF_LAYOUT_OPT_TIMESTAMP; 27259ceacab7SPriyanka Jain layout.pass_timestamp = true; 27269ceacab7SPriyanka Jain } else { 2727bee61d86SHemant Agrawal layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; 27289ceacab7SPriyanka Jain } 2729bee61d86SHemant Agrawal layout.pass_frame_status = 1; 2730bee61d86SHemant Agrawal ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, 2731bee61d86SHemant Agrawal DPNI_QUEUE_TX, &layout); 2732bee61d86SHemant Agrawal if (ret) { 2733a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret); 2734d4984046SHemant Agrawal goto init_err; 2735bee61d86SHemant Agrawal } 2736bee61d86SHemant Agrawal 2737bee61d86SHemant Agrawal /* ... tx-conf and error buffer layout ... */ 2738bee61d86SHemant Agrawal memset(&layout, 0, sizeof(struct dpni_buffer_layout)); 27398d21c563SHemant Agrawal if (priv->flags & DPAA2_TX_CONF_ENABLE) { 27408d21c563SHemant Agrawal layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP; 27419ceacab7SPriyanka Jain layout.pass_timestamp = true; 27429ceacab7SPriyanka Jain } 27438d21c563SHemant Agrawal layout.options |= DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; 2744bee61d86SHemant Agrawal layout.pass_frame_status = 1; 2745bee61d86SHemant Agrawal ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, 2746bee61d86SHemant Agrawal DPNI_QUEUE_TX_CONFIRM, &layout); 2747bee61d86SHemant Agrawal if (ret) { 2748a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout", 2749d4984046SHemant Agrawal ret); 2750d4984046SHemant Agrawal goto init_err; 2751bee61d86SHemant Agrawal } 2752bee61d86SHemant Agrawal 27533e5a335dSHemant Agrawal eth_dev->dev_ops = &dpaa2_ethdev_ops; 2754c147eae0SHemant Agrawal 2755a3a997f0SHemant Agrawal if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) { 2756a3a997f0SHemant Agrawal eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx; 2757a3a997f0SHemant Agrawal DPAA2_PMD_INFO("Loopback mode"); 275820191ab3SNipun Gupta } else if (dpaa2_get_devargs(dev->devargs, DRIVER_NO_PREFETCH_MODE)) { 275920191ab3SNipun Gupta eth_dev->rx_pkt_burst = dpaa2_dev_rx; 276020191ab3SNipun Gupta DPAA2_PMD_INFO("No Prefetch mode"); 2761a3a997f0SHemant Agrawal } else { 27625c6942fdSHemant Agrawal eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx; 2763a3a997f0SHemant Agrawal } 2764cd9935ceSHemant Agrawal eth_dev->tx_pkt_burst = dpaa2_dev_tx; 27651261cd68SHemant Agrawal 27667be78d02SJosh Soref /* Init fields w.r.t. 
classification */ 27675f176728SJun Yang memset(&priv->extract.qos_key_extract, 0, 27685f176728SJun Yang sizeof(struct dpaa2_key_extract)); 2769fe2b986aSSunil Kumar Kori priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64); 2770fe2b986aSSunil Kumar Kori if (!priv->extract.qos_extract_param) { 2771fe2b986aSSunil Kumar Kori DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow " 27727be78d02SJosh Soref " classification ", ret); 2773fe2b986aSSunil Kumar Kori goto init_err; 2774fe2b986aSSunil Kumar Kori } 27755f176728SJun Yang priv->extract.qos_key_extract.key_info.ipv4_src_offset = 27765f176728SJun Yang IP_ADDRESS_OFFSET_INVALID; 27775f176728SJun Yang priv->extract.qos_key_extract.key_info.ipv4_dst_offset = 27785f176728SJun Yang IP_ADDRESS_OFFSET_INVALID; 27795f176728SJun Yang priv->extract.qos_key_extract.key_info.ipv6_src_offset = 27805f176728SJun Yang IP_ADDRESS_OFFSET_INVALID; 27815f176728SJun Yang priv->extract.qos_key_extract.key_info.ipv6_dst_offset = 27825f176728SJun Yang IP_ADDRESS_OFFSET_INVALID; 27835f176728SJun Yang 2784fe2b986aSSunil Kumar Kori for (i = 0; i < MAX_TCS; i++) { 27855f176728SJun Yang memset(&priv->extract.tc_key_extract[i], 0, 27865f176728SJun Yang sizeof(struct dpaa2_key_extract)); 27875f176728SJun Yang priv->extract.tc_extract_param[i] = 2788fe2b986aSSunil Kumar Kori (size_t)rte_malloc(NULL, 256, 64); 27895f176728SJun Yang if (!priv->extract.tc_extract_param[i]) { 27907be78d02SJosh Soref DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow classification", 2791fe2b986aSSunil Kumar Kori ret); 2792fe2b986aSSunil Kumar Kori goto init_err; 2793fe2b986aSSunil Kumar Kori } 27945f176728SJun Yang priv->extract.tc_key_extract[i].key_info.ipv4_src_offset = 27955f176728SJun Yang IP_ADDRESS_OFFSET_INVALID; 27965f176728SJun Yang priv->extract.tc_key_extract[i].key_info.ipv4_dst_offset = 27975f176728SJun Yang IP_ADDRESS_OFFSET_INVALID; 27985f176728SJun Yang priv->extract.tc_key_extract[i].key_info.ipv6_src_offset = 27995f176728SJun Yang IP_ADDRESS_OFFSET_INVALID; 28005f176728SJun Yang priv->extract.tc_key_extract[i].key_info.ipv6_dst_offset = 28015f176728SJun Yang IP_ADDRESS_OFFSET_INVALID; 2802fe2b986aSSunil Kumar Kori } 2803fe2b986aSSunil Kumar Kori 28046f8be0fbSHemant Agrawal ret = dpni_set_max_frame_length(dpni_dev, CMD_PRI_LOW, priv->token, 28056f8be0fbSHemant Agrawal RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN 28066f8be0fbSHemant Agrawal + VLAN_TAG_SIZE); 28076f8be0fbSHemant Agrawal if (ret) { 28086f8be0fbSHemant Agrawal DPAA2_PMD_ERR("Unable to set mtu. check config"); 28096f8be0fbSHemant Agrawal goto init_err; 28106f8be0fbSHemant Agrawal } 28116f8be0fbSHemant Agrawal 281272ec7a67SSunil Kumar Kori /*TODO To enable soft parser support DPAA2 driver needs to integrate 281372ec7a67SSunil Kumar Kori * with external entity to receive byte code for software sequence 281472ec7a67SSunil Kumar Kori * and same will be offload to the H/W using MC interface. 281572ec7a67SSunil Kumar Kori * Currently it is assumed that DPAA2 driver has byte code by some 281672ec7a67SSunil Kumar Kori * mean and same if offloaded to H/W. 
281772ec7a67SSunil Kumar Kori */ 281872ec7a67SSunil Kumar Kori if (getenv("DPAA2_ENABLE_SOFT_PARSER")) { 281972ec7a67SSunil Kumar Kori WRIOP_SS_INITIALIZER(priv); 282072ec7a67SSunil Kumar Kori ret = dpaa2_eth_load_wriop_soft_parser(priv, DPNI_SS_INGRESS); 282172ec7a67SSunil Kumar Kori if (ret < 0) { 2822f665790aSDavid Marchand DPAA2_PMD_ERR(" Error(%d) in loading softparser", 282372ec7a67SSunil Kumar Kori ret); 282472ec7a67SSunil Kumar Kori return ret; 282572ec7a67SSunil Kumar Kori } 282672ec7a67SSunil Kumar Kori 282772ec7a67SSunil Kumar Kori ret = dpaa2_eth_enable_wriop_soft_parser(priv, 282872ec7a67SSunil Kumar Kori DPNI_SS_INGRESS); 282972ec7a67SSunil Kumar Kori if (ret < 0) { 2830f665790aSDavid Marchand DPAA2_PMD_ERR(" Error(%d) in enabling softparser", 283172ec7a67SSunil Kumar Kori ret); 283272ec7a67SSunil Kumar Kori return ret; 283372ec7a67SSunil Kumar Kori } 283472ec7a67SSunil Kumar Kori } 2835a247fcd9SStephen Hemminger DPAA2_PMD_INFO("%s: netdev created, connected to %s", 2836f023d059SJun Yang eth_dev->data->name, dpaa2_dev->ep_name); 2837f023d059SJun Yang 2838c147eae0SHemant Agrawal return 0; 2839d4984046SHemant Agrawal init_err: 28403e5a335dSHemant Agrawal dpaa2_dev_close(eth_dev); 28413e5a335dSHemant Agrawal 28425964d36aSSachin Saxena return ret; 2843c147eae0SHemant Agrawal } 2844c147eae0SHemant Agrawal 2845028d1dfdSJun Yang int dpaa2_dev_is_dpaa2(struct rte_eth_dev *dev) 2846028d1dfdSJun Yang { 2847028d1dfdSJun Yang return dev->device->driver == &rte_dpaa2_pmd.driver; 2848028d1dfdSJun Yang } 2849028d1dfdSJun Yang 2850c147eae0SHemant Agrawal static int 285155fd2703SHemant Agrawal rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv, 2852c147eae0SHemant Agrawal struct rte_dpaa2_device *dpaa2_dev) 2853c147eae0SHemant Agrawal { 2854c147eae0SHemant Agrawal struct rte_eth_dev *eth_dev; 285581c42c84SShreyansh Jain struct dpaa2_dev_priv *dev_priv; 2856c147eae0SHemant Agrawal int diag; 2857c147eae0SHemant Agrawal 2858f4435e38SHemant Agrawal if ((DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) > 2859f4435e38SHemant Agrawal RTE_PKTMBUF_HEADROOM) { 2860f4435e38SHemant Agrawal DPAA2_PMD_ERR( 2861f4435e38SHemant Agrawal "RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA2 Annotation req(%d)", 2862f4435e38SHemant Agrawal RTE_PKTMBUF_HEADROOM, 2863f4435e38SHemant Agrawal DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE); 2864f4435e38SHemant Agrawal 2865f4435e38SHemant Agrawal return -1; 2866f4435e38SHemant Agrawal } 2867f4435e38SHemant Agrawal 2868c147eae0SHemant Agrawal if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 2869e729ec76SHemant Agrawal eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name); 2870e729ec76SHemant Agrawal if (!eth_dev) 2871e729ec76SHemant Agrawal return -ENODEV; 287281c42c84SShreyansh Jain dev_priv = rte_zmalloc("ethdev private structure", 2873c147eae0SHemant Agrawal sizeof(struct dpaa2_dev_priv), 2874c147eae0SHemant Agrawal RTE_CACHE_LINE_SIZE); 287581c42c84SShreyansh Jain if (dev_priv == NULL) { 2876a10a988aSShreyansh Jain DPAA2_PMD_CRIT( 2877a10a988aSShreyansh Jain "Unable to allocate memory for private data"); 2878c147eae0SHemant Agrawal rte_eth_dev_release_port(eth_dev); 2879c147eae0SHemant Agrawal return -ENOMEM; 2880c147eae0SHemant Agrawal } 288181c42c84SShreyansh Jain eth_dev->data->dev_private = (void *)dev_priv; 288281c42c84SShreyansh Jain /* Store a pointer to eth_dev in dev_private */ 288381c42c84SShreyansh Jain dev_priv->eth_dev = eth_dev; 2884e729ec76SHemant Agrawal } else { 2885e729ec76SHemant Agrawal eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name); 
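		/* The secondary process attaches to the port already allocated
		 * by the primary, matched by device name; a NULL return means
		 * the primary has not probed this device.
		 */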
288681c42c84SShreyansh Jain if (!eth_dev) { 288781c42c84SShreyansh Jain DPAA2_PMD_DEBUG("returning enodev"); 2888e729ec76SHemant Agrawal return -ENODEV; 2889c147eae0SHemant Agrawal } 289081c42c84SShreyansh Jain } 2891e729ec76SHemant Agrawal 2892c147eae0SHemant Agrawal eth_dev->device = &dpaa2_dev->device; 289355fd2703SHemant Agrawal 2894c147eae0SHemant Agrawal dpaa2_dev->eth_dev = eth_dev; 2895c147eae0SHemant Agrawal eth_dev->data->rx_mbuf_alloc_failed = 0; 2896c147eae0SHemant Agrawal 289792b7e33eSHemant Agrawal if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC) 289892b7e33eSHemant Agrawal eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC; 289992b7e33eSHemant Agrawal 2900f30e69b4SFerruh Yigit eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 2901f30e69b4SFerruh Yigit 2902c147eae0SHemant Agrawal /* Invoke PMD device initialization function */ 2903c147eae0SHemant Agrawal diag = dpaa2_dev_init(eth_dev); 2904fbe90cddSThomas Monjalon if (diag == 0) { 290575e2a1d4SGagandeep Singh if (!dpaa2_tx_sg_pool) { 290675e2a1d4SGagandeep Singh dpaa2_tx_sg_pool = 290775e2a1d4SGagandeep Singh rte_pktmbuf_pool_create("dpaa2_mbuf_tx_sg_pool", 290875e2a1d4SGagandeep Singh DPAA2_POOL_SIZE, 290975e2a1d4SGagandeep Singh DPAA2_POOL_CACHE_SIZE, 0, 291075e2a1d4SGagandeep Singh DPAA2_MAX_SGS * sizeof(struct qbman_sge), 291175e2a1d4SGagandeep Singh rte_socket_id()); 291275e2a1d4SGagandeep Singh if (dpaa2_tx_sg_pool == NULL) { 2913f665790aSDavid Marchand DPAA2_PMD_ERR("SG pool creation failed"); 291475e2a1d4SGagandeep Singh return -ENOMEM; 291575e2a1d4SGagandeep Singh } 291675e2a1d4SGagandeep Singh } 2917fbe90cddSThomas Monjalon rte_eth_dev_probing_finish(eth_dev); 291875e2a1d4SGagandeep Singh dpaa2_valid_dev++; 2919c147eae0SHemant Agrawal return 0; 2920fbe90cddSThomas Monjalon } 2921c147eae0SHemant Agrawal 2922c147eae0SHemant Agrawal rte_eth_dev_release_port(eth_dev); 2923c147eae0SHemant Agrawal return diag; 2924c147eae0SHemant Agrawal } 2925c147eae0SHemant Agrawal 2926c147eae0SHemant Agrawal static int 2927c147eae0SHemant Agrawal rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev) 2928c147eae0SHemant Agrawal { 2929c147eae0SHemant Agrawal struct rte_eth_dev *eth_dev; 29305964d36aSSachin Saxena int ret; 2931c147eae0SHemant Agrawal 2932c147eae0SHemant Agrawal eth_dev = dpaa2_dev->eth_dev; 29335964d36aSSachin Saxena dpaa2_dev_close(eth_dev); 293475e2a1d4SGagandeep Singh dpaa2_valid_dev--; 293575e2a1d4SGagandeep Singh if (!dpaa2_valid_dev) 293675e2a1d4SGagandeep Singh rte_mempool_free(dpaa2_tx_sg_pool); 29375964d36aSSachin Saxena ret = rte_eth_dev_release_port(eth_dev); 2938c147eae0SHemant Agrawal 29395964d36aSSachin Saxena return ret; 2940c147eae0SHemant Agrawal } 2941c147eae0SHemant Agrawal 2942c147eae0SHemant Agrawal static struct rte_dpaa2_driver rte_dpaa2_pmd = { 294392b7e33eSHemant Agrawal .drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA, 2944bad555dfSShreyansh Jain .drv_type = DPAA2_ETH, 2945c147eae0SHemant Agrawal .probe = rte_dpaa2_probe, 2946c147eae0SHemant Agrawal .remove = rte_dpaa2_remove, 2947c147eae0SHemant Agrawal }; 2948c147eae0SHemant Agrawal 29494ed8a733SVanshika Shukla RTE_PMD_REGISTER_DPAA2(NET_DPAA2_PMD_DRIVER_NAME, rte_dpaa2_pmd); 29504ed8a733SVanshika Shukla RTE_PMD_REGISTER_PARAM_STRING(NET_DPAA2_PMD_DRIVER_NAME, 295120191ab3SNipun Gupta DRIVER_LOOPBACK_MODE "=<int> " 29528d21c563SHemant Agrawal DRIVER_NO_PREFETCH_MODE "=<int>" 29534690a611SNipun Gupta DRIVER_TX_CONF "=<int>" 29544690a611SNipun Gupta DRIVER_ERROR_QUEUE "=<int>"); 2955eeded204SDavid Marchand 
RTE_LOG_REGISTER_DEFAULT(dpaa2_logtype_pmd, NOTICE);
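
/*
 * Usage sketch: how an application can drive the flow-control path
 * implemented by dpaa2_flow_ctrl_set() above through the generic ethdev
 * API. The guard macro and the helper name below are illustrative
 * placeholders (assumptions), not symbols provided by the DPAA2 PMD.
 */
#ifdef DPAA2_ETHDEV_USAGE_EXAMPLE
#include <rte_ethdev.h>

static int
example_enable_full_flow_control(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf = {0};
	int ret;

	/* Read back the current settings first; the PMD merges the request
	 * with the existing link state (rate/options) before programming
	 * the MC firmware.
	 */
	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	if (ret < 0)
		return ret;

	/* RTE_ETH_FC_FULL maps to DPNI_LINK_OPT_PAUSE set and
	 * DPNI_LINK_OPT_ASYM_PAUSE cleared in dpaa2_flow_ctrl_set().
	 */
	fc_conf.mode = RTE_ETH_FC_FULL;

	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}
#endif /* DPAA2_ETHDEV_USAGE_EXAMPLE */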