/* * SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016-2024 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <dev_driver.h>
#include <bus_fslmc_driver.h>
#include <rte_flow_driver.h>
#include "rte_dpaa2_mempool.h"

#include "dpaa2_pmd_logs.h"
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <mc/fsl_dpmng.h>
#include "dpaa2_ethdev.h"
#include "dpaa2_sparser.h"
#include <fsl_qbman_debug.h>

#define DRIVER_LOOPBACK_MODE "drv_loopback"
#define DRIVER_NO_PREFETCH_MODE "drv_no_prefetch"
#define DRIVER_TX_CONF "drv_tx_conf"
#define DRIVER_ERROR_QUEUE  "drv_err_queue"
#define CHECK_INTERVAL         100  /* 100ms */
#define MAX_REPEAT_TIME        90   /* 9s (90 * 100ms) in total */
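
/*
 * The DRIVER_* macros above are devargs keys that tune driver behaviour at
 * probe time (loopback, no-prefetch Rx, Tx confirmation, error queue).
 * As an illustration only (exact bus/device naming depends on the platform),
 * a device argument string such as "fslmc:dpni.1,drv_loopback=1" would
 * request driver-level loopback.
 */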

/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
		RTE_ETH_RX_OFFLOAD_CHECKSUM |
		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
		RTE_ETH_RX_OFFLOAD_TIMESTAMP;

/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
		RTE_ETH_RX_OFFLOAD_RSS_HASH |
		RTE_ETH_RX_OFFLOAD_SCATTER;

/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

/* Tx offloads which cannot be disabled */
static uint64_t dev_tx_offloads_nodis =
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;

/* enable timestamp in mbuf */
bool dpaa2_enable_ts[RTE_MAX_ETHPORTS];
uint64_t dpaa2_timestamp_rx_dynflag;
int dpaa2_timestamp_dynfield_offset = -1;

/* Enable error queue */
bool dpaa2_enable_err_queue;

#define MAX_NB_RX_DESC		11264
int total_nb_rx_desc;

int dpaa2_valid_dev;
struct rte_mempool *dpaa2_tx_sg_pool;

struct rte_dpaa2_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint8_t page_id; /* dpni statistics page id */
	uint8_t stats_id; /* stats id in the given page */
};

static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
	{"ingress_multicast_frames", 0, 2},
	{"ingress_multicast_bytes", 0, 3},
	{"ingress_broadcast_frames", 0, 4},
	{"ingress_broadcast_bytes", 0, 5},
	{"egress_multicast_frames", 1, 2},
	{"egress_multicast_bytes", 1, 3},
	{"egress_broadcast_frames", 1, 4},
	{"egress_broadcast_bytes", 1, 5},
	{"ingress_filtered_frames", 2, 0},
	{"ingress_discarded_frames", 2, 1},
	{"ingress_nobuffer_discards", 2, 2},
	{"egress_discarded_frames", 2, 3},
	{"egress_confirmed_frames", 2, 4},
	{"cgr_reject_frames", 4, 0},
	{"cgr_reject_bytes", 4, 1},
};

static struct rte_dpaa2_driver rte_dpaa2_pmd;
static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int
dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = dev->process_private;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -1;
	}

	if (on)
		ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW, priv->token,
				       vlan_id, 0, 0, 0);
	else
		ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
					  priv->token, vlan_id);

	if (ret < 0)
		DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid =%d",
			      ret, vlan_id, priv->hw_id);

	return ret;
}

static int
dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = dev->process_private;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();

	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
		/* VLAN Filter not available */
		if (!priv->max_vlan_filters) {
			DPAA2_PMD_INFO("VLAN filter not available");
			return -ENOTSUP;
		}

		if (dev->data->dev_conf.rxmode.offloads &
			RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, true);
		else
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, false);
		if (ret < 0)
			DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret);
	}

	return ret;
}

static int
dpaa2_vlan_tpid_set(struct rte_eth_dev *dev,
		    enum rte_vlan_type vlan_type __rte_unused,
		    uint16_t tpid)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = dev->process_private;
	int ret = -ENOTSUP;

	PMD_INIT_FUNC_TRACE();

	/* nothing to be done for standard vlan tpids */
	if (tpid == 0x8100 || tpid == 0x88A8)
		return 0;

	ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
				   priv->token, tpid);
	if (ret < 0)
		DPAA2_PMD_INFO("Unable to set vlan tpid = %d", ret);
	/* if already configured tpids, remove them first */
	if (ret == -EBUSY) {
		struct dpni_custom_tpid_cfg tpid_list = {0};

		ret = dpni_get_custom_tpid(dpni, CMD_PRI_LOW,
					   priv->token, &tpid_list);
		if (ret < 0)
			goto fail;
		ret = dpni_remove_custom_tpid(dpni, CMD_PRI_LOW,
					      priv->token, tpid_list.tpid1);
		if (ret < 0)
			goto fail;
		ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
					   priv->token, tpid);
	}
fail:
	return ret;
}

static int
dpaa2_fw_version_get(struct rte_eth_dev *dev,
		     char *fw_version,
		     size_t fw_size)
{
	int ret;
	struct fsl_mc_io *dpni = dev->process_private;
	struct mc_soc_version mc_plat_info = {0};
	struct mc_version mc_ver_info = {0};

	PMD_INIT_FUNC_TRACE();

	if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
		DPAA2_PMD_WARN("\tmc_get_soc_version failed");

	if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
		DPAA2_PMD_WARN("\tmc_get_version failed");

	ret = snprintf(fw_version, fw_size,
		       "%x-%d.%d.%d",
		       mc_plat_info.svr,
		       mc_ver_info.major,
		       mc_ver_info.minor,
		       mc_ver_info.revision);
	if (ret < 0)
		return -EINVAL;

	ret += 1; /* add the size of '\0' */
	if (fw_size < (size_t)ret)
		return ret;
	else
		return 0;
}

static int
dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	dev_info->max_mac_addrs = priv->max_mac_filters;
	dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
	dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
	dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
	dev_info->rx_offload_capa = dev_rx_offloads_sup |
					dev_rx_offloads_nodis;
	dev_info->tx_offload_capa = dev_tx_offloads_sup |
					dev_tx_offloads_nodis;
	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G |
			RTE_ETH_LINK_SPEED_2_5G |
			RTE_ETH_LINK_SPEED_10G;
	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;

	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vfs = 0;
	dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
	dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;

	dev_info->default_rxportconf.burst_size = dpaa2_dqrr_size;
	/* same is rx size for best perf */
	dev_info->default_txportconf.burst_size = dpaa2_dqrr_size;

	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_txportconf.ring_size = CONG_ENTER_TX_THRESHOLD;
	dev_info->default_rxportconf.ring_size = DPAA2_RX_DEFAULT_NBDESC;

	if (dpaa2_svr_family == SVR_LX2160A) {
		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_25G |
				RTE_ETH_LINK_SPEED_40G |
				RTE_ETH_LINK_SPEED_50G |
				RTE_ETH_LINK_SPEED_100G;
	}

	return 0;
}

static int
dpaa2_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
			__rte_unused uint16_t queue_id,
			struct rte_eth_burst_mode *mode)
{
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	int ret = -EINVAL;
	unsigned int i;
	const struct burst_info {
		uint64_t flags;
		const char *output;
	} rx_offload_map[] = {
			{RTE_ETH_RX_OFFLOAD_CHECKSUM, " Checksum,"},
			{RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
			{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
			{RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
			{RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
			{RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
			{RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
			{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"},
			{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"}
	};

	/* Update Rx offload info */
	for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
		if (eth_conf->rxmode.offloads & rx_offload_map[i].flags) {
			snprintf(mode->info, sizeof(mode->info), "%s",
				 rx_offload_map[i].output);
			ret = 0;
			break;
		}
	}
	return ret;
}

static int
dpaa2_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
			__rte_unused uint16_t queue_id,
			struct rte_eth_burst_mode *mode)
{
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	int ret = -EINVAL;
	unsigned int i;
	const struct burst_info {
		uint64_t flags;
		const char *output;
	} tx_offload_map[] = {
			{RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
			{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
			{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
			{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
			{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
			{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
			{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
			{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
			{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
	};

	/* Update Tx offload info */
	for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
		if (eth_conf->txmode.offloads & tx_offload_map[i].flags) {
			snprintf(mode->info, sizeof(mode->info), "%s",
				 tx_offload_map[i].output);
			ret = 0;
			break;
		}
	}
	return ret;
}
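
/*
 * All queue structures (Rx, Tx and, when Tx confirmation is enabled,
 * Tx-conf) are carved out of a single contiguous allocation anchored at
 * priv->rx_vq[0]; per-lcore dq storage is attached to every Rx queue and
 * to the Rx-error and Tx-conf queues. On failure, partially initialised
 * queues are unwound in reverse order.
 */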
static int
dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	uint16_t dist_idx;
	uint32_t vq_id;
	uint8_t num_rxqueue_per_tc;
	struct dpaa2_queue *mc_q, *mcq;
	uint32_t tot_queues;
	int i, ret = 0;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	num_rxqueue_per_tc = (priv->nb_rx_queues / priv->num_rx_tc);
	if (priv->flags & DPAA2_TX_CONF_ENABLE)
		tot_queues = priv->nb_rx_queues + 2 * priv->nb_tx_queues;
	else
		tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
	mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
			  RTE_CACHE_LINE_SIZE);
	if (!mc_q) {
		DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues");
		return -1;
	}

	for (i = 0; i < priv->nb_rx_queues; i++) {
		mc_q->eth_data = dev->data;
		priv->rx_vq[i] = mc_q++;
		dpaa2_q = priv->rx_vq[i];
		ret = dpaa2_queue_storage_alloc(dpaa2_q,
			RTE_MAX_LCORE);
		if (ret)
			goto fail;
	}

	if (dpaa2_enable_err_queue) {
		priv->rx_err_vq = rte_zmalloc("dpni_rx_err",
			sizeof(struct dpaa2_queue), 0);
		if (!priv->rx_err_vq)
			goto fail;

		dpaa2_q = priv->rx_err_vq;
		ret = dpaa2_queue_storage_alloc(dpaa2_q,
			RTE_MAX_LCORE);
		if (ret)
			goto fail;
	}

	for (i = 0; i < priv->nb_tx_queues; i++) {
		mc_q->eth_data = dev->data;
		mc_q->flow_id = 0xffff;
		priv->tx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		dpaa2_q->cscn = rte_malloc(NULL,
					   sizeof(struct qbman_result), 16);
		if (!dpaa2_q->cscn)
			goto fail_tx;
	}

	if (priv->flags & DPAA2_TX_CONF_ENABLE) {
		/* Setup tx confirmation queues */
		for (i = 0; i < priv->nb_tx_queues; i++) {
			mc_q->eth_data = dev->data;
			mc_q->tc_index = i;
			mc_q->flow_id = 0;
			priv->tx_conf_vq[i] = mc_q++;
			dpaa2_q = priv->tx_conf_vq[i];
			ret = dpaa2_queue_storage_alloc(dpaa2_q,
				RTE_MAX_LCORE);
			if (ret)
				goto fail_tx_conf;
		}
	}

	vq_id = 0;
	for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
		mcq = priv->rx_vq[vq_id];
		mcq->tc_index = dist_idx / num_rxqueue_per_tc;
		mcq->flow_id = dist_idx % num_rxqueue_per_tc;
		vq_id++;
	}

	return 0;
fail_tx_conf:
	i -= 1;
	while (i >= 0) {
		dpaa2_q = priv->tx_conf_vq[i];
		dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
		priv->tx_conf_vq[i--] = NULL;
	}
	i = priv->nb_tx_queues;
fail_tx:
	i -= 1;
	while (i >= 0) {
		dpaa2_q = priv->tx_vq[i];
		rte_free(dpaa2_q->cscn);
		priv->tx_vq[i--] = NULL;
	}
	i = priv->nb_rx_queues;
fail:
	i -= 1;
	mc_q = priv->rx_vq[0];
	while (i >= 0) {
		dpaa2_q = priv->rx_vq[i];
		dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
		priv->rx_vq[i--] = NULL;
	}

	if (dpaa2_enable_err_queue) {
		dpaa2_q = priv->rx_err_vq;
		dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
	}

	rte_free(mc_q);
	return -1;
}
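
/*
 * Counterpart of dpaa2_alloc_rx_tx_queues(): release the per-queue dq
 * storage and the Tx CSCN memory, then free the single queue array
 * allocation through priv->rx_vq[0].
 */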
static void
dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q;
	int i;

	PMD_INIT_FUNC_TRACE();

	/* Queue allocation base */
	if (priv->rx_vq[0]) {
		/* cleaning up queue storage */
		for (i = 0; i < priv->nb_rx_queues; i++) {
			dpaa2_q = priv->rx_vq[i];
			dpaa2_queue_storage_free(dpaa2_q,
				RTE_MAX_LCORE);
		}
		/* cleanup tx queue cscn */
		for (i = 0; i < priv->nb_tx_queues; i++) {
			dpaa2_q = priv->tx_vq[i];
			rte_free(dpaa2_q->cscn);
		}
		if (priv->flags & DPAA2_TX_CONF_ENABLE) {
			/* cleanup tx conf queue storage */
			for (i = 0; i < priv->nb_tx_queues; i++) {
				dpaa2_q = priv->tx_conf_vq[i];
				dpaa2_queue_storage_free(dpaa2_q,
					RTE_MAX_LCORE);
			}
		}
		/* free memory for all queues (RX+TX) */
		rte_free(priv->rx_vq[0]);
		priv->rx_vq[0] = NULL;
	}
}
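
/*
 * Apply the ethdev configuration to the underlying DPNI: maximum frame
 * length derived from the MTU, per traffic class RSS distribution, Rx/Tx
 * L3/L4 checksum offloads, optional timestamping, loopback (recycle) mode
 * and traffic management initialisation.
 */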
static int
dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = dev->process_private;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	uint64_t tx_offloads = eth_conf->txmode.offloads;
	int rx_l3_csum_offload = false;
	int rx_l4_csum_offload = false;
	int tx_l3_csum_offload = false;
	int tx_l4_csum_offload = false;
	int ret, tc_index;
	uint32_t max_rx_pktlen;
#if defined(RTE_LIBRTE_IEEE1588)
	uint16_t ptp_correction_offset;
#endif

	PMD_INIT_FUNC_TRACE();

	/* Rx offloads which are enabled by default */
	if (dev_rx_offloads_nodis & ~rx_offloads) {
		DPAA2_PMD_INFO(
		"Some of rx offloads enabled by default - requested 0x%" PRIx64
		" fixed are 0x%" PRIx64,
		rx_offloads, dev_rx_offloads_nodis);
	}

	/* Tx offloads which are enabled by default */
	if (dev_tx_offloads_nodis & ~tx_offloads) {
		DPAA2_PMD_INFO(
		"Some of tx offloads enabled by default - requested 0x%" PRIx64
		" fixed are 0x%" PRIx64,
		tx_offloads, dev_tx_offloads_nodis);
	}

	max_rx_pktlen = eth_conf->rxmode.mtu + RTE_ETHER_HDR_LEN +
				RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
	if (max_rx_pktlen <= DPAA2_MAX_RX_PKT_LEN) {
		ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
			priv->token, max_rx_pktlen - RTE_ETHER_CRC_LEN);
		if (ret != 0) {
			DPAA2_PMD_ERR("Unable to set mtu. check config");
			return ret;
		}
		DPAA2_PMD_DEBUG("MTU configured for the device: %d",
				dev->data->mtu);
	} else {
		DPAA2_PMD_ERR("Configured mtu %d and calculated max-pkt-len is %d which should be <= %d",
			eth_conf->rxmode.mtu, max_rx_pktlen, DPAA2_MAX_RX_PKT_LEN);
		return -1;
	}

	if (eth_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
		for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
			ret = dpaa2_setup_flow_dist(dev,
					eth_conf->rx_adv_conf.rss_conf.rss_hf,
					tc_index);
			if (ret) {
				DPAA2_PMD_ERR(
					"Unable to set flow distribution on tc%d."
					"Check queue config", tc_index);
				return ret;
			}
		}
	}

	if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
		rx_l3_csum_offload = true;

	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) ||
		(rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) ||
		(rx_offloads & RTE_ETH_RX_OFFLOAD_SCTP_CKSUM))
		rx_l4_csum_offload = true;

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to get RX l4 csum:Error = %d", ret);
		return ret;
	}

#if !defined(RTE_LIBRTE_IEEE1588)
	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
#endif
	{
		ret = rte_mbuf_dyn_rx_timestamp_register(
				&dpaa2_timestamp_dynfield_offset,
				&dpaa2_timestamp_rx_dynflag);
		if (ret != 0) {
			DPAA2_PMD_ERR("Error to register timestamp field/flag");
			return -rte_errno;
		}
		dpaa2_enable_ts[dev->data->port_id] = true;
	}
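
	/* With IEEE 1588 enabled, the block below programs the default
	 * one-step timestamp correction offset: RTE_ETHER_HDR_LEN + 8,
	 * i.e. (for an untagged, Ethernet-encapsulated PTP SYNC frame)
	 * the correctionField, which sits at offset 8 of the PTP common
	 * header.
	 */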
#if defined(RTE_LIBRTE_IEEE1588)
	/* By default set the ptp correction offset for Ethernet SYNC packets */
	ptp_correction_offset = RTE_ETHER_HDR_LEN + 8;
	rte_pmd_dpaa2_set_one_step_ts(dev->data->port_id, ptp_correction_offset, 0);
#endif
	if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
		tx_l3_csum_offload = true;

	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) ||
		(tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ||
		(tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM))
		tx_l4_csum_offload = true;

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to get TX l4 csum:Error = %d", ret);
		return ret;
	}

	/* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in
	 * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC]
	 * to 0 for LS2 in the hardware thus disabling data/annotation
	 * stashing. For LX2 this is fixed in hardware and thus hash result and
	 * parse results can be received in FD using this option.
	 */
	if (dpaa2_svr_family == SVR_LX2160A) {
		ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
				       DPNI_FLCTYPE_HASH, true);
		if (ret) {
			DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret);
			return ret;
		}
	}

	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
		dpaa2_vlan_offload_set(dev, RTE_ETH_VLAN_FILTER_MASK);

	if (eth_conf->lpbk_mode) {
		ret = dpaa2_dev_recycle_config(dev);
		if (ret) {
			DPAA2_PMD_ERR("Error to configure %s to recycle port.",
				dev->data->name);

			return ret;
		}
	} else {
		/** The user may disable loopback mode by calling
		 * "dev_configure" with lpbk_mode cleared.
		 * No matter whether the port was configured as a recycle
		 * port or not, recycle de-configure is called here.
		 * If the port is not a recycle port, the de-configure will
		 * return directly.
		 */
		ret = dpaa2_dev_recycle_deconfig(dev);
		if (ret) {
			DPAA2_PMD_ERR("Error to de-configure recycle port %s.",
				dev->data->name);

			return ret;
		}
	}

	dpaa2_tm_init(dev);

	return 0;
}

/* Function to setup RX flow information. It contains traffic class ID,
 * flow ID, destination configuration etc.
 */
static int
dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t rx_queue_id,
			 uint16_t nb_rx_desc,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mb_pool)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
	struct dpaa2_queue *dpaa2_q;
	struct dpni_queue cfg;
	uint8_t options = 0;
	uint8_t flow_id;
	uint32_t bpid;
	int i, ret;

	PMD_INIT_FUNC_TRACE();

	DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
			dev, rx_queue_id, mb_pool, rx_conf);

	total_nb_rx_desc += nb_rx_desc;
	if (total_nb_rx_desc > MAX_NB_RX_DESC) {
		DPAA2_PMD_WARN("Total nb_rx_desc exceeds %d limit. Please use Normal buffers",
			       MAX_NB_RX_DESC);
		DPAA2_PMD_WARN("To use Normal buffers, run 'export DPNI_NORMAL_BUF=1' before running dynamic_dpl.sh script");
	}

	/* Rx deferred start is not supported */
	if (rx_conf->rx_deferred_start) {
		DPAA2_PMD_ERR("%p:Rx deferred start not supported",
			      (void *)dev);
		return -EINVAL;
	}

	if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
		if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
			ret = rte_dpaa2_bpid_info_init(mb_pool);
			if (ret)
				return ret;
		}
		bpid = mempool_to_bpid(mb_pool);
		ret = dpaa2_attach_bp_list(priv, dpni,
				rte_dpaa2_bpid_info[bpid].bp_list);
		if (ret)
			return ret;
	}
	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
	dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
	dpaa2_q->bp_array = rte_dpaa2_bpid_info;
	dpaa2_q->nb_desc = UINT16_MAX;
	dpaa2_q->offloads = rx_conf->offloads;

	/* Get the flow id from the given VQ id */
	flow_id = dpaa2_q->flow_id;
	memset(&cfg, 0, sizeof(struct dpni_queue));

	options = options | DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (size_t)(dpaa2_q);

	/* check if a private cgr is available */
	for (i = 0; i < priv->max_cgs; i++) {
		if (!priv->cgid_in_use[i]) {
			priv->cgid_in_use[i] = 1;
			break;
		}
	}

	if (i < priv->max_cgs) {
		options |= DPNI_QUEUE_OPT_SET_CGID;
		cfg.cgid = i;
		dpaa2_q->cgid = cfg.cgid;
	} else {
		dpaa2_q->cgid = 0xff;
	}

	/* if ls2088 or rev2 device, enable the stashing */
	if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) {
		options |= DPNI_QUEUE_OPT_FLC;
		cfg.flc.stash_control = true;
		dpaa2_flc_stashing_clear_all(&cfg.flc.value);
		if (getenv("DPAA2_DATA_STASHING_OFF")) {
			dpaa2_flc_stashing_set(DPAA2_FLC_DATA_STASHING, 0,
				&cfg.flc.value);
			dpaa2_q->data_stashing_off = 1;
		} else {
			dpaa2_flc_stashing_set(DPAA2_FLC_DATA_STASHING, 1,
				&cfg.flc.value);
			dpaa2_q->data_stashing_off = 0;
		}
		if ((dpaa2_svr_family & 0xffff0000) != SVR_LX2160A) {
			dpaa2_flc_stashing_set(DPAA2_FLC_ANNO_STASHING, 1,
				&cfg.flc.value);
		}
	}
	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
			     dpaa2_q->tc_index, flow_id, options, &cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret);
		return -1;
	}

	if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
		struct dpni_taildrop taildrop;

		taildrop.enable = 1;
		dpaa2_q->nb_desc = nb_rx_desc;
		/* A private CGR uses the tail drop length in frames
		 * (nb_rx_desc); for the remaining cases standard byte based
		 * tail drop is used. There is no HW restriction, but the
		 * number of CGRs is limited, hence this restriction is placed.
		 */
		if (dpaa2_q->cgid != 0xff) {
			/* enabling per rx queue congestion control */
			taildrop.threshold = nb_rx_desc;
			taildrop.units = DPNI_CONGESTION_UNIT_FRAMES;
			taildrop.oal = 0;
			DPAA2_PMD_DEBUG("Enabling CG Tail Drop on queue = %d",
					rx_queue_id);
			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
						DPNI_CP_CONGESTION_GROUP,
						DPNI_QUEUE_RX,
						dpaa2_q->tc_index,
						dpaa2_q->cgid, &taildrop);
		} else {
			/* enabling per rx queue congestion control */
			taildrop.threshold = CONG_THRESHOLD_RX_BYTES_Q;
			taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
			taildrop.oal = CONG_RX_OAL;
			DPAA2_PMD_DEBUG("Enabling Byte based Drop on queue= %d",
					rx_queue_id);
			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
						DPNI_CP_QUEUE, DPNI_QUEUE_RX,
						dpaa2_q->tc_index, flow_id,
						&taildrop);
		}
		if (ret) {
			DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
				      ret);
			return -1;
		}
	} else { /* Disable tail Drop */
		struct dpni_taildrop taildrop = {0};
		DPAA2_PMD_INFO("Tail drop is disabled on queue");

		taildrop.enable = 0;
		if (dpaa2_q->cgid != 0xff) {
			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
					DPNI_CP_CONGESTION_GROUP, DPNI_QUEUE_RX,
					dpaa2_q->tc_index,
					dpaa2_q->cgid, &taildrop);
		} else {
			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
					dpaa2_q->tc_index, flow_id, &taildrop);
		}
		if (ret) {
			DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
				      ret);
			return -1;
		}
	}

	dev->data->rx_queues[rx_queue_id] = dpaa2_q;
	return 0;
}
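
/*
 * Set up a Tx flow: map the queue to a (channel, traffic class) pair,
 * program the Tx confirmation mode for every CEETM channel when the first
 * queue is configured and, unless Tx CGR is disabled, arm congestion
 * notification so that the hardware writes the congestion state into the
 * queue's CSCN memory.
 */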
static int
dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t tx_queue_id,
			 uint16_t nb_tx_desc,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_txconf *tx_conf)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
		priv->tx_vq[tx_queue_id];
	struct dpaa2_queue *dpaa2_tx_conf_q = (struct dpaa2_queue *)
		priv->tx_conf_vq[tx_queue_id];
	struct fsl_mc_io *dpni = dev->process_private;
	struct dpni_queue tx_conf_cfg;
	struct dpni_queue tx_flow_cfg;
	uint8_t options = 0, flow_id;
	uint8_t ceetm_ch_idx;
	uint16_t channel_id;
	struct dpni_queue_id qid;
	uint32_t tc_id;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Tx deferred start is not supported */
	if (tx_conf->tx_deferred_start) {
		DPAA2_PMD_ERR("%p:Tx deferred start not supported",
			      (void *)dev);
		return -EINVAL;
	}

	dpaa2_q->nb_desc = UINT16_MAX;
	dpaa2_q->offloads = tx_conf->offloads;

	/* Return if queue already configured */
	if (dpaa2_q->flow_id != 0xffff) {
		dev->data->tx_queues[tx_queue_id] = dpaa2_q;
		return 0;
	}

	memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
	memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));

	if (!tx_queue_id) {
		for (ceetm_ch_idx = 0;
			ceetm_ch_idx <= (priv->num_channels - 1);
			ceetm_ch_idx++) {
			/* Set tx-conf and error configuration */
			if (priv->flags & DPAA2_TX_CONF_ENABLE) {
				ret = dpni_set_tx_confirmation_mode(dpni,
						CMD_PRI_LOW, priv->token,
						ceetm_ch_idx,
						DPNI_CONF_AFFINE);
			} else {
				ret = dpni_set_tx_confirmation_mode(dpni,
						CMD_PRI_LOW, priv->token,
						ceetm_ch_idx,
						DPNI_CONF_DISABLE);
			}
			if (ret) {
				DPAA2_PMD_ERR("Error(%d) in tx conf setting",
					ret);
				return ret;
			}
		}
	}

	tc_id = tx_queue_id % priv->num_tx_tc;
	channel_id = (uint8_t)(tx_queue_id / priv->num_tx_tc) % priv->num_channels;
	flow_id = 0;

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
			((channel_id << 8) | tc_id), flow_id, options,
			&tx_flow_cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting the tx flow: "
			"tc_id=%d, flow=%d err=%d",
			tc_id, flow_id, ret);
		return -1;
	}

	dpaa2_q->flow_id = flow_id;

	dpaa2_q->tc_index = tc_id;

	ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
			DPNI_QUEUE_TX, ((channel_id << 8) | dpaa2_q->tc_index),
			dpaa2_q->flow_id, &tx_flow_cfg, &qid);
	if (ret) {
		DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
		return -1;
	}
	dpaa2_q->fqid = qid.fqid;

	if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
		struct dpni_congestion_notification_cfg cong_notif_cfg = {0};

		dpaa2_q->nb_desc = nb_tx_desc;

		cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
		cong_notif_cfg.threshold_entry = nb_tx_desc;
		/* Notify that the queue is not congested when the data in
		 * the queue is below this threshold (90% of the value).
		 */
		cong_notif_cfg.threshold_exit = (nb_tx_desc * 9) / 10;
		cong_notif_cfg.message_ctx = 0;
		cong_notif_cfg.message_iova =
			(size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
		cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
		cong_notif_cfg.notification_mode =
					DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
					DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
					DPNI_CONG_OPT_COHERENT_WRITE;
		cong_notif_cfg.cg_point = DPNI_CP_QUEUE;

		ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
						       priv->token,
						       DPNI_QUEUE_TX,
						       ((channel_id << 8) | tc_id),
						       &cong_notif_cfg);
		if (ret) {
			DPAA2_PMD_ERR(
			   "Error in setting tx congestion notification: "
			   "err=%d", ret);
			return -ret;
		}
	}
	dpaa2_q->cb_eqresp_free = dpaa2_dev_free_eqresp_buf;
	dev->data->tx_queues[tx_queue_id] = dpaa2_q;

	if (priv->flags & DPAA2_TX_CONF_ENABLE) {
		dpaa2_q->tx_conf_queue = dpaa2_tx_conf_q;
		options = options | DPNI_QUEUE_OPT_USER_CTX;
		tx_conf_cfg.user_context = (size_t)(dpaa2_q);
		ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
			DPNI_QUEUE_TX_CONFIRM,
			((channel_id << 8) | dpaa2_tx_conf_q->tc_index),
			dpaa2_tx_conf_q->flow_id, options, &tx_conf_cfg);
		if (ret) {
			DPAA2_PMD_ERR("Error in setting the tx conf flow: "
			      "tc_index=%d, flow=%d err=%d",
			      dpaa2_tx_conf_q->tc_index,
			      dpaa2_tx_conf_q->flow_id, ret);
			return -1;
		}

		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
			DPNI_QUEUE_TX_CONFIRM,
			((channel_id << 8) | dpaa2_tx_conf_q->tc_index),
			dpaa2_tx_conf_q->flow_id, &tx_conf_cfg, &qid);
		if (ret) {
			DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
			return -1;
		}
		dpaa2_tx_conf_q->fqid = qid.fqid;
	}
	return 0;
}

static void
dpaa2_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct dpaa2_queue *dpaa2_q = dev->data->rx_queues[rx_queue_id];
	struct dpaa2_dev_priv *priv = dpaa2_q->eth_data->dev_private;
	struct fsl_mc_io *dpni =
		(struct fsl_mc_io *)priv->eth_dev->process_private;
	uint8_t options = 0;
	int ret;
	struct dpni_queue cfg;

	memset(&cfg, 0, sizeof(struct dpni_queue));
	PMD_INIT_FUNC_TRACE();

	total_nb_rx_desc -= dpaa2_q->nb_desc;

	if (dpaa2_q->cgid != 0xff) {
		options = DPNI_QUEUE_OPT_CLEAR_CGID;
		cfg.cgid = dpaa2_q->cgid;

		ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_RX,
				     dpaa2_q->tc_index, dpaa2_q->flow_id,
				     options, &cfg);
		if (ret)
			DPAA2_PMD_ERR("Unable to clear CGR from q=%u err=%d",
				      dpaa2_q->fqid, ret);
		priv->cgid_in_use[dpaa2_q->cgid] = 0;
		dpaa2_q->cgid = 0xff;
	}
}
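
/*
 * Query the frame count of an Rx frame queue through the QBMan software
 * portal; an affine portal is acquired on the fly if the calling thread
 * does not own one yet.
 */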
static uint32_t
dpaa2_dev_rx_queue_count(void *rx_queue)
{
	int32_t ret;
	struct dpaa2_queue *dpaa2_q;
	struct qbman_swp *swp;
	struct qbman_fq_query_np_rslt state;
	uint32_t frame_cnt = 0;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR(
				"Failed to allocate IO portal, tid: %d",
				rte_gettid());
			return -EINVAL;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	dpaa2_q = rx_queue;

	if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
		frame_cnt = qbman_fq_state_frame_count(&state);
		DPAA2_PMD_DP_DEBUG("RX frame count for q(%p) is %u",
				   rx_queue, frame_cnt);
	}
	return frame_cnt;
}

static const uint32_t *
dpaa2_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
{
	static const uint32_t ptypes[] = {
		/* todo - add more types */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
	};

	if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx ||
		dev->rx_pkt_burst == dpaa2_dev_rx ||
		dev->rx_pkt_burst == dpaa2_dev_loopback_rx) {
		*no_of_elements = RTE_DIM(ptypes);
		return ptypes;
	}
	return NULL;
}

/**
 * Dpaa2 link interrupt handler
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
1111c5acbb5eSHemant Agrawal * 1112c5acbb5eSHemant Agrawal * @return 1113c5acbb5eSHemant Agrawal * void 1114c5acbb5eSHemant Agrawal */ 1115c5acbb5eSHemant Agrawal static void 1116c5acbb5eSHemant Agrawal dpaa2_interrupt_handler(void *param) 1117c5acbb5eSHemant Agrawal { 1118c5acbb5eSHemant Agrawal struct rte_eth_dev *dev = param; 1119c5acbb5eSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 112081c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1121c5acbb5eSHemant Agrawal int ret; 1122c5acbb5eSHemant Agrawal int irq_index = DPNI_IRQ_INDEX; 1123c5acbb5eSHemant Agrawal unsigned int status = 0, clear = 0; 1124c5acbb5eSHemant Agrawal 1125c5acbb5eSHemant Agrawal PMD_INIT_FUNC_TRACE(); 1126c5acbb5eSHemant Agrawal 1127c5acbb5eSHemant Agrawal if (dpni == NULL) { 1128a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 1129c5acbb5eSHemant Agrawal return; 1130c5acbb5eSHemant Agrawal } 1131c5acbb5eSHemant Agrawal 1132c5acbb5eSHemant Agrawal ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token, 1133c5acbb5eSHemant Agrawal irq_index, &status); 1134c5acbb5eSHemant Agrawal if (unlikely(ret)) { 1135a10a988aSShreyansh Jain DPAA2_PMD_ERR("Can't get irq status (err %d)", ret); 1136c5acbb5eSHemant Agrawal clear = 0xffffffff; 1137c5acbb5eSHemant Agrawal goto out; 1138c5acbb5eSHemant Agrawal } 1139c5acbb5eSHemant Agrawal 1140c5acbb5eSHemant Agrawal if (status & DPNI_IRQ_EVENT_LINK_CHANGED) { 1141c5acbb5eSHemant Agrawal clear = DPNI_IRQ_EVENT_LINK_CHANGED; 1142c5acbb5eSHemant Agrawal dpaa2_dev_link_update(dev, 0); 1143c5acbb5eSHemant Agrawal /* calling all the apps registered for link status event */ 11445723fbedSFerruh Yigit rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); 1145c5acbb5eSHemant Agrawal } 1146c5acbb5eSHemant Agrawal out: 1147c5acbb5eSHemant Agrawal ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token, 1148c5acbb5eSHemant Agrawal irq_index, clear); 1149c5acbb5eSHemant Agrawal if (unlikely(ret)) 1150a10a988aSShreyansh Jain DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret); 1151c5acbb5eSHemant Agrawal } 1152c5acbb5eSHemant Agrawal 1153c5acbb5eSHemant Agrawal static int 1154c5acbb5eSHemant Agrawal dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable) 1155c5acbb5eSHemant Agrawal { 1156c5acbb5eSHemant Agrawal int err = 0; 1157c5acbb5eSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 115881c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1159c5acbb5eSHemant Agrawal int irq_index = DPNI_IRQ_INDEX; 1160c5acbb5eSHemant Agrawal unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED; 1161c5acbb5eSHemant Agrawal 1162c5acbb5eSHemant Agrawal PMD_INIT_FUNC_TRACE(); 1163c5acbb5eSHemant Agrawal 1164c5acbb5eSHemant Agrawal err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token, 1165c5acbb5eSHemant Agrawal irq_index, mask); 1166c5acbb5eSHemant Agrawal if (err < 0) { 1167a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err, 1168c5acbb5eSHemant Agrawal strerror(-err)); 1169c5acbb5eSHemant Agrawal return err; 1170c5acbb5eSHemant Agrawal } 1171c5acbb5eSHemant Agrawal 1172c5acbb5eSHemant Agrawal err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token, 1173c5acbb5eSHemant Agrawal irq_index, enable); 1174c5acbb5eSHemant Agrawal if (err < 0) 1175a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err, 1176c5acbb5eSHemant Agrawal strerror(-err)); 1177c5acbb5eSHemant Agrawal 1178c5acbb5eSHemant Agrawal return err; 
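/*
 * Illustrative sketch, not part of the driver: dpaa2_interrupt_handler()
 * above ends in rte_eth_dev_callback_process(), so an application only has
 * to enable LSC in the port configuration and register a callback to see
 * DPNI link-change interrupts. All names below are placeholders.
 */
#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>

static int
example_lsc_cb(uint16_t port_id, enum rte_eth_event_type event,
	       void *cb_arg, void *ret_param)
{
	struct rte_eth_link link;

	RTE_SET_USED(event);
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);

	if (rte_eth_link_get_nowait(port_id, &link) == 0)
		printf("port %u link is %s\n", port_id,
		       link.link_status ? "up" : "down");
	return 0;
}

static int
example_enable_lsc(uint16_t port_id, struct rte_eth_conf *conf)
{
	/* ask the PMD to arm the DPNI link-change interrupt at dev_start */
	conf->intr_conf.lsc = 1;
	return rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
					     example_lsc_cb, NULL);
}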
1179c5acbb5eSHemant Agrawal } 1180c5acbb5eSHemant Agrawal 11813e5a335dSHemant Agrawal static int 11823e5a335dSHemant Agrawal dpaa2_dev_start(struct rte_eth_dev *dev) 11833e5a335dSHemant Agrawal { 1184c5acbb5eSHemant Agrawal struct rte_device *rdev = dev->device; 1185c5acbb5eSHemant Agrawal struct rte_dpaa2_device *dpaa2_dev; 11863e5a335dSHemant Agrawal struct rte_eth_dev_data *data = dev->data; 11873e5a335dSHemant Agrawal struct dpaa2_dev_priv *priv = data->dev_private; 118881c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 11893e5a335dSHemant Agrawal struct dpni_queue cfg; 1190ef18dafeSHemant Agrawal struct dpni_error_cfg err_cfg; 11913e5a335dSHemant Agrawal struct dpni_queue_id qid; 11923e5a335dSHemant Agrawal struct dpaa2_queue *dpaa2_q; 11933e5a335dSHemant Agrawal int ret, i; 1194c5acbb5eSHemant Agrawal struct rte_intr_handle *intr_handle; 1195c5acbb5eSHemant Agrawal 1196c5acbb5eSHemant Agrawal dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device); 1197d61138d4SHarman Kalra intr_handle = dpaa2_dev->intr_handle; 11983e5a335dSHemant Agrawal 11993e5a335dSHemant Agrawal PMD_INIT_FUNC_TRACE(); 12003e5a335dSHemant Agrawal ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token); 12013e5a335dSHemant Agrawal if (ret) { 1202a10a988aSShreyansh Jain DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d", 1203a10a988aSShreyansh Jain priv->hw_id, ret); 12043e5a335dSHemant Agrawal return ret; 12053e5a335dSHemant Agrawal } 12063e5a335dSHemant Agrawal 1207aa8c595aSHemant Agrawal /* Power up the phy. Needed to make the link go UP */ 1208a1f3a12cSHemant Agrawal dpaa2_dev_set_link_up(dev); 1209a1f3a12cSHemant Agrawal 12103e5a335dSHemant Agrawal for (i = 0; i < data->nb_rx_queues; i++) { 12113e5a335dSHemant Agrawal dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i]; 12123e5a335dSHemant Agrawal ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token, 12133e5a335dSHemant Agrawal DPNI_QUEUE_RX, dpaa2_q->tc_index, 12143e5a335dSHemant Agrawal dpaa2_q->flow_id, &cfg, &qid); 12153e5a335dSHemant Agrawal if (ret) { 1216a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error in getting flow information: " 1217a10a988aSShreyansh Jain "err=%d", ret); 12183e5a335dSHemant Agrawal return ret; 12193e5a335dSHemant Agrawal } 12203e5a335dSHemant Agrawal dpaa2_q->fqid = qid.fqid; 12213e5a335dSHemant Agrawal } 12223e5a335dSHemant Agrawal 12234690a611SNipun Gupta if (dpaa2_enable_err_queue) { 12244690a611SNipun Gupta ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token, 12254690a611SNipun Gupta DPNI_QUEUE_RX_ERR, 0, 0, &cfg, &qid); 12264690a611SNipun Gupta if (ret) { 12274690a611SNipun Gupta DPAA2_PMD_ERR("Error getting rx err flow information: err=%d", 12284690a611SNipun Gupta ret); 12294690a611SNipun Gupta return ret; 12304690a611SNipun Gupta } 12314690a611SNipun Gupta dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq; 12324690a611SNipun Gupta dpaa2_q->fqid = qid.fqid; 12334690a611SNipun Gupta dpaa2_q->eth_data = dev->data; 12344690a611SNipun Gupta 12354690a611SNipun Gupta err_cfg.errors = DPNI_ERROR_DISC; 12364690a611SNipun Gupta err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE; 12374690a611SNipun Gupta } else { 12384690a611SNipun Gupta /* checksum errors, send them to normal path 12394690a611SNipun Gupta * and set it in annotation 12404690a611SNipun Gupta */ 1241ef18dafeSHemant Agrawal err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE; 12424690a611SNipun Gupta 12434690a611SNipun Gupta /* if packet with parse error are not to be dropped */ 124434356a5dSShreyansh Jain err_cfg.errors 
|= DPNI_ERROR_PHE; 1245ef18dafeSHemant Agrawal 1246ef18dafeSHemant Agrawal err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE; 12474690a611SNipun Gupta } 1248ef18dafeSHemant Agrawal err_cfg.set_frame_annotation = true; 1249ef18dafeSHemant Agrawal 1250ef18dafeSHemant Agrawal ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW, 1251ef18dafeSHemant Agrawal priv->token, &err_cfg); 1252ef18dafeSHemant Agrawal if (ret) { 1253a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error to dpni_set_errors_behavior: code = %d", 1254a10a988aSShreyansh Jain ret); 1255ef18dafeSHemant Agrawal return ret; 1256ef18dafeSHemant Agrawal } 1257ef18dafeSHemant Agrawal 1258c5acbb5eSHemant Agrawal /* if the interrupts were configured on this devices*/ 1259d61138d4SHarman Kalra if (intr_handle && rte_intr_fd_get(intr_handle) && 1260d61138d4SHarman Kalra dev->data->dev_conf.intr_conf.lsc != 0) { 1261c5acbb5eSHemant Agrawal /* Registering LSC interrupt handler */ 1262c5acbb5eSHemant Agrawal rte_intr_callback_register(intr_handle, 1263c5acbb5eSHemant Agrawal dpaa2_interrupt_handler, 1264c5acbb5eSHemant Agrawal (void *)dev); 1265c5acbb5eSHemant Agrawal 1266c5acbb5eSHemant Agrawal /* enable vfio intr/eventfd mapping 1267c5acbb5eSHemant Agrawal * Interrupt index 0 is required, so we can not use 1268c5acbb5eSHemant Agrawal * rte_intr_enable. 1269c5acbb5eSHemant Agrawal */ 1270c5acbb5eSHemant Agrawal rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX); 1271c5acbb5eSHemant Agrawal 1272c5acbb5eSHemant Agrawal /* enable dpni_irqs */ 1273c5acbb5eSHemant Agrawal dpaa2_eth_setup_irqs(dev, 1); 1274c5acbb5eSHemant Agrawal } 1275c5acbb5eSHemant Agrawal 127616c4a3c4SNipun Gupta /* Change the tx burst function if ordered queues are used */ 127716c4a3c4SNipun Gupta if (priv->en_ordered) 127816c4a3c4SNipun Gupta dev->tx_pkt_burst = dpaa2_dev_tx_ordered; 127916c4a3c4SNipun Gupta 1280f4909c42SJie Hai for (i = 0; i < dev->data->nb_rx_queues; i++) 1281f4909c42SJie Hai dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; 1282f4909c42SJie Hai for (i = 0; i < dev->data->nb_tx_queues; i++) 1283f4909c42SJie Hai dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; 1284f4909c42SJie Hai 12853e5a335dSHemant Agrawal return 0; 12863e5a335dSHemant Agrawal } 12873e5a335dSHemant Agrawal 12883e5a335dSHemant Agrawal /** 12893e5a335dSHemant Agrawal * This routine disables all traffic on the adapter by issuing a 12903e5a335dSHemant Agrawal * global reset on the MAC. 
12913e5a335dSHemant Agrawal */ 129262024eb8SIvan Ilchenko static int 12933e5a335dSHemant Agrawal dpaa2_dev_stop(struct rte_eth_dev *dev) 12943e5a335dSHemant Agrawal { 12953e5a335dSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 129681c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 12973e5a335dSHemant Agrawal int ret; 1298c56c86ffSHemant Agrawal struct rte_eth_link link; 1299d192fd32SVanshika Shukla struct rte_device *rdev = dev->device; 1300d192fd32SVanshika Shukla struct rte_intr_handle *intr_handle; 1301d192fd32SVanshika Shukla struct rte_dpaa2_device *dpaa2_dev; 1302f4909c42SJie Hai uint16_t i; 1303d192fd32SVanshika Shukla 1304d192fd32SVanshika Shukla dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device); 1305d192fd32SVanshika Shukla intr_handle = dpaa2_dev->intr_handle; 13063e5a335dSHemant Agrawal 13073e5a335dSHemant Agrawal PMD_INIT_FUNC_TRACE(); 13083e5a335dSHemant Agrawal 1309c5acbb5eSHemant Agrawal /* reset interrupt callback */ 1310d61138d4SHarman Kalra if (intr_handle && rte_intr_fd_get(intr_handle) && 1311d61138d4SHarman Kalra dev->data->dev_conf.intr_conf.lsc != 0) { 1312c5acbb5eSHemant Agrawal /*disable dpni irqs */ 1313c5acbb5eSHemant Agrawal dpaa2_eth_setup_irqs(dev, 0); 1314c5acbb5eSHemant Agrawal 1315c5acbb5eSHemant Agrawal /* disable vfio intr before callback unregister */ 1316c5acbb5eSHemant Agrawal rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX); 1317c5acbb5eSHemant Agrawal 1318c5acbb5eSHemant Agrawal /* Unregistering LSC interrupt handler */ 1319c5acbb5eSHemant Agrawal rte_intr_callback_unregister(intr_handle, 1320c5acbb5eSHemant Agrawal dpaa2_interrupt_handler, 1321c5acbb5eSHemant Agrawal (void *)dev); 1322c5acbb5eSHemant Agrawal } 1323c5acbb5eSHemant Agrawal 1324a1f3a12cSHemant Agrawal dpaa2_dev_set_link_down(dev); 1325a1f3a12cSHemant Agrawal 13263e5a335dSHemant Agrawal ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token); 13273e5a335dSHemant Agrawal if (ret) { 1328a10a988aSShreyansh Jain DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev", 13293e5a335dSHemant Agrawal ret, priv->hw_id); 133062024eb8SIvan Ilchenko return ret; 13313e5a335dSHemant Agrawal } 1332c56c86ffSHemant Agrawal 1333c56c86ffSHemant Agrawal /* clear the recorded link status */ 1334c56c86ffSHemant Agrawal memset(&link, 0, sizeof(link)); 13357e2eb5f0SStephen Hemminger rte_eth_linkstatus_set(dev, &link); 133662024eb8SIvan Ilchenko 1337f4909c42SJie Hai for (i = 0; i < dev->data->nb_rx_queues; i++) 1338f4909c42SJie Hai dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; 1339f4909c42SJie Hai for (i = 0; i < dev->data->nb_tx_queues; i++) 1340f4909c42SJie Hai dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; 1341f4909c42SJie Hai 134262024eb8SIvan Ilchenko return 0; 13433e5a335dSHemant Agrawal } 13443e5a335dSHemant Agrawal 1345b142387bSThomas Monjalon static int 13463e5a335dSHemant Agrawal dpaa2_dev_close(struct rte_eth_dev *dev) 13473e5a335dSHemant Agrawal { 13483e5a335dSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 134981c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 13505964d36aSSachin Saxena int i, ret; 1351a1f3a12cSHemant Agrawal struct rte_eth_link link; 13523e5a335dSHemant Agrawal 13533e5a335dSHemant Agrawal PMD_INIT_FUNC_TRACE(); 13543e5a335dSHemant Agrawal 13555964d36aSSachin Saxena if (rte_eal_process_type() != RTE_PROC_PRIMARY) 13565964d36aSSachin Saxena return 0; 13576a556bd6SHemant Agrawal 13585964d36aSSachin Saxena if (!dpni) { 
13595964d36aSSachin Saxena DPAA2_PMD_WARN("Already closed or not started"); 13605964d36aSSachin Saxena return -1; 13615964d36aSSachin Saxena } 13625964d36aSSachin Saxena 1363ac624068SGagandeep Singh dpaa2_tm_deinit(dev); 13645964d36aSSachin Saxena dpaa2_flow_clean(dev); 13653e5a335dSHemant Agrawal /* Clean the device first */ 13663e5a335dSHemant Agrawal ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token); 13673e5a335dSHemant Agrawal if (ret) { 1368a10a988aSShreyansh Jain DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret); 1369b142387bSThomas Monjalon return -1; 13703e5a335dSHemant Agrawal } 1371a1f3a12cSHemant Agrawal 1372a1f3a12cSHemant Agrawal memset(&link, 0, sizeof(link)); 13737e2eb5f0SStephen Hemminger rte_eth_linkstatus_set(dev, &link); 1374b142387bSThomas Monjalon 13755964d36aSSachin Saxena /* Free private queues memory */ 13765964d36aSSachin Saxena dpaa2_free_rx_tx_queues(dev); 13775964d36aSSachin Saxena /* Close the device at underlying layer*/ 13785964d36aSSachin Saxena ret = dpni_close(dpni, CMD_PRI_LOW, priv->token); 13795964d36aSSachin Saxena if (ret) { 13805964d36aSSachin Saxena DPAA2_PMD_ERR("Failure closing dpni device with err code %d", 13815964d36aSSachin Saxena ret); 13825964d36aSSachin Saxena } 13835964d36aSSachin Saxena 13845964d36aSSachin Saxena /* Free the allocated memory for ethernet private data and dpni*/ 13855964d36aSSachin Saxena priv->hw = NULL; 13865964d36aSSachin Saxena dev->process_private = NULL; 13875964d36aSSachin Saxena rte_free(dpni); 13885964d36aSSachin Saxena 13895964d36aSSachin Saxena for (i = 0; i < MAX_TCS; i++) 13905964d36aSSachin Saxena rte_free((void *)(size_t)priv->extract.tc_extract_param[i]); 13915964d36aSSachin Saxena 13925964d36aSSachin Saxena if (priv->extract.qos_extract_param) 13935964d36aSSachin Saxena rte_free((void *)(size_t)priv->extract.qos_extract_param); 13945964d36aSSachin Saxena 13955964d36aSSachin Saxena DPAA2_PMD_INFO("%s: netdev deleted", dev->data->name); 1396b142387bSThomas Monjalon return 0; 13973e5a335dSHemant Agrawal } 13983e5a335dSHemant Agrawal 13999039c812SAndrew Rybchenko static int 1400c0e5c69aSHemant Agrawal dpaa2_dev_promiscuous_enable( 1401c0e5c69aSHemant Agrawal struct rte_eth_dev *dev) 1402c0e5c69aSHemant Agrawal { 1403c0e5c69aSHemant Agrawal int ret; 1404c0e5c69aSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 140581c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1406c0e5c69aSHemant Agrawal 1407c0e5c69aSHemant Agrawal PMD_INIT_FUNC_TRACE(); 1408c0e5c69aSHemant Agrawal 1409c0e5c69aSHemant Agrawal if (dpni == NULL) { 1410a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 14119039c812SAndrew Rybchenko return -ENODEV; 1412c0e5c69aSHemant Agrawal } 1413c0e5c69aSHemant Agrawal 1414c0e5c69aSHemant Agrawal ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); 1415c0e5c69aSHemant Agrawal if (ret < 0) 1416a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret); 14175d5aeeedSHemant Agrawal 14185d5aeeedSHemant Agrawal ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); 14195d5aeeedSHemant Agrawal if (ret < 0) 1420a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret); 14219039c812SAndrew Rybchenko 14229039c812SAndrew Rybchenko return ret; 1423c0e5c69aSHemant Agrawal } 1424c0e5c69aSHemant Agrawal 14259039c812SAndrew Rybchenko static int 1426c0e5c69aSHemant Agrawal dpaa2_dev_promiscuous_disable( 1427c0e5c69aSHemant Agrawal struct rte_eth_dev *dev) 1428c0e5c69aSHemant 
Agrawal { 1429c0e5c69aSHemant Agrawal int ret; 1430c0e5c69aSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 143181c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1432c0e5c69aSHemant Agrawal 1433c0e5c69aSHemant Agrawal PMD_INIT_FUNC_TRACE(); 1434c0e5c69aSHemant Agrawal 1435c0e5c69aSHemant Agrawal if (dpni == NULL) { 1436a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 14379039c812SAndrew Rybchenko return -ENODEV; 1438c0e5c69aSHemant Agrawal } 1439c0e5c69aSHemant Agrawal 1440c0e5c69aSHemant Agrawal ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); 1441c0e5c69aSHemant Agrawal if (ret < 0) 1442a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret); 14435d5aeeedSHemant Agrawal 14445d5aeeedSHemant Agrawal if (dev->data->all_multicast == 0) { 14455d5aeeedSHemant Agrawal ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, 14465d5aeeedSHemant Agrawal priv->token, false); 14475d5aeeedSHemant Agrawal if (ret < 0) 1448a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to disable M promisc mode %d", 14495d5aeeedSHemant Agrawal ret); 14505d5aeeedSHemant Agrawal } 14519039c812SAndrew Rybchenko 14529039c812SAndrew Rybchenko return ret; 14535d5aeeedSHemant Agrawal } 14545d5aeeedSHemant Agrawal 1455ca041cd4SIvan Ilchenko static int 14565d5aeeedSHemant Agrawal dpaa2_dev_allmulticast_enable( 14575d5aeeedSHemant Agrawal struct rte_eth_dev *dev) 14585d5aeeedSHemant Agrawal { 14595d5aeeedSHemant Agrawal int ret; 14605d5aeeedSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 146181c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 14625d5aeeedSHemant Agrawal 14635d5aeeedSHemant Agrawal PMD_INIT_FUNC_TRACE(); 14645d5aeeedSHemant Agrawal 14655d5aeeedSHemant Agrawal if (dpni == NULL) { 1466a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 1467ca041cd4SIvan Ilchenko return -ENODEV; 14685d5aeeedSHemant Agrawal } 14695d5aeeedSHemant Agrawal 14705d5aeeedSHemant Agrawal ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); 14715d5aeeedSHemant Agrawal if (ret < 0) 1472a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret); 1473ca041cd4SIvan Ilchenko 1474ca041cd4SIvan Ilchenko return ret; 14755d5aeeedSHemant Agrawal } 14765d5aeeedSHemant Agrawal 1477ca041cd4SIvan Ilchenko static int 14785d5aeeedSHemant Agrawal dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev) 14795d5aeeedSHemant Agrawal { 14805d5aeeedSHemant Agrawal int ret; 14815d5aeeedSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 148281c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 14835d5aeeedSHemant Agrawal 14845d5aeeedSHemant Agrawal PMD_INIT_FUNC_TRACE(); 14855d5aeeedSHemant Agrawal 14865d5aeeedSHemant Agrawal if (dpni == NULL) { 1487a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 1488ca041cd4SIvan Ilchenko return -ENODEV; 14895d5aeeedSHemant Agrawal } 14905d5aeeedSHemant Agrawal 14915d5aeeedSHemant Agrawal /* must remain on for all promiscuous */ 14925d5aeeedSHemant Agrawal if (dev->data->promiscuous == 1) 1493ca041cd4SIvan Ilchenko return 0; 14945d5aeeedSHemant Agrawal 14955d5aeeedSHemant Agrawal ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); 14965d5aeeedSHemant Agrawal if (ret < 0) 1497a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret); 1498ca041cd4SIvan Ilchenko 1499ca041cd4SIvan Ilchenko return ret; 
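/*
 * Illustrative sketch, not part of the driver: the handlers above are reached
 * through the standard ethdev calls, so promiscuous and all-multicast modes
 * on a DPAA2 port are toggled like on any other PMD. Note that the driver
 * keeps multicast promiscuity on while either mode is still requested.
 * port_id is a placeholder and return values are ignored for brevity.
 */
#include <rte_ethdev.h>

static void
example_toggle_rx_modes(uint16_t port_id)
{
	/* enables both unicast and multicast promiscuity in the DPNI */
	rte_eth_promiscuous_enable(port_id);

	/* multicast promiscuity only */
	rte_eth_allmulticast_enable(port_id);

	/* disabling in either order is fine; the driver protects the
	 * multicast setting until both modes are off
	 */
	rte_eth_promiscuous_disable(port_id);
	rte_eth_allmulticast_disable(port_id);
}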
1500c0e5c69aSHemant Agrawal } 1501e31d4d21SHemant Agrawal 1502e31d4d21SHemant Agrawal static int 1503e31d4d21SHemant Agrawal dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 1504e31d4d21SHemant Agrawal { 1505e31d4d21SHemant Agrawal int ret; 1506e31d4d21SHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 150781c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 150835b2d13fSOlivier Matz uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN 150944ea7355SAshish Jain + VLAN_TAG_SIZE; 1510e31d4d21SHemant Agrawal 1511e31d4d21SHemant Agrawal PMD_INIT_FUNC_TRACE(); 1512e31d4d21SHemant Agrawal 1513e31d4d21SHemant Agrawal if (dpni == NULL) { 1514a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 1515e31d4d21SHemant Agrawal return -EINVAL; 1516e31d4d21SHemant Agrawal } 1517e31d4d21SHemant Agrawal 1518e31d4d21SHemant Agrawal /* Set the Max Rx frame length as 'mtu' + 1519e31d4d21SHemant Agrawal * Maximum Ethernet header length 1520e31d4d21SHemant Agrawal */ 1521e31d4d21SHemant Agrawal ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token, 15226f8be0fbSHemant Agrawal frame_size - RTE_ETHER_CRC_LEN); 1523e31d4d21SHemant Agrawal if (ret) { 1524a10a988aSShreyansh Jain DPAA2_PMD_ERR("Setting the max frame length failed"); 1525e31d4d21SHemant Agrawal return -1; 1526e31d4d21SHemant Agrawal } 1527de08b474SApeksha Gupta dev->data->mtu = mtu; 1528a10a988aSShreyansh Jain DPAA2_PMD_INFO("MTU configured for the device: %d", mtu); 1529e31d4d21SHemant Agrawal return 0; 1530e31d4d21SHemant Agrawal } 1531e31d4d21SHemant Agrawal 1532b4d97b7dSHemant Agrawal static int 1533b4d97b7dSHemant Agrawal dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev, 15346d13ea8eSOlivier Matz struct rte_ether_addr *addr, 1535b4d97b7dSHemant Agrawal __rte_unused uint32_t index, 1536b4d97b7dSHemant Agrawal __rte_unused uint32_t pool) 1537b4d97b7dSHemant Agrawal { 1538b4d97b7dSHemant Agrawal int ret; 1539b4d97b7dSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 154081c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1541b4d97b7dSHemant Agrawal 1542b4d97b7dSHemant Agrawal PMD_INIT_FUNC_TRACE(); 1543b4d97b7dSHemant Agrawal 1544b4d97b7dSHemant Agrawal if (dpni == NULL) { 1545a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 1546b4d97b7dSHemant Agrawal return -1; 1547b4d97b7dSHemant Agrawal } 1548b4d97b7dSHemant Agrawal 154996f7bfe8SSachin Saxena ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, priv->token, 155096f7bfe8SSachin Saxena addr->addr_bytes, 0, 0, 0); 1551b4d97b7dSHemant Agrawal if (ret) 1552a10a988aSShreyansh Jain DPAA2_PMD_ERR( 1553a10a988aSShreyansh Jain "error: Adding the MAC ADDR failed: err = %d", ret); 1554b4d97b7dSHemant Agrawal return 0; 1555b4d97b7dSHemant Agrawal } 1556b4d97b7dSHemant Agrawal 1557b4d97b7dSHemant Agrawal static void 1558b4d97b7dSHemant Agrawal dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev, 1559b4d97b7dSHemant Agrawal uint32_t index) 1560b4d97b7dSHemant Agrawal { 1561b4d97b7dSHemant Agrawal int ret; 1562b4d97b7dSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 156381c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1564b4d97b7dSHemant Agrawal struct rte_eth_dev_data *data = dev->data; 15656d13ea8eSOlivier Matz struct rte_ether_addr *macaddr; 1566b4d97b7dSHemant Agrawal 1567b4d97b7dSHemant Agrawal PMD_INIT_FUNC_TRACE(); 1568b4d97b7dSHemant Agrawal 1569b4d97b7dSHemant Agrawal macaddr = &data->mac_addrs[index]; 
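/*
 * Illustrative sketch, not part of the driver: dpaa2_dev_mtu_set() above
 * programs the DPNI max frame length as mtu + RTE_ETHER_HDR_LEN +
 * VLAN_TAG_SIZE (CRC excluded), and the MAC handlers wrap
 * dpni_add/remove_mac_addr(). From an application both are plain ethdev
 * calls; the MTU value and MAC address below are placeholders.
 */
#include <rte_ethdev.h>
#include <rte_ether.h>

static int
example_set_mtu_and_mac(uint16_t port_id)
{
	struct rte_ether_addr extra = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};
	int ret;

	ret = rte_eth_dev_set_mtu(port_id, 9000);
	if (ret)
		return ret;

	/* add a secondary unicast filter; the pool argument is unused here */
	return rte_eth_dev_mac_addr_add(port_id, &extra, 0);
}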
1570b4d97b7dSHemant Agrawal 1571b4d97b7dSHemant Agrawal if (dpni == NULL) { 1572a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 1573b4d97b7dSHemant Agrawal return; 1574b4d97b7dSHemant Agrawal } 1575b4d97b7dSHemant Agrawal 1576b4d97b7dSHemant Agrawal ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW, 1577b4d97b7dSHemant Agrawal priv->token, macaddr->addr_bytes); 1578b4d97b7dSHemant Agrawal if (ret) 1579a10a988aSShreyansh Jain DPAA2_PMD_ERR( 1580a10a988aSShreyansh Jain "error: Removing the MAC ADDR failed: err = %d", ret); 1581b4d97b7dSHemant Agrawal } 1582b4d97b7dSHemant Agrawal 1583caccf8b3SOlivier Matz static int 1584b4d97b7dSHemant Agrawal dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev, 15856d13ea8eSOlivier Matz struct rte_ether_addr *addr) 1586b4d97b7dSHemant Agrawal { 1587b4d97b7dSHemant Agrawal int ret; 1588b4d97b7dSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 158981c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1590b4d97b7dSHemant Agrawal 1591b4d97b7dSHemant Agrawal PMD_INIT_FUNC_TRACE(); 1592b4d97b7dSHemant Agrawal 1593b4d97b7dSHemant Agrawal if (dpni == NULL) { 1594a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 1595caccf8b3SOlivier Matz return -EINVAL; 1596b4d97b7dSHemant Agrawal } 1597b4d97b7dSHemant Agrawal 1598b4d97b7dSHemant Agrawal ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW, 1599b4d97b7dSHemant Agrawal priv->token, addr->addr_bytes); 1600b4d97b7dSHemant Agrawal 1601b4d97b7dSHemant Agrawal if (ret) 1602a10a988aSShreyansh Jain DPAA2_PMD_ERR( 1603a10a988aSShreyansh Jain "error: Setting the MAC ADDR failed %d", ret); 1604caccf8b3SOlivier Matz 1605caccf8b3SOlivier Matz return ret; 1606b4d97b7dSHemant Agrawal } 1607a10a988aSShreyansh Jain 1608b0aa5459SHemant Agrawal static 1609d5b0924bSMatan Azrad int dpaa2_dev_stats_get(struct rte_eth_dev *dev, 1610b0aa5459SHemant Agrawal struct rte_eth_stats *stats) 1611b0aa5459SHemant Agrawal { 1612b0aa5459SHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 161381c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1614b0aa5459SHemant Agrawal int32_t retcode; 1615b0aa5459SHemant Agrawal uint8_t page0 = 0, page1 = 1, page2 = 2; 1616b0aa5459SHemant Agrawal union dpni_statistics value; 1617e43f2521SShreyansh Jain int i; 1618e43f2521SShreyansh Jain struct dpaa2_queue *dpaa2_rxq, *dpaa2_txq; 1619b0aa5459SHemant Agrawal 1620b0aa5459SHemant Agrawal memset(&value, 0, sizeof(union dpni_statistics)); 1621b0aa5459SHemant Agrawal 1622b0aa5459SHemant Agrawal PMD_INIT_FUNC_TRACE(); 1623b0aa5459SHemant Agrawal 1624b0aa5459SHemant Agrawal if (!dpni) { 1625a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 1626d5b0924bSMatan Azrad return -EINVAL; 1627b0aa5459SHemant Agrawal } 1628b0aa5459SHemant Agrawal 1629b0aa5459SHemant Agrawal if (!stats) { 1630a10a988aSShreyansh Jain DPAA2_PMD_ERR("stats is NULL"); 1631d5b0924bSMatan Azrad return -EINVAL; 1632b0aa5459SHemant Agrawal } 1633b0aa5459SHemant Agrawal 1634b0aa5459SHemant Agrawal /*Get Counters from page_0*/ 1635b0aa5459SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 163616bbc98aSShreyansh Jain page0, 0, &value); 1637b0aa5459SHemant Agrawal if (retcode) 1638b0aa5459SHemant Agrawal goto err; 1639b0aa5459SHemant Agrawal 1640b0aa5459SHemant Agrawal stats->ipackets = value.page_0.ingress_all_frames; 1641b0aa5459SHemant Agrawal stats->ibytes = value.page_0.ingress_all_bytes; 1642b0aa5459SHemant Agrawal 1643b0aa5459SHemant Agrawal /*Get Counters from 
page_1*/ 1644b0aa5459SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 164516bbc98aSShreyansh Jain page1, 0, &value); 1646b0aa5459SHemant Agrawal if (retcode) 1647b0aa5459SHemant Agrawal goto err; 1648b0aa5459SHemant Agrawal 1649b0aa5459SHemant Agrawal stats->opackets = value.page_1.egress_all_frames; 1650b0aa5459SHemant Agrawal stats->obytes = value.page_1.egress_all_bytes; 1651b0aa5459SHemant Agrawal 1652b0aa5459SHemant Agrawal /*Get Counters from page_2*/ 1653b0aa5459SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 165416bbc98aSShreyansh Jain page2, 0, &value); 1655b0aa5459SHemant Agrawal if (retcode) 1656b0aa5459SHemant Agrawal goto err; 1657b0aa5459SHemant Agrawal 1658b4d97b7dSHemant Agrawal /* Ingress drop frame count due to configured rules */ 1659b4d97b7dSHemant Agrawal stats->ierrors = value.page_2.ingress_filtered_frames; 1660b4d97b7dSHemant Agrawal /* Ingress drop frame count due to error */ 1661b4d97b7dSHemant Agrawal stats->ierrors += value.page_2.ingress_discarded_frames; 1662b4d97b7dSHemant Agrawal 1663b0aa5459SHemant Agrawal stats->oerrors = value.page_2.egress_discarded_frames; 1664b0aa5459SHemant Agrawal stats->imissed = value.page_2.ingress_nobuffer_discards; 1665b0aa5459SHemant Agrawal 1666e43f2521SShreyansh Jain /* Fill in per queue stats */ 1667e43f2521SShreyansh Jain for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) && 1668e43f2521SShreyansh Jain (i < priv->nb_rx_queues || i < priv->nb_tx_queues); ++i) { 1669e43f2521SShreyansh Jain dpaa2_rxq = (struct dpaa2_queue *)priv->rx_vq[i]; 1670e43f2521SShreyansh Jain dpaa2_txq = (struct dpaa2_queue *)priv->tx_vq[i]; 1671e43f2521SShreyansh Jain if (dpaa2_rxq) 1672e43f2521SShreyansh Jain stats->q_ipackets[i] = dpaa2_rxq->rx_pkts; 1673e43f2521SShreyansh Jain if (dpaa2_txq) 1674e43f2521SShreyansh Jain stats->q_opackets[i] = dpaa2_txq->tx_pkts; 1675e43f2521SShreyansh Jain 1676e43f2521SShreyansh Jain /* Byte counting is not implemented */ 1677e43f2521SShreyansh Jain stats->q_ibytes[i] = 0; 1678e43f2521SShreyansh Jain stats->q_obytes[i] = 0; 1679e43f2521SShreyansh Jain } 1680e43f2521SShreyansh Jain 1681d5b0924bSMatan Azrad return 0; 1682b0aa5459SHemant Agrawal 1683b0aa5459SHemant Agrawal err: 1684a10a988aSShreyansh Jain DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode); 1685d5b0924bSMatan Azrad return retcode; 1686b0aa5459SHemant Agrawal }; 1687b0aa5459SHemant Agrawal 16881d6329b2SHemant Agrawal static int 16891d6329b2SHemant Agrawal dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 16901d6329b2SHemant Agrawal unsigned int n) 16911d6329b2SHemant Agrawal { 16921d6329b2SHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 169381c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 16941d6329b2SHemant Agrawal int32_t retcode; 1695c720c5f6SHemant Agrawal union dpni_statistics value[5] = {}; 16961d6329b2SHemant Agrawal unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings); 16971d6329b2SHemant Agrawal 16981d6329b2SHemant Agrawal if (n < num) 16991d6329b2SHemant Agrawal return num; 17001d6329b2SHemant Agrawal 1701876b2c90SHemant Agrawal if (xstats == NULL) 1702876b2c90SHemant Agrawal return 0; 1703876b2c90SHemant Agrawal 17041d6329b2SHemant Agrawal /* Get Counters from page_0*/ 17051d6329b2SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 17061d6329b2SHemant Agrawal 0, 0, &value[0]); 17071d6329b2SHemant Agrawal if (retcode) 17081d6329b2SHemant Agrawal goto err; 
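/*
 * Illustrative sketch, not part of the driver: dpaa2_dev_stats_get() above
 * maps DPNI statistics pages 0-2 onto struct rte_eth_stats and also fills
 * the per-queue packet counters (per-queue byte counters stay at zero). A
 * minimal reader, with port_id as a placeholder:
 */
#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

static void
example_dump_basic_stats(uint16_t port_id)
{
	struct rte_eth_stats st;

	if (rte_eth_stats_get(port_id, &st) != 0)
		return;

	printf("rx %" PRIu64 " pkts / %" PRIu64 " bytes, imissed %" PRIu64 "\n",
	       st.ipackets, st.ibytes, st.imissed);
	printf("tx %" PRIu64 " pkts / %" PRIu64 " bytes, oerrors %" PRIu64 "\n",
	       st.opackets, st.obytes, st.oerrors);
}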
17091d6329b2SHemant Agrawal 17101d6329b2SHemant Agrawal /* Get Counters from page_1*/ 17111d6329b2SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 17121d6329b2SHemant Agrawal 1, 0, &value[1]); 17131d6329b2SHemant Agrawal if (retcode) 17141d6329b2SHemant Agrawal goto err; 17151d6329b2SHemant Agrawal 17161d6329b2SHemant Agrawal /* Get Counters from page_2*/ 17171d6329b2SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 17181d6329b2SHemant Agrawal 2, 0, &value[2]); 17191d6329b2SHemant Agrawal if (retcode) 17201d6329b2SHemant Agrawal goto err; 17211d6329b2SHemant Agrawal 1722c720c5f6SHemant Agrawal for (i = 0; i < priv->max_cgs; i++) { 1723c720c5f6SHemant Agrawal if (!priv->cgid_in_use[i]) { 1724c720c5f6SHemant Agrawal /* Get Counters from page_4*/ 1725c720c5f6SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, 1726c720c5f6SHemant Agrawal priv->token, 1727c720c5f6SHemant Agrawal 4, 0, &value[4]); 1728c720c5f6SHemant Agrawal if (retcode) 1729c720c5f6SHemant Agrawal goto err; 1730c720c5f6SHemant Agrawal break; 1731c720c5f6SHemant Agrawal } 1732c720c5f6SHemant Agrawal } 1733c720c5f6SHemant Agrawal 17341d6329b2SHemant Agrawal for (i = 0; i < num; i++) { 17351d6329b2SHemant Agrawal xstats[i].id = i; 17361d6329b2SHemant Agrawal xstats[i].value = value[dpaa2_xstats_strings[i].page_id]. 17371d6329b2SHemant Agrawal raw.counter[dpaa2_xstats_strings[i].stats_id]; 17381d6329b2SHemant Agrawal } 17391d6329b2SHemant Agrawal return i; 17401d6329b2SHemant Agrawal err: 1741a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode); 17421d6329b2SHemant Agrawal return retcode; 17431d6329b2SHemant Agrawal } 17441d6329b2SHemant Agrawal 17451d6329b2SHemant Agrawal static int 17461d6329b2SHemant Agrawal dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 17471d6329b2SHemant Agrawal struct rte_eth_xstat_name *xstats_names, 1748876b2c90SHemant Agrawal unsigned int limit) 17491d6329b2SHemant Agrawal { 17501d6329b2SHemant Agrawal unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); 17511d6329b2SHemant Agrawal 1752876b2c90SHemant Agrawal if (limit < stat_cnt) 1753876b2c90SHemant Agrawal return stat_cnt; 1754876b2c90SHemant Agrawal 17551d6329b2SHemant Agrawal if (xstats_names != NULL) 17561d6329b2SHemant Agrawal for (i = 0; i < stat_cnt; i++) 1757f9acaf84SBruce Richardson strlcpy(xstats_names[i].name, 1758f9acaf84SBruce Richardson dpaa2_xstats_strings[i].name, 1759f9acaf84SBruce Richardson sizeof(xstats_names[i].name)); 17601d6329b2SHemant Agrawal 17611d6329b2SHemant Agrawal return stat_cnt; 17621d6329b2SHemant Agrawal } 17631d6329b2SHemant Agrawal 17641d6329b2SHemant Agrawal static int 17651d6329b2SHemant Agrawal dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 17661d6329b2SHemant Agrawal uint64_t *values, unsigned int n) 17671d6329b2SHemant Agrawal { 17681d6329b2SHemant Agrawal unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); 17691d6329b2SHemant Agrawal uint64_t values_copy[stat_cnt]; 17701d6329b2SHemant Agrawal 17711d6329b2SHemant Agrawal if (!ids) { 17721d6329b2SHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 177381c42c84SShreyansh Jain struct fsl_mc_io *dpni = 177481c42c84SShreyansh Jain (struct fsl_mc_io *)dev->process_private; 17751d6329b2SHemant Agrawal int32_t retcode; 1776c720c5f6SHemant Agrawal union dpni_statistics value[5] = {}; 17771d6329b2SHemant Agrawal 17781d6329b2SHemant Agrawal if (n < stat_cnt) 17791d6329b2SHemant Agrawal return stat_cnt; 
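/*
 * Illustrative sketch, not part of the driver: the xstats handlers above
 * return the total number of counters whenever the supplied array is too
 * small, which is what the usual NULL/0 size query relies on. port_id is a
 * placeholder; the snippet is trimmed for brevity.
 */
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <rte_ethdev.h>

static void
example_dump_xstats(uint16_t port_id)
{
	int i, cnt = rte_eth_xstats_get(port_id, NULL, 0);
	struct rte_eth_xstat *vals;
	struct rte_eth_xstat_name *names;

	if (cnt <= 0)
		return;

	vals = calloc(cnt, sizeof(*vals));
	names = calloc(cnt, sizeof(*names));
	if (vals && names &&
	    rte_eth_xstats_get(port_id, vals, cnt) == cnt &&
	    rte_eth_xstats_get_names(port_id, names, cnt) == cnt) {
		for (i = 0; i < cnt; i++)
			printf("%s: %" PRIu64 "\n",
			       names[i].name, vals[i].value);
	}
	free(vals);
	free(names);
}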
17801d6329b2SHemant Agrawal 17811d6329b2SHemant Agrawal if (!values) 17821d6329b2SHemant Agrawal return 0; 17831d6329b2SHemant Agrawal 17841d6329b2SHemant Agrawal /* Get Counters from page_0*/ 17851d6329b2SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 17861d6329b2SHemant Agrawal 0, 0, &value[0]); 17871d6329b2SHemant Agrawal if (retcode) 17881d6329b2SHemant Agrawal return 0; 17891d6329b2SHemant Agrawal 17901d6329b2SHemant Agrawal /* Get Counters from page_1*/ 17911d6329b2SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 17921d6329b2SHemant Agrawal 1, 0, &value[1]); 17931d6329b2SHemant Agrawal if (retcode) 17941d6329b2SHemant Agrawal return 0; 17951d6329b2SHemant Agrawal 17961d6329b2SHemant Agrawal /* Get Counters from page_2*/ 17971d6329b2SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 17981d6329b2SHemant Agrawal 2, 0, &value[2]); 17991d6329b2SHemant Agrawal if (retcode) 18001d6329b2SHemant Agrawal return 0; 18011d6329b2SHemant Agrawal 1802c720c5f6SHemant Agrawal /* Get Counters from page_4*/ 1803c720c5f6SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1804c720c5f6SHemant Agrawal 4, 0, &value[4]); 1805c720c5f6SHemant Agrawal if (retcode) 1806c720c5f6SHemant Agrawal return 0; 1807c720c5f6SHemant Agrawal 18081d6329b2SHemant Agrawal for (i = 0; i < stat_cnt; i++) { 18091d6329b2SHemant Agrawal values[i] = value[dpaa2_xstats_strings[i].page_id]. 18101d6329b2SHemant Agrawal raw.counter[dpaa2_xstats_strings[i].stats_id]; 18111d6329b2SHemant Agrawal } 18121d6329b2SHemant Agrawal return stat_cnt; 18131d6329b2SHemant Agrawal } 18141d6329b2SHemant Agrawal 18151d6329b2SHemant Agrawal dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt); 18161d6329b2SHemant Agrawal 18171d6329b2SHemant Agrawal for (i = 0; i < n; i++) { 18181d6329b2SHemant Agrawal if (ids[i] >= stat_cnt) { 1819a10a988aSShreyansh Jain DPAA2_PMD_ERR("xstats id value isn't valid"); 18201d6329b2SHemant Agrawal return -1; 18211d6329b2SHemant Agrawal } 18221d6329b2SHemant Agrawal values[i] = values_copy[ids[i]]; 18231d6329b2SHemant Agrawal } 18241d6329b2SHemant Agrawal return n; 18251d6329b2SHemant Agrawal } 18261d6329b2SHemant Agrawal 18271d6329b2SHemant Agrawal static int 18281d6329b2SHemant Agrawal dpaa2_xstats_get_names_by_id( 18291d6329b2SHemant Agrawal struct rte_eth_dev *dev, 18301d6329b2SHemant Agrawal const uint64_t *ids, 18318c9f976fSAndrew Rybchenko struct rte_eth_xstat_name *xstats_names, 18321d6329b2SHemant Agrawal unsigned int limit) 18331d6329b2SHemant Agrawal { 18341d6329b2SHemant Agrawal unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); 18351d6329b2SHemant Agrawal struct rte_eth_xstat_name xstats_names_copy[stat_cnt]; 18361d6329b2SHemant Agrawal 18371d6329b2SHemant Agrawal if (!ids) 18381d6329b2SHemant Agrawal return dpaa2_xstats_get_names(dev, xstats_names, limit); 18391d6329b2SHemant Agrawal 18401d6329b2SHemant Agrawal dpaa2_xstats_get_names(dev, xstats_names_copy, limit); 18411d6329b2SHemant Agrawal 18421d6329b2SHemant Agrawal for (i = 0; i < limit; i++) { 18431d6329b2SHemant Agrawal if (ids[i] >= stat_cnt) { 1844a10a988aSShreyansh Jain DPAA2_PMD_ERR("xstats id value isn't valid"); 18451d6329b2SHemant Agrawal return -1; 18461d6329b2SHemant Agrawal } 18471d6329b2SHemant Agrawal strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name); 18481d6329b2SHemant Agrawal } 18491d6329b2SHemant Agrawal return limit; 18501d6329b2SHemant Agrawal } 18511d6329b2SHemant Agrawal 18529970a9adSIgor Romanov static 
int 18531d6329b2SHemant Agrawal dpaa2_dev_stats_reset(struct rte_eth_dev *dev) 1854b0aa5459SHemant Agrawal { 1855b0aa5459SHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 185681c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 18579970a9adSIgor Romanov int retcode; 1858e43f2521SShreyansh Jain int i; 1859e43f2521SShreyansh Jain struct dpaa2_queue *dpaa2_q; 1860b0aa5459SHemant Agrawal 1861b0aa5459SHemant Agrawal PMD_INIT_FUNC_TRACE(); 1862b0aa5459SHemant Agrawal 1863b0aa5459SHemant Agrawal if (dpni == NULL) { 1864a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 18659970a9adSIgor Romanov return -EINVAL; 1866b0aa5459SHemant Agrawal } 1867b0aa5459SHemant Agrawal 1868b0aa5459SHemant Agrawal retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token); 1869b0aa5459SHemant Agrawal if (retcode) 1870b0aa5459SHemant Agrawal goto error; 1871b0aa5459SHemant Agrawal 1872e43f2521SShreyansh Jain /* Reset the per queue stats in dpaa2_queue structure */ 1873e43f2521SShreyansh Jain for (i = 0; i < priv->nb_rx_queues; i++) { 1874e43f2521SShreyansh Jain dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; 1875e43f2521SShreyansh Jain if (dpaa2_q) 1876e43f2521SShreyansh Jain dpaa2_q->rx_pkts = 0; 1877e43f2521SShreyansh Jain } 1878e43f2521SShreyansh Jain 1879e43f2521SShreyansh Jain for (i = 0; i < priv->nb_tx_queues; i++) { 1880e43f2521SShreyansh Jain dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; 1881e43f2521SShreyansh Jain if (dpaa2_q) 1882e43f2521SShreyansh Jain dpaa2_q->tx_pkts = 0; 1883e43f2521SShreyansh Jain } 1884e43f2521SShreyansh Jain 18859970a9adSIgor Romanov return 0; 1886b0aa5459SHemant Agrawal 1887b0aa5459SHemant Agrawal error: 1888a10a988aSShreyansh Jain DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode); 18899970a9adSIgor Romanov return retcode; 1890b0aa5459SHemant Agrawal }; 1891b0aa5459SHemant Agrawal 1892c56c86ffSHemant Agrawal /* return 0 means link status changed, -1 means not changed */ 1893c56c86ffSHemant Agrawal static int 1894c56c86ffSHemant Agrawal dpaa2_dev_link_update(struct rte_eth_dev *dev, 1895eadcfd95SRohit Raj int wait_to_complete) 1896c56c86ffSHemant Agrawal { 1897c56c86ffSHemant Agrawal int ret; 1898c56c86ffSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 189981c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 19007e2eb5f0SStephen Hemminger struct rte_eth_link link; 1901c56c86ffSHemant Agrawal struct dpni_link_state state = {0}; 1902eadcfd95SRohit Raj uint8_t count; 1903c56c86ffSHemant Agrawal 1904c56c86ffSHemant Agrawal if (dpni == NULL) { 1905a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 1906c56c86ffSHemant Agrawal return 0; 1907c56c86ffSHemant Agrawal } 1908c56c86ffSHemant Agrawal 1909eadcfd95SRohit Raj for (count = 0; count <= MAX_REPEAT_TIME; count++) { 1910eadcfd95SRohit Raj ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, 1911eadcfd95SRohit Raj &state); 1912c56c86ffSHemant Agrawal if (ret < 0) { 191344e87c27SShreyansh Jain DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret); 1914c56c86ffSHemant Agrawal return -1; 1915c56c86ffSHemant Agrawal } 1916295968d1SFerruh Yigit if (state.up == RTE_ETH_LINK_DOWN && 1917eadcfd95SRohit Raj wait_to_complete) 1918eadcfd95SRohit Raj rte_delay_ms(CHECK_INTERVAL); 1919eadcfd95SRohit Raj else 1920eadcfd95SRohit Raj break; 1921eadcfd95SRohit Raj } 1922c56c86ffSHemant Agrawal 1923c56c86ffSHemant Agrawal memset(&link, 0, sizeof(struct rte_eth_link)); 1924c56c86ffSHemant Agrawal 
link.link_status = state.up; 1925c56c86ffSHemant Agrawal link.link_speed = state.rate; 1926c56c86ffSHemant Agrawal 1927c56c86ffSHemant Agrawal if (state.options & DPNI_LINK_OPT_HALF_DUPLEX) 1928295968d1SFerruh Yigit link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX; 1929c56c86ffSHemant Agrawal else 1930295968d1SFerruh Yigit link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 1931c56c86ffSHemant Agrawal 19327e2eb5f0SStephen Hemminger ret = rte_eth_linkstatus_set(dev, &link); 19337e2eb5f0SStephen Hemminger if (ret == -1) 1934a10a988aSShreyansh Jain DPAA2_PMD_DEBUG("No change in status"); 1935c56c86ffSHemant Agrawal else 1936f665790aSDavid Marchand DPAA2_PMD_INFO("Port %d Link is %s", dev->data->port_id, 19377e2eb5f0SStephen Hemminger link.link_status ? "Up" : "Down"); 19387e2eb5f0SStephen Hemminger 19397e2eb5f0SStephen Hemminger return ret; 1940c56c86ffSHemant Agrawal } 1941c56c86ffSHemant Agrawal 1942a1f3a12cSHemant Agrawal /** 1943a1f3a12cSHemant Agrawal * Toggle the DPNI to enable, if not already enabled. 1944a1f3a12cSHemant Agrawal * This is not strictly PHY up/down - it is more of logical toggling. 1945a1f3a12cSHemant Agrawal */ 1946a1f3a12cSHemant Agrawal static int 1947a1f3a12cSHemant Agrawal dpaa2_dev_set_link_up(struct rte_eth_dev *dev) 1948a1f3a12cSHemant Agrawal { 1949a1f3a12cSHemant Agrawal int ret = -EINVAL; 1950a1f3a12cSHemant Agrawal struct dpaa2_dev_priv *priv; 1951a1f3a12cSHemant Agrawal struct fsl_mc_io *dpni; 1952a1f3a12cSHemant Agrawal int en = 0; 1953aa8c595aSHemant Agrawal struct dpni_link_state state = {0}; 1954a1f3a12cSHemant Agrawal 1955a1f3a12cSHemant Agrawal priv = dev->data->dev_private; 195681c42c84SShreyansh Jain dpni = (struct fsl_mc_io *)dev->process_private; 1957a1f3a12cSHemant Agrawal 1958a1f3a12cSHemant Agrawal if (dpni == NULL) { 1959a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 1960a1f3a12cSHemant Agrawal return ret; 1961a1f3a12cSHemant Agrawal } 1962a1f3a12cSHemant Agrawal 1963a1f3a12cSHemant Agrawal /* Check if DPNI is currently enabled */ 1964a1f3a12cSHemant Agrawal ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en); 1965a1f3a12cSHemant Agrawal if (ret) { 1966a1f3a12cSHemant Agrawal /* Unable to obtain dpni status; Not continuing */ 1967a10a988aSShreyansh Jain DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret); 1968a1f3a12cSHemant Agrawal return -EINVAL; 1969a1f3a12cSHemant Agrawal } 1970a1f3a12cSHemant Agrawal 1971a1f3a12cSHemant Agrawal /* Enable link if not already enabled */ 1972a1f3a12cSHemant Agrawal if (!en) { 1973a1f3a12cSHemant Agrawal ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token); 1974a1f3a12cSHemant Agrawal if (ret) { 1975a10a988aSShreyansh Jain DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret); 1976a1f3a12cSHemant Agrawal return -EINVAL; 1977a1f3a12cSHemant Agrawal } 1978a1f3a12cSHemant Agrawal } 1979aa8c595aSHemant Agrawal ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); 1980aa8c595aSHemant Agrawal if (ret < 0) { 198144e87c27SShreyansh Jain DPAA2_PMD_DEBUG("Unable to get link state (%d)", ret); 1982aa8c595aSHemant Agrawal return -1; 1983aa8c595aSHemant Agrawal } 1984aa8c595aSHemant Agrawal 1985a1f3a12cSHemant Agrawal /* changing tx burst function to start enqueues */ 1986a1f3a12cSHemant Agrawal dev->tx_pkt_burst = dpaa2_dev_tx; 1987aa8c595aSHemant Agrawal dev->data->dev_link.link_status = state.up; 19887e6ecac2SRohit Raj dev->data->dev_link.link_speed = state.rate; 1989a1f3a12cSHemant Agrawal 1990aa8c595aSHemant Agrawal if (state.up) 1991a10a988aSShreyansh Jain DPAA2_PMD_INFO("Port %d Link is Up", 
dev->data->port_id); 1992aa8c595aSHemant Agrawal else 1993a10a988aSShreyansh Jain DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id); 1994a1f3a12cSHemant Agrawal return ret; 1995a1f3a12cSHemant Agrawal } 1996a1f3a12cSHemant Agrawal 1997a1f3a12cSHemant Agrawal /** 1998a1f3a12cSHemant Agrawal * Toggle the DPNI to disable, if not already disabled. 1999a1f3a12cSHemant Agrawal * This is not strictly PHY up/down - it is more of logical toggling. 2000a1f3a12cSHemant Agrawal */ 2001a1f3a12cSHemant Agrawal static int 2002a1f3a12cSHemant Agrawal dpaa2_dev_set_link_down(struct rte_eth_dev *dev) 2003a1f3a12cSHemant Agrawal { 2004a1f3a12cSHemant Agrawal int ret = -EINVAL; 2005a1f3a12cSHemant Agrawal struct dpaa2_dev_priv *priv; 2006a1f3a12cSHemant Agrawal struct fsl_mc_io *dpni; 2007a1f3a12cSHemant Agrawal int dpni_enabled = 0; 2008a1f3a12cSHemant Agrawal int retries = 10; 2009a1f3a12cSHemant Agrawal 2010a1f3a12cSHemant Agrawal PMD_INIT_FUNC_TRACE(); 2011a1f3a12cSHemant Agrawal 2012a1f3a12cSHemant Agrawal priv = dev->data->dev_private; 201381c42c84SShreyansh Jain dpni = (struct fsl_mc_io *)dev->process_private; 2014a1f3a12cSHemant Agrawal 2015a1f3a12cSHemant Agrawal if (dpni == NULL) { 2016a10a988aSShreyansh Jain DPAA2_PMD_ERR("Device has not yet been configured"); 2017a1f3a12cSHemant Agrawal return ret; 2018a1f3a12cSHemant Agrawal } 2019a1f3a12cSHemant Agrawal 2020a1f3a12cSHemant Agrawal /*changing tx burst function to avoid any more enqueues */ 2021a41f593fSFerruh Yigit dev->tx_pkt_burst = rte_eth_pkt_burst_dummy; 2022a1f3a12cSHemant Agrawal 2023a1f3a12cSHemant Agrawal /* Loop while dpni_disable() attempts to drain the egress FQs 2024a1f3a12cSHemant Agrawal * and confirm them back to us. 2025a1f3a12cSHemant Agrawal */ 2026a1f3a12cSHemant Agrawal do { 2027a1f3a12cSHemant Agrawal ret = dpni_disable(dpni, 0, priv->token); 2028a1f3a12cSHemant Agrawal if (ret) { 2029a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni disable failed (%d)", ret); 2030a1f3a12cSHemant Agrawal return ret; 2031a1f3a12cSHemant Agrawal } 2032a1f3a12cSHemant Agrawal ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled); 2033a1f3a12cSHemant Agrawal if (ret) { 2034a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni enable check failed (%d)", ret); 2035a1f3a12cSHemant Agrawal return ret; 2036a1f3a12cSHemant Agrawal } 2037a1f3a12cSHemant Agrawal if (dpni_enabled) 2038a1f3a12cSHemant Agrawal /* Allow the MC some slack */ 2039a1f3a12cSHemant Agrawal rte_delay_us(100 * 1000); 2040a1f3a12cSHemant Agrawal } while (dpni_enabled && --retries); 2041a1f3a12cSHemant Agrawal 2042a1f3a12cSHemant Agrawal if (!retries) { 2043a10a988aSShreyansh Jain DPAA2_PMD_WARN("Retry count exceeded disabling dpni"); 2044a1f3a12cSHemant Agrawal /* todo- we may have to manually cleanup queues. 
2045a1f3a12cSHemant Agrawal */ 2046a1f3a12cSHemant Agrawal } else { 2047a10a988aSShreyansh Jain DPAA2_PMD_INFO("Port %d Link DOWN successful", 2048a1f3a12cSHemant Agrawal dev->data->port_id); 2049a1f3a12cSHemant Agrawal } 2050a1f3a12cSHemant Agrawal 2051a1f3a12cSHemant Agrawal dev->data->dev_link.link_status = 0; 2052a1f3a12cSHemant Agrawal 2053a1f3a12cSHemant Agrawal return ret; 2054a1f3a12cSHemant Agrawal } 2055a1f3a12cSHemant Agrawal 2056977d0006SHemant Agrawal static int 2057977d0006SHemant Agrawal dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 2058977d0006SHemant Agrawal { 2059977d0006SHemant Agrawal int ret = -EINVAL; 2060977d0006SHemant Agrawal struct dpaa2_dev_priv *priv; 2061977d0006SHemant Agrawal struct fsl_mc_io *dpni; 2062977d0006SHemant Agrawal struct dpni_link_state state = {0}; 2063977d0006SHemant Agrawal 2064977d0006SHemant Agrawal PMD_INIT_FUNC_TRACE(); 2065977d0006SHemant Agrawal 2066977d0006SHemant Agrawal priv = dev->data->dev_private; 206781c42c84SShreyansh Jain dpni = (struct fsl_mc_io *)dev->process_private; 2068977d0006SHemant Agrawal 2069977d0006SHemant Agrawal if (dpni == NULL || fc_conf == NULL) { 2070a10a988aSShreyansh Jain DPAA2_PMD_ERR("device not configured"); 2071977d0006SHemant Agrawal return ret; 2072977d0006SHemant Agrawal } 2073977d0006SHemant Agrawal 2074977d0006SHemant Agrawal ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); 2075977d0006SHemant Agrawal if (ret) { 2076a10a988aSShreyansh Jain DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret); 2077977d0006SHemant Agrawal return ret; 2078977d0006SHemant Agrawal } 2079977d0006SHemant Agrawal 2080977d0006SHemant Agrawal memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf)); 2081977d0006SHemant Agrawal if (state.options & DPNI_LINK_OPT_PAUSE) { 2082977d0006SHemant Agrawal /* DPNI_LINK_OPT_PAUSE set 2083977d0006SHemant Agrawal * if ASYM_PAUSE not set, 2084977d0006SHemant Agrawal * RX Side flow control (handle received Pause frame) 2085977d0006SHemant Agrawal * TX side flow control (send Pause frame) 2086977d0006SHemant Agrawal * if ASYM_PAUSE set, 2087977d0006SHemant Agrawal * RX Side flow control (handle received Pause frame) 2088977d0006SHemant Agrawal * No TX side flow control (send Pause frame disabled) 2089977d0006SHemant Agrawal */ 2090977d0006SHemant Agrawal if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE)) 2091295968d1SFerruh Yigit fc_conf->mode = RTE_ETH_FC_FULL; 2092977d0006SHemant Agrawal else 2093295968d1SFerruh Yigit fc_conf->mode = RTE_ETH_FC_RX_PAUSE; 2094977d0006SHemant Agrawal } else { 2095977d0006SHemant Agrawal /* DPNI_LINK_OPT_PAUSE not set 2096977d0006SHemant Agrawal * if ASYM_PAUSE set, 2097977d0006SHemant Agrawal * TX side flow control (send Pause frame) 2098977d0006SHemant Agrawal * No RX side flow control (No action on pause frame rx) 2099977d0006SHemant Agrawal * if ASYM_PAUSE not set, 2100977d0006SHemant Agrawal * Flow control disabled 2101977d0006SHemant Agrawal */ 2102977d0006SHemant Agrawal if (state.options & DPNI_LINK_OPT_ASYM_PAUSE) 2103295968d1SFerruh Yigit fc_conf->mode = RTE_ETH_FC_TX_PAUSE; 2104977d0006SHemant Agrawal else 2105295968d1SFerruh Yigit fc_conf->mode = RTE_ETH_FC_NONE; 2106977d0006SHemant Agrawal } 2107977d0006SHemant Agrawal 2108977d0006SHemant Agrawal return ret; 2109977d0006SHemant Agrawal } 2110977d0006SHemant Agrawal 2111977d0006SHemant Agrawal static int 2112977d0006SHemant Agrawal dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 2113977d0006SHemant Agrawal { 2114977d0006SHemant 
Agrawal int ret = -EINVAL; 2115977d0006SHemant Agrawal struct dpaa2_dev_priv *priv; 2116977d0006SHemant Agrawal struct fsl_mc_io *dpni; 2117977d0006SHemant Agrawal struct dpni_link_state state = {0}; 2118977d0006SHemant Agrawal struct dpni_link_cfg cfg = {0}; 2119977d0006SHemant Agrawal 2120977d0006SHemant Agrawal PMD_INIT_FUNC_TRACE(); 2121977d0006SHemant Agrawal 2122977d0006SHemant Agrawal priv = dev->data->dev_private; 212381c42c84SShreyansh Jain dpni = (struct fsl_mc_io *)dev->process_private; 2124977d0006SHemant Agrawal 2125977d0006SHemant Agrawal if (dpni == NULL) { 2126a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 2127977d0006SHemant Agrawal return ret; 2128977d0006SHemant Agrawal } 2129977d0006SHemant Agrawal 2130977d0006SHemant Agrawal /* It is necessary to obtain the current state before setting fc_conf 2131977d0006SHemant Agrawal * as MC would return error in case rate, autoneg or duplex values are 2132977d0006SHemant Agrawal * different. 2133977d0006SHemant Agrawal */ 2134977d0006SHemant Agrawal ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); 2135977d0006SHemant Agrawal if (ret) { 2136a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret); 2137977d0006SHemant Agrawal return -1; 2138977d0006SHemant Agrawal } 2139977d0006SHemant Agrawal 2140977d0006SHemant Agrawal /* Disable link before setting configuration */ 2141977d0006SHemant Agrawal dpaa2_dev_set_link_down(dev); 2142977d0006SHemant Agrawal 2143977d0006SHemant Agrawal /* Based on fc_conf, update cfg */ 2144977d0006SHemant Agrawal cfg.rate = state.rate; 2145977d0006SHemant Agrawal cfg.options = state.options; 2146977d0006SHemant Agrawal 2147977d0006SHemant Agrawal /* update cfg with fc_conf */ 2148977d0006SHemant Agrawal switch (fc_conf->mode) { 2149295968d1SFerruh Yigit case RTE_ETH_FC_FULL: 2150977d0006SHemant Agrawal /* Full flow control; 2151977d0006SHemant Agrawal * OPT_PAUSE set, ASYM_PAUSE not set 2152977d0006SHemant Agrawal */ 2153977d0006SHemant Agrawal cfg.options |= DPNI_LINK_OPT_PAUSE; 2154977d0006SHemant Agrawal cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; 2155f090a4c3SHemant Agrawal break; 2156295968d1SFerruh Yigit case RTE_ETH_FC_TX_PAUSE: 2157977d0006SHemant Agrawal /* Enable RX flow control 2158977d0006SHemant Agrawal * OPT_PAUSE not set; 2159977d0006SHemant Agrawal * ASYM_PAUSE set; 2160977d0006SHemant Agrawal */ 2161977d0006SHemant Agrawal cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE; 2162977d0006SHemant Agrawal cfg.options &= ~DPNI_LINK_OPT_PAUSE; 2163977d0006SHemant Agrawal break; 2164295968d1SFerruh Yigit case RTE_ETH_FC_RX_PAUSE: 2165977d0006SHemant Agrawal /* Enable TX Flow control 2166977d0006SHemant Agrawal * OPT_PAUSE set 2167977d0006SHemant Agrawal * ASYM_PAUSE set 2168977d0006SHemant Agrawal */ 2169977d0006SHemant Agrawal cfg.options |= DPNI_LINK_OPT_PAUSE; 2170977d0006SHemant Agrawal cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE; 2171977d0006SHemant Agrawal break; 2172295968d1SFerruh Yigit case RTE_ETH_FC_NONE: 2173977d0006SHemant Agrawal /* Disable Flow control 2174977d0006SHemant Agrawal * OPT_PAUSE not set 2175977d0006SHemant Agrawal * ASYM_PAUSE not set 2176977d0006SHemant Agrawal */ 2177977d0006SHemant Agrawal cfg.options &= ~DPNI_LINK_OPT_PAUSE; 2178977d0006SHemant Agrawal cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; 2179977d0006SHemant Agrawal break; 2180977d0006SHemant Agrawal default: 2181a10a988aSShreyansh Jain DPAA2_PMD_ERR("Incorrect Flow control flag (%d)", 2182977d0006SHemant Agrawal fc_conf->mode); 2183977d0006SHemant Agrawal return -1; 
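/*
 * Illustrative sketch, not part of the driver: dpaa2_flow_ctrl_set() above
 * re-reads the link state first so that rate and duplex are preserved, which
 * means a get-modify-set sequence is the safe way to change only the pause
 * mode from an application. port_id is a placeholder.
 */
#include <rte_ethdev.h>

static int
example_enable_pause(uint16_t port_id)
{
	struct rte_eth_fc_conf fc;
	int ret;

	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc);
	if (ret)
		return ret;

	/* request symmetric pause (maps to OPT_PAUSE without ASYM_PAUSE) */
	fc.mode = RTE_ETH_FC_FULL;
	return rte_eth_dev_flow_ctrl_set(port_id, &fc);
}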
2184977d0006SHemant Agrawal } 2185977d0006SHemant Agrawal 2186977d0006SHemant Agrawal ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg); 2187977d0006SHemant Agrawal if (ret) 2188a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)", 2189977d0006SHemant Agrawal ret); 2190977d0006SHemant Agrawal 2191977d0006SHemant Agrawal /* Enable link */ 2192977d0006SHemant Agrawal dpaa2_dev_set_link_up(dev); 2193977d0006SHemant Agrawal 2194977d0006SHemant Agrawal return ret; 2195977d0006SHemant Agrawal } 2196977d0006SHemant Agrawal 219763d5c3b0SHemant Agrawal static int 219863d5c3b0SHemant Agrawal dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev, 219963d5c3b0SHemant Agrawal struct rte_eth_rss_conf *rss_conf) 220063d5c3b0SHemant Agrawal { 220163d5c3b0SHemant Agrawal struct rte_eth_dev_data *data = dev->data; 2202271f5aeeSJun Yang struct dpaa2_dev_priv *priv = data->dev_private; 220363d5c3b0SHemant Agrawal struct rte_eth_conf *eth_conf = &data->dev_conf; 2204271f5aeeSJun Yang int ret, tc_index; 220563d5c3b0SHemant Agrawal 220663d5c3b0SHemant Agrawal PMD_INIT_FUNC_TRACE(); 220763d5c3b0SHemant Agrawal 220863d5c3b0SHemant Agrawal if (rss_conf->rss_hf) { 2209271f5aeeSJun Yang for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) { 2210271f5aeeSJun Yang ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf, 2211271f5aeeSJun Yang tc_index); 221263d5c3b0SHemant Agrawal if (ret) { 2213271f5aeeSJun Yang DPAA2_PMD_ERR("Unable to set flow dist on tc%d", 2214271f5aeeSJun Yang tc_index); 221563d5c3b0SHemant Agrawal return ret; 221663d5c3b0SHemant Agrawal } 2217271f5aeeSJun Yang } 221863d5c3b0SHemant Agrawal } else { 2219271f5aeeSJun Yang for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) { 2220271f5aeeSJun Yang ret = dpaa2_remove_flow_dist(dev, tc_index); 222163d5c3b0SHemant Agrawal if (ret) { 2222271f5aeeSJun Yang DPAA2_PMD_ERR( 2223271f5aeeSJun Yang "Unable to remove flow dist on tc%d", 2224271f5aeeSJun Yang tc_index); 222563d5c3b0SHemant Agrawal return ret; 222663d5c3b0SHemant Agrawal } 222763d5c3b0SHemant Agrawal } 2228271f5aeeSJun Yang } 222963d5c3b0SHemant Agrawal eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf; 223063d5c3b0SHemant Agrawal return 0; 223163d5c3b0SHemant Agrawal } 223263d5c3b0SHemant Agrawal 223363d5c3b0SHemant Agrawal static int 223463d5c3b0SHemant Agrawal dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev, 223563d5c3b0SHemant Agrawal struct rte_eth_rss_conf *rss_conf) 223663d5c3b0SHemant Agrawal { 223763d5c3b0SHemant Agrawal struct rte_eth_dev_data *data = dev->data; 223863d5c3b0SHemant Agrawal struct rte_eth_conf *eth_conf = &data->dev_conf; 223963d5c3b0SHemant Agrawal 224063d5c3b0SHemant Agrawal /* dpaa2 does not support rss_key, so length should be 0*/ 224163d5c3b0SHemant Agrawal rss_conf->rss_key_len = 0; 224263d5c3b0SHemant Agrawal rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf; 224363d5c3b0SHemant Agrawal return 0; 224463d5c3b0SHemant Agrawal } 224563d5c3b0SHemant Agrawal 2246b677d4c6SNipun Gupta int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev, 2247b677d4c6SNipun Gupta int eth_rx_queue_id, 22483835cc22SNipun Gupta struct dpaa2_dpcon_dev *dpcon, 2249b677d4c6SNipun Gupta const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) 2250b677d4c6SNipun Gupta { 2251b677d4c6SNipun Gupta struct dpaa2_dev_priv *eth_priv = dev->data->dev_private; 225281c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 2253b677d4c6SNipun Gupta struct dpaa2_queue *dpaa2_ethq = 
eth_priv->rx_vq[eth_rx_queue_id]; 2254b677d4c6SNipun Gupta uint8_t flow_id = dpaa2_ethq->flow_id; 2255b677d4c6SNipun Gupta struct dpni_queue cfg; 22563835cc22SNipun Gupta uint8_t options, priority; 2257b677d4c6SNipun Gupta int ret; 2258b677d4c6SNipun Gupta 2259b677d4c6SNipun Gupta if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL) 2260b677d4c6SNipun Gupta dpaa2_ethq->cb = dpaa2_dev_process_parallel_event; 22612d378863SNipun Gupta else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) 22622d378863SNipun Gupta dpaa2_ethq->cb = dpaa2_dev_process_atomic_event; 226316c4a3c4SNipun Gupta else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED) 226416c4a3c4SNipun Gupta dpaa2_ethq->cb = dpaa2_dev_process_ordered_event; 2265b677d4c6SNipun Gupta else 2266b677d4c6SNipun Gupta return -EINVAL; 2267b677d4c6SNipun Gupta 22683835cc22SNipun Gupta priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / queue_conf->ev.priority) * 22693835cc22SNipun Gupta (dpcon->num_priorities - 1); 22703835cc22SNipun Gupta 2271b677d4c6SNipun Gupta memset(&cfg, 0, sizeof(struct dpni_queue)); 2272b677d4c6SNipun Gupta options = DPNI_QUEUE_OPT_DEST; 2273b677d4c6SNipun Gupta cfg.destination.type = DPNI_DEST_DPCON; 22743835cc22SNipun Gupta cfg.destination.id = dpcon->dpcon_id; 22753835cc22SNipun Gupta cfg.destination.priority = priority; 2276b677d4c6SNipun Gupta 22772d378863SNipun Gupta if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) { 22782d378863SNipun Gupta options |= DPNI_QUEUE_OPT_HOLD_ACTIVE; 22792d378863SNipun Gupta cfg.destination.hold_active = 1; 22802d378863SNipun Gupta } 22812d378863SNipun Gupta 228216c4a3c4SNipun Gupta if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED && 228316c4a3c4SNipun Gupta !eth_priv->en_ordered) { 228416c4a3c4SNipun Gupta struct opr_cfg ocfg; 228516c4a3c4SNipun Gupta 228616c4a3c4SNipun Gupta /* Restoration window size = 256 frames */ 228716c4a3c4SNipun Gupta ocfg.oprrws = 3; 228816c4a3c4SNipun Gupta /* Restoration window size = 512 frames for LX2 */ 228916c4a3c4SNipun Gupta if (dpaa2_svr_family == SVR_LX2160A) 229016c4a3c4SNipun Gupta ocfg.oprrws = 4; 229116c4a3c4SNipun Gupta /* Auto advance NESN window enabled */ 229216c4a3c4SNipun Gupta ocfg.oa = 1; 229316c4a3c4SNipun Gupta /* Late arrival window size disabled */ 229416c4a3c4SNipun Gupta ocfg.olws = 0; 22957be78d02SJosh Soref /* ORL resource exhaustion advance NESN disabled */ 229616c4a3c4SNipun Gupta ocfg.oeane = 0; 229716c4a3c4SNipun Gupta /* Loose ordering enabled */ 229816c4a3c4SNipun Gupta ocfg.oloe = 1; 229916c4a3c4SNipun Gupta eth_priv->en_loose_ordered = 1; 230016c4a3c4SNipun Gupta /* Strict ordering enabled if explicitly set */ 230116c4a3c4SNipun Gupta if (getenv("DPAA2_STRICT_ORDERING_ENABLE")) { 230216c4a3c4SNipun Gupta ocfg.oloe = 0; 230316c4a3c4SNipun Gupta eth_priv->en_loose_ordered = 0; 230416c4a3c4SNipun Gupta } 230516c4a3c4SNipun Gupta 230616c4a3c4SNipun Gupta ret = dpni_set_opr(dpni, CMD_PRI_LOW, eth_priv->token, 230716c4a3c4SNipun Gupta dpaa2_ethq->tc_index, flow_id, 23082cb2abf3SHemant Agrawal OPR_OPT_CREATE, &ocfg, 0); 230916c4a3c4SNipun Gupta if (ret) { 2310f665790aSDavid Marchand DPAA2_PMD_ERR("Error setting opr: ret: %d", ret); 231116c4a3c4SNipun Gupta return ret; 231216c4a3c4SNipun Gupta } 231316c4a3c4SNipun Gupta 231416c4a3c4SNipun Gupta eth_priv->en_ordered = 1; 231516c4a3c4SNipun Gupta } 231616c4a3c4SNipun Gupta 2317b677d4c6SNipun Gupta options |= DPNI_QUEUE_OPT_USER_CTX; 23185ae1edffSHemant Agrawal cfg.user_context = (size_t)(dpaa2_ethq); 2319b677d4c6SNipun Gupta 2320b677d4c6SNipun Gupta ret = 
dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX, 2321b677d4c6SNipun Gupta dpaa2_ethq->tc_index, flow_id, options, &cfg); 2322b677d4c6SNipun Gupta if (ret) { 2323a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret); 2324b677d4c6SNipun Gupta return ret; 2325b677d4c6SNipun Gupta } 2326b677d4c6SNipun Gupta 2327b677d4c6SNipun Gupta memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event)); 2328b677d4c6SNipun Gupta 2329b677d4c6SNipun Gupta return 0; 2330b677d4c6SNipun Gupta } 2331b677d4c6SNipun Gupta 2332b677d4c6SNipun Gupta int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev, 2333b677d4c6SNipun Gupta int eth_rx_queue_id) 2334b677d4c6SNipun Gupta { 2335b677d4c6SNipun Gupta struct dpaa2_dev_priv *eth_priv = dev->data->dev_private; 233681c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 2337b677d4c6SNipun Gupta struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id]; 2338b677d4c6SNipun Gupta uint8_t flow_id = dpaa2_ethq->flow_id; 2339b677d4c6SNipun Gupta struct dpni_queue cfg; 2340b677d4c6SNipun Gupta uint8_t options; 2341b677d4c6SNipun Gupta int ret; 2342b677d4c6SNipun Gupta 2343b677d4c6SNipun Gupta memset(&cfg, 0, sizeof(struct dpni_queue)); 2344b677d4c6SNipun Gupta options = DPNI_QUEUE_OPT_DEST; 2345b677d4c6SNipun Gupta cfg.destination.type = DPNI_DEST_NONE; 2346b677d4c6SNipun Gupta 2347b677d4c6SNipun Gupta ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX, 2348b677d4c6SNipun Gupta dpaa2_ethq->tc_index, flow_id, options, &cfg); 2349b677d4c6SNipun Gupta if (ret) 2350a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret); 2351b677d4c6SNipun Gupta 2352b677d4c6SNipun Gupta return ret; 2353b677d4c6SNipun Gupta } 2354b677d4c6SNipun Gupta 2355fe2b986aSSunil Kumar Kori static int 2356fb7ad441SThomas Monjalon dpaa2_dev_flow_ops_get(struct rte_eth_dev *dev, 2357fb7ad441SThomas Monjalon const struct rte_flow_ops **ops) 2358fe2b986aSSunil Kumar Kori { 2359fe2b986aSSunil Kumar Kori if (!dev) 2360fe2b986aSSunil Kumar Kori return -ENODEV; 2361fe2b986aSSunil Kumar Kori 2362fb7ad441SThomas Monjalon *ops = &dpaa2_flow_ops; 2363fb7ad441SThomas Monjalon return 0; 2364fe2b986aSSunil Kumar Kori } 2365fe2b986aSSunil Kumar Kori 2366de1d70f0SHemant Agrawal static void 2367de1d70f0SHemant Agrawal dpaa2_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 2368de1d70f0SHemant Agrawal struct rte_eth_rxq_info *qinfo) 2369de1d70f0SHemant Agrawal { 2370de1d70f0SHemant Agrawal struct dpaa2_queue *rxq; 2371731fa400SHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 2372731fa400SHemant Agrawal struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 2373731fa400SHemant Agrawal uint16_t max_frame_length; 2374de1d70f0SHemant Agrawal 2375de1d70f0SHemant Agrawal rxq = (struct dpaa2_queue *)dev->data->rx_queues[queue_id]; 2376de1d70f0SHemant Agrawal 2377de1d70f0SHemant Agrawal qinfo->mp = rxq->mb_pool; 2378de1d70f0SHemant Agrawal qinfo->scattered_rx = dev->data->scattered_rx; 2379de1d70f0SHemant Agrawal qinfo->nb_desc = rxq->nb_desc; 2380731fa400SHemant Agrawal if (dpni_get_max_frame_length(dpni, CMD_PRI_LOW, priv->token, 2381731fa400SHemant Agrawal &max_frame_length) == 0) 2382731fa400SHemant Agrawal qinfo->rx_buf_size = max_frame_length; 2383de1d70f0SHemant Agrawal 2384de1d70f0SHemant Agrawal qinfo->conf.rx_free_thresh = 1; 2385de1d70f0SHemant Agrawal qinfo->conf.rx_drop_en = 1; 2386de1d70f0SHemant Agrawal qinfo->conf.rx_deferred_start = 0; 
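/*
 * Caller-side sketch (assumed usage, not part of the driver): these
 * fields are read through the generic API, e.g.
 *
 *     struct rte_eth_rxq_info qinfo;
 *     int rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
 *
 * Note that rx_buf_size above is taken from the DPNI maximum frame
 * length rather than from the mempool data room size.
 */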
2387de1d70f0SHemant Agrawal qinfo->conf.offloads = rxq->offloads; 2388de1d70f0SHemant Agrawal } 2389de1d70f0SHemant Agrawal 2390de1d70f0SHemant Agrawal static void 2391de1d70f0SHemant Agrawal dpaa2_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 2392de1d70f0SHemant Agrawal struct rte_eth_txq_info *qinfo) 2393de1d70f0SHemant Agrawal { 2394de1d70f0SHemant Agrawal struct dpaa2_queue *txq; 2395de1d70f0SHemant Agrawal 2396de1d70f0SHemant Agrawal txq = dev->data->tx_queues[queue_id]; 2397de1d70f0SHemant Agrawal 2398de1d70f0SHemant Agrawal qinfo->nb_desc = txq->nb_desc; 2399de1d70f0SHemant Agrawal qinfo->conf.tx_thresh.pthresh = 0; 2400de1d70f0SHemant Agrawal qinfo->conf.tx_thresh.hthresh = 0; 2401de1d70f0SHemant Agrawal qinfo->conf.tx_thresh.wthresh = 0; 2402de1d70f0SHemant Agrawal 2403de1d70f0SHemant Agrawal qinfo->conf.tx_free_thresh = 0; 2404de1d70f0SHemant Agrawal qinfo->conf.tx_rs_thresh = 0; 2405de1d70f0SHemant Agrawal qinfo->conf.offloads = txq->offloads; 2406de1d70f0SHemant Agrawal qinfo->conf.tx_deferred_start = 0; 2407de1d70f0SHemant Agrawal } 2408de1d70f0SHemant Agrawal 2409ac624068SGagandeep Singh static int 2410ac624068SGagandeep Singh dpaa2_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops) 2411ac624068SGagandeep Singh { 2412ac624068SGagandeep Singh *(const void **)ops = &dpaa2_tm_ops; 2413ac624068SGagandeep Singh 2414ac624068SGagandeep Singh return 0; 2415ac624068SGagandeep Singh } 2416ac624068SGagandeep Singh 2417a5b375edSNipun Gupta void 2418a5b375edSNipun Gupta rte_pmd_dpaa2_thread_init(void) 2419a5b375edSNipun Gupta { 2420a5b375edSNipun Gupta int ret; 2421a5b375edSNipun Gupta 2422a5b375edSNipun Gupta if (unlikely(!DPAA2_PER_LCORE_DPIO)) { 2423a5b375edSNipun Gupta ret = dpaa2_affine_qbman_swp(); 2424a5b375edSNipun Gupta if (ret) { 2425a5b375edSNipun Gupta DPAA2_PMD_ERR( 2426f665790aSDavid Marchand "Failed to allocate IO portal, tid: %d", 2427a5b375edSNipun Gupta rte_gettid()); 2428a5b375edSNipun Gupta return; 2429a5b375edSNipun Gupta } 2430a5b375edSNipun Gupta } 2431a5b375edSNipun Gupta } 2432a5b375edSNipun Gupta 24333e5a335dSHemant Agrawal static struct eth_dev_ops dpaa2_ethdev_ops = { 24343e5a335dSHemant Agrawal .dev_configure = dpaa2_eth_dev_configure, 24353e5a335dSHemant Agrawal .dev_start = dpaa2_dev_start, 24363e5a335dSHemant Agrawal .dev_stop = dpaa2_dev_stop, 24373e5a335dSHemant Agrawal .dev_close = dpaa2_dev_close, 2438c0e5c69aSHemant Agrawal .promiscuous_enable = dpaa2_dev_promiscuous_enable, 2439c0e5c69aSHemant Agrawal .promiscuous_disable = dpaa2_dev_promiscuous_disable, 24405d5aeeedSHemant Agrawal .allmulticast_enable = dpaa2_dev_allmulticast_enable, 24415d5aeeedSHemant Agrawal .allmulticast_disable = dpaa2_dev_allmulticast_disable, 2442a1f3a12cSHemant Agrawal .dev_set_link_up = dpaa2_dev_set_link_up, 2443a1f3a12cSHemant Agrawal .dev_set_link_down = dpaa2_dev_set_link_down, 2444c56c86ffSHemant Agrawal .link_update = dpaa2_dev_link_update, 2445b0aa5459SHemant Agrawal .stats_get = dpaa2_dev_stats_get, 24461d6329b2SHemant Agrawal .xstats_get = dpaa2_dev_xstats_get, 24471d6329b2SHemant Agrawal .xstats_get_by_id = dpaa2_xstats_get_by_id, 24481d6329b2SHemant Agrawal .xstats_get_names_by_id = dpaa2_xstats_get_names_by_id, 24491d6329b2SHemant Agrawal .xstats_get_names = dpaa2_xstats_get_names, 2450b0aa5459SHemant Agrawal .stats_reset = dpaa2_dev_stats_reset, 24511d6329b2SHemant Agrawal .xstats_reset = dpaa2_dev_stats_reset, 2452748eccb9SHemant Agrawal .fw_version_get = dpaa2_fw_version_get, 24533e5a335dSHemant Agrawal .dev_infos_get = dpaa2_dev_info_get, 
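/*
 * Usage sketch for rte_pmd_dpaa2_thread_init() above (illustrative;
 * my_worker, port_id and queue_id are hypothetical): a non-EAL thread
 * that wants to call the rx/tx burst APIs on a dpaa2 port should first
 * affine a QBMAN software portal, e.g.
 *
 *     static void *my_worker(void *arg)
 *     {
 *         struct rte_mbuf *pkts[32];
 *         uint16_t nb;
 *
 *         rte_pmd_dpaa2_thread_init();
 *         nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
 *         ...
 *     }
 */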
2454a5fc38d4SHemant Agrawal .dev_supported_ptypes_get = dpaa2_supported_ptypes_get, 2455e31d4d21SHemant Agrawal .mtu_set = dpaa2_dev_mtu_set, 24563ce294f2SHemant Agrawal .vlan_filter_set = dpaa2_vlan_filter_set, 24573ce294f2SHemant Agrawal .vlan_offload_set = dpaa2_vlan_offload_set, 2458e59b75ffSHemant Agrawal .vlan_tpid_set = dpaa2_vlan_tpid_set, 24593e5a335dSHemant Agrawal .rx_queue_setup = dpaa2_dev_rx_queue_setup, 24603e5a335dSHemant Agrawal .rx_queue_release = dpaa2_dev_rx_queue_release, 24613e5a335dSHemant Agrawal .tx_queue_setup = dpaa2_dev_tx_queue_setup, 2462ddbc2b66SApeksha Gupta .rx_burst_mode_get = dpaa2_dev_rx_burst_mode_get, 2463ddbc2b66SApeksha Gupta .tx_burst_mode_get = dpaa2_dev_tx_burst_mode_get, 2464977d0006SHemant Agrawal .flow_ctrl_get = dpaa2_flow_ctrl_get, 2465977d0006SHemant Agrawal .flow_ctrl_set = dpaa2_flow_ctrl_set, 2466b4d97b7dSHemant Agrawal .mac_addr_add = dpaa2_dev_add_mac_addr, 2467b4d97b7dSHemant Agrawal .mac_addr_remove = dpaa2_dev_remove_mac_addr, 2468b4d97b7dSHemant Agrawal .mac_addr_set = dpaa2_dev_set_mac_addr, 246963d5c3b0SHemant Agrawal .rss_hash_update = dpaa2_dev_rss_hash_update, 247063d5c3b0SHemant Agrawal .rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get, 2471fb7ad441SThomas Monjalon .flow_ops_get = dpaa2_dev_flow_ops_get, 2472de1d70f0SHemant Agrawal .rxq_info_get = dpaa2_rxq_info_get, 2473de1d70f0SHemant Agrawal .txq_info_get = dpaa2_txq_info_get, 2474ac624068SGagandeep Singh .tm_ops_get = dpaa2_tm_ops_get, 2475bc767866SPriyanka Jain #if defined(RTE_LIBRTE_IEEE1588) 2476bc767866SPriyanka Jain .timesync_enable = dpaa2_timesync_enable, 2477bc767866SPriyanka Jain .timesync_disable = dpaa2_timesync_disable, 2478bc767866SPriyanka Jain .timesync_read_time = dpaa2_timesync_read_time, 2479bc767866SPriyanka Jain .timesync_write_time = dpaa2_timesync_write_time, 2480bc767866SPriyanka Jain .timesync_adjust_time = dpaa2_timesync_adjust_time, 2481bc767866SPriyanka Jain .timesync_read_rx_timestamp = dpaa2_timesync_read_rx_timestamp, 2482bc767866SPriyanka Jain .timesync_read_tx_timestamp = dpaa2_timesync_read_tx_timestamp, 2483bc767866SPriyanka Jain #endif 24843e5a335dSHemant Agrawal }; 24853e5a335dSHemant Agrawal 2486c3e0a706SShreyansh Jain /* Populate the mac address from physically available (u-boot/firmware) and/or 2487c3e0a706SShreyansh Jain * one set by higher layers like MC (restool) etc. 
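 * For example (hypothetical values): if firmware reports 00:04:9f:01:02:03
 * and the current prime MAC differs, the prime MAC is overwritten with the
 * firmware address; if the firmware MAC is all zeros the existing prime MAC
 * is kept; if both are zero a random address is generated and set as prime.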
2488c3e0a706SShreyansh Jain * Returns the table of MAC entries (multiple entries) 2489c3e0a706SShreyansh Jain */ 2490c3e0a706SShreyansh Jain static int 2491c3e0a706SShreyansh Jain populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv, 24926d13ea8eSOlivier Matz struct rte_ether_addr *mac_entry) 2493c3e0a706SShreyansh Jain { 2494c3e0a706SShreyansh Jain int ret; 24956d13ea8eSOlivier Matz struct rte_ether_addr phy_mac, prime_mac; 249641c24ea2SShreyansh Jain 24976d13ea8eSOlivier Matz memset(&phy_mac, 0, sizeof(struct rte_ether_addr)); 24986d13ea8eSOlivier Matz memset(&prime_mac, 0, sizeof(struct rte_ether_addr)); 2499c3e0a706SShreyansh Jain 2500c3e0a706SShreyansh Jain /* Get the physical device MAC address */ 2501c3e0a706SShreyansh Jain ret = dpni_get_port_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token, 2502c3e0a706SShreyansh Jain phy_mac.addr_bytes); 2503c3e0a706SShreyansh Jain if (ret) { 2504c3e0a706SShreyansh Jain DPAA2_PMD_ERR("DPNI get physical port MAC failed: %d", ret); 2505c3e0a706SShreyansh Jain goto cleanup; 2506c3e0a706SShreyansh Jain } 2507c3e0a706SShreyansh Jain 2508c3e0a706SShreyansh Jain ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token, 2509c3e0a706SShreyansh Jain prime_mac.addr_bytes); 2510c3e0a706SShreyansh Jain if (ret) { 2511c3e0a706SShreyansh Jain DPAA2_PMD_ERR("DPNI get Prime port MAC failed: %d", ret); 2512c3e0a706SShreyansh Jain goto cleanup; 2513c3e0a706SShreyansh Jain } 2514c3e0a706SShreyansh Jain 2515c3e0a706SShreyansh Jain /* Now that both MAC have been obtained, do: 2516c3e0a706SShreyansh Jain * if not_empty_mac(phy) && phy != Prime, overwrite prime with Phy 2517c3e0a706SShreyansh Jain * and return phy 2518c3e0a706SShreyansh Jain * If empty_mac(phy), return prime. 2519c3e0a706SShreyansh Jain * if both are empty, create random MAC, set as prime and return 2520c3e0a706SShreyansh Jain */ 2521538da7a1SOlivier Matz if (!rte_is_zero_ether_addr(&phy_mac)) { 2522c3e0a706SShreyansh Jain /* If the addresses are not same, overwrite prime */ 2523538da7a1SOlivier Matz if (!rte_is_same_ether_addr(&phy_mac, &prime_mac)) { 2524c3e0a706SShreyansh Jain ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW, 2525c3e0a706SShreyansh Jain priv->token, 2526c3e0a706SShreyansh Jain phy_mac.addr_bytes); 2527c3e0a706SShreyansh Jain if (ret) { 2528c3e0a706SShreyansh Jain DPAA2_PMD_ERR("Unable to set MAC Address: %d", 2529c3e0a706SShreyansh Jain ret); 2530c3e0a706SShreyansh Jain goto cleanup; 2531c3e0a706SShreyansh Jain } 25326d13ea8eSOlivier Matz memcpy(&prime_mac, &phy_mac, 25336d13ea8eSOlivier Matz sizeof(struct rte_ether_addr)); 2534c3e0a706SShreyansh Jain } 2535538da7a1SOlivier Matz } else if (rte_is_zero_ether_addr(&prime_mac)) { 2536c3e0a706SShreyansh Jain /* In case phys and prime, both are zero, create random MAC */ 2537538da7a1SOlivier Matz rte_eth_random_addr(prime_mac.addr_bytes); 2538c3e0a706SShreyansh Jain ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW, 2539c3e0a706SShreyansh Jain priv->token, 2540c3e0a706SShreyansh Jain prime_mac.addr_bytes); 2541c3e0a706SShreyansh Jain if (ret) { 2542c3e0a706SShreyansh Jain DPAA2_PMD_ERR("Unable to set MAC Address: %d", ret); 2543c3e0a706SShreyansh Jain goto cleanup; 2544c3e0a706SShreyansh Jain } 2545c3e0a706SShreyansh Jain } 2546c3e0a706SShreyansh Jain 2547c3e0a706SShreyansh Jain /* prime_mac the final MAC address */ 25486d13ea8eSOlivier Matz memcpy(mac_entry, &prime_mac, sizeof(struct rte_ether_addr)); 2549c3e0a706SShreyansh Jain return 0; 2550c3e0a706SShreyansh Jain 2551c3e0a706SShreyansh Jain cleanup: 
2552c3e0a706SShreyansh Jain return -1; 2553c3e0a706SShreyansh Jain } 2554c3e0a706SShreyansh Jain 2555c147eae0SHemant Agrawal static int 2556a3a997f0SHemant Agrawal check_devargs_handler(__rte_unused const char *key, const char *value, 2557a3a997f0SHemant Agrawal __rte_unused void *opaque) 2558a3a997f0SHemant Agrawal { 2559a3a997f0SHemant Agrawal if (strcmp(value, "1")) 2560a3a997f0SHemant Agrawal return -1; 2561a3a997f0SHemant Agrawal 2562a3a997f0SHemant Agrawal return 0; 2563a3a997f0SHemant Agrawal } 2564a3a997f0SHemant Agrawal 2565a3a997f0SHemant Agrawal static int 2566a3a997f0SHemant Agrawal dpaa2_get_devargs(struct rte_devargs *devargs, const char *key) 2567a3a997f0SHemant Agrawal { 2568a3a997f0SHemant Agrawal struct rte_kvargs *kvlist; 2569a3a997f0SHemant Agrawal 2570a3a997f0SHemant Agrawal if (!devargs) 2571a3a997f0SHemant Agrawal return 0; 2572a3a997f0SHemant Agrawal 2573a3a997f0SHemant Agrawal kvlist = rte_kvargs_parse(devargs->args, NULL); 2574a3a997f0SHemant Agrawal if (!kvlist) 2575a3a997f0SHemant Agrawal return 0; 2576a3a997f0SHemant Agrawal 2577a3a997f0SHemant Agrawal if (!rte_kvargs_count(kvlist, key)) { 2578a3a997f0SHemant Agrawal rte_kvargs_free(kvlist); 2579a3a997f0SHemant Agrawal return 0; 2580a3a997f0SHemant Agrawal } 2581a3a997f0SHemant Agrawal 2582a3a997f0SHemant Agrawal if (rte_kvargs_process(kvlist, key, 2583a3a997f0SHemant Agrawal check_devargs_handler, NULL) < 0) { 2584a3a997f0SHemant Agrawal rte_kvargs_free(kvlist); 2585a3a997f0SHemant Agrawal return 0; 2586a3a997f0SHemant Agrawal } 2587a3a997f0SHemant Agrawal rte_kvargs_free(kvlist); 2588a3a997f0SHemant Agrawal 2589a3a997f0SHemant Agrawal return 1; 2590a3a997f0SHemant Agrawal } 2591a3a997f0SHemant Agrawal 2592a3a997f0SHemant Agrawal static int 2593c147eae0SHemant Agrawal dpaa2_dev_init(struct rte_eth_dev *eth_dev) 2594c147eae0SHemant Agrawal { 25953e5a335dSHemant Agrawal struct rte_device *dev = eth_dev->device; 25963e5a335dSHemant Agrawal struct rte_dpaa2_device *dpaa2_dev; 25973e5a335dSHemant Agrawal struct fsl_mc_io *dpni_dev; 25983e5a335dSHemant Agrawal struct dpni_attr attr; 25993e5a335dSHemant Agrawal struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; 2600bee61d86SHemant Agrawal struct dpni_buffer_layout layout; 2601fe2b986aSSunil Kumar Kori int ret, hw_id, i; 26023e5a335dSHemant Agrawal 2603d401ead1SHemant Agrawal PMD_INIT_FUNC_TRACE(); 2604d401ead1SHemant Agrawal 260581c42c84SShreyansh Jain dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0); 260681c42c84SShreyansh Jain if (!dpni_dev) { 260781c42c84SShreyansh Jain DPAA2_PMD_ERR("Memory allocation failed for dpni device"); 260881c42c84SShreyansh Jain return -1; 260981c42c84SShreyansh Jain } 2610a6a5f4b4SHemant Agrawal dpni_dev->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX); 261181c42c84SShreyansh Jain eth_dev->process_private = (void *)dpni_dev; 261281c42c84SShreyansh Jain 2613c147eae0SHemant Agrawal /* For secondary processes, the primary has done all the work */ 2614e7b187dbSShreyansh Jain if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2615e7b187dbSShreyansh Jain /* In case of secondary, only burst and ops API need to be 2616e7b187dbSShreyansh Jain * plugged. 
2617e7b187dbSShreyansh Jain */ 2618e7b187dbSShreyansh Jain eth_dev->dev_ops = &dpaa2_ethdev_ops; 2619cbfc6111SFerruh Yigit eth_dev->rx_queue_count = dpaa2_dev_rx_queue_count; 2620a3a997f0SHemant Agrawal if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) 2621a3a997f0SHemant Agrawal eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx; 262220191ab3SNipun Gupta else if (dpaa2_get_devargs(dev->devargs, 262320191ab3SNipun Gupta DRIVER_NO_PREFETCH_MODE)) 262420191ab3SNipun Gupta eth_dev->rx_pkt_burst = dpaa2_dev_rx; 2625a3a997f0SHemant Agrawal else 2626e7b187dbSShreyansh Jain eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx; 2627e7b187dbSShreyansh Jain eth_dev->tx_pkt_burst = dpaa2_dev_tx; 2628c147eae0SHemant Agrawal return 0; 2629e7b187dbSShreyansh Jain } 2630c147eae0SHemant Agrawal 26313e5a335dSHemant Agrawal dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device); 26323e5a335dSHemant Agrawal 26333e5a335dSHemant Agrawal hw_id = dpaa2_dev->object_id; 26343e5a335dSHemant Agrawal ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token); 26353e5a335dSHemant Agrawal if (ret) { 2636a10a988aSShreyansh Jain DPAA2_PMD_ERR( 2637a10a988aSShreyansh Jain "Failure in opening dpni@%d with err code %d", 2638d4984046SHemant Agrawal hw_id, ret); 2639d4984046SHemant Agrawal rte_free(dpni_dev); 26403e5a335dSHemant Agrawal return -1; 26413e5a335dSHemant Agrawal } 26423e5a335dSHemant Agrawal 2643f023d059SJun Yang if (eth_dev->data->dev_conf.lpbk_mode) 2644f023d059SJun Yang dpaa2_dev_recycle_deconfig(eth_dev); 2645f023d059SJun Yang 26463e5a335dSHemant Agrawal /* Clean the device first */ 26473e5a335dSHemant Agrawal ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token); 26483e5a335dSHemant Agrawal if (ret) { 2649a10a988aSShreyansh Jain DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d", 2650d4984046SHemant Agrawal hw_id, ret); 2651d4984046SHemant Agrawal goto init_err; 26523e5a335dSHemant Agrawal } 26533e5a335dSHemant Agrawal 26543e5a335dSHemant Agrawal ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr); 26553e5a335dSHemant Agrawal if (ret) { 2656a10a988aSShreyansh Jain DPAA2_PMD_ERR( 2657a10a988aSShreyansh Jain "Failure in get dpni@%d attribute, err code %d", 2658d4984046SHemant Agrawal hw_id, ret); 2659d4984046SHemant Agrawal goto init_err; 26603e5a335dSHemant Agrawal } 26613e5a335dSHemant Agrawal 266216bbc98aSShreyansh Jain priv->num_rx_tc = attr.num_rx_tcs; 266372100f0dSGagandeep Singh priv->num_tx_tc = attr.num_tx_tcs; 26644ce58f8aSJun Yang priv->qos_entries = attr.qos_entries; 26654ce58f8aSJun Yang priv->fs_entries = attr.fs_entries; 26664ce58f8aSJun Yang priv->dist_queues = attr.num_queues; 266772100f0dSGagandeep Singh priv->num_channels = attr.num_channels; 266872100f0dSGagandeep Singh priv->channel_inuse = 0; 2669f023d059SJun Yang rte_spinlock_init(&priv->lpbk_qp_lock); 26704ce58f8aSJun Yang 267113b856acSHemant Agrawal /* only if the custom CG is enabled */ 267213b856acSHemant Agrawal if (attr.options & DPNI_OPT_CUSTOM_CG) 267313b856acSHemant Agrawal priv->max_cgs = attr.num_cgs; 267413b856acSHemant Agrawal else 267513b856acSHemant Agrawal priv->max_cgs = 0; 267613b856acSHemant Agrawal 267713b856acSHemant Agrawal for (i = 0; i < priv->max_cgs; i++) 267813b856acSHemant Agrawal priv->cgid_in_use[i] = 0; 267989c2ea8fSHemant Agrawal 2680fe2b986aSSunil Kumar Kori for (i = 0; i < attr.num_rx_tcs; i++) 2681fe2b986aSSunil Kumar Kori priv->nb_rx_queues += attr.num_queues; 268289c2ea8fSHemant Agrawal 268372100f0dSGagandeep Singh priv->nb_tx_queues = attr.num_tx_tcs * attr.num_channels; 
2684ef18dafeSHemant Agrawal 268513b856acSHemant Agrawal DPAA2_PMD_DEBUG("RX-TC= %d, rx_queues= %d, tx_queues=%d, max_cgs=%d", 2686a10a988aSShreyansh Jain priv->num_rx_tc, priv->nb_rx_queues, 268713b856acSHemant Agrawal priv->nb_tx_queues, priv->max_cgs); 26883e5a335dSHemant Agrawal 26893e5a335dSHemant Agrawal priv->hw = dpni_dev; 26903e5a335dSHemant Agrawal priv->hw_id = hw_id; 269133fad432SHemant Agrawal priv->options = attr.options; 269233fad432SHemant Agrawal priv->max_mac_filters = attr.mac_filter_entries; 269333fad432SHemant Agrawal priv->max_vlan_filters = attr.vlan_filter_entries; 26943e5a335dSHemant Agrawal priv->flags = 0; 2695e806bf87SPriyanka Jain #if defined(RTE_LIBRTE_IEEE1588) 26960fcdbde0SHemant Agrawal DPAA2_PMD_INFO("DPDK IEEE1588 is enabled"); 26978d21c563SHemant Agrawal priv->flags |= DPAA2_TX_CONF_ENABLE; 2698e806bf87SPriyanka Jain #endif 26998d21c563SHemant Agrawal /* Used with ``fslmc:dpni.1,drv_tx_conf=1`` */ 27008d21c563SHemant Agrawal if (dpaa2_get_devargs(dev->devargs, DRIVER_TX_CONF)) { 27018d21c563SHemant Agrawal priv->flags |= DPAA2_TX_CONF_ENABLE; 27028d21c563SHemant Agrawal DPAA2_PMD_INFO("TX_CONF Enabled"); 27038d21c563SHemant Agrawal } 27043e5a335dSHemant Agrawal 27054690a611SNipun Gupta if (dpaa2_get_devargs(dev->devargs, DRIVER_ERROR_QUEUE)) { 27064690a611SNipun Gupta dpaa2_enable_err_queue = 1; 2707a63c6426SJun Yang DPAA2_PMD_INFO("Enable DMA error checks"); 27084690a611SNipun Gupta } 27094690a611SNipun Gupta 27103e5a335dSHemant Agrawal /* Allocate memory for hardware structure for queues */ 27113e5a335dSHemant Agrawal ret = dpaa2_alloc_rx_tx_queues(eth_dev); 27123e5a335dSHemant Agrawal if (ret) { 2713a10a988aSShreyansh Jain DPAA2_PMD_ERR("Queue allocation Failed"); 2714d4984046SHemant Agrawal goto init_err; 27153e5a335dSHemant Agrawal } 27163e5a335dSHemant Agrawal 2717c3e0a706SShreyansh Jain /* Allocate memory for storing MAC addresses. 2718c3e0a706SShreyansh Jain * Table of mac_filter_entries size is allocated so that RTE ether lib 2719c3e0a706SShreyansh Jain * can add MAC entries when rte_eth_dev_mac_addr_add is called. 2720c3e0a706SShreyansh Jain */ 272133fad432SHemant Agrawal eth_dev->data->mac_addrs = rte_zmalloc("dpni", 272235b2d13fSOlivier Matz RTE_ETHER_ADDR_LEN * attr.mac_filter_entries, 0); 272333fad432SHemant Agrawal if (eth_dev->data->mac_addrs == NULL) { 2724a10a988aSShreyansh Jain DPAA2_PMD_ERR( 2725d4984046SHemant Agrawal "Failed to allocate %d bytes needed to store MAC addresses", 272635b2d13fSOlivier Matz RTE_ETHER_ADDR_LEN * attr.mac_filter_entries); 2727d4984046SHemant Agrawal ret = -ENOMEM; 2728d4984046SHemant Agrawal goto init_err; 272933fad432SHemant Agrawal } 273033fad432SHemant Agrawal 2731c3e0a706SShreyansh Jain ret = populate_mac_addr(dpni_dev, priv, &eth_dev->data->mac_addrs[0]); 273233fad432SHemant Agrawal if (ret) { 2733c3e0a706SShreyansh Jain DPAA2_PMD_ERR("Unable to fetch MAC Address for device"); 2734c3e0a706SShreyansh Jain rte_free(eth_dev->data->mac_addrs); 2735c3e0a706SShreyansh Jain eth_dev->data->mac_addrs = NULL; 2736d4984046SHemant Agrawal goto init_err; 273733fad432SHemant Agrawal } 273833fad432SHemant Agrawal 2739bee61d86SHemant Agrawal /* ... tx buffer layout ...
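 * Frame status is always requested so that per-frame confirmation/error
 * information is available; the timestamp field is requested in addition
 * when Tx confirmation is enabled above (IEEE1588 builds force it on).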
*/ 2740bee61d86SHemant Agrawal memset(&layout, 0, sizeof(struct dpni_buffer_layout)); 27418d21c563SHemant Agrawal if (priv->flags & DPAA2_TX_CONF_ENABLE) { 27429ceacab7SPriyanka Jain layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | 27439ceacab7SPriyanka Jain DPNI_BUF_LAYOUT_OPT_TIMESTAMP; 27449ceacab7SPriyanka Jain layout.pass_timestamp = true; 27459ceacab7SPriyanka Jain } else { 2746bee61d86SHemant Agrawal layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; 27479ceacab7SPriyanka Jain } 2748bee61d86SHemant Agrawal layout.pass_frame_status = 1; 2749bee61d86SHemant Agrawal ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, 2750bee61d86SHemant Agrawal DPNI_QUEUE_TX, &layout); 2751bee61d86SHemant Agrawal if (ret) { 2752a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret); 2753d4984046SHemant Agrawal goto init_err; 2754bee61d86SHemant Agrawal } 2755bee61d86SHemant Agrawal 2756bee61d86SHemant Agrawal /* ... tx-conf and error buffer layout ... */ 2757bee61d86SHemant Agrawal memset(&layout, 0, sizeof(struct dpni_buffer_layout)); 27588d21c563SHemant Agrawal if (priv->flags & DPAA2_TX_CONF_ENABLE) { 27598d21c563SHemant Agrawal layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP; 27609ceacab7SPriyanka Jain layout.pass_timestamp = true; 27619ceacab7SPriyanka Jain } 27628d21c563SHemant Agrawal layout.options |= DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; 2763bee61d86SHemant Agrawal layout.pass_frame_status = 1; 2764bee61d86SHemant Agrawal ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, 2765bee61d86SHemant Agrawal DPNI_QUEUE_TX_CONFIRM, &layout); 2766bee61d86SHemant Agrawal if (ret) { 2767a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout", 2768d4984046SHemant Agrawal ret); 2769d4984046SHemant Agrawal goto init_err; 2770bee61d86SHemant Agrawal } 2771bee61d86SHemant Agrawal 27723e5a335dSHemant Agrawal eth_dev->dev_ops = &dpaa2_ethdev_ops; 2773c147eae0SHemant Agrawal 2774a3a997f0SHemant Agrawal if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) { 2775a3a997f0SHemant Agrawal eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx; 2776a3a997f0SHemant Agrawal DPAA2_PMD_INFO("Loopback mode"); 277720191ab3SNipun Gupta } else if (dpaa2_get_devargs(dev->devargs, DRIVER_NO_PREFETCH_MODE)) { 277820191ab3SNipun Gupta eth_dev->rx_pkt_burst = dpaa2_dev_rx; 277920191ab3SNipun Gupta DPAA2_PMD_INFO("No Prefetch mode"); 2780a3a997f0SHemant Agrawal } else { 27815c6942fdSHemant Agrawal eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx; 2782a3a997f0SHemant Agrawal } 2783cd9935ceSHemant Agrawal eth_dev->tx_pkt_burst = dpaa2_dev_tx; 27841261cd68SHemant Agrawal 27857be78d02SJosh Soref /* Init fields w.r.t. 
classification */ 27865f176728SJun Yang memset(&priv->extract.qos_key_extract, 0, 27875f176728SJun Yang sizeof(struct dpaa2_key_extract)); 2788fe2b986aSSunil Kumar Kori priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64); 2789fe2b986aSSunil Kumar Kori if (!priv->extract.qos_extract_param) { 2790fe2b986aSSunil Kumar Kori DPAA2_PMD_ERR("Memory allocation failed for flow " 27917be78d02SJosh Soref "classification"); 2792fe2b986aSSunil Kumar Kori ret = -ENOMEM; goto init_err; 2793fe2b986aSSunil Kumar Kori } 27945f176728SJun Yang priv->extract.qos_key_extract.key_info.ipv4_src_offset = 27955f176728SJun Yang IP_ADDRESS_OFFSET_INVALID; 27965f176728SJun Yang priv->extract.qos_key_extract.key_info.ipv4_dst_offset = 27975f176728SJun Yang IP_ADDRESS_OFFSET_INVALID; 27985f176728SJun Yang priv->extract.qos_key_extract.key_info.ipv6_src_offset = 27995f176728SJun Yang IP_ADDRESS_OFFSET_INVALID; 28005f176728SJun Yang priv->extract.qos_key_extract.key_info.ipv6_dst_offset = 28015f176728SJun Yang IP_ADDRESS_OFFSET_INVALID; 28025f176728SJun Yang 2803fe2b986aSSunil Kumar Kori for (i = 0; i < MAX_TCS; i++) { 28045f176728SJun Yang memset(&priv->extract.tc_key_extract[i], 0, 28055f176728SJun Yang sizeof(struct dpaa2_key_extract)); 28065f176728SJun Yang priv->extract.tc_extract_param[i] = 2807fe2b986aSSunil Kumar Kori (size_t)rte_malloc(NULL, 256, 64); 28085f176728SJun Yang if (!priv->extract.tc_extract_param[i]) { 28097be78d02SJosh Soref DPAA2_PMD_ERR("Memory allocation failed for flow classification on TC%d", 2810fe2b986aSSunil Kumar Kori i); 2811fe2b986aSSunil Kumar Kori ret = -ENOMEM; goto init_err; 2812fe2b986aSSunil Kumar Kori } 28135f176728SJun Yang priv->extract.tc_key_extract[i].key_info.ipv4_src_offset = 28145f176728SJun Yang IP_ADDRESS_OFFSET_INVALID; 28155f176728SJun Yang priv->extract.tc_key_extract[i].key_info.ipv4_dst_offset = 28165f176728SJun Yang IP_ADDRESS_OFFSET_INVALID; 28175f176728SJun Yang priv->extract.tc_key_extract[i].key_info.ipv6_src_offset = 28185f176728SJun Yang IP_ADDRESS_OFFSET_INVALID; 28195f176728SJun Yang priv->extract.tc_key_extract[i].key_info.ipv6_dst_offset = 28205f176728SJun Yang IP_ADDRESS_OFFSET_INVALID; 2821fe2b986aSSunil Kumar Kori } 2822fe2b986aSSunil Kumar Kori 28236f8be0fbSHemant Agrawal ret = dpni_set_max_frame_length(dpni_dev, CMD_PRI_LOW, priv->token, 28246f8be0fbSHemant Agrawal RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN 28256f8be0fbSHemant Agrawal + VLAN_TAG_SIZE); 28266f8be0fbSHemant Agrawal if (ret) { 28276f8be0fbSHemant Agrawal DPAA2_PMD_ERR("Unable to set MTU, check config"); 28286f8be0fbSHemant Agrawal goto init_err; 28296f8be0fbSHemant Agrawal } 2830de08b474SApeksha Gupta eth_dev->data->mtu = RTE_ETHER_MTU; 28316f8be0fbSHemant Agrawal 283272ec7a67SSunil Kumar Kori /* TODO: To enable soft parser support, the DPAA2 driver needs to integrate 283372ec7a67SSunil Kumar Kori * with an external entity to receive the byte code for the software sequence, 283472ec7a67SSunil Kumar Kori * and the same will be offloaded to the H/W using the MC interface. 283572ec7a67SSunil Kumar Kori * Currently it is assumed that the DPAA2 driver has the byte code by some 283672ec7a67SSunil Kumar Kori * means and the same is offloaded to the H/W.
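 * Example (sketch): exporting DPAA2_ENABLE_SOFT_PARSER=1 in the environment
 * before starting the application makes the code below load and enable the
 * WRIOP soft sequence on the ingress side.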
283772ec7a67SSunil Kumar Kori */ 283872ec7a67SSunil Kumar Kori if (getenv("DPAA2_ENABLE_SOFT_PARSER")) { 283972ec7a67SSunil Kumar Kori WRIOP_SS_INITIALIZER(priv); 284072ec7a67SSunil Kumar Kori ret = dpaa2_eth_load_wriop_soft_parser(priv, DPNI_SS_INGRESS); 284172ec7a67SSunil Kumar Kori if (ret < 0) { 2842f665790aSDavid Marchand DPAA2_PMD_ERR(" Error(%d) in loading softparser", 284372ec7a67SSunil Kumar Kori ret); 284472ec7a67SSunil Kumar Kori return ret; 284572ec7a67SSunil Kumar Kori } 284672ec7a67SSunil Kumar Kori 284772ec7a67SSunil Kumar Kori ret = dpaa2_eth_enable_wriop_soft_parser(priv, 284872ec7a67SSunil Kumar Kori DPNI_SS_INGRESS); 284972ec7a67SSunil Kumar Kori if (ret < 0) { 2850f665790aSDavid Marchand DPAA2_PMD_ERR(" Error(%d) in enabling softparser", 285172ec7a67SSunil Kumar Kori ret); 285272ec7a67SSunil Kumar Kori return ret; 285372ec7a67SSunil Kumar Kori } 285472ec7a67SSunil Kumar Kori } 2855a247fcd9SStephen Hemminger DPAA2_PMD_INFO("%s: netdev created, connected to %s", 2856f023d059SJun Yang eth_dev->data->name, dpaa2_dev->ep_name); 2857f023d059SJun Yang 2858c147eae0SHemant Agrawal return 0; 2859d4984046SHemant Agrawal init_err: 28603e5a335dSHemant Agrawal dpaa2_dev_close(eth_dev); 28613e5a335dSHemant Agrawal 28625964d36aSSachin Saxena return ret; 2863c147eae0SHemant Agrawal } 2864c147eae0SHemant Agrawal 2865028d1dfdSJun Yang int dpaa2_dev_is_dpaa2(struct rte_eth_dev *dev) 2866028d1dfdSJun Yang { 2867028d1dfdSJun Yang return dev->device->driver == &rte_dpaa2_pmd.driver; 2868028d1dfdSJun Yang } 2869028d1dfdSJun Yang 28702013e308SVanshika Shukla #if defined(RTE_LIBRTE_IEEE1588) 28712013e308SVanshika Shukla int 28722013e308SVanshika Shukla rte_pmd_dpaa2_get_one_step_ts(uint16_t port_id, bool mc_query) 28732013e308SVanshika Shukla { 28742013e308SVanshika Shukla struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 28752013e308SVanshika Shukla struct dpaa2_dev_priv *priv = dev->data->dev_private; 28762013e308SVanshika Shukla struct fsl_mc_io *dpni = priv->eth_dev->process_private; 28772013e308SVanshika Shukla struct dpni_single_step_cfg ptp_cfg; 28782013e308SVanshika Shukla int err; 28792013e308SVanshika Shukla 28802013e308SVanshika Shukla if (!mc_query) 28812013e308SVanshika Shukla return priv->ptp_correction_offset; 28822013e308SVanshika Shukla 28832013e308SVanshika Shukla err = dpni_get_single_step_cfg(dpni, CMD_PRI_LOW, priv->token, &ptp_cfg); 28842013e308SVanshika Shukla if (err) { 28852013e308SVanshika Shukla DPAA2_PMD_ERR("Failed to retrieve onestep configuration"); 28862013e308SVanshika Shukla return err; 28872013e308SVanshika Shukla } 28882013e308SVanshika Shukla 28892013e308SVanshika Shukla if (!ptp_cfg.ptp_onestep_reg_base) { 28902013e308SVanshika Shukla DPAA2_PMD_ERR("1588 onestep reg not available"); 28912013e308SVanshika Shukla return -1; 28922013e308SVanshika Shukla } 28932013e308SVanshika Shukla 28942013e308SVanshika Shukla priv->ptp_correction_offset = ptp_cfg.offset; 28952013e308SVanshika Shukla 28962013e308SVanshika Shukla return priv->ptp_correction_offset; 28972013e308SVanshika Shukla } 28982013e308SVanshika Shukla 28992013e308SVanshika Shukla int 29002013e308SVanshika Shukla rte_pmd_dpaa2_set_one_step_ts(uint16_t port_id, uint16_t offset, uint8_t ch_update) 29012013e308SVanshika Shukla { 29022013e308SVanshika Shukla struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 29032013e308SVanshika Shukla struct dpaa2_dev_priv *priv = dev->data->dev_private; 29042013e308SVanshika Shukla struct fsl_mc_io *dpni = dev->process_private; 29052013e308SVanshika Shukla struct 
dpni_single_step_cfg cfg; 29062013e308SVanshika Shukla int err; 29072013e308SVanshika Shukla 29082013e308SVanshika Shukla cfg.en = 1; 29092013e308SVanshika Shukla cfg.ch_update = ch_update; 29102013e308SVanshika Shukla cfg.offset = offset; 29112013e308SVanshika Shukla cfg.peer_delay = 0; 29122013e308SVanshika Shukla 29132013e308SVanshika Shukla err = dpni_set_single_step_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg); 29142013e308SVanshika Shukla if (err) 29152013e308SVanshika Shukla return err; 29162013e308SVanshika Shukla 29172013e308SVanshika Shukla priv->ptp_correction_offset = offset; 29182013e308SVanshika Shukla 29192013e308SVanshika Shukla return 0; 29202013e308SVanshika Shukla } 29212013e308SVanshika Shukla #endif 29222013e308SVanshika Shukla 2923748b9980SJun Yang static int dpaa2_tx_sg_pool_init(void) 2924748b9980SJun Yang { 2925748b9980SJun Yang char name[RTE_MEMZONE_NAMESIZE]; 2926748b9980SJun Yang 2927748b9980SJun Yang if (dpaa2_tx_sg_pool) 2928748b9980SJun Yang return 0; 2929748b9980SJun Yang 2930748b9980SJun Yang sprintf(name, "dpaa2_mbuf_tx_sg_pool"); 2931748b9980SJun Yang if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 2932748b9980SJun Yang dpaa2_tx_sg_pool = rte_pktmbuf_pool_create(name, 2933748b9980SJun Yang DPAA2_POOL_SIZE, 2934748b9980SJun Yang DPAA2_POOL_CACHE_SIZE, 0, 2935748b9980SJun Yang DPAA2_MAX_SGS * sizeof(struct qbman_sge), 2936748b9980SJun Yang rte_socket_id()); 2937748b9980SJun Yang if (!dpaa2_tx_sg_pool) { 2938748b9980SJun Yang DPAA2_PMD_ERR("SG pool creation failed"); 2939748b9980SJun Yang return -ENOMEM; 2940748b9980SJun Yang } 2941748b9980SJun Yang } else { 2942748b9980SJun Yang dpaa2_tx_sg_pool = rte_mempool_lookup(name); 2943748b9980SJun Yang if (!dpaa2_tx_sg_pool) { 2944748b9980SJun Yang DPAA2_PMD_ERR("SG pool lookup failed"); 2945748b9980SJun Yang return -ENOMEM; 2946748b9980SJun Yang } 2947748b9980SJun Yang } 2948748b9980SJun Yang 2949748b9980SJun Yang return 0; 2950748b9980SJun Yang } 2951748b9980SJun Yang 2952c147eae0SHemant Agrawal static int 295355fd2703SHemant Agrawal rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv, 2954c147eae0SHemant Agrawal struct rte_dpaa2_device *dpaa2_dev) 2955c147eae0SHemant Agrawal { 2956c147eae0SHemant Agrawal struct rte_eth_dev *eth_dev; 295781c42c84SShreyansh Jain struct dpaa2_dev_priv *dev_priv; 2958c147eae0SHemant Agrawal int diag; 2959c147eae0SHemant Agrawal 2960f4435e38SHemant Agrawal if ((DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) > 2961f4435e38SHemant Agrawal RTE_PKTMBUF_HEADROOM) { 2962f4435e38SHemant Agrawal DPAA2_PMD_ERR( 2963f4435e38SHemant Agrawal "RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA2 Annotation req(%d)", 2964f4435e38SHemant Agrawal RTE_PKTMBUF_HEADROOM, 2965f4435e38SHemant Agrawal DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE); 2966f4435e38SHemant Agrawal 2967f4435e38SHemant Agrawal return -1; 2968f4435e38SHemant Agrawal } 2969f4435e38SHemant Agrawal 2970c147eae0SHemant Agrawal if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 2971e729ec76SHemant Agrawal eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name); 2972e729ec76SHemant Agrawal if (!eth_dev) 2973e729ec76SHemant Agrawal return -ENODEV; 297481c42c84SShreyansh Jain dev_priv = rte_zmalloc("ethdev private structure", 2975c147eae0SHemant Agrawal sizeof(struct dpaa2_dev_priv), 2976c147eae0SHemant Agrawal RTE_CACHE_LINE_SIZE); 297781c42c84SShreyansh Jain if (dev_priv == NULL) { 2978a10a988aSShreyansh Jain DPAA2_PMD_CRIT( 2979a10a988aSShreyansh Jain "Unable to allocate memory for private data"); 2980c147eae0SHemant Agrawal 
rte_eth_dev_release_port(eth_dev); 2981c147eae0SHemant Agrawal return -ENOMEM; 2982c147eae0SHemant Agrawal } 298381c42c84SShreyansh Jain eth_dev->data->dev_private = (void *)dev_priv; 298481c42c84SShreyansh Jain /* Store a pointer to eth_dev in dev_private */ 298581c42c84SShreyansh Jain dev_priv->eth_dev = eth_dev; 2986e729ec76SHemant Agrawal } else { 2987e729ec76SHemant Agrawal eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name); 298881c42c84SShreyansh Jain if (!eth_dev) { 298981c42c84SShreyansh Jain DPAA2_PMD_DEBUG("returning enodev"); 2990e729ec76SHemant Agrawal return -ENODEV; 2991c147eae0SHemant Agrawal } 299281c42c84SShreyansh Jain } 2993e729ec76SHemant Agrawal 2994c147eae0SHemant Agrawal eth_dev->device = &dpaa2_dev->device; 299555fd2703SHemant Agrawal 2996c147eae0SHemant Agrawal dpaa2_dev->eth_dev = eth_dev; 2997c147eae0SHemant Agrawal eth_dev->data->rx_mbuf_alloc_failed = 0; 2998c147eae0SHemant Agrawal 299992b7e33eSHemant Agrawal if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC) 300092b7e33eSHemant Agrawal eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC; 300192b7e33eSHemant Agrawal 3002f30e69b4SFerruh Yigit eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 3003f30e69b4SFerruh Yigit 3004c147eae0SHemant Agrawal /* Invoke PMD device initialization function */ 3005c147eae0SHemant Agrawal diag = dpaa2_dev_init(eth_dev); 3006748b9980SJun Yang if (!diag) { 3007748b9980SJun Yang diag = dpaa2_tx_sg_pool_init(); 3008748b9980SJun Yang if (diag) 3009748b9980SJun Yang return diag; 3010fbe90cddSThomas Monjalon rte_eth_dev_probing_finish(eth_dev); 301175e2a1d4SGagandeep Singh dpaa2_valid_dev++; 3012c147eae0SHemant Agrawal return 0; 3013fbe90cddSThomas Monjalon } 3014c147eae0SHemant Agrawal 3015c147eae0SHemant Agrawal rte_eth_dev_release_port(eth_dev); 3016c147eae0SHemant Agrawal return diag; 3017c147eae0SHemant Agrawal } 3018c147eae0SHemant Agrawal 3019c147eae0SHemant Agrawal static int 3020c147eae0SHemant Agrawal rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev) 3021c147eae0SHemant Agrawal { 3022c147eae0SHemant Agrawal struct rte_eth_dev *eth_dev; 30235964d36aSSachin Saxena int ret; 3024c147eae0SHemant Agrawal 3025c147eae0SHemant Agrawal eth_dev = dpaa2_dev->eth_dev; 30265964d36aSSachin Saxena dpaa2_dev_close(eth_dev); 302775e2a1d4SGagandeep Singh dpaa2_valid_dev--; 302875e2a1d4SGagandeep Singh if (!dpaa2_valid_dev) 302975e2a1d4SGagandeep Singh rte_mempool_free(dpaa2_tx_sg_pool); 30305964d36aSSachin Saxena ret = rte_eth_dev_release_port(eth_dev); 3031c147eae0SHemant Agrawal 30325964d36aSSachin Saxena return ret; 3033c147eae0SHemant Agrawal } 3034c147eae0SHemant Agrawal 3035c147eae0SHemant Agrawal static struct rte_dpaa2_driver rte_dpaa2_pmd = { 303692b7e33eSHemant Agrawal .drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA, 3037bad555dfSShreyansh Jain .drv_type = DPAA2_ETH, 3038c147eae0SHemant Agrawal .probe = rte_dpaa2_probe, 3039c147eae0SHemant Agrawal .remove = rte_dpaa2_remove, 3040c147eae0SHemant Agrawal }; 3041c147eae0SHemant Agrawal 30424ed8a733SVanshika Shukla RTE_PMD_REGISTER_DPAA2(NET_DPAA2_PMD_DRIVER_NAME, rte_dpaa2_pmd); 30434ed8a733SVanshika Shukla RTE_PMD_REGISTER_PARAM_STRING(NET_DPAA2_PMD_DRIVER_NAME, 304420191ab3SNipun Gupta DRIVER_LOOPBACK_MODE "=<int> " 30458d21c563SHemant Agrawal DRIVER_NO_PREFETCH_MODE "=<int>" 30464690a611SNipun Gupta DRIVER_TX_CONF "=<int>" 30474690a611SNipun Gupta DRIVER_ERROR_QUEUE "=<int>"); 3048eeded204SDavid Marchand RTE_LOG_REGISTER_DEFAULT(dpaa2_logtype_pmd, NOTICE); 3049
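/*
 * Devargs usage sketch (illustrative; dpni.1 and the application are
 * assumptions): the parameters registered above are passed per device on
 * the fslmc bus, e.g.
 *
 *     dpdk-testpmd -a fslmc:dpni.1,drv_loopback=1 -- -i
 *     dpdk-testpmd -a fslmc:dpni.1,drv_tx_conf=1,drv_err_queue=1 -- -i
 *
 * matching the checks of DRIVER_TX_CONF ("drv_tx_conf") and
 * DRIVER_ERROR_QUEUE ("drv_err_queue") in dpaa2_dev_init().
 */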