/* * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2016-2021 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <dev_driver.h>
#include <bus_fslmc_driver.h>
#include <rte_flow_driver.h>
#include "rte_dpaa2_mempool.h"

#include "dpaa2_pmd_logs.h"
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <mc/fsl_dpmng.h>
#include "dpaa2_ethdev.h"
#include "dpaa2_sparser.h"
#include <fsl_qbman_debug.h>

#define DRIVER_LOOPBACK_MODE "drv_loopback"
#define DRIVER_NO_PREFETCH_MODE "drv_no_prefetch"
#define DRIVER_TX_CONF "drv_tx_conf"
#define DRIVER_ERROR_QUEUE "drv_err_queue"
#define CHECK_INTERVAL		100  /* 100ms */
#define MAX_REPEAT_TIME		90   /* 9s (90 * 100ms) in total */

/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
		RTE_ETH_RX_OFFLOAD_CHECKSUM |
		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
		RTE_ETH_RX_OFFLOAD_TIMESTAMP;

/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
		RTE_ETH_RX_OFFLOAD_RSS_HASH |
		RTE_ETH_RX_OFFLOAD_SCATTER;

/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

/* Tx offloads which cannot be disabled */
static uint64_t dev_tx_offloads_nodis =
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;

/* enable timestamp in mbuf */
bool dpaa2_enable_ts[RTE_MAX_ETHPORTS];
uint64_t dpaa2_timestamp_rx_dynflag;
int dpaa2_timestamp_dynfield_offset = -1;

/* Enable error queue */
bool dpaa2_enable_err_queue;

#define MAX_NB_RX_DESC		11264
int total_nb_rx_desc;

int dpaa2_valid_dev;
struct rte_mempool *dpaa2_tx_sg_pool;

struct rte_dpaa2_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint8_t page_id; /* dpni statistics page id */
	uint8_t stats_id; /* stats id in the given page */
};

static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
	{"ingress_multicast_frames", 0, 2},
	{"ingress_multicast_bytes", 0, 3},
	{"ingress_broadcast_frames", 0, 4},
	{"ingress_broadcast_bytes", 0, 5},
	{"egress_multicast_frames", 1, 2},
	{"egress_multicast_bytes", 1, 3},
	{"egress_broadcast_frames", 1, 4},
	{"egress_broadcast_bytes", 1, 5},
	{"ingress_filtered_frames", 2, 0},
	{"ingress_discarded_frames", 2, 1},
	{"ingress_nobuffer_discards", 2, 2},
	{"egress_discarded_frames", 2, 3},
	{"egress_confirmed_frames", 2, 4},
	{"cgr_reject_frames", 4, 0},
	{"cgr_reject_bytes", 4, 1},
};

static struct rte_dpaa2_driver rte_dpaa2_pmd;
static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int
dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = dev->process_private;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -1;
	}

	if (on)
		ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW, priv->token,
				       vlan_id, 0, 0, 0);
	else
		ret = dpni_remove_vlan_id(dpni,
				CMD_PRI_LOW,
				priv->token, vlan_id);

	if (ret < 0)
		DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid =%d",
			      ret, vlan_id, priv->hw_id);

	return ret;
}

static int
dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = dev->process_private;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();

	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
		/* VLAN Filter not available */
		if (!priv->max_vlan_filters) {
			DPAA2_PMD_INFO("VLAN filter not available");
			return -ENOTSUP;
		}

		if (dev->data->dev_conf.rxmode.offloads &
		    RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, true);
		else
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, false);
		if (ret < 0)
			DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret);
	}

	return ret;
}

static int
dpaa2_vlan_tpid_set(struct rte_eth_dev *dev,
		    enum rte_vlan_type vlan_type __rte_unused,
		    uint16_t tpid)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = dev->process_private;
	int ret = -ENOTSUP;

	PMD_INIT_FUNC_TRACE();

	/* nothing to be done for standard vlan tpids */
	if (tpid == 0x8100 || tpid == 0x88A8)
		return 0;

	ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
				   priv->token, tpid);
	if (ret < 0)
		DPAA2_PMD_INFO("Unable to set vlan tpid = %d", ret);
	/* if already configured tpids, remove them first */
	if (ret == -EBUSY) {
		struct dpni_custom_tpid_cfg tpid_list = {0};

		ret = dpni_get_custom_tpid(dpni, CMD_PRI_LOW,
					   priv->token, &tpid_list);
		if (ret < 0)
			goto fail;
		ret = dpni_remove_custom_tpid(dpni, CMD_PRI_LOW,
					      priv->token, tpid_list.tpid1);
		if (ret < 0)
			goto fail;
		ret =
			dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
					     priv->token, tpid);
	}
fail:
	return ret;
}

static int
dpaa2_fw_version_get(struct rte_eth_dev *dev,
		     char *fw_version,
		     size_t fw_size)
{
	int ret;
	struct fsl_mc_io *dpni = dev->process_private;
	struct mc_soc_version mc_plat_info = {0};
	struct mc_version mc_ver_info = {0};

	PMD_INIT_FUNC_TRACE();

	if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
		DPAA2_PMD_WARN("\tmc_get_soc_version failed");

	if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
		DPAA2_PMD_WARN("\tmc_get_version failed");

	ret = snprintf(fw_version, fw_size,
		       "%x-%d.%d.%d",
		       mc_plat_info.svr,
		       mc_ver_info.major,
		       mc_ver_info.minor,
		       mc_ver_info.revision);
	if (ret < 0)
		return -EINVAL;

	ret += 1; /* add the size of '\0' */
	if (fw_size < (size_t)ret)
		return ret;
	else
		return 0;
}

static int
dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	dev_info->max_mac_addrs = priv->max_mac_filters;
	dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
	dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
	dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
	dev_info->rx_offload_capa = dev_rx_offloads_sup |
					dev_rx_offloads_nodis;
	dev_info->tx_offload_capa = dev_tx_offloads_sup |
					dev_tx_offloads_nodis;
	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G |
			RTE_ETH_LINK_SPEED_2_5G |
			RTE_ETH_LINK_SPEED_10G;
	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;

	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vfs = 0;
	dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
	dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;

	dev_info->default_rxportconf.burst_size = dpaa2_dqrr_size;
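	/* Descriptive note (not in the original source): the preferred burst
	 * size follows the portal DQRR ring size (dpaa2_dqrr_size), so one
	 * dequeue command can fill an entire burst.
	 */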
	/* same as Rx size for best perf */
	dev_info->default_txportconf.burst_size = dpaa2_dqrr_size;

	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_txportconf.ring_size = CONG_ENTER_TX_THRESHOLD;
	dev_info->default_rxportconf.ring_size = DPAA2_RX_DEFAULT_NBDESC;

	if (dpaa2_svr_family == SVR_LX2160A) {
		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_25G |
				RTE_ETH_LINK_SPEED_40G |
				RTE_ETH_LINK_SPEED_50G |
				RTE_ETH_LINK_SPEED_100G;
	}

	return 0;
}

static int
dpaa2_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
			__rte_unused uint16_t queue_id,
			struct rte_eth_burst_mode *mode)
{
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	int ret = -EINVAL;
	unsigned int i;
	const struct burst_info {
		uint64_t flags;
		const char *output;
	} rx_offload_map[] = {
			{RTE_ETH_RX_OFFLOAD_CHECKSUM, " Checksum,"},
			{RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
			{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
			{RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
			{RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
			{RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
			{RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
			{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"},
			{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"}
	};

	/* Update Rx offload info */
	for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
		if (eth_conf->rxmode.offloads & rx_offload_map[i].flags) {
			snprintf(mode->info, sizeof(mode->info), "%s",
				 rx_offload_map[i].output);
			ret = 0;
			break;
		}
	}
	return ret;
}

static int
dpaa2_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
			__rte_unused uint16_t queue_id,
			struct rte_eth_burst_mode *mode)
{
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	int ret = -EINVAL;
	unsigned int i;
	const struct burst_info {
		uint64_t flags;
		const char *output;
	} tx_offload_map[] = {
			{RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
			{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
			{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
			{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
			{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
			{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
			{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
			{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
			{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
	};

	/* Update Tx offload info */
	for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
		if (eth_conf->txmode.offloads & tx_offload_map[i].flags) {
			snprintf(mode->info, sizeof(mode->info), "%s",
				 tx_offload_map[i].output);
			ret = 0;
			break;
		}
	}
	return ret;
}

static int
dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	uint16_t dist_idx;
	uint32_t vq_id;
	uint8_t num_rxqueue_per_tc;
	struct dpaa2_queue *mc_q, *mcq;
	uint32_t tot_queues;
	int i;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	num_rxqueue_per_tc = (priv->nb_rx_queues / priv->num_rx_tc);
	if (priv->flags & DPAA2_TX_CONF_ENABLE)
		tot_queues = priv->nb_rx_queues + 2 * priv->nb_tx_queues;
	else
		tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
	mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
			  RTE_CACHE_LINE_SIZE);
	if (!mc_q) {
		DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues");
		return -1;
	}

	for (i = 0; i < priv->nb_rx_queues; i++) {
		mc_q->eth_data = dev->data;
		priv->rx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_q->q_storage = rte_malloc("dq_storage",
					sizeof(struct queue_storage_info_t),
					RTE_CACHE_LINE_SIZE);
		if (!dpaa2_q->q_storage)
			goto fail;

		memset(dpaa2_q->q_storage, 0,
		       sizeof(struct queue_storage_info_t));
		if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
			goto fail;
	}

	if (dpaa2_enable_err_queue) {
		priv->rx_err_vq =
rte_zmalloc("dpni_rx_err", 4044690a611SNipun Gupta sizeof(struct dpaa2_queue), 0); 40529e5519dSWeiguo Li if (!priv->rx_err_vq) 40629e5519dSWeiguo Li goto fail; 4074690a611SNipun Gupta 4084690a611SNipun Gupta dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq; 4094690a611SNipun Gupta dpaa2_q->q_storage = rte_malloc("err_dq_storage", 4104690a611SNipun Gupta sizeof(struct queue_storage_info_t) * 4114690a611SNipun Gupta RTE_MAX_LCORE, 4124690a611SNipun Gupta RTE_CACHE_LINE_SIZE); 4134690a611SNipun Gupta if (!dpaa2_q->q_storage) 4144690a611SNipun Gupta goto fail; 4154690a611SNipun Gupta 4164690a611SNipun Gupta memset(dpaa2_q->q_storage, 0, 4174690a611SNipun Gupta sizeof(struct queue_storage_info_t)); 4184690a611SNipun Gupta for (i = 0; i < RTE_MAX_LCORE; i++) 4194690a611SNipun Gupta if (dpaa2_alloc_dq_storage(&dpaa2_q->q_storage[i])) 4204690a611SNipun Gupta goto fail; 4214690a611SNipun Gupta } 4224690a611SNipun Gupta 4233e5a335dSHemant Agrawal for (i = 0; i < priv->nb_tx_queues; i++) { 42485ee5ddaSShreyansh Jain mc_q->eth_data = dev->data; 4257ae777d0SHemant Agrawal mc_q->flow_id = 0xffff; 4263e5a335dSHemant Agrawal priv->tx_vq[i] = mc_q++; 4277ae777d0SHemant Agrawal dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; 4287ae777d0SHemant Agrawal dpaa2_q->cscn = rte_malloc(NULL, 4297ae777d0SHemant Agrawal sizeof(struct qbman_result), 16); 4307ae777d0SHemant Agrawal if (!dpaa2_q->cscn) 4317ae777d0SHemant Agrawal goto fail_tx; 4323e5a335dSHemant Agrawal } 4333e5a335dSHemant Agrawal 4348d21c563SHemant Agrawal if (priv->flags & DPAA2_TX_CONF_ENABLE) { 4359ceacab7SPriyanka Jain /*Setup tx confirmation queues*/ 4369ceacab7SPriyanka Jain for (i = 0; i < priv->nb_tx_queues; i++) { 4379ceacab7SPriyanka Jain mc_q->eth_data = dev->data; 4389ceacab7SPriyanka Jain mc_q->tc_index = i; 4399ceacab7SPriyanka Jain mc_q->flow_id = 0; 4409ceacab7SPriyanka Jain priv->tx_conf_vq[i] = mc_q++; 4419ceacab7SPriyanka Jain dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i]; 4429ceacab7SPriyanka Jain dpaa2_q->q_storage = 4439ceacab7SPriyanka Jain rte_malloc("dq_storage", 4449ceacab7SPriyanka Jain sizeof(struct queue_storage_info_t), 4459ceacab7SPriyanka Jain RTE_CACHE_LINE_SIZE); 4469ceacab7SPriyanka Jain if (!dpaa2_q->q_storage) 4479ceacab7SPriyanka Jain goto fail_tx_conf; 4489ceacab7SPriyanka Jain 4499ceacab7SPriyanka Jain memset(dpaa2_q->q_storage, 0, 4509ceacab7SPriyanka Jain sizeof(struct queue_storage_info_t)); 4519ceacab7SPriyanka Jain if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage)) 4529ceacab7SPriyanka Jain goto fail_tx_conf; 4539ceacab7SPriyanka Jain } 4549ceacab7SPriyanka Jain } 4559ceacab7SPriyanka Jain 4563e5a335dSHemant Agrawal vq_id = 0; 457599017a2SHemant Agrawal for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) { 4583e5a335dSHemant Agrawal mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id]; 4592d5f7f52SAshish Jain mcq->tc_index = dist_idx / num_rxqueue_per_tc; 4602d5f7f52SAshish Jain mcq->flow_id = dist_idx % num_rxqueue_per_tc; 4613e5a335dSHemant Agrawal vq_id++; 4623e5a335dSHemant Agrawal } 4633e5a335dSHemant Agrawal 4643e5a335dSHemant Agrawal return 0; 4659ceacab7SPriyanka Jain fail_tx_conf: 4669ceacab7SPriyanka Jain i -= 1; 4679ceacab7SPriyanka Jain while (i >= 0) { 4689ceacab7SPriyanka Jain dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i]; 4699ceacab7SPriyanka Jain rte_free(dpaa2_q->q_storage); 4709ceacab7SPriyanka Jain priv->tx_conf_vq[i--] = NULL; 4719ceacab7SPriyanka Jain } 4729ceacab7SPriyanka Jain i = priv->nb_tx_queues; 4737ae777d0SHemant Agrawal fail_tx: 4747ae777d0SHemant Agrawal i -= 1; 
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		rte_free(dpaa2_q->cscn);
		priv->tx_vq[i--] = NULL;
	}
	i = priv->nb_rx_queues;
fail:
	i -= 1;
	mc_q = priv->rx_vq[0];
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_free_dq_storage(dpaa2_q->q_storage);
		rte_free(dpaa2_q->q_storage);
		priv->rx_vq[i--] = NULL;
	}

	if (dpaa2_enable_err_queue) {
		dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
		if (dpaa2_q->q_storage)
			dpaa2_free_dq_storage(dpaa2_q->q_storage);
		rte_free(dpaa2_q->q_storage);
	}

	rte_free(mc_q);
	return -1;
}

static void
dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q;
	int i;

	PMD_INIT_FUNC_TRACE();

	/* Queue allocation base */
	if (priv->rx_vq[0]) {
		/* cleaning up queue storage */
		for (i = 0; i < priv->nb_rx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
			rte_free(dpaa2_q->q_storage);
		}
		/* cleanup tx queue cscn */
		for (i = 0; i < priv->nb_tx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
			rte_free(dpaa2_q->cscn);
		}
		if (priv->flags & DPAA2_TX_CONF_ENABLE) {
			/* cleanup tx conf queue storage */
			for (i = 0; i < priv->nb_tx_queues; i++) {
				dpaa2_q = (struct dpaa2_queue *)
						priv->tx_conf_vq[i];
				rte_free(dpaa2_q->q_storage);
			}
		}
		/*free memory for all queues (RX+TX) */
		rte_free(priv->rx_vq[0]);
		priv->rx_vq[0] = NULL;
	}
}

static int
dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = dev->process_private;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	uint64_t tx_offloads =
		eth_conf->txmode.offloads;
	int rx_l3_csum_offload = false;
	int rx_l4_csum_offload = false;
	int tx_l3_csum_offload = false;
	int tx_l4_csum_offload = false;
	int ret, tc_index;
	uint32_t max_rx_pktlen;

	PMD_INIT_FUNC_TRACE();

	/* Rx offloads which are enabled by default */
	if (dev_rx_offloads_nodis & ~rx_offloads) {
		DPAA2_PMD_INFO(
		"Some of rx offloads enabled by default - requested 0x%" PRIx64
		" fixed are 0x%" PRIx64,
		rx_offloads, dev_rx_offloads_nodis);
	}

	/* Tx offloads which are enabled by default */
	if (dev_tx_offloads_nodis & ~tx_offloads) {
		DPAA2_PMD_INFO(
		"Some of tx offloads enabled by default - requested 0x%" PRIx64
		" fixed are 0x%" PRIx64,
		tx_offloads, dev_tx_offloads_nodis);
	}

	max_rx_pktlen = eth_conf->rxmode.mtu + RTE_ETHER_HDR_LEN +
				RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
	if (max_rx_pktlen <= DPAA2_MAX_RX_PKT_LEN) {
		ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
			priv->token, max_rx_pktlen - RTE_ETHER_CRC_LEN);
		if (ret != 0) {
			DPAA2_PMD_ERR("Unable to set mtu. check config");
			return ret;
		}
		DPAA2_PMD_INFO("MTU configured for the device: %d",
				dev->data->mtu);
	} else {
		return -1;
	}

	if (eth_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
		for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
			ret = dpaa2_setup_flow_dist(dev,
					eth_conf->rx_adv_conf.rss_conf.rss_hf,
					tc_index);
			if (ret) {
				DPAA2_PMD_ERR(
					"Unable to set flow distribution on tc%d."
593271f5aeeSJun Yang "Check queue config", tc_index); 59489c2ea8fSHemant Agrawal return ret; 59589c2ea8fSHemant Agrawal } 59689c2ea8fSHemant Agrawal } 597271f5aeeSJun Yang } 598c5acbb5eSHemant Agrawal 599295968d1SFerruh Yigit if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) 6000ebce612SSunil Kumar Kori rx_l3_csum_offload = true; 6010ebce612SSunil Kumar Kori 602295968d1SFerruh Yigit if ((rx_offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) || 603295968d1SFerruh Yigit (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) || 604295968d1SFerruh Yigit (rx_offloads & RTE_ETH_RX_OFFLOAD_SCTP_CKSUM)) 6050ebce612SSunil Kumar Kori rx_l4_csum_offload = true; 60621ce788cSHemant Agrawal 60721ce788cSHemant Agrawal ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 6080ebce612SSunil Kumar Kori DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload); 60921ce788cSHemant Agrawal if (ret) { 610a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret); 61121ce788cSHemant Agrawal return ret; 61221ce788cSHemant Agrawal } 61321ce788cSHemant Agrawal 61421ce788cSHemant Agrawal ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 6150ebce612SSunil Kumar Kori DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload); 61621ce788cSHemant Agrawal if (ret) { 617a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error to get RX l4 csum:Error = %d", ret); 61821ce788cSHemant Agrawal return ret; 61921ce788cSHemant Agrawal } 62021ce788cSHemant Agrawal 6217eaf1323SGagandeep Singh #if !defined(RTE_LIBRTE_IEEE1588) 622295968d1SFerruh Yigit if (rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) 6237eaf1323SGagandeep Singh #endif 62461c41e2eSThomas Monjalon { 62561c41e2eSThomas Monjalon ret = rte_mbuf_dyn_rx_timestamp_register( 62661c41e2eSThomas Monjalon &dpaa2_timestamp_dynfield_offset, 62761c41e2eSThomas Monjalon &dpaa2_timestamp_rx_dynflag); 62861c41e2eSThomas Monjalon if (ret != 0) { 62961c41e2eSThomas Monjalon DPAA2_PMD_ERR("Error to register timestamp field/flag"); 63061c41e2eSThomas Monjalon return -rte_errno; 63161c41e2eSThomas Monjalon } 632724f79dfSHemant Agrawal dpaa2_enable_ts[dev->data->port_id] = true; 63361c41e2eSThomas Monjalon } 63420196043SHemant Agrawal 635295968d1SFerruh Yigit if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) 6360ebce612SSunil Kumar Kori tx_l3_csum_offload = true; 6370ebce612SSunil Kumar Kori 638295968d1SFerruh Yigit if ((tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) || 639295968d1SFerruh Yigit (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) || 640295968d1SFerruh Yigit (tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)) 6410ebce612SSunil Kumar Kori tx_l4_csum_offload = true; 6420ebce612SSunil Kumar Kori 64321ce788cSHemant Agrawal ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 6440ebce612SSunil Kumar Kori DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload); 64521ce788cSHemant Agrawal if (ret) { 646a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret); 64721ce788cSHemant Agrawal return ret; 64821ce788cSHemant Agrawal } 64921ce788cSHemant Agrawal 65021ce788cSHemant Agrawal ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 6510ebce612SSunil Kumar Kori DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload); 65221ce788cSHemant Agrawal if (ret) { 653a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error to get TX l4 csum:Error = %d", ret); 65421ce788cSHemant Agrawal return ret; 65521ce788cSHemant Agrawal } 65621ce788cSHemant Agrawal 657ffb3389cSNipun Gupta /* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in 658ffb3389cSNipun Gupta * dpni_set_offload API. 
	 * Setting this FLCTYPE for DPNI sets the FD[SC]
	 * to 0 for LS2 in the hardware thus disabling data/annotation
	 * stashing. For LX2 this is fixed in hardware and thus hash result and
	 * parse results can be received in FD using this option.
	 */
	if (dpaa2_svr_family == SVR_LX2160A) {
		ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
				       DPNI_FLCTYPE_HASH, true);
		if (ret) {
			DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret);
			return ret;
		}
	}

	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
		dpaa2_vlan_offload_set(dev, RTE_ETH_VLAN_FILTER_MASK);

	if (eth_conf->lpbk_mode) {
		ret = dpaa2_dev_recycle_config(dev);
		if (ret) {
			DPAA2_PMD_ERR("Error to configure %s to recycle port.",
				dev->data->name);

			return ret;
		}
	} else {
		/** User may disable loopback mode by calling
		 * "dev_configure" with lpbk_mode cleared.
		 * No matter the port was configured recycle or not,
		 * recycle de-configure is called here.
		 * If port is not recycled, the de-configure will return directly.
		 */
		ret = dpaa2_dev_recycle_deconfig(dev);
		if (ret) {
			DPAA2_PMD_ERR("Error to de-configure recycle port %s.",
				dev->data->name);

			return ret;
		}
	}

	dpaa2_tm_init(dev);

	return 0;
}

/* Function to setup RX flow information. It contains traffic class ID,
 * flow ID, destination configuration etc.
 */
static int
dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t rx_queue_id,
			 uint16_t nb_rx_desc,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mb_pool)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
	struct dpaa2_queue *dpaa2_q;
	struct dpni_queue cfg;
	uint8_t options = 0;
	uint8_t flow_id;
	uint32_t bpid;
	int i, ret;

	PMD_INIT_FUNC_TRACE();

	DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
			dev, rx_queue_id, mb_pool, rx_conf);

	total_nb_rx_desc += nb_rx_desc;
	if (total_nb_rx_desc > MAX_NB_RX_DESC) {
		DPAA2_PMD_WARN("Total nb_rx_desc exceeds %d limit. Please use Normal buffers",
			       MAX_NB_RX_DESC);
		DPAA2_PMD_WARN("To use Normal buffers, run 'export DPNI_NORMAL_BUF=1' before running dynamic_dpl.sh script");
	}

	/* Rx deferred start is not supported */
	if (rx_conf->rx_deferred_start) {
		DPAA2_PMD_ERR("%p:Rx deferred start not supported",
			      (void *)dev);
		return -EINVAL;
	}

	if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
		if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
			ret = rte_dpaa2_bpid_info_init(mb_pool);
			if (ret)
				return ret;
		}
		bpid = mempool_to_bpid(mb_pool);
		ret = dpaa2_attach_bp_list(priv, dpni,
				rte_dpaa2_bpid_info[bpid].bp_list);
		if (ret)
			return ret;
	}
	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
	dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
	dpaa2_q->bp_array = rte_dpaa2_bpid_info;
	dpaa2_q->nb_desc = UINT16_MAX;
	dpaa2_q->offloads = rx_conf->offloads;

	/*Get the flow id from given VQ id*/
	flow_id = dpaa2_q->flow_id;
	memset(&cfg, 0, sizeof(struct dpni_queue));

	options = options | DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (size_t)(dpaa2_q);

	/* check if a private cgr is available.
	 */
	for (i = 0; i < priv->max_cgs; i++) {
		if (!priv->cgid_in_use[i]) {
			priv->cgid_in_use[i] = 1;
			break;
		}
	}

	if (i < priv->max_cgs) {
		options |= DPNI_QUEUE_OPT_SET_CGID;
		cfg.cgid = i;
		dpaa2_q->cgid = cfg.cgid;
	} else {
		dpaa2_q->cgid = 0xff;
	}

	/*if ls2088 or rev2 device, enable the stashing */

	if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) {
		options |= DPNI_QUEUE_OPT_FLC;
		cfg.flc.stash_control = true;
		cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
		/* 00 00 00 - last 6 bit represent annotation, context stashing,
		 * data stashing setting 01 01 00 (0x14)
		 * (in following order ->DS AS CS)
		 * to enable 1 line data, 1 line annotation.
		 * For LX2, this setting should be 01 00 00 (0x10)
		 */
		if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A)
			cfg.flc.value |= 0x10;
		else
			cfg.flc.value |= 0x14;
	}
	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
			     dpaa2_q->tc_index, flow_id, options, &cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret);
		return -1;
	}

	if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
		struct dpni_taildrop taildrop;

		taildrop.enable = 1;
		dpaa2_q->nb_desc = nb_rx_desc;
		/* Private CGR will use tail drop length as nb_rx_desc.
		 * for rest cases we can use standard byte based tail drop.
		 * There is no HW restriction, but number of CGRs are limited,
		 * hence this restriction is placed.
		 */
		if (dpaa2_q->cgid != 0xff) {
			/*enabling per rx queue congestion control */
			taildrop.threshold = nb_rx_desc;
			taildrop.units = DPNI_CONGESTION_UNIT_FRAMES;
			taildrop.oal = 0;
			DPAA2_PMD_DEBUG("Enabling CG Tail Drop on queue = %d",
					rx_queue_id);
			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
						DPNI_CP_CONGESTION_GROUP,
						DPNI_QUEUE_RX,
						dpaa2_q->tc_index,
						dpaa2_q->cgid, &taildrop);
		} else {
			/*enabling per rx queue congestion control */
			taildrop.threshold = CONG_THRESHOLD_RX_BYTES_Q;
			taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
			taildrop.oal = CONG_RX_OAL;
			DPAA2_PMD_DEBUG("Enabling Byte based Drop on queue= %d",
					rx_queue_id);
			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
						DPNI_CP_QUEUE, DPNI_QUEUE_RX,
						dpaa2_q->tc_index, flow_id,
						&taildrop);
		}
		if (ret) {
			DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
				      ret);
			return -1;
		}
	} else { /* Disable tail Drop */
		struct dpni_taildrop taildrop = {0};
		DPAA2_PMD_INFO("Tail drop is disabled on queue");

		taildrop.enable = 0;
		if (dpaa2_q->cgid != 0xff) {
			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
					DPNI_CP_CONGESTION_GROUP, DPNI_QUEUE_RX,
					dpaa2_q->tc_index,
					dpaa2_q->cgid, &taildrop);
		} else {
			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
					dpaa2_q->tc_index, flow_id, &taildrop);
		}
		if (ret) {
			DPAA2_PMD_ERR("Error in setting taildrop. "
err=(%d)", 864a10a988aSShreyansh Jain ret); 86523d6a87eSHemant Agrawal return -1; 86623d6a87eSHemant Agrawal } 86723d6a87eSHemant Agrawal } 86823d6a87eSHemant Agrawal 8693e5a335dSHemant Agrawal dev->data->rx_queues[rx_queue_id] = dpaa2_q; 8703e5a335dSHemant Agrawal return 0; 8713e5a335dSHemant Agrawal } 8723e5a335dSHemant Agrawal 8733e5a335dSHemant Agrawal static int 8743e5a335dSHemant Agrawal dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, 8753e5a335dSHemant Agrawal uint16_t tx_queue_id, 876b5869095SHemant Agrawal uint16_t nb_tx_desc, 8773e5a335dSHemant Agrawal unsigned int socket_id __rte_unused, 878988a7c38SHemant Agrawal const struct rte_eth_txconf *tx_conf) 8793e5a335dSHemant Agrawal { 8803e5a335dSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 8813e5a335dSHemant Agrawal struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *) 8823e5a335dSHemant Agrawal priv->tx_vq[tx_queue_id]; 8839ceacab7SPriyanka Jain struct dpaa2_queue *dpaa2_tx_conf_q = (struct dpaa2_queue *) 8849ceacab7SPriyanka Jain priv->tx_conf_vq[tx_queue_id]; 88581c42c84SShreyansh Jain struct fsl_mc_io *dpni = dev->process_private; 8863e5a335dSHemant Agrawal struct dpni_queue tx_conf_cfg; 8873e5a335dSHemant Agrawal struct dpni_queue tx_flow_cfg; 8883e5a335dSHemant Agrawal uint8_t options = 0, flow_id; 88972100f0dSGagandeep Singh uint16_t channel_id; 890e26bf82eSSachin Saxena struct dpni_queue_id qid; 8913e5a335dSHemant Agrawal uint32_t tc_id; 8923e5a335dSHemant Agrawal int ret; 8933e5a335dSHemant Agrawal 8943e5a335dSHemant Agrawal PMD_INIT_FUNC_TRACE(); 8953e5a335dSHemant Agrawal 896988a7c38SHemant Agrawal /* Tx deferred start is not supported */ 897988a7c38SHemant Agrawal if (tx_conf->tx_deferred_start) { 898988a7c38SHemant Agrawal DPAA2_PMD_ERR("%p:Tx deferred start not supported", 899988a7c38SHemant Agrawal (void *)dev); 900988a7c38SHemant Agrawal return -EINVAL; 901988a7c38SHemant Agrawal } 902988a7c38SHemant Agrawal 903de1d70f0SHemant Agrawal dpaa2_q->nb_desc = UINT16_MAX; 904de1d70f0SHemant Agrawal dpaa2_q->offloads = tx_conf->offloads; 905de1d70f0SHemant Agrawal 9063e5a335dSHemant Agrawal /* Return if queue already configured */ 907f9989673SAkhil Goyal if (dpaa2_q->flow_id != 0xffff) { 908f9989673SAkhil Goyal dev->data->tx_queues[tx_queue_id] = dpaa2_q; 9093e5a335dSHemant Agrawal return 0; 910f9989673SAkhil Goyal } 9113e5a335dSHemant Agrawal 9123e5a335dSHemant Agrawal memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue)); 9133e5a335dSHemant Agrawal memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue)); 9143e5a335dSHemant Agrawal 9153e5a335dSHemant Agrawal if (tx_queue_id == 0) { 9163e5a335dSHemant Agrawal /*Set tx-conf and error configuration*/ 9178d21c563SHemant Agrawal if (priv->flags & DPAA2_TX_CONF_ENABLE) 9189ceacab7SPriyanka Jain ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW, 9199ceacab7SPriyanka Jain priv->token, 9209ceacab7SPriyanka Jain DPNI_CONF_AFFINE); 9219ceacab7SPriyanka Jain else 9223e5a335dSHemant Agrawal ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW, 9233e5a335dSHemant Agrawal priv->token, 9243e5a335dSHemant Agrawal DPNI_CONF_DISABLE); 9253e5a335dSHemant Agrawal if (ret) { 926a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error in set tx conf mode settings: " 927a10a988aSShreyansh Jain "err=%d", ret); 9283e5a335dSHemant Agrawal return -1; 9293e5a335dSHemant Agrawal } 9303e5a335dSHemant Agrawal } 93172100f0dSGagandeep Singh 93272100f0dSGagandeep Singh tc_id = tx_queue_id % priv->num_tx_tc; 93372100f0dSGagandeep Singh channel_id = (uint8_t)(tx_queue_id / priv->num_tx_tc) % 
			priv->num_channels;
	flow_id = 0;

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
			((channel_id << 8) | tc_id), flow_id, options, &tx_flow_cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting the tx flow: "
			"tc_id=%d, flow=%d err=%d",
			tc_id, flow_id, ret);
		return -1;
	}

	dpaa2_q->flow_id = flow_id;

	dpaa2_q->tc_index = tc_id;

	ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
			DPNI_QUEUE_TX, ((channel_id << 8) | dpaa2_q->tc_index),
			dpaa2_q->flow_id, &tx_flow_cfg, &qid);
	if (ret) {
		DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
		return -1;
	}
	dpaa2_q->fqid = qid.fqid;

	if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
		struct dpni_congestion_notification_cfg cong_notif_cfg = {0};

		dpaa2_q->nb_desc = nb_tx_desc;

		cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
		cong_notif_cfg.threshold_entry = nb_tx_desc;
		/* Notify that the queue is not congested when the data in
		 * the queue falls below this threshold (90% of the entry
		 * threshold).
		 */
		cong_notif_cfg.threshold_exit = (nb_tx_desc * 9) / 10;
		cong_notif_cfg.message_ctx = 0;
		cong_notif_cfg.message_iova =
				(size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
		cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
		cong_notif_cfg.notification_mode =
					 DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
					 DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
					 DPNI_CONG_OPT_COHERENT_WRITE;
		cong_notif_cfg.cg_point = DPNI_CP_QUEUE;

		ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
						       priv->token,
						       DPNI_QUEUE_TX,
						       ((channel_id << 8) | tc_id),
						       &cong_notif_cfg);
		if (ret) {
			DPAA2_PMD_ERR(
			   "Error in setting tx congestion notification: "
			   "err=%d", ret);
			return -ret;
		}
	}
	dpaa2_q->cb_eqresp_free = dpaa2_dev_free_eqresp_buf;
	dev->data->tx_queues[tx_queue_id] = dpaa2_q;

	if (priv->flags & DPAA2_TX_CONF_ENABLE) {
		dpaa2_q->tx_conf_queue = dpaa2_tx_conf_q;
		options = options | DPNI_QUEUE_OPT_USER_CTX;
		tx_conf_cfg.user_context = (size_t)(dpaa2_q);
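		/* The Tx queue is stored as user context on the confirmation
		 * queue so that Tx confirmation processing can locate the
		 * originating queue from the dequeued context.
		 */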
		ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
			     DPNI_QUEUE_TX_CONFIRM, ((channel_id << 8) | dpaa2_tx_conf_q->tc_index),
			     dpaa2_tx_conf_q->flow_id, options, &tx_conf_cfg);
		if (ret) {
			DPAA2_PMD_ERR("Error in setting the tx conf flow: "
			      "tc_index=%d, flow=%d err=%d",
			      dpaa2_tx_conf_q->tc_index,
			      dpaa2_tx_conf_q->flow_id, ret);
			return -1;
		}

		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
			     DPNI_QUEUE_TX_CONFIRM, ((channel_id << 8) | dpaa2_tx_conf_q->tc_index),
			     dpaa2_tx_conf_q->flow_id, &tx_conf_cfg, &qid);
		if (ret) {
			DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
			return -1;
		}
		dpaa2_tx_conf_q->fqid = qid.fqid;
	}
	return 0;
}

static void
dpaa2_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct dpaa2_queue *dpaa2_q = dev->data->rx_queues[rx_queue_id];
	struct dpaa2_dev_priv *priv = dpaa2_q->eth_data->dev_private;
	struct fsl_mc_io *dpni =
		(struct fsl_mc_io *)priv->eth_dev->process_private;
	uint8_t options = 0;
	int ret;
	struct dpni_queue cfg;

	memset(&cfg, 0, sizeof(struct dpni_queue));
	PMD_INIT_FUNC_TRACE();

	total_nb_rx_desc -= dpaa2_q->nb_desc;

	if (dpaa2_q->cgid != 0xff) {
		options = DPNI_QUEUE_OPT_CLEAR_CGID;
		cfg.cgid = dpaa2_q->cgid;

		ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_RX,
				     dpaa2_q->tc_index, dpaa2_q->flow_id,
				     options, &cfg);
		if (ret)
			DPAA2_PMD_ERR("Unable to clear CGR from q=%u err=%d",
					dpaa2_q->fqid, ret);
		priv->cgid_in_use[dpaa2_q->cgid] = 0;
		dpaa2_q->cgid = 0xff;
	}
}

static uint32_t
dpaa2_dev_rx_queue_count(void *rx_queue)
{
	int32_t ret;
	struct dpaa2_queue *dpaa2_q;
	struct qbman_swp *swp;
	struct qbman_fq_query_np_rslt state;
	uint32_t frame_cnt = 0;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR(
				"Failed to allocate IO portal, tid: %d",
				rte_gettid());
			return -EINVAL;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	dpaa2_q = rx_queue;

	if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
		frame_cnt = qbman_fq_state_frame_count(&state);
		DPAA2_PMD_DP_DEBUG("RX frame count for q(%p) is %u",
				rx_queue, frame_cnt);
	}
	return frame_cnt;
}

static const uint32_t *
dpaa2_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
{
	static const uint32_t ptypes[] = {
		/* todo: add more types */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
	};

	if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx ||
		dev->rx_pkt_burst == dpaa2_dev_rx ||
		dev->rx_pkt_burst == dpaa2_dev_loopback_rx) {
		*no_of_elements = RTE_DIM(ptypes);
		return ptypes;
	}
	return NULL;
}

/**
 * Dpaa2 link Interrupt handler
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
1113c5acbb5eSHemant Agrawal * 1114c5acbb5eSHemant Agrawal * @return 1115c5acbb5eSHemant Agrawal * void 1116c5acbb5eSHemant Agrawal */ 1117c5acbb5eSHemant Agrawal static void 1118c5acbb5eSHemant Agrawal dpaa2_interrupt_handler(void *param) 1119c5acbb5eSHemant Agrawal { 1120c5acbb5eSHemant Agrawal struct rte_eth_dev *dev = param; 1121c5acbb5eSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 112281c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1123c5acbb5eSHemant Agrawal int ret; 1124c5acbb5eSHemant Agrawal int irq_index = DPNI_IRQ_INDEX; 1125c5acbb5eSHemant Agrawal unsigned int status = 0, clear = 0; 1126c5acbb5eSHemant Agrawal 1127c5acbb5eSHemant Agrawal PMD_INIT_FUNC_TRACE(); 1128c5acbb5eSHemant Agrawal 1129c5acbb5eSHemant Agrawal if (dpni == NULL) { 1130a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 1131c5acbb5eSHemant Agrawal return; 1132c5acbb5eSHemant Agrawal } 1133c5acbb5eSHemant Agrawal 1134c5acbb5eSHemant Agrawal ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token, 1135c5acbb5eSHemant Agrawal irq_index, &status); 1136c5acbb5eSHemant Agrawal if (unlikely(ret)) { 1137a10a988aSShreyansh Jain DPAA2_PMD_ERR("Can't get irq status (err %d)", ret); 1138c5acbb5eSHemant Agrawal clear = 0xffffffff; 1139c5acbb5eSHemant Agrawal goto out; 1140c5acbb5eSHemant Agrawal } 1141c5acbb5eSHemant Agrawal 1142c5acbb5eSHemant Agrawal if (status & DPNI_IRQ_EVENT_LINK_CHANGED) { 1143c5acbb5eSHemant Agrawal clear = DPNI_IRQ_EVENT_LINK_CHANGED; 1144c5acbb5eSHemant Agrawal dpaa2_dev_link_update(dev, 0); 1145c5acbb5eSHemant Agrawal /* calling all the apps registered for link status event */ 11465723fbedSFerruh Yigit rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); 1147c5acbb5eSHemant Agrawal } 1148c5acbb5eSHemant Agrawal out: 1149c5acbb5eSHemant Agrawal ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token, 1150c5acbb5eSHemant Agrawal irq_index, clear); 1151c5acbb5eSHemant Agrawal if (unlikely(ret)) 1152a10a988aSShreyansh Jain DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret); 1153c5acbb5eSHemant Agrawal } 1154c5acbb5eSHemant Agrawal 1155c5acbb5eSHemant Agrawal static int 1156c5acbb5eSHemant Agrawal dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable) 1157c5acbb5eSHemant Agrawal { 1158c5acbb5eSHemant Agrawal int err = 0; 1159c5acbb5eSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 116081c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1161c5acbb5eSHemant Agrawal int irq_index = DPNI_IRQ_INDEX; 1162c5acbb5eSHemant Agrawal unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED; 1163c5acbb5eSHemant Agrawal 1164c5acbb5eSHemant Agrawal PMD_INIT_FUNC_TRACE(); 1165c5acbb5eSHemant Agrawal 1166c5acbb5eSHemant Agrawal err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token, 1167c5acbb5eSHemant Agrawal irq_index, mask); 1168c5acbb5eSHemant Agrawal if (err < 0) { 1169a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err, 1170c5acbb5eSHemant Agrawal strerror(-err)); 1171c5acbb5eSHemant Agrawal return err; 1172c5acbb5eSHemant Agrawal } 1173c5acbb5eSHemant Agrawal 1174c5acbb5eSHemant Agrawal err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token, 1175c5acbb5eSHemant Agrawal irq_index, enable); 1176c5acbb5eSHemant Agrawal if (err < 0) 1177a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err, 1178c5acbb5eSHemant Agrawal strerror(-err)); 1179c5acbb5eSHemant Agrawal 1180c5acbb5eSHemant Agrawal return err; 
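/*
 * Editor's note: illustrative sketch only, not part of this driver. The LSC
 * interrupt wired up above is delivered to applications through
 * rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL) in
 * dpaa2_interrupt_handler(); a typical consumer enables
 * dev_conf.intr_conf.lsc and registers a callback, for example:
 *
 *   static int
 *   lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
 *                void *cb_arg __rte_unused, void *ret_param __rte_unused)
 *   {
 *       struct rte_eth_link link;
 *
 *       if (event == RTE_ETH_EVENT_INTR_LSC &&
 *           rte_eth_link_get_nowait(port_id, &link) == 0)
 *           printf("port %u link %s\n", port_id,
 *                  link.link_status ? "up" : "down");
 *       return 0;
 *   }
 *
 *   rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                                 lsc_event_cb, NULL);
 */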
1181c5acbb5eSHemant Agrawal } 1182c5acbb5eSHemant Agrawal 11833e5a335dSHemant Agrawal static int 11843e5a335dSHemant Agrawal dpaa2_dev_start(struct rte_eth_dev *dev) 11853e5a335dSHemant Agrawal { 1186c5acbb5eSHemant Agrawal struct rte_device *rdev = dev->device; 1187c5acbb5eSHemant Agrawal struct rte_dpaa2_device *dpaa2_dev; 11883e5a335dSHemant Agrawal struct rte_eth_dev_data *data = dev->data; 11893e5a335dSHemant Agrawal struct dpaa2_dev_priv *priv = data->dev_private; 119081c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 11913e5a335dSHemant Agrawal struct dpni_queue cfg; 1192ef18dafeSHemant Agrawal struct dpni_error_cfg err_cfg; 11933e5a335dSHemant Agrawal struct dpni_queue_id qid; 11943e5a335dSHemant Agrawal struct dpaa2_queue *dpaa2_q; 11953e5a335dSHemant Agrawal int ret, i; 1196c5acbb5eSHemant Agrawal struct rte_intr_handle *intr_handle; 1197c5acbb5eSHemant Agrawal 1198c5acbb5eSHemant Agrawal dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device); 1199d61138d4SHarman Kalra intr_handle = dpaa2_dev->intr_handle; 12003e5a335dSHemant Agrawal 12013e5a335dSHemant Agrawal PMD_INIT_FUNC_TRACE(); 12023e5a335dSHemant Agrawal ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token); 12033e5a335dSHemant Agrawal if (ret) { 1204a10a988aSShreyansh Jain DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d", 1205a10a988aSShreyansh Jain priv->hw_id, ret); 12063e5a335dSHemant Agrawal return ret; 12073e5a335dSHemant Agrawal } 12083e5a335dSHemant Agrawal 1209aa8c595aSHemant Agrawal /* Power up the phy. Needed to make the link go UP */ 1210a1f3a12cSHemant Agrawal dpaa2_dev_set_link_up(dev); 1211a1f3a12cSHemant Agrawal 12123e5a335dSHemant Agrawal for (i = 0; i < data->nb_rx_queues; i++) { 12133e5a335dSHemant Agrawal dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i]; 12143e5a335dSHemant Agrawal ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token, 12153e5a335dSHemant Agrawal DPNI_QUEUE_RX, dpaa2_q->tc_index, 12163e5a335dSHemant Agrawal dpaa2_q->flow_id, &cfg, &qid); 12173e5a335dSHemant Agrawal if (ret) { 1218a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error in getting flow information: " 1219a10a988aSShreyansh Jain "err=%d", ret); 12203e5a335dSHemant Agrawal return ret; 12213e5a335dSHemant Agrawal } 12223e5a335dSHemant Agrawal dpaa2_q->fqid = qid.fqid; 12233e5a335dSHemant Agrawal } 12243e5a335dSHemant Agrawal 12254690a611SNipun Gupta if (dpaa2_enable_err_queue) { 12264690a611SNipun Gupta ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token, 12274690a611SNipun Gupta DPNI_QUEUE_RX_ERR, 0, 0, &cfg, &qid); 12284690a611SNipun Gupta if (ret) { 12294690a611SNipun Gupta DPAA2_PMD_ERR("Error getting rx err flow information: err=%d", 12304690a611SNipun Gupta ret); 12314690a611SNipun Gupta return ret; 12324690a611SNipun Gupta } 12334690a611SNipun Gupta dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq; 12344690a611SNipun Gupta dpaa2_q->fqid = qid.fqid; 12354690a611SNipun Gupta dpaa2_q->eth_data = dev->data; 12364690a611SNipun Gupta 12374690a611SNipun Gupta err_cfg.errors = DPNI_ERROR_DISC; 12384690a611SNipun Gupta err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE; 12394690a611SNipun Gupta } else { 12404690a611SNipun Gupta /* checksum errors, send them to normal path 12414690a611SNipun Gupta * and set it in annotation 12424690a611SNipun Gupta */ 1243ef18dafeSHemant Agrawal err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE; 12444690a611SNipun Gupta 12454690a611SNipun Gupta /* if packet with parse error are not to be dropped */ 124634356a5dSShreyansh Jain err_cfg.errors 
|= DPNI_ERROR_PHE; 1247ef18dafeSHemant Agrawal 1248ef18dafeSHemant Agrawal err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE; 12494690a611SNipun Gupta } 1250ef18dafeSHemant Agrawal err_cfg.set_frame_annotation = true; 1251ef18dafeSHemant Agrawal 1252ef18dafeSHemant Agrawal ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW, 1253ef18dafeSHemant Agrawal priv->token, &err_cfg); 1254ef18dafeSHemant Agrawal if (ret) { 1255a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error to dpni_set_errors_behavior: code = %d", 1256a10a988aSShreyansh Jain ret); 1257ef18dafeSHemant Agrawal return ret; 1258ef18dafeSHemant Agrawal } 1259ef18dafeSHemant Agrawal 1260c5acbb5eSHemant Agrawal /* if the interrupts were configured on this devices*/ 1261d61138d4SHarman Kalra if (intr_handle && rte_intr_fd_get(intr_handle) && 1262d61138d4SHarman Kalra dev->data->dev_conf.intr_conf.lsc != 0) { 1263c5acbb5eSHemant Agrawal /* Registering LSC interrupt handler */ 1264c5acbb5eSHemant Agrawal rte_intr_callback_register(intr_handle, 1265c5acbb5eSHemant Agrawal dpaa2_interrupt_handler, 1266c5acbb5eSHemant Agrawal (void *)dev); 1267c5acbb5eSHemant Agrawal 1268c5acbb5eSHemant Agrawal /* enable vfio intr/eventfd mapping 1269c5acbb5eSHemant Agrawal * Interrupt index 0 is required, so we can not use 1270c5acbb5eSHemant Agrawal * rte_intr_enable. 1271c5acbb5eSHemant Agrawal */ 1272c5acbb5eSHemant Agrawal rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX); 1273c5acbb5eSHemant Agrawal 1274c5acbb5eSHemant Agrawal /* enable dpni_irqs */ 1275c5acbb5eSHemant Agrawal dpaa2_eth_setup_irqs(dev, 1); 1276c5acbb5eSHemant Agrawal } 1277c5acbb5eSHemant Agrawal 127816c4a3c4SNipun Gupta /* Change the tx burst function if ordered queues are used */ 127916c4a3c4SNipun Gupta if (priv->en_ordered) 128016c4a3c4SNipun Gupta dev->tx_pkt_burst = dpaa2_dev_tx_ordered; 128116c4a3c4SNipun Gupta 1282f4909c42SJie Hai for (i = 0; i < dev->data->nb_rx_queues; i++) 1283f4909c42SJie Hai dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; 1284f4909c42SJie Hai for (i = 0; i < dev->data->nb_tx_queues; i++) 1285f4909c42SJie Hai dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; 1286f4909c42SJie Hai 12873e5a335dSHemant Agrawal return 0; 12883e5a335dSHemant Agrawal } 12893e5a335dSHemant Agrawal 12903e5a335dSHemant Agrawal /** 12913e5a335dSHemant Agrawal * This routine disables all traffic on the adapter by issuing a 12923e5a335dSHemant Agrawal * global reset on the MAC. 
12933e5a335dSHemant Agrawal */ 129462024eb8SIvan Ilchenko static int 12953e5a335dSHemant Agrawal dpaa2_dev_stop(struct rte_eth_dev *dev) 12963e5a335dSHemant Agrawal { 12973e5a335dSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 129881c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 12993e5a335dSHemant Agrawal int ret; 1300c56c86ffSHemant Agrawal struct rte_eth_link link; 1301d192fd32SVanshika Shukla struct rte_device *rdev = dev->device; 1302d192fd32SVanshika Shukla struct rte_intr_handle *intr_handle; 1303d192fd32SVanshika Shukla struct rte_dpaa2_device *dpaa2_dev; 1304f4909c42SJie Hai uint16_t i; 1305d192fd32SVanshika Shukla 1306d192fd32SVanshika Shukla dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device); 1307d192fd32SVanshika Shukla intr_handle = dpaa2_dev->intr_handle; 13083e5a335dSHemant Agrawal 13093e5a335dSHemant Agrawal PMD_INIT_FUNC_TRACE(); 13103e5a335dSHemant Agrawal 1311c5acbb5eSHemant Agrawal /* reset interrupt callback */ 1312d61138d4SHarman Kalra if (intr_handle && rte_intr_fd_get(intr_handle) && 1313d61138d4SHarman Kalra dev->data->dev_conf.intr_conf.lsc != 0) { 1314c5acbb5eSHemant Agrawal /*disable dpni irqs */ 1315c5acbb5eSHemant Agrawal dpaa2_eth_setup_irqs(dev, 0); 1316c5acbb5eSHemant Agrawal 1317c5acbb5eSHemant Agrawal /* disable vfio intr before callback unregister */ 1318c5acbb5eSHemant Agrawal rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX); 1319c5acbb5eSHemant Agrawal 1320c5acbb5eSHemant Agrawal /* Unregistering LSC interrupt handler */ 1321c5acbb5eSHemant Agrawal rte_intr_callback_unregister(intr_handle, 1322c5acbb5eSHemant Agrawal dpaa2_interrupt_handler, 1323c5acbb5eSHemant Agrawal (void *)dev); 1324c5acbb5eSHemant Agrawal } 1325c5acbb5eSHemant Agrawal 1326a1f3a12cSHemant Agrawal dpaa2_dev_set_link_down(dev); 1327a1f3a12cSHemant Agrawal 13283e5a335dSHemant Agrawal ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token); 13293e5a335dSHemant Agrawal if (ret) { 1330a10a988aSShreyansh Jain DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev", 13313e5a335dSHemant Agrawal ret, priv->hw_id); 133262024eb8SIvan Ilchenko return ret; 13333e5a335dSHemant Agrawal } 1334c56c86ffSHemant Agrawal 1335c56c86ffSHemant Agrawal /* clear the recorded link status */ 1336c56c86ffSHemant Agrawal memset(&link, 0, sizeof(link)); 13377e2eb5f0SStephen Hemminger rte_eth_linkstatus_set(dev, &link); 133862024eb8SIvan Ilchenko 1339f4909c42SJie Hai for (i = 0; i < dev->data->nb_rx_queues; i++) 1340f4909c42SJie Hai dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; 1341f4909c42SJie Hai for (i = 0; i < dev->data->nb_tx_queues; i++) 1342f4909c42SJie Hai dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; 1343f4909c42SJie Hai 134462024eb8SIvan Ilchenko return 0; 13453e5a335dSHemant Agrawal } 13463e5a335dSHemant Agrawal 1347b142387bSThomas Monjalon static int 13483e5a335dSHemant Agrawal dpaa2_dev_close(struct rte_eth_dev *dev) 13493e5a335dSHemant Agrawal { 13503e5a335dSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 135181c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 13525964d36aSSachin Saxena int i, ret; 1353a1f3a12cSHemant Agrawal struct rte_eth_link link; 13543e5a335dSHemant Agrawal 13553e5a335dSHemant Agrawal PMD_INIT_FUNC_TRACE(); 13563e5a335dSHemant Agrawal 13575964d36aSSachin Saxena if (rte_eal_process_type() != RTE_PROC_PRIMARY) 13585964d36aSSachin Saxena return 0; 13596a556bd6SHemant Agrawal 13605964d36aSSachin Saxena if (!dpni) { 
13615964d36aSSachin Saxena DPAA2_PMD_WARN("Already closed or not started"); 13625964d36aSSachin Saxena return -1; 13635964d36aSSachin Saxena } 13645964d36aSSachin Saxena 1365ac624068SGagandeep Singh dpaa2_tm_deinit(dev); 13665964d36aSSachin Saxena dpaa2_flow_clean(dev); 13673e5a335dSHemant Agrawal /* Clean the device first */ 13683e5a335dSHemant Agrawal ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token); 13693e5a335dSHemant Agrawal if (ret) { 1370a10a988aSShreyansh Jain DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret); 1371b142387bSThomas Monjalon return -1; 13723e5a335dSHemant Agrawal } 1373a1f3a12cSHemant Agrawal 1374a1f3a12cSHemant Agrawal memset(&link, 0, sizeof(link)); 13757e2eb5f0SStephen Hemminger rte_eth_linkstatus_set(dev, &link); 1376b142387bSThomas Monjalon 13775964d36aSSachin Saxena /* Free private queues memory */ 13785964d36aSSachin Saxena dpaa2_free_rx_tx_queues(dev); 13795964d36aSSachin Saxena /* Close the device at underlying layer*/ 13805964d36aSSachin Saxena ret = dpni_close(dpni, CMD_PRI_LOW, priv->token); 13815964d36aSSachin Saxena if (ret) { 13825964d36aSSachin Saxena DPAA2_PMD_ERR("Failure closing dpni device with err code %d", 13835964d36aSSachin Saxena ret); 13845964d36aSSachin Saxena } 13855964d36aSSachin Saxena 13865964d36aSSachin Saxena /* Free the allocated memory for ethernet private data and dpni*/ 13875964d36aSSachin Saxena priv->hw = NULL; 13885964d36aSSachin Saxena dev->process_private = NULL; 13895964d36aSSachin Saxena rte_free(dpni); 13905964d36aSSachin Saxena 13915964d36aSSachin Saxena for (i = 0; i < MAX_TCS; i++) 13925964d36aSSachin Saxena rte_free((void *)(size_t)priv->extract.tc_extract_param[i]); 13935964d36aSSachin Saxena 13945964d36aSSachin Saxena if (priv->extract.qos_extract_param) 13955964d36aSSachin Saxena rte_free((void *)(size_t)priv->extract.qos_extract_param); 13965964d36aSSachin Saxena 13975964d36aSSachin Saxena DPAA2_PMD_INFO("%s: netdev deleted", dev->data->name); 1398b142387bSThomas Monjalon return 0; 13993e5a335dSHemant Agrawal } 14003e5a335dSHemant Agrawal 14019039c812SAndrew Rybchenko static int 1402c0e5c69aSHemant Agrawal dpaa2_dev_promiscuous_enable( 1403c0e5c69aSHemant Agrawal struct rte_eth_dev *dev) 1404c0e5c69aSHemant Agrawal { 1405c0e5c69aSHemant Agrawal int ret; 1406c0e5c69aSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 140781c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1408c0e5c69aSHemant Agrawal 1409c0e5c69aSHemant Agrawal PMD_INIT_FUNC_TRACE(); 1410c0e5c69aSHemant Agrawal 1411c0e5c69aSHemant Agrawal if (dpni == NULL) { 1412a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 14139039c812SAndrew Rybchenko return -ENODEV; 1414c0e5c69aSHemant Agrawal } 1415c0e5c69aSHemant Agrawal 1416c0e5c69aSHemant Agrawal ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); 1417c0e5c69aSHemant Agrawal if (ret < 0) 1418a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret); 14195d5aeeedSHemant Agrawal 14205d5aeeedSHemant Agrawal ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); 14215d5aeeedSHemant Agrawal if (ret < 0) 1422a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret); 14239039c812SAndrew Rybchenko 14249039c812SAndrew Rybchenko return ret; 1425c0e5c69aSHemant Agrawal } 1426c0e5c69aSHemant Agrawal 14279039c812SAndrew Rybchenko static int 1428c0e5c69aSHemant Agrawal dpaa2_dev_promiscuous_disable( 1429c0e5c69aSHemant Agrawal struct rte_eth_dev *dev) 1430c0e5c69aSHemant 
Agrawal { 1431c0e5c69aSHemant Agrawal int ret; 1432c0e5c69aSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 143381c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1434c0e5c69aSHemant Agrawal 1435c0e5c69aSHemant Agrawal PMD_INIT_FUNC_TRACE(); 1436c0e5c69aSHemant Agrawal 1437c0e5c69aSHemant Agrawal if (dpni == NULL) { 1438a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 14399039c812SAndrew Rybchenko return -ENODEV; 1440c0e5c69aSHemant Agrawal } 1441c0e5c69aSHemant Agrawal 1442c0e5c69aSHemant Agrawal ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); 1443c0e5c69aSHemant Agrawal if (ret < 0) 1444a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret); 14455d5aeeedSHemant Agrawal 14465d5aeeedSHemant Agrawal if (dev->data->all_multicast == 0) { 14475d5aeeedSHemant Agrawal ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, 14485d5aeeedSHemant Agrawal priv->token, false); 14495d5aeeedSHemant Agrawal if (ret < 0) 1450a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to disable M promisc mode %d", 14515d5aeeedSHemant Agrawal ret); 14525d5aeeedSHemant Agrawal } 14539039c812SAndrew Rybchenko 14549039c812SAndrew Rybchenko return ret; 14555d5aeeedSHemant Agrawal } 14565d5aeeedSHemant Agrawal 1457ca041cd4SIvan Ilchenko static int 14585d5aeeedSHemant Agrawal dpaa2_dev_allmulticast_enable( 14595d5aeeedSHemant Agrawal struct rte_eth_dev *dev) 14605d5aeeedSHemant Agrawal { 14615d5aeeedSHemant Agrawal int ret; 14625d5aeeedSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 146381c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 14645d5aeeedSHemant Agrawal 14655d5aeeedSHemant Agrawal PMD_INIT_FUNC_TRACE(); 14665d5aeeedSHemant Agrawal 14675d5aeeedSHemant Agrawal if (dpni == NULL) { 1468a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 1469ca041cd4SIvan Ilchenko return -ENODEV; 14705d5aeeedSHemant Agrawal } 14715d5aeeedSHemant Agrawal 14725d5aeeedSHemant Agrawal ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); 14735d5aeeedSHemant Agrawal if (ret < 0) 1474a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret); 1475ca041cd4SIvan Ilchenko 1476ca041cd4SIvan Ilchenko return ret; 14775d5aeeedSHemant Agrawal } 14785d5aeeedSHemant Agrawal 1479ca041cd4SIvan Ilchenko static int 14805d5aeeedSHemant Agrawal dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev) 14815d5aeeedSHemant Agrawal { 14825d5aeeedSHemant Agrawal int ret; 14835d5aeeedSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 148481c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 14855d5aeeedSHemant Agrawal 14865d5aeeedSHemant Agrawal PMD_INIT_FUNC_TRACE(); 14875d5aeeedSHemant Agrawal 14885d5aeeedSHemant Agrawal if (dpni == NULL) { 1489a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 1490ca041cd4SIvan Ilchenko return -ENODEV; 14915d5aeeedSHemant Agrawal } 14925d5aeeedSHemant Agrawal 14935d5aeeedSHemant Agrawal /* must remain on for all promiscuous */ 14945d5aeeedSHemant Agrawal if (dev->data->promiscuous == 1) 1495ca041cd4SIvan Ilchenko return 0; 14965d5aeeedSHemant Agrawal 14975d5aeeedSHemant Agrawal ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); 14985d5aeeedSHemant Agrawal if (ret < 0) 1499a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret); 1500ca041cd4SIvan Ilchenko 1501ca041cd4SIvan Ilchenko return ret; 
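/*
 * Editor's note: hedged usage sketch, not driver code. Because unicast
 * promiscuous mode on the DPNI also requires multicast promiscuity, the
 * all-multicast disable above is intentionally a no-op while promiscuous
 * mode is on (see the "must remain on for all promiscuous" check):
 *
 *   rte_eth_promiscuous_enable(port_id);    // also enables mcast promisc
 *   rte_eth_allmulticast_disable(port_id);  // returns 0, mcast promisc kept
 *   rte_eth_promiscuous_disable(port_id);   // mcast promisc dropped too,
 *                                           // since all_multicast is 0
 */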
1502c0e5c69aSHemant Agrawal } 1503e31d4d21SHemant Agrawal 1504e31d4d21SHemant Agrawal static int 1505e31d4d21SHemant Agrawal dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 1506e31d4d21SHemant Agrawal { 1507e31d4d21SHemant Agrawal int ret; 1508e31d4d21SHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 150981c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 151035b2d13fSOlivier Matz uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN 151144ea7355SAshish Jain + VLAN_TAG_SIZE; 1512e31d4d21SHemant Agrawal 1513e31d4d21SHemant Agrawal PMD_INIT_FUNC_TRACE(); 1514e31d4d21SHemant Agrawal 1515e31d4d21SHemant Agrawal if (dpni == NULL) { 1516a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 1517e31d4d21SHemant Agrawal return -EINVAL; 1518e31d4d21SHemant Agrawal } 1519e31d4d21SHemant Agrawal 1520e31d4d21SHemant Agrawal /* Set the Max Rx frame length as 'mtu' + 1521e31d4d21SHemant Agrawal * Maximum Ethernet header length 1522e31d4d21SHemant Agrawal */ 1523e31d4d21SHemant Agrawal ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token, 15246f8be0fbSHemant Agrawal frame_size - RTE_ETHER_CRC_LEN); 1525e31d4d21SHemant Agrawal if (ret) { 1526a10a988aSShreyansh Jain DPAA2_PMD_ERR("Setting the max frame length failed"); 1527e31d4d21SHemant Agrawal return -1; 1528e31d4d21SHemant Agrawal } 1529a10a988aSShreyansh Jain DPAA2_PMD_INFO("MTU configured for the device: %d", mtu); 1530e31d4d21SHemant Agrawal return 0; 1531e31d4d21SHemant Agrawal } 1532e31d4d21SHemant Agrawal 1533b4d97b7dSHemant Agrawal static int 1534b4d97b7dSHemant Agrawal dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev, 15356d13ea8eSOlivier Matz struct rte_ether_addr *addr, 1536b4d97b7dSHemant Agrawal __rte_unused uint32_t index, 1537b4d97b7dSHemant Agrawal __rte_unused uint32_t pool) 1538b4d97b7dSHemant Agrawal { 1539b4d97b7dSHemant Agrawal int ret; 1540b4d97b7dSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 154181c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1542b4d97b7dSHemant Agrawal 1543b4d97b7dSHemant Agrawal PMD_INIT_FUNC_TRACE(); 1544b4d97b7dSHemant Agrawal 1545b4d97b7dSHemant Agrawal if (dpni == NULL) { 1546a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 1547b4d97b7dSHemant Agrawal return -1; 1548b4d97b7dSHemant Agrawal } 1549b4d97b7dSHemant Agrawal 155096f7bfe8SSachin Saxena ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, priv->token, 155196f7bfe8SSachin Saxena addr->addr_bytes, 0, 0, 0); 1552b4d97b7dSHemant Agrawal if (ret) 1553a10a988aSShreyansh Jain DPAA2_PMD_ERR( 1554a10a988aSShreyansh Jain "error: Adding the MAC ADDR failed: err = %d", ret); 1555b4d97b7dSHemant Agrawal return 0; 1556b4d97b7dSHemant Agrawal } 1557b4d97b7dSHemant Agrawal 1558b4d97b7dSHemant Agrawal static void 1559b4d97b7dSHemant Agrawal dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev, 1560b4d97b7dSHemant Agrawal uint32_t index) 1561b4d97b7dSHemant Agrawal { 1562b4d97b7dSHemant Agrawal int ret; 1563b4d97b7dSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 156481c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1565b4d97b7dSHemant Agrawal struct rte_eth_dev_data *data = dev->data; 15666d13ea8eSOlivier Matz struct rte_ether_addr *macaddr; 1567b4d97b7dSHemant Agrawal 1568b4d97b7dSHemant Agrawal PMD_INIT_FUNC_TRACE(); 1569b4d97b7dSHemant Agrawal 1570b4d97b7dSHemant Agrawal macaddr = &data->mac_addrs[index]; 1571b4d97b7dSHemant Agrawal 1572b4d97b7dSHemant 
Agrawal if (dpni == NULL) { 1573a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 1574b4d97b7dSHemant Agrawal return; 1575b4d97b7dSHemant Agrawal } 1576b4d97b7dSHemant Agrawal 1577b4d97b7dSHemant Agrawal ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW, 1578b4d97b7dSHemant Agrawal priv->token, macaddr->addr_bytes); 1579b4d97b7dSHemant Agrawal if (ret) 1580a10a988aSShreyansh Jain DPAA2_PMD_ERR( 1581a10a988aSShreyansh Jain "error: Removing the MAC ADDR failed: err = %d", ret); 1582b4d97b7dSHemant Agrawal } 1583b4d97b7dSHemant Agrawal 1584caccf8b3SOlivier Matz static int 1585b4d97b7dSHemant Agrawal dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev, 15866d13ea8eSOlivier Matz struct rte_ether_addr *addr) 1587b4d97b7dSHemant Agrawal { 1588b4d97b7dSHemant Agrawal int ret; 1589b4d97b7dSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 159081c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1591b4d97b7dSHemant Agrawal 1592b4d97b7dSHemant Agrawal PMD_INIT_FUNC_TRACE(); 1593b4d97b7dSHemant Agrawal 1594b4d97b7dSHemant Agrawal if (dpni == NULL) { 1595a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 1596caccf8b3SOlivier Matz return -EINVAL; 1597b4d97b7dSHemant Agrawal } 1598b4d97b7dSHemant Agrawal 1599b4d97b7dSHemant Agrawal ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW, 1600b4d97b7dSHemant Agrawal priv->token, addr->addr_bytes); 1601b4d97b7dSHemant Agrawal 1602b4d97b7dSHemant Agrawal if (ret) 1603a10a988aSShreyansh Jain DPAA2_PMD_ERR( 1604a10a988aSShreyansh Jain "error: Setting the MAC ADDR failed %d", ret); 1605caccf8b3SOlivier Matz 1606caccf8b3SOlivier Matz return ret; 1607b4d97b7dSHemant Agrawal } 1608a10a988aSShreyansh Jain 1609b0aa5459SHemant Agrawal static 1610d5b0924bSMatan Azrad int dpaa2_dev_stats_get(struct rte_eth_dev *dev, 1611b0aa5459SHemant Agrawal struct rte_eth_stats *stats) 1612b0aa5459SHemant Agrawal { 1613b0aa5459SHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 161481c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1615b0aa5459SHemant Agrawal int32_t retcode; 1616b0aa5459SHemant Agrawal uint8_t page0 = 0, page1 = 1, page2 = 2; 1617b0aa5459SHemant Agrawal union dpni_statistics value; 1618e43f2521SShreyansh Jain int i; 1619e43f2521SShreyansh Jain struct dpaa2_queue *dpaa2_rxq, *dpaa2_txq; 1620b0aa5459SHemant Agrawal 1621b0aa5459SHemant Agrawal memset(&value, 0, sizeof(union dpni_statistics)); 1622b0aa5459SHemant Agrawal 1623b0aa5459SHemant Agrawal PMD_INIT_FUNC_TRACE(); 1624b0aa5459SHemant Agrawal 1625b0aa5459SHemant Agrawal if (!dpni) { 1626a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 1627d5b0924bSMatan Azrad return -EINVAL; 1628b0aa5459SHemant Agrawal } 1629b0aa5459SHemant Agrawal 1630b0aa5459SHemant Agrawal if (!stats) { 1631a10a988aSShreyansh Jain DPAA2_PMD_ERR("stats is NULL"); 1632d5b0924bSMatan Azrad return -EINVAL; 1633b0aa5459SHemant Agrawal } 1634b0aa5459SHemant Agrawal 1635b0aa5459SHemant Agrawal /*Get Counters from page_0*/ 1636b0aa5459SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 163716bbc98aSShreyansh Jain page0, 0, &value); 1638b0aa5459SHemant Agrawal if (retcode) 1639b0aa5459SHemant Agrawal goto err; 1640b0aa5459SHemant Agrawal 1641b0aa5459SHemant Agrawal stats->ipackets = value.page_0.ingress_all_frames; 1642b0aa5459SHemant Agrawal stats->ibytes = value.page_0.ingress_all_bytes; 1643b0aa5459SHemant Agrawal 1644b0aa5459SHemant Agrawal /*Get Counters from page_1*/ 1645b0aa5459SHemant Agrawal retcode = 
dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 164616bbc98aSShreyansh Jain page1, 0, &value); 1647b0aa5459SHemant Agrawal if (retcode) 1648b0aa5459SHemant Agrawal goto err; 1649b0aa5459SHemant Agrawal 1650b0aa5459SHemant Agrawal stats->opackets = value.page_1.egress_all_frames; 1651b0aa5459SHemant Agrawal stats->obytes = value.page_1.egress_all_bytes; 1652b0aa5459SHemant Agrawal 1653b0aa5459SHemant Agrawal /*Get Counters from page_2*/ 1654b0aa5459SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 165516bbc98aSShreyansh Jain page2, 0, &value); 1656b0aa5459SHemant Agrawal if (retcode) 1657b0aa5459SHemant Agrawal goto err; 1658b0aa5459SHemant Agrawal 1659b4d97b7dSHemant Agrawal /* Ingress drop frame count due to configured rules */ 1660b4d97b7dSHemant Agrawal stats->ierrors = value.page_2.ingress_filtered_frames; 1661b4d97b7dSHemant Agrawal /* Ingress drop frame count due to error */ 1662b4d97b7dSHemant Agrawal stats->ierrors += value.page_2.ingress_discarded_frames; 1663b4d97b7dSHemant Agrawal 1664b0aa5459SHemant Agrawal stats->oerrors = value.page_2.egress_discarded_frames; 1665b0aa5459SHemant Agrawal stats->imissed = value.page_2.ingress_nobuffer_discards; 1666b0aa5459SHemant Agrawal 1667e43f2521SShreyansh Jain /* Fill in per queue stats */ 1668e43f2521SShreyansh Jain for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) && 1669e43f2521SShreyansh Jain (i < priv->nb_rx_queues || i < priv->nb_tx_queues); ++i) { 1670e43f2521SShreyansh Jain dpaa2_rxq = (struct dpaa2_queue *)priv->rx_vq[i]; 1671e43f2521SShreyansh Jain dpaa2_txq = (struct dpaa2_queue *)priv->tx_vq[i]; 1672e43f2521SShreyansh Jain if (dpaa2_rxq) 1673e43f2521SShreyansh Jain stats->q_ipackets[i] = dpaa2_rxq->rx_pkts; 1674e43f2521SShreyansh Jain if (dpaa2_txq) 1675e43f2521SShreyansh Jain stats->q_opackets[i] = dpaa2_txq->tx_pkts; 1676e43f2521SShreyansh Jain 1677e43f2521SShreyansh Jain /* Byte counting is not implemented */ 1678e43f2521SShreyansh Jain stats->q_ibytes[i] = 0; 1679e43f2521SShreyansh Jain stats->q_obytes[i] = 0; 1680e43f2521SShreyansh Jain } 1681e43f2521SShreyansh Jain 1682d5b0924bSMatan Azrad return 0; 1683b0aa5459SHemant Agrawal 1684b0aa5459SHemant Agrawal err: 1685a10a988aSShreyansh Jain DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode); 1686d5b0924bSMatan Azrad return retcode; 1687b0aa5459SHemant Agrawal }; 1688b0aa5459SHemant Agrawal 16891d6329b2SHemant Agrawal static int 16901d6329b2SHemant Agrawal dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 16911d6329b2SHemant Agrawal unsigned int n) 16921d6329b2SHemant Agrawal { 16931d6329b2SHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 169481c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 16951d6329b2SHemant Agrawal int32_t retcode; 1696c720c5f6SHemant Agrawal union dpni_statistics value[5] = {}; 16971d6329b2SHemant Agrawal unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings); 16981d6329b2SHemant Agrawal 16991d6329b2SHemant Agrawal if (n < num) 17001d6329b2SHemant Agrawal return num; 17011d6329b2SHemant Agrawal 1702876b2c90SHemant Agrawal if (xstats == NULL) 1703876b2c90SHemant Agrawal return 0; 1704876b2c90SHemant Agrawal 17051d6329b2SHemant Agrawal /* Get Counters from page_0*/ 17061d6329b2SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 17071d6329b2SHemant Agrawal 0, 0, &value[0]); 17081d6329b2SHemant Agrawal if (retcode) 17091d6329b2SHemant Agrawal goto err; 17101d6329b2SHemant Agrawal 17111d6329b2SHemant 
Agrawal /* Get Counters from page_1*/ 17121d6329b2SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 17131d6329b2SHemant Agrawal 1, 0, &value[1]); 17141d6329b2SHemant Agrawal if (retcode) 17151d6329b2SHemant Agrawal goto err; 17161d6329b2SHemant Agrawal 17171d6329b2SHemant Agrawal /* Get Counters from page_2*/ 17181d6329b2SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 17191d6329b2SHemant Agrawal 2, 0, &value[2]); 17201d6329b2SHemant Agrawal if (retcode) 17211d6329b2SHemant Agrawal goto err; 17221d6329b2SHemant Agrawal 1723c720c5f6SHemant Agrawal for (i = 0; i < priv->max_cgs; i++) { 1724c720c5f6SHemant Agrawal if (!priv->cgid_in_use[i]) { 1725c720c5f6SHemant Agrawal /* Get Counters from page_4*/ 1726c720c5f6SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, 1727c720c5f6SHemant Agrawal priv->token, 1728c720c5f6SHemant Agrawal 4, 0, &value[4]); 1729c720c5f6SHemant Agrawal if (retcode) 1730c720c5f6SHemant Agrawal goto err; 1731c720c5f6SHemant Agrawal break; 1732c720c5f6SHemant Agrawal } 1733c720c5f6SHemant Agrawal } 1734c720c5f6SHemant Agrawal 17351d6329b2SHemant Agrawal for (i = 0; i < num; i++) { 17361d6329b2SHemant Agrawal xstats[i].id = i; 17371d6329b2SHemant Agrawal xstats[i].value = value[dpaa2_xstats_strings[i].page_id]. 17381d6329b2SHemant Agrawal raw.counter[dpaa2_xstats_strings[i].stats_id]; 17391d6329b2SHemant Agrawal } 17401d6329b2SHemant Agrawal return i; 17411d6329b2SHemant Agrawal err: 1742a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode); 17431d6329b2SHemant Agrawal return retcode; 17441d6329b2SHemant Agrawal } 17451d6329b2SHemant Agrawal 17461d6329b2SHemant Agrawal static int 17471d6329b2SHemant Agrawal dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 17481d6329b2SHemant Agrawal struct rte_eth_xstat_name *xstats_names, 1749876b2c90SHemant Agrawal unsigned int limit) 17501d6329b2SHemant Agrawal { 17511d6329b2SHemant Agrawal unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); 17521d6329b2SHemant Agrawal 1753876b2c90SHemant Agrawal if (limit < stat_cnt) 1754876b2c90SHemant Agrawal return stat_cnt; 1755876b2c90SHemant Agrawal 17561d6329b2SHemant Agrawal if (xstats_names != NULL) 17571d6329b2SHemant Agrawal for (i = 0; i < stat_cnt; i++) 1758f9acaf84SBruce Richardson strlcpy(xstats_names[i].name, 1759f9acaf84SBruce Richardson dpaa2_xstats_strings[i].name, 1760f9acaf84SBruce Richardson sizeof(xstats_names[i].name)); 17611d6329b2SHemant Agrawal 17621d6329b2SHemant Agrawal return stat_cnt; 17631d6329b2SHemant Agrawal } 17641d6329b2SHemant Agrawal 17651d6329b2SHemant Agrawal static int 17661d6329b2SHemant Agrawal dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 17671d6329b2SHemant Agrawal uint64_t *values, unsigned int n) 17681d6329b2SHemant Agrawal { 17691d6329b2SHemant Agrawal unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); 17701d6329b2SHemant Agrawal uint64_t values_copy[stat_cnt]; 17711d6329b2SHemant Agrawal 17721d6329b2SHemant Agrawal if (!ids) { 17731d6329b2SHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 177481c42c84SShreyansh Jain struct fsl_mc_io *dpni = 177581c42c84SShreyansh Jain (struct fsl_mc_io *)dev->process_private; 17761d6329b2SHemant Agrawal int32_t retcode; 1777c720c5f6SHemant Agrawal union dpni_statistics value[5] = {}; 17781d6329b2SHemant Agrawal 17791d6329b2SHemant Agrawal if (n < stat_cnt) 17801d6329b2SHemant Agrawal return stat_cnt; 17811d6329b2SHemant Agrawal 17821d6329b2SHemant 
Agrawal if (!values) 17831d6329b2SHemant Agrawal return 0; 17841d6329b2SHemant Agrawal 17851d6329b2SHemant Agrawal /* Get Counters from page_0*/ 17861d6329b2SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 17871d6329b2SHemant Agrawal 0, 0, &value[0]); 17881d6329b2SHemant Agrawal if (retcode) 17891d6329b2SHemant Agrawal return 0; 17901d6329b2SHemant Agrawal 17911d6329b2SHemant Agrawal /* Get Counters from page_1*/ 17921d6329b2SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 17931d6329b2SHemant Agrawal 1, 0, &value[1]); 17941d6329b2SHemant Agrawal if (retcode) 17951d6329b2SHemant Agrawal return 0; 17961d6329b2SHemant Agrawal 17971d6329b2SHemant Agrawal /* Get Counters from page_2*/ 17981d6329b2SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 17991d6329b2SHemant Agrawal 2, 0, &value[2]); 18001d6329b2SHemant Agrawal if (retcode) 18011d6329b2SHemant Agrawal return 0; 18021d6329b2SHemant Agrawal 1803c720c5f6SHemant Agrawal /* Get Counters from page_4*/ 1804c720c5f6SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1805c720c5f6SHemant Agrawal 4, 0, &value[4]); 1806c720c5f6SHemant Agrawal if (retcode) 1807c720c5f6SHemant Agrawal return 0; 1808c720c5f6SHemant Agrawal 18091d6329b2SHemant Agrawal for (i = 0; i < stat_cnt; i++) { 18101d6329b2SHemant Agrawal values[i] = value[dpaa2_xstats_strings[i].page_id]. 18111d6329b2SHemant Agrawal raw.counter[dpaa2_xstats_strings[i].stats_id]; 18121d6329b2SHemant Agrawal } 18131d6329b2SHemant Agrawal return stat_cnt; 18141d6329b2SHemant Agrawal } 18151d6329b2SHemant Agrawal 18161d6329b2SHemant Agrawal dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt); 18171d6329b2SHemant Agrawal 18181d6329b2SHemant Agrawal for (i = 0; i < n; i++) { 18191d6329b2SHemant Agrawal if (ids[i] >= stat_cnt) { 1820a10a988aSShreyansh Jain DPAA2_PMD_ERR("xstats id value isn't valid"); 18211d6329b2SHemant Agrawal return -1; 18221d6329b2SHemant Agrawal } 18231d6329b2SHemant Agrawal values[i] = values_copy[ids[i]]; 18241d6329b2SHemant Agrawal } 18251d6329b2SHemant Agrawal return n; 18261d6329b2SHemant Agrawal } 18271d6329b2SHemant Agrawal 18281d6329b2SHemant Agrawal static int 18291d6329b2SHemant Agrawal dpaa2_xstats_get_names_by_id( 18301d6329b2SHemant Agrawal struct rte_eth_dev *dev, 18311d6329b2SHemant Agrawal const uint64_t *ids, 18328c9f976fSAndrew Rybchenko struct rte_eth_xstat_name *xstats_names, 18331d6329b2SHemant Agrawal unsigned int limit) 18341d6329b2SHemant Agrawal { 18351d6329b2SHemant Agrawal unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); 18361d6329b2SHemant Agrawal struct rte_eth_xstat_name xstats_names_copy[stat_cnt]; 18371d6329b2SHemant Agrawal 18381d6329b2SHemant Agrawal if (!ids) 18391d6329b2SHemant Agrawal return dpaa2_xstats_get_names(dev, xstats_names, limit); 18401d6329b2SHemant Agrawal 18411d6329b2SHemant Agrawal dpaa2_xstats_get_names(dev, xstats_names_copy, limit); 18421d6329b2SHemant Agrawal 18431d6329b2SHemant Agrawal for (i = 0; i < limit; i++) { 18441d6329b2SHemant Agrawal if (ids[i] >= stat_cnt) { 1845a10a988aSShreyansh Jain DPAA2_PMD_ERR("xstats id value isn't valid"); 18461d6329b2SHemant Agrawal return -1; 18471d6329b2SHemant Agrawal } 18481d6329b2SHemant Agrawal strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name); 18491d6329b2SHemant Agrawal } 18501d6329b2SHemant Agrawal return limit; 18511d6329b2SHemant Agrawal } 18521d6329b2SHemant Agrawal 18539970a9adSIgor Romanov static int 18541d6329b2SHemant Agrawal 
dpaa2_dev_stats_reset(struct rte_eth_dev *dev) 1855b0aa5459SHemant Agrawal { 1856b0aa5459SHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 185781c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 18589970a9adSIgor Romanov int retcode; 1859e43f2521SShreyansh Jain int i; 1860e43f2521SShreyansh Jain struct dpaa2_queue *dpaa2_q; 1861b0aa5459SHemant Agrawal 1862b0aa5459SHemant Agrawal PMD_INIT_FUNC_TRACE(); 1863b0aa5459SHemant Agrawal 1864b0aa5459SHemant Agrawal if (dpni == NULL) { 1865a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 18669970a9adSIgor Romanov return -EINVAL; 1867b0aa5459SHemant Agrawal } 1868b0aa5459SHemant Agrawal 1869b0aa5459SHemant Agrawal retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token); 1870b0aa5459SHemant Agrawal if (retcode) 1871b0aa5459SHemant Agrawal goto error; 1872b0aa5459SHemant Agrawal 1873e43f2521SShreyansh Jain /* Reset the per queue stats in dpaa2_queue structure */ 1874e43f2521SShreyansh Jain for (i = 0; i < priv->nb_rx_queues; i++) { 1875e43f2521SShreyansh Jain dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; 1876e43f2521SShreyansh Jain if (dpaa2_q) 1877e43f2521SShreyansh Jain dpaa2_q->rx_pkts = 0; 1878e43f2521SShreyansh Jain } 1879e43f2521SShreyansh Jain 1880e43f2521SShreyansh Jain for (i = 0; i < priv->nb_tx_queues; i++) { 1881e43f2521SShreyansh Jain dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; 1882e43f2521SShreyansh Jain if (dpaa2_q) 1883e43f2521SShreyansh Jain dpaa2_q->tx_pkts = 0; 1884e43f2521SShreyansh Jain } 1885e43f2521SShreyansh Jain 18869970a9adSIgor Romanov return 0; 1887b0aa5459SHemant Agrawal 1888b0aa5459SHemant Agrawal error: 1889a10a988aSShreyansh Jain DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode); 18909970a9adSIgor Romanov return retcode; 1891b0aa5459SHemant Agrawal }; 1892b0aa5459SHemant Agrawal 1893c56c86ffSHemant Agrawal /* return 0 means link status changed, -1 means not changed */ 1894c56c86ffSHemant Agrawal static int 1895c56c86ffSHemant Agrawal dpaa2_dev_link_update(struct rte_eth_dev *dev, 1896eadcfd95SRohit Raj int wait_to_complete) 1897c56c86ffSHemant Agrawal { 1898c56c86ffSHemant Agrawal int ret; 1899c56c86ffSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 190081c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 19017e2eb5f0SStephen Hemminger struct rte_eth_link link; 1902c56c86ffSHemant Agrawal struct dpni_link_state state = {0}; 1903eadcfd95SRohit Raj uint8_t count; 1904c56c86ffSHemant Agrawal 1905c56c86ffSHemant Agrawal if (dpni == NULL) { 1906a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 1907c56c86ffSHemant Agrawal return 0; 1908c56c86ffSHemant Agrawal } 1909c56c86ffSHemant Agrawal 1910eadcfd95SRohit Raj for (count = 0; count <= MAX_REPEAT_TIME; count++) { 1911eadcfd95SRohit Raj ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, 1912eadcfd95SRohit Raj &state); 1913c56c86ffSHemant Agrawal if (ret < 0) { 191444e87c27SShreyansh Jain DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret); 1915c56c86ffSHemant Agrawal return -1; 1916c56c86ffSHemant Agrawal } 1917295968d1SFerruh Yigit if (state.up == RTE_ETH_LINK_DOWN && 1918eadcfd95SRohit Raj wait_to_complete) 1919eadcfd95SRohit Raj rte_delay_ms(CHECK_INTERVAL); 1920eadcfd95SRohit Raj else 1921eadcfd95SRohit Raj break; 1922eadcfd95SRohit Raj } 1923c56c86ffSHemant Agrawal 1924c56c86ffSHemant Agrawal memset(&link, 0, sizeof(struct rte_eth_link)); 1925c56c86ffSHemant Agrawal link.link_status = state.up; 
1926c56c86ffSHemant Agrawal link.link_speed = state.rate; 1927c56c86ffSHemant Agrawal 1928c56c86ffSHemant Agrawal if (state.options & DPNI_LINK_OPT_HALF_DUPLEX) 1929295968d1SFerruh Yigit link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX; 1930c56c86ffSHemant Agrawal else 1931295968d1SFerruh Yigit link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 1932c56c86ffSHemant Agrawal 19337e2eb5f0SStephen Hemminger ret = rte_eth_linkstatus_set(dev, &link); 19347e2eb5f0SStephen Hemminger if (ret == -1) 1935a10a988aSShreyansh Jain DPAA2_PMD_DEBUG("No change in status"); 1936c56c86ffSHemant Agrawal else 1937*f665790aSDavid Marchand DPAA2_PMD_INFO("Port %d Link is %s", dev->data->port_id, 19387e2eb5f0SStephen Hemminger link.link_status ? "Up" : "Down"); 19397e2eb5f0SStephen Hemminger 19407e2eb5f0SStephen Hemminger return ret; 1941c56c86ffSHemant Agrawal } 1942c56c86ffSHemant Agrawal 1943a1f3a12cSHemant Agrawal /** 1944a1f3a12cSHemant Agrawal * Toggle the DPNI to enable, if not already enabled. 1945a1f3a12cSHemant Agrawal * This is not strictly PHY up/down - it is more of logical toggling. 1946a1f3a12cSHemant Agrawal */ 1947a1f3a12cSHemant Agrawal static int 1948a1f3a12cSHemant Agrawal dpaa2_dev_set_link_up(struct rte_eth_dev *dev) 1949a1f3a12cSHemant Agrawal { 1950a1f3a12cSHemant Agrawal int ret = -EINVAL; 1951a1f3a12cSHemant Agrawal struct dpaa2_dev_priv *priv; 1952a1f3a12cSHemant Agrawal struct fsl_mc_io *dpni; 1953a1f3a12cSHemant Agrawal int en = 0; 1954aa8c595aSHemant Agrawal struct dpni_link_state state = {0}; 1955a1f3a12cSHemant Agrawal 1956a1f3a12cSHemant Agrawal priv = dev->data->dev_private; 195781c42c84SShreyansh Jain dpni = (struct fsl_mc_io *)dev->process_private; 1958a1f3a12cSHemant Agrawal 1959a1f3a12cSHemant Agrawal if (dpni == NULL) { 1960a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 1961a1f3a12cSHemant Agrawal return ret; 1962a1f3a12cSHemant Agrawal } 1963a1f3a12cSHemant Agrawal 1964a1f3a12cSHemant Agrawal /* Check if DPNI is currently enabled */ 1965a1f3a12cSHemant Agrawal ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en); 1966a1f3a12cSHemant Agrawal if (ret) { 1967a1f3a12cSHemant Agrawal /* Unable to obtain dpni status; Not continuing */ 1968a10a988aSShreyansh Jain DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret); 1969a1f3a12cSHemant Agrawal return -EINVAL; 1970a1f3a12cSHemant Agrawal } 1971a1f3a12cSHemant Agrawal 1972a1f3a12cSHemant Agrawal /* Enable link if not already enabled */ 1973a1f3a12cSHemant Agrawal if (!en) { 1974a1f3a12cSHemant Agrawal ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token); 1975a1f3a12cSHemant Agrawal if (ret) { 1976a10a988aSShreyansh Jain DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret); 1977a1f3a12cSHemant Agrawal return -EINVAL; 1978a1f3a12cSHemant Agrawal } 1979a1f3a12cSHemant Agrawal } 1980aa8c595aSHemant Agrawal ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); 1981aa8c595aSHemant Agrawal if (ret < 0) { 198244e87c27SShreyansh Jain DPAA2_PMD_DEBUG("Unable to get link state (%d)", ret); 1983aa8c595aSHemant Agrawal return -1; 1984aa8c595aSHemant Agrawal } 1985aa8c595aSHemant Agrawal 1986a1f3a12cSHemant Agrawal /* changing tx burst function to start enqueues */ 1987a1f3a12cSHemant Agrawal dev->tx_pkt_burst = dpaa2_dev_tx; 1988aa8c595aSHemant Agrawal dev->data->dev_link.link_status = state.up; 19897e6ecac2SRohit Raj dev->data->dev_link.link_speed = state.rate; 1990a1f3a12cSHemant Agrawal 1991aa8c595aSHemant Agrawal if (state.up) 1992a10a988aSShreyansh Jain DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id); 1993aa8c595aSHemant 
Agrawal else 1994a10a988aSShreyansh Jain DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id); 1995a1f3a12cSHemant Agrawal return ret; 1996a1f3a12cSHemant Agrawal } 1997a1f3a12cSHemant Agrawal 1998a1f3a12cSHemant Agrawal /** 1999a1f3a12cSHemant Agrawal * Toggle the DPNI to disable, if not already disabled. 2000a1f3a12cSHemant Agrawal * This is not strictly PHY up/down - it is more of logical toggling. 2001a1f3a12cSHemant Agrawal */ 2002a1f3a12cSHemant Agrawal static int 2003a1f3a12cSHemant Agrawal dpaa2_dev_set_link_down(struct rte_eth_dev *dev) 2004a1f3a12cSHemant Agrawal { 2005a1f3a12cSHemant Agrawal int ret = -EINVAL; 2006a1f3a12cSHemant Agrawal struct dpaa2_dev_priv *priv; 2007a1f3a12cSHemant Agrawal struct fsl_mc_io *dpni; 2008a1f3a12cSHemant Agrawal int dpni_enabled = 0; 2009a1f3a12cSHemant Agrawal int retries = 10; 2010a1f3a12cSHemant Agrawal 2011a1f3a12cSHemant Agrawal PMD_INIT_FUNC_TRACE(); 2012a1f3a12cSHemant Agrawal 2013a1f3a12cSHemant Agrawal priv = dev->data->dev_private; 201481c42c84SShreyansh Jain dpni = (struct fsl_mc_io *)dev->process_private; 2015a1f3a12cSHemant Agrawal 2016a1f3a12cSHemant Agrawal if (dpni == NULL) { 2017a10a988aSShreyansh Jain DPAA2_PMD_ERR("Device has not yet been configured"); 2018a1f3a12cSHemant Agrawal return ret; 2019a1f3a12cSHemant Agrawal } 2020a1f3a12cSHemant Agrawal 2021a1f3a12cSHemant Agrawal /*changing tx burst function to avoid any more enqueues */ 2022a41f593fSFerruh Yigit dev->tx_pkt_burst = rte_eth_pkt_burst_dummy; 2023a1f3a12cSHemant Agrawal 2024a1f3a12cSHemant Agrawal /* Loop while dpni_disable() attempts to drain the egress FQs 2025a1f3a12cSHemant Agrawal * and confirm them back to us. 2026a1f3a12cSHemant Agrawal */ 2027a1f3a12cSHemant Agrawal do { 2028a1f3a12cSHemant Agrawal ret = dpni_disable(dpni, 0, priv->token); 2029a1f3a12cSHemant Agrawal if (ret) { 2030a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni disable failed (%d)", ret); 2031a1f3a12cSHemant Agrawal return ret; 2032a1f3a12cSHemant Agrawal } 2033a1f3a12cSHemant Agrawal ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled); 2034a1f3a12cSHemant Agrawal if (ret) { 2035a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni enable check failed (%d)", ret); 2036a1f3a12cSHemant Agrawal return ret; 2037a1f3a12cSHemant Agrawal } 2038a1f3a12cSHemant Agrawal if (dpni_enabled) 2039a1f3a12cSHemant Agrawal /* Allow the MC some slack */ 2040a1f3a12cSHemant Agrawal rte_delay_us(100 * 1000); 2041a1f3a12cSHemant Agrawal } while (dpni_enabled && --retries); 2042a1f3a12cSHemant Agrawal 2043a1f3a12cSHemant Agrawal if (!retries) { 2044a10a988aSShreyansh Jain DPAA2_PMD_WARN("Retry count exceeded disabling dpni"); 2045a1f3a12cSHemant Agrawal /* todo- we may have to manually cleanup queues. 
2046a1f3a12cSHemant Agrawal */ 2047a1f3a12cSHemant Agrawal } else { 2048a10a988aSShreyansh Jain DPAA2_PMD_INFO("Port %d Link DOWN successful", 2049a1f3a12cSHemant Agrawal dev->data->port_id); 2050a1f3a12cSHemant Agrawal } 2051a1f3a12cSHemant Agrawal 2052a1f3a12cSHemant Agrawal dev->data->dev_link.link_status = 0; 2053a1f3a12cSHemant Agrawal 2054a1f3a12cSHemant Agrawal return ret; 2055a1f3a12cSHemant Agrawal } 2056a1f3a12cSHemant Agrawal 2057977d0006SHemant Agrawal static int 2058977d0006SHemant Agrawal dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 2059977d0006SHemant Agrawal { 2060977d0006SHemant Agrawal int ret = -EINVAL; 2061977d0006SHemant Agrawal struct dpaa2_dev_priv *priv; 2062977d0006SHemant Agrawal struct fsl_mc_io *dpni; 2063977d0006SHemant Agrawal struct dpni_link_state state = {0}; 2064977d0006SHemant Agrawal 2065977d0006SHemant Agrawal PMD_INIT_FUNC_TRACE(); 2066977d0006SHemant Agrawal 2067977d0006SHemant Agrawal priv = dev->data->dev_private; 206881c42c84SShreyansh Jain dpni = (struct fsl_mc_io *)dev->process_private; 2069977d0006SHemant Agrawal 2070977d0006SHemant Agrawal if (dpni == NULL || fc_conf == NULL) { 2071a10a988aSShreyansh Jain DPAA2_PMD_ERR("device not configured"); 2072977d0006SHemant Agrawal return ret; 2073977d0006SHemant Agrawal } 2074977d0006SHemant Agrawal 2075977d0006SHemant Agrawal ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); 2076977d0006SHemant Agrawal if (ret) { 2077a10a988aSShreyansh Jain DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret); 2078977d0006SHemant Agrawal return ret; 2079977d0006SHemant Agrawal } 2080977d0006SHemant Agrawal 2081977d0006SHemant Agrawal memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf)); 2082977d0006SHemant Agrawal if (state.options & DPNI_LINK_OPT_PAUSE) { 2083977d0006SHemant Agrawal /* DPNI_LINK_OPT_PAUSE set 2084977d0006SHemant Agrawal * if ASYM_PAUSE not set, 2085977d0006SHemant Agrawal * RX Side flow control (handle received Pause frame) 2086977d0006SHemant Agrawal * TX side flow control (send Pause frame) 2087977d0006SHemant Agrawal * if ASYM_PAUSE set, 2088977d0006SHemant Agrawal * RX Side flow control (handle received Pause frame) 2089977d0006SHemant Agrawal * No TX side flow control (send Pause frame disabled) 2090977d0006SHemant Agrawal */ 2091977d0006SHemant Agrawal if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE)) 2092295968d1SFerruh Yigit fc_conf->mode = RTE_ETH_FC_FULL; 2093977d0006SHemant Agrawal else 2094295968d1SFerruh Yigit fc_conf->mode = RTE_ETH_FC_RX_PAUSE; 2095977d0006SHemant Agrawal } else { 2096977d0006SHemant Agrawal /* DPNI_LINK_OPT_PAUSE not set 2097977d0006SHemant Agrawal * if ASYM_PAUSE set, 2098977d0006SHemant Agrawal * TX side flow control (send Pause frame) 2099977d0006SHemant Agrawal * No RX side flow control (No action on pause frame rx) 2100977d0006SHemant Agrawal * if ASYM_PAUSE not set, 2101977d0006SHemant Agrawal * Flow control disabled 2102977d0006SHemant Agrawal */ 2103977d0006SHemant Agrawal if (state.options & DPNI_LINK_OPT_ASYM_PAUSE) 2104295968d1SFerruh Yigit fc_conf->mode = RTE_ETH_FC_TX_PAUSE; 2105977d0006SHemant Agrawal else 2106295968d1SFerruh Yigit fc_conf->mode = RTE_ETH_FC_NONE; 2107977d0006SHemant Agrawal } 2108977d0006SHemant Agrawal 2109977d0006SHemant Agrawal return ret; 2110977d0006SHemant Agrawal } 2111977d0006SHemant Agrawal 2112977d0006SHemant Agrawal static int 2113977d0006SHemant Agrawal dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 2114977d0006SHemant Agrawal { 2115977d0006SHemant 
Agrawal int ret = -EINVAL; 2116977d0006SHemant Agrawal struct dpaa2_dev_priv *priv; 2117977d0006SHemant Agrawal struct fsl_mc_io *dpni; 2118977d0006SHemant Agrawal struct dpni_link_state state = {0}; 2119977d0006SHemant Agrawal struct dpni_link_cfg cfg = {0}; 2120977d0006SHemant Agrawal 2121977d0006SHemant Agrawal PMD_INIT_FUNC_TRACE(); 2122977d0006SHemant Agrawal 2123977d0006SHemant Agrawal priv = dev->data->dev_private; 212481c42c84SShreyansh Jain dpni = (struct fsl_mc_io *)dev->process_private; 2125977d0006SHemant Agrawal 2126977d0006SHemant Agrawal if (dpni == NULL) { 2127a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL"); 2128977d0006SHemant Agrawal return ret; 2129977d0006SHemant Agrawal } 2130977d0006SHemant Agrawal 2131977d0006SHemant Agrawal /* It is necessary to obtain the current state before setting fc_conf 2132977d0006SHemant Agrawal * as MC would return error in case rate, autoneg or duplex values are 2133977d0006SHemant Agrawal * different. 2134977d0006SHemant Agrawal */ 2135977d0006SHemant Agrawal ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); 2136977d0006SHemant Agrawal if (ret) { 2137a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret); 2138977d0006SHemant Agrawal return -1; 2139977d0006SHemant Agrawal } 2140977d0006SHemant Agrawal 2141977d0006SHemant Agrawal /* Disable link before setting configuration */ 2142977d0006SHemant Agrawal dpaa2_dev_set_link_down(dev); 2143977d0006SHemant Agrawal 2144977d0006SHemant Agrawal /* Based on fc_conf, update cfg */ 2145977d0006SHemant Agrawal cfg.rate = state.rate; 2146977d0006SHemant Agrawal cfg.options = state.options; 2147977d0006SHemant Agrawal 2148977d0006SHemant Agrawal /* update cfg with fc_conf */ 2149977d0006SHemant Agrawal switch (fc_conf->mode) { 2150295968d1SFerruh Yigit case RTE_ETH_FC_FULL: 2151977d0006SHemant Agrawal /* Full flow control; 2152977d0006SHemant Agrawal * OPT_PAUSE set, ASYM_PAUSE not set 2153977d0006SHemant Agrawal */ 2154977d0006SHemant Agrawal cfg.options |= DPNI_LINK_OPT_PAUSE; 2155977d0006SHemant Agrawal cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; 2156f090a4c3SHemant Agrawal break; 2157295968d1SFerruh Yigit case RTE_ETH_FC_TX_PAUSE: 2158977d0006SHemant Agrawal /* Enable RX flow control 2159977d0006SHemant Agrawal * OPT_PAUSE not set; 2160977d0006SHemant Agrawal * ASYM_PAUSE set; 2161977d0006SHemant Agrawal */ 2162977d0006SHemant Agrawal cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE; 2163977d0006SHemant Agrawal cfg.options &= ~DPNI_LINK_OPT_PAUSE; 2164977d0006SHemant Agrawal break; 2165295968d1SFerruh Yigit case RTE_ETH_FC_RX_PAUSE: 2166977d0006SHemant Agrawal /* Enable TX Flow control 2167977d0006SHemant Agrawal * OPT_PAUSE set 2168977d0006SHemant Agrawal * ASYM_PAUSE set 2169977d0006SHemant Agrawal */ 2170977d0006SHemant Agrawal cfg.options |= DPNI_LINK_OPT_PAUSE; 2171977d0006SHemant Agrawal cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE; 2172977d0006SHemant Agrawal break; 2173295968d1SFerruh Yigit case RTE_ETH_FC_NONE: 2174977d0006SHemant Agrawal /* Disable Flow control 2175977d0006SHemant Agrawal * OPT_PAUSE not set 2176977d0006SHemant Agrawal * ASYM_PAUSE not set 2177977d0006SHemant Agrawal */ 2178977d0006SHemant Agrawal cfg.options &= ~DPNI_LINK_OPT_PAUSE; 2179977d0006SHemant Agrawal cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; 2180977d0006SHemant Agrawal break; 2181977d0006SHemant Agrawal default: 2182a10a988aSShreyansh Jain DPAA2_PMD_ERR("Incorrect Flow control flag (%d)", 2183977d0006SHemant Agrawal fc_conf->mode); 2184977d0006SHemant Agrawal return -1; 
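/*
 * Editor's note: summary of the mapping implemented by the switch above,
 * derived from this function and added for clarity:
 *
 *   RTE_ETH_FC_FULL      -> PAUSE=1, ASYM_PAUSE=0   (pause rx + tx)
 *   RTE_ETH_FC_TX_PAUSE  -> PAUSE=0, ASYM_PAUSE=1
 *   RTE_ETH_FC_RX_PAUSE  -> PAUSE=1, ASYM_PAUSE=1
 *   RTE_ETH_FC_NONE      -> PAUSE=0, ASYM_PAUSE=0   (flow control off)
 *
 * A hedged application-side sketch (not driver code): this op is normally
 * reached through the generic ethdev API, e.g.
 *
 *   struct rte_eth_fc_conf fc = { .mode = RTE_ETH_FC_FULL };
 *   int rc = rte_eth_dev_flow_ctrl_set(port_id, &fc);
 *   if (rc != 0)
 *       printf("flow ctrl set failed: %d\n", rc);
 */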
2185977d0006SHemant Agrawal } 2186977d0006SHemant Agrawal 2187977d0006SHemant Agrawal ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg); 2188977d0006SHemant Agrawal if (ret) 2189a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)", 2190977d0006SHemant Agrawal ret); 2191977d0006SHemant Agrawal 2192977d0006SHemant Agrawal /* Enable link */ 2193977d0006SHemant Agrawal dpaa2_dev_set_link_up(dev); 2194977d0006SHemant Agrawal 2195977d0006SHemant Agrawal return ret; 2196977d0006SHemant Agrawal } 2197977d0006SHemant Agrawal 219863d5c3b0SHemant Agrawal static int 219963d5c3b0SHemant Agrawal dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev, 220063d5c3b0SHemant Agrawal struct rte_eth_rss_conf *rss_conf) 220163d5c3b0SHemant Agrawal { 220263d5c3b0SHemant Agrawal struct rte_eth_dev_data *data = dev->data; 2203271f5aeeSJun Yang struct dpaa2_dev_priv *priv = data->dev_private; 220463d5c3b0SHemant Agrawal struct rte_eth_conf *eth_conf = &data->dev_conf; 2205271f5aeeSJun Yang int ret, tc_index; 220663d5c3b0SHemant Agrawal 220763d5c3b0SHemant Agrawal PMD_INIT_FUNC_TRACE(); 220863d5c3b0SHemant Agrawal 220963d5c3b0SHemant Agrawal if (rss_conf->rss_hf) { 2210271f5aeeSJun Yang for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) { 2211271f5aeeSJun Yang ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf, 2212271f5aeeSJun Yang tc_index); 221363d5c3b0SHemant Agrawal if (ret) { 2214271f5aeeSJun Yang DPAA2_PMD_ERR("Unable to set flow dist on tc%d", 2215271f5aeeSJun Yang tc_index); 221663d5c3b0SHemant Agrawal return ret; 221763d5c3b0SHemant Agrawal } 2218271f5aeeSJun Yang } 221963d5c3b0SHemant Agrawal } else { 2220271f5aeeSJun Yang for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) { 2221271f5aeeSJun Yang ret = dpaa2_remove_flow_dist(dev, tc_index); 222263d5c3b0SHemant Agrawal if (ret) { 2223271f5aeeSJun Yang DPAA2_PMD_ERR( 2224271f5aeeSJun Yang "Unable to remove flow dist on tc%d", 2225271f5aeeSJun Yang tc_index); 222663d5c3b0SHemant Agrawal return ret; 222763d5c3b0SHemant Agrawal } 222863d5c3b0SHemant Agrawal } 2229271f5aeeSJun Yang } 223063d5c3b0SHemant Agrawal eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf; 223163d5c3b0SHemant Agrawal return 0; 223263d5c3b0SHemant Agrawal } 223363d5c3b0SHemant Agrawal 223463d5c3b0SHemant Agrawal static int 223563d5c3b0SHemant Agrawal dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev, 223663d5c3b0SHemant Agrawal struct rte_eth_rss_conf *rss_conf) 223763d5c3b0SHemant Agrawal { 223863d5c3b0SHemant Agrawal struct rte_eth_dev_data *data = dev->data; 223963d5c3b0SHemant Agrawal struct rte_eth_conf *eth_conf = &data->dev_conf; 224063d5c3b0SHemant Agrawal 224163d5c3b0SHemant Agrawal /* dpaa2 does not support rss_key, so length should be 0*/ 224263d5c3b0SHemant Agrawal rss_conf->rss_key_len = 0; 224363d5c3b0SHemant Agrawal rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf; 224463d5c3b0SHemant Agrawal return 0; 224563d5c3b0SHemant Agrawal } 224663d5c3b0SHemant Agrawal 2247b677d4c6SNipun Gupta int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev, 2248b677d4c6SNipun Gupta int eth_rx_queue_id, 22493835cc22SNipun Gupta struct dpaa2_dpcon_dev *dpcon, 2250b677d4c6SNipun Gupta const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) 2251b677d4c6SNipun Gupta { 2252b677d4c6SNipun Gupta struct dpaa2_dev_priv *eth_priv = dev->data->dev_private; 225381c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 2254b677d4c6SNipun Gupta struct dpaa2_queue *dpaa2_ethq = 
eth_priv->rx_vq[eth_rx_queue_id]; 2255b677d4c6SNipun Gupta uint8_t flow_id = dpaa2_ethq->flow_id; 2256b677d4c6SNipun Gupta struct dpni_queue cfg; 22573835cc22SNipun Gupta uint8_t options, priority; 2258b677d4c6SNipun Gupta int ret; 2259b677d4c6SNipun Gupta 2260b677d4c6SNipun Gupta if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL) 2261b677d4c6SNipun Gupta dpaa2_ethq->cb = dpaa2_dev_process_parallel_event; 22622d378863SNipun Gupta else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) 22632d378863SNipun Gupta dpaa2_ethq->cb = dpaa2_dev_process_atomic_event; 226416c4a3c4SNipun Gupta else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED) 226516c4a3c4SNipun Gupta dpaa2_ethq->cb = dpaa2_dev_process_ordered_event; 2266b677d4c6SNipun Gupta else 2267b677d4c6SNipun Gupta return -EINVAL; 2268b677d4c6SNipun Gupta 22693835cc22SNipun Gupta priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / queue_conf->ev.priority) * 22703835cc22SNipun Gupta (dpcon->num_priorities - 1); 22713835cc22SNipun Gupta 2272b677d4c6SNipun Gupta memset(&cfg, 0, sizeof(struct dpni_queue)); 2273b677d4c6SNipun Gupta options = DPNI_QUEUE_OPT_DEST; 2274b677d4c6SNipun Gupta cfg.destination.type = DPNI_DEST_DPCON; 22753835cc22SNipun Gupta cfg.destination.id = dpcon->dpcon_id; 22763835cc22SNipun Gupta cfg.destination.priority = priority; 2277b677d4c6SNipun Gupta 22782d378863SNipun Gupta if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) { 22792d378863SNipun Gupta options |= DPNI_QUEUE_OPT_HOLD_ACTIVE; 22802d378863SNipun Gupta cfg.destination.hold_active = 1; 22812d378863SNipun Gupta } 22822d378863SNipun Gupta 228316c4a3c4SNipun Gupta if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED && 228416c4a3c4SNipun Gupta !eth_priv->en_ordered) { 228516c4a3c4SNipun Gupta struct opr_cfg ocfg; 228616c4a3c4SNipun Gupta 228716c4a3c4SNipun Gupta /* Restoration window size = 256 frames */ 228816c4a3c4SNipun Gupta ocfg.oprrws = 3; 228916c4a3c4SNipun Gupta /* Restoration window size = 512 frames for LX2 */ 229016c4a3c4SNipun Gupta if (dpaa2_svr_family == SVR_LX2160A) 229116c4a3c4SNipun Gupta ocfg.oprrws = 4; 229216c4a3c4SNipun Gupta /* Auto advance NESN window enabled */ 229316c4a3c4SNipun Gupta ocfg.oa = 1; 229416c4a3c4SNipun Gupta /* Late arrival window size disabled */ 229516c4a3c4SNipun Gupta ocfg.olws = 0; 22967be78d02SJosh Soref /* ORL resource exhaustion advance NESN disabled */ 229716c4a3c4SNipun Gupta ocfg.oeane = 0; 229816c4a3c4SNipun Gupta /* Loose ordering enabled */ 229916c4a3c4SNipun Gupta ocfg.oloe = 1; 230016c4a3c4SNipun Gupta eth_priv->en_loose_ordered = 1; 230116c4a3c4SNipun Gupta /* Strict ordering enabled if explicitly set */ 230216c4a3c4SNipun Gupta if (getenv("DPAA2_STRICT_ORDERING_ENABLE")) { 230316c4a3c4SNipun Gupta ocfg.oloe = 0; 230416c4a3c4SNipun Gupta eth_priv->en_loose_ordered = 0; 230516c4a3c4SNipun Gupta } 230616c4a3c4SNipun Gupta 230716c4a3c4SNipun Gupta ret = dpni_set_opr(dpni, CMD_PRI_LOW, eth_priv->token, 230816c4a3c4SNipun Gupta dpaa2_ethq->tc_index, flow_id, 23092cb2abf3SHemant Agrawal OPR_OPT_CREATE, &ocfg, 0); 231016c4a3c4SNipun Gupta if (ret) { 2311*f665790aSDavid Marchand DPAA2_PMD_ERR("Error setting opr: ret: %d", ret); 231216c4a3c4SNipun Gupta return ret; 231316c4a3c4SNipun Gupta } 231416c4a3c4SNipun Gupta 231516c4a3c4SNipun Gupta eth_priv->en_ordered = 1; 231616c4a3c4SNipun Gupta } 231716c4a3c4SNipun Gupta 2318b677d4c6SNipun Gupta options |= DPNI_QUEUE_OPT_USER_CTX; 23195ae1edffSHemant Agrawal cfg.user_context = (size_t)(dpaa2_ethq); 2320b677d4c6SNipun Gupta 2321b677d4c6SNipun Gupta ret = 
dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX, 2322b677d4c6SNipun Gupta dpaa2_ethq->tc_index, flow_id, options, &cfg); 2323b677d4c6SNipun Gupta if (ret) { 2324a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret); 2325b677d4c6SNipun Gupta return ret; 2326b677d4c6SNipun Gupta } 2327b677d4c6SNipun Gupta 2328b677d4c6SNipun Gupta memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event)); 2329b677d4c6SNipun Gupta 2330b677d4c6SNipun Gupta return 0; 2331b677d4c6SNipun Gupta } 2332b677d4c6SNipun Gupta 2333b677d4c6SNipun Gupta int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev, 2334b677d4c6SNipun Gupta int eth_rx_queue_id) 2335b677d4c6SNipun Gupta { 2336b677d4c6SNipun Gupta struct dpaa2_dev_priv *eth_priv = dev->data->dev_private; 233781c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 2338b677d4c6SNipun Gupta struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id]; 2339b677d4c6SNipun Gupta uint8_t flow_id = dpaa2_ethq->flow_id; 2340b677d4c6SNipun Gupta struct dpni_queue cfg; 2341b677d4c6SNipun Gupta uint8_t options; 2342b677d4c6SNipun Gupta int ret; 2343b677d4c6SNipun Gupta 2344b677d4c6SNipun Gupta memset(&cfg, 0, sizeof(struct dpni_queue)); 2345b677d4c6SNipun Gupta options = DPNI_QUEUE_OPT_DEST; 2346b677d4c6SNipun Gupta cfg.destination.type = DPNI_DEST_NONE; 2347b677d4c6SNipun Gupta 2348b677d4c6SNipun Gupta ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX, 2349b677d4c6SNipun Gupta dpaa2_ethq->tc_index, flow_id, options, &cfg); 2350b677d4c6SNipun Gupta if (ret) 2351a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret); 2352b677d4c6SNipun Gupta 2353b677d4c6SNipun Gupta return ret; 2354b677d4c6SNipun Gupta } 2355b677d4c6SNipun Gupta 2356fe2b986aSSunil Kumar Kori static int 2357fb7ad441SThomas Monjalon dpaa2_dev_flow_ops_get(struct rte_eth_dev *dev, 2358fb7ad441SThomas Monjalon const struct rte_flow_ops **ops) 2359fe2b986aSSunil Kumar Kori { 2360fe2b986aSSunil Kumar Kori if (!dev) 2361fe2b986aSSunil Kumar Kori return -ENODEV; 2362fe2b986aSSunil Kumar Kori 2363fb7ad441SThomas Monjalon *ops = &dpaa2_flow_ops; 2364fb7ad441SThomas Monjalon return 0; 2365fe2b986aSSunil Kumar Kori } 2366fe2b986aSSunil Kumar Kori 2367de1d70f0SHemant Agrawal static void 2368de1d70f0SHemant Agrawal dpaa2_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 2369de1d70f0SHemant Agrawal struct rte_eth_rxq_info *qinfo) 2370de1d70f0SHemant Agrawal { 2371de1d70f0SHemant Agrawal struct dpaa2_queue *rxq; 2372731fa400SHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private; 2373731fa400SHemant Agrawal struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 2374731fa400SHemant Agrawal uint16_t max_frame_length; 2375de1d70f0SHemant Agrawal 2376de1d70f0SHemant Agrawal rxq = (struct dpaa2_queue *)dev->data->rx_queues[queue_id]; 2377de1d70f0SHemant Agrawal 2378de1d70f0SHemant Agrawal qinfo->mp = rxq->mb_pool; 2379de1d70f0SHemant Agrawal qinfo->scattered_rx = dev->data->scattered_rx; 2380de1d70f0SHemant Agrawal qinfo->nb_desc = rxq->nb_desc; 2381731fa400SHemant Agrawal if (dpni_get_max_frame_length(dpni, CMD_PRI_LOW, priv->token, 2382731fa400SHemant Agrawal &max_frame_length) == 0) 2383731fa400SHemant Agrawal qinfo->rx_buf_size = max_frame_length; 2384de1d70f0SHemant Agrawal 2385de1d70f0SHemant Agrawal qinfo->conf.rx_free_thresh = 1; 2386de1d70f0SHemant Agrawal qinfo->conf.rx_drop_en = 1; 2387de1d70f0SHemant Agrawal qinfo->conf.rx_deferred_start = 0; 
2388de1d70f0SHemant Agrawal qinfo->conf.offloads = rxq->offloads; 2389de1d70f0SHemant Agrawal } 2390de1d70f0SHemant Agrawal 2391de1d70f0SHemant Agrawal static void 2392de1d70f0SHemant Agrawal dpaa2_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 2393de1d70f0SHemant Agrawal struct rte_eth_txq_info *qinfo) 2394de1d70f0SHemant Agrawal { 2395de1d70f0SHemant Agrawal struct dpaa2_queue *txq; 2396de1d70f0SHemant Agrawal 2397de1d70f0SHemant Agrawal txq = dev->data->tx_queues[queue_id]; 2398de1d70f0SHemant Agrawal 2399de1d70f0SHemant Agrawal qinfo->nb_desc = txq->nb_desc; 2400de1d70f0SHemant Agrawal qinfo->conf.tx_thresh.pthresh = 0; 2401de1d70f0SHemant Agrawal qinfo->conf.tx_thresh.hthresh = 0; 2402de1d70f0SHemant Agrawal qinfo->conf.tx_thresh.wthresh = 0; 2403de1d70f0SHemant Agrawal 2404de1d70f0SHemant Agrawal qinfo->conf.tx_free_thresh = 0; 2405de1d70f0SHemant Agrawal qinfo->conf.tx_rs_thresh = 0; 2406de1d70f0SHemant Agrawal qinfo->conf.offloads = txq->offloads; 2407de1d70f0SHemant Agrawal qinfo->conf.tx_deferred_start = 0; 2408de1d70f0SHemant Agrawal } 2409de1d70f0SHemant Agrawal 2410ac624068SGagandeep Singh static int 2411ac624068SGagandeep Singh dpaa2_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops) 2412ac624068SGagandeep Singh { 2413ac624068SGagandeep Singh *(const void **)ops = &dpaa2_tm_ops; 2414ac624068SGagandeep Singh 2415ac624068SGagandeep Singh return 0; 2416ac624068SGagandeep Singh } 2417ac624068SGagandeep Singh 2418a5b375edSNipun Gupta void 2419a5b375edSNipun Gupta rte_pmd_dpaa2_thread_init(void) 2420a5b375edSNipun Gupta { 2421a5b375edSNipun Gupta int ret; 2422a5b375edSNipun Gupta 2423a5b375edSNipun Gupta if (unlikely(!DPAA2_PER_LCORE_DPIO)) { 2424a5b375edSNipun Gupta ret = dpaa2_affine_qbman_swp(); 2425a5b375edSNipun Gupta if (ret) { 2426a5b375edSNipun Gupta DPAA2_PMD_ERR( 2427*f665790aSDavid Marchand "Failed to allocate IO portal, tid: %d", 2428a5b375edSNipun Gupta rte_gettid()); 2429a5b375edSNipun Gupta return; 2430a5b375edSNipun Gupta } 2431a5b375edSNipun Gupta } 2432a5b375edSNipun Gupta } 2433a5b375edSNipun Gupta 24343e5a335dSHemant Agrawal static struct eth_dev_ops dpaa2_ethdev_ops = { 24353e5a335dSHemant Agrawal .dev_configure = dpaa2_eth_dev_configure, 24363e5a335dSHemant Agrawal .dev_start = dpaa2_dev_start, 24373e5a335dSHemant Agrawal .dev_stop = dpaa2_dev_stop, 24383e5a335dSHemant Agrawal .dev_close = dpaa2_dev_close, 2439c0e5c69aSHemant Agrawal .promiscuous_enable = dpaa2_dev_promiscuous_enable, 2440c0e5c69aSHemant Agrawal .promiscuous_disable = dpaa2_dev_promiscuous_disable, 24415d5aeeedSHemant Agrawal .allmulticast_enable = dpaa2_dev_allmulticast_enable, 24425d5aeeedSHemant Agrawal .allmulticast_disable = dpaa2_dev_allmulticast_disable, 2443a1f3a12cSHemant Agrawal .dev_set_link_up = dpaa2_dev_set_link_up, 2444a1f3a12cSHemant Agrawal .dev_set_link_down = dpaa2_dev_set_link_down, 2445c56c86ffSHemant Agrawal .link_update = dpaa2_dev_link_update, 2446b0aa5459SHemant Agrawal .stats_get = dpaa2_dev_stats_get, 24471d6329b2SHemant Agrawal .xstats_get = dpaa2_dev_xstats_get, 24481d6329b2SHemant Agrawal .xstats_get_by_id = dpaa2_xstats_get_by_id, 24491d6329b2SHemant Agrawal .xstats_get_names_by_id = dpaa2_xstats_get_names_by_id, 24501d6329b2SHemant Agrawal .xstats_get_names = dpaa2_xstats_get_names, 2451b0aa5459SHemant Agrawal .stats_reset = dpaa2_dev_stats_reset, 24521d6329b2SHemant Agrawal .xstats_reset = dpaa2_dev_stats_reset, 2453748eccb9SHemant Agrawal .fw_version_get = dpaa2_fw_version_get, 24543e5a335dSHemant Agrawal .dev_infos_get = dpaa2_dev_info_get, 
2455a5fc38d4SHemant Agrawal .dev_supported_ptypes_get = dpaa2_supported_ptypes_get, 2456e31d4d21SHemant Agrawal .mtu_set = dpaa2_dev_mtu_set, 24573ce294f2SHemant Agrawal .vlan_filter_set = dpaa2_vlan_filter_set, 24583ce294f2SHemant Agrawal .vlan_offload_set = dpaa2_vlan_offload_set, 2459e59b75ffSHemant Agrawal .vlan_tpid_set = dpaa2_vlan_tpid_set, 24603e5a335dSHemant Agrawal .rx_queue_setup = dpaa2_dev_rx_queue_setup, 24613e5a335dSHemant Agrawal .rx_queue_release = dpaa2_dev_rx_queue_release, 24623e5a335dSHemant Agrawal .tx_queue_setup = dpaa2_dev_tx_queue_setup, 2463ddbc2b66SApeksha Gupta .rx_burst_mode_get = dpaa2_dev_rx_burst_mode_get, 2464ddbc2b66SApeksha Gupta .tx_burst_mode_get = dpaa2_dev_tx_burst_mode_get, 2465977d0006SHemant Agrawal .flow_ctrl_get = dpaa2_flow_ctrl_get, 2466977d0006SHemant Agrawal .flow_ctrl_set = dpaa2_flow_ctrl_set, 2467b4d97b7dSHemant Agrawal .mac_addr_add = dpaa2_dev_add_mac_addr, 2468b4d97b7dSHemant Agrawal .mac_addr_remove = dpaa2_dev_remove_mac_addr, 2469b4d97b7dSHemant Agrawal .mac_addr_set = dpaa2_dev_set_mac_addr, 247063d5c3b0SHemant Agrawal .rss_hash_update = dpaa2_dev_rss_hash_update, 247163d5c3b0SHemant Agrawal .rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get, 2472fb7ad441SThomas Monjalon .flow_ops_get = dpaa2_dev_flow_ops_get, 2473de1d70f0SHemant Agrawal .rxq_info_get = dpaa2_rxq_info_get, 2474de1d70f0SHemant Agrawal .txq_info_get = dpaa2_txq_info_get, 2475ac624068SGagandeep Singh .tm_ops_get = dpaa2_tm_ops_get, 2476bc767866SPriyanka Jain #if defined(RTE_LIBRTE_IEEE1588) 2477bc767866SPriyanka Jain .timesync_enable = dpaa2_timesync_enable, 2478bc767866SPriyanka Jain .timesync_disable = dpaa2_timesync_disable, 2479bc767866SPriyanka Jain .timesync_read_time = dpaa2_timesync_read_time, 2480bc767866SPriyanka Jain .timesync_write_time = dpaa2_timesync_write_time, 2481bc767866SPriyanka Jain .timesync_adjust_time = dpaa2_timesync_adjust_time, 2482bc767866SPriyanka Jain .timesync_read_rx_timestamp = dpaa2_timesync_read_rx_timestamp, 2483bc767866SPriyanka Jain .timesync_read_tx_timestamp = dpaa2_timesync_read_tx_timestamp, 2484bc767866SPriyanka Jain #endif 24853e5a335dSHemant Agrawal }; 24863e5a335dSHemant Agrawal 2487c3e0a706SShreyansh Jain /* Populate the mac address from physically available (u-boot/firmware) and/or 2488c3e0a706SShreyansh Jain * one set by higher layers like MC (restool) etc. 
2489c3e0a706SShreyansh Jain * Returns the table of MAC entries (multiple entries) 2490c3e0a706SShreyansh Jain */ 2491c3e0a706SShreyansh Jain static int 2492c3e0a706SShreyansh Jain populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv, 24936d13ea8eSOlivier Matz struct rte_ether_addr *mac_entry) 2494c3e0a706SShreyansh Jain { 2495c3e0a706SShreyansh Jain int ret; 24966d13ea8eSOlivier Matz struct rte_ether_addr phy_mac, prime_mac; 249741c24ea2SShreyansh Jain 24986d13ea8eSOlivier Matz memset(&phy_mac, 0, sizeof(struct rte_ether_addr)); 24996d13ea8eSOlivier Matz memset(&prime_mac, 0, sizeof(struct rte_ether_addr)); 2500c3e0a706SShreyansh Jain 2501c3e0a706SShreyansh Jain /* Get the physical device MAC address */ 2502c3e0a706SShreyansh Jain ret = dpni_get_port_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token, 2503c3e0a706SShreyansh Jain phy_mac.addr_bytes); 2504c3e0a706SShreyansh Jain if (ret) { 2505c3e0a706SShreyansh Jain DPAA2_PMD_ERR("DPNI get physical port MAC failed: %d", ret); 2506c3e0a706SShreyansh Jain goto cleanup; 2507c3e0a706SShreyansh Jain } 2508c3e0a706SShreyansh Jain 2509c3e0a706SShreyansh Jain ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token, 2510c3e0a706SShreyansh Jain prime_mac.addr_bytes); 2511c3e0a706SShreyansh Jain if (ret) { 2512c3e0a706SShreyansh Jain DPAA2_PMD_ERR("DPNI get Prime port MAC failed: %d", ret); 2513c3e0a706SShreyansh Jain goto cleanup; 2514c3e0a706SShreyansh Jain } 2515c3e0a706SShreyansh Jain 2516c3e0a706SShreyansh Jain /* Now that both MAC have been obtained, do: 2517c3e0a706SShreyansh Jain * if not_empty_mac(phy) && phy != Prime, overwrite prime with Phy 2518c3e0a706SShreyansh Jain * and return phy 2519c3e0a706SShreyansh Jain * If empty_mac(phy), return prime. 2520c3e0a706SShreyansh Jain * if both are empty, create random MAC, set as prime and return 2521c3e0a706SShreyansh Jain */ 2522538da7a1SOlivier Matz if (!rte_is_zero_ether_addr(&phy_mac)) { 2523c3e0a706SShreyansh Jain /* If the addresses are not same, overwrite prime */ 2524538da7a1SOlivier Matz if (!rte_is_same_ether_addr(&phy_mac, &prime_mac)) { 2525c3e0a706SShreyansh Jain ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW, 2526c3e0a706SShreyansh Jain priv->token, 2527c3e0a706SShreyansh Jain phy_mac.addr_bytes); 2528c3e0a706SShreyansh Jain if (ret) { 2529c3e0a706SShreyansh Jain DPAA2_PMD_ERR("Unable to set MAC Address: %d", 2530c3e0a706SShreyansh Jain ret); 2531c3e0a706SShreyansh Jain goto cleanup; 2532c3e0a706SShreyansh Jain } 25336d13ea8eSOlivier Matz memcpy(&prime_mac, &phy_mac, 25346d13ea8eSOlivier Matz sizeof(struct rte_ether_addr)); 2535c3e0a706SShreyansh Jain } 2536538da7a1SOlivier Matz } else if (rte_is_zero_ether_addr(&prime_mac)) { 2537c3e0a706SShreyansh Jain /* In case phys and prime, both are zero, create random MAC */ 2538538da7a1SOlivier Matz rte_eth_random_addr(prime_mac.addr_bytes); 2539c3e0a706SShreyansh Jain ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW, 2540c3e0a706SShreyansh Jain priv->token, 2541c3e0a706SShreyansh Jain prime_mac.addr_bytes); 2542c3e0a706SShreyansh Jain if (ret) { 2543c3e0a706SShreyansh Jain DPAA2_PMD_ERR("Unable to set MAC Address: %d", ret); 2544c3e0a706SShreyansh Jain goto cleanup; 2545c3e0a706SShreyansh Jain } 2546c3e0a706SShreyansh Jain } 2547c3e0a706SShreyansh Jain 2548c3e0a706SShreyansh Jain /* prime_mac the final MAC address */ 25496d13ea8eSOlivier Matz memcpy(mac_entry, &prime_mac, sizeof(struct rte_ether_addr)); 2550c3e0a706SShreyansh Jain return 0; 2551c3e0a706SShreyansh Jain 2552c3e0a706SShreyansh Jain cleanup: 
2553c3e0a706SShreyansh Jain return -1; 2554c3e0a706SShreyansh Jain } 2555c3e0a706SShreyansh Jain 2556c147eae0SHemant Agrawal static int 2557a3a997f0SHemant Agrawal check_devargs_handler(__rte_unused const char *key, const char *value, 2558a3a997f0SHemant Agrawal __rte_unused void *opaque) 2559a3a997f0SHemant Agrawal { 2560a3a997f0SHemant Agrawal if (strcmp(value, "1")) 2561a3a997f0SHemant Agrawal return -1; 2562a3a997f0SHemant Agrawal 2563a3a997f0SHemant Agrawal return 0; 2564a3a997f0SHemant Agrawal } 2565a3a997f0SHemant Agrawal 2566a3a997f0SHemant Agrawal static int 2567a3a997f0SHemant Agrawal dpaa2_get_devargs(struct rte_devargs *devargs, const char *key) 2568a3a997f0SHemant Agrawal { 2569a3a997f0SHemant Agrawal struct rte_kvargs *kvlist; 2570a3a997f0SHemant Agrawal 2571a3a997f0SHemant Agrawal if (!devargs) 2572a3a997f0SHemant Agrawal return 0; 2573a3a997f0SHemant Agrawal 2574a3a997f0SHemant Agrawal kvlist = rte_kvargs_parse(devargs->args, NULL); 2575a3a997f0SHemant Agrawal if (!kvlist) 2576a3a997f0SHemant Agrawal return 0; 2577a3a997f0SHemant Agrawal 2578a3a997f0SHemant Agrawal if (!rte_kvargs_count(kvlist, key)) { 2579a3a997f0SHemant Agrawal rte_kvargs_free(kvlist); 2580a3a997f0SHemant Agrawal return 0; 2581a3a997f0SHemant Agrawal } 2582a3a997f0SHemant Agrawal 2583a3a997f0SHemant Agrawal if (rte_kvargs_process(kvlist, key, 2584a3a997f0SHemant Agrawal check_devargs_handler, NULL) < 0) { 2585a3a997f0SHemant Agrawal rte_kvargs_free(kvlist); 2586a3a997f0SHemant Agrawal return 0; 2587a3a997f0SHemant Agrawal } 2588a3a997f0SHemant Agrawal rte_kvargs_free(kvlist); 2589a3a997f0SHemant Agrawal 2590a3a997f0SHemant Agrawal return 1; 2591a3a997f0SHemant Agrawal } 2592a3a997f0SHemant Agrawal 2593a3a997f0SHemant Agrawal static int 2594c147eae0SHemant Agrawal dpaa2_dev_init(struct rte_eth_dev *eth_dev) 2595c147eae0SHemant Agrawal { 25963e5a335dSHemant Agrawal struct rte_device *dev = eth_dev->device; 25973e5a335dSHemant Agrawal struct rte_dpaa2_device *dpaa2_dev; 25983e5a335dSHemant Agrawal struct fsl_mc_io *dpni_dev; 25993e5a335dSHemant Agrawal struct dpni_attr attr; 26003e5a335dSHemant Agrawal struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; 2601bee61d86SHemant Agrawal struct dpni_buffer_layout layout; 2602fe2b986aSSunil Kumar Kori int ret, hw_id, i; 26033e5a335dSHemant Agrawal 2604d401ead1SHemant Agrawal PMD_INIT_FUNC_TRACE(); 2605d401ead1SHemant Agrawal 260681c42c84SShreyansh Jain dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0); 260781c42c84SShreyansh Jain if (!dpni_dev) { 260881c42c84SShreyansh Jain DPAA2_PMD_ERR("Memory allocation failed for dpni device"); 260981c42c84SShreyansh Jain return -1; 261081c42c84SShreyansh Jain } 2611a6a5f4b4SHemant Agrawal dpni_dev->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX); 261281c42c84SShreyansh Jain eth_dev->process_private = (void *)dpni_dev; 261381c42c84SShreyansh Jain 2614c147eae0SHemant Agrawal /* For secondary processes, the primary has done all the work */ 2615e7b187dbSShreyansh Jain if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2616e7b187dbSShreyansh Jain /* In case of secondary, only burst and ops API need to be 2617e7b187dbSShreyansh Jain * plugged. 
2618e7b187dbSShreyansh Jain */ 2619e7b187dbSShreyansh Jain eth_dev->dev_ops = &dpaa2_ethdev_ops; 2620cbfc6111SFerruh Yigit eth_dev->rx_queue_count = dpaa2_dev_rx_queue_count; 2621a3a997f0SHemant Agrawal if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) 2622a3a997f0SHemant Agrawal eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx; 262320191ab3SNipun Gupta else if (dpaa2_get_devargs(dev->devargs, 262420191ab3SNipun Gupta DRIVER_NO_PREFETCH_MODE)) 262520191ab3SNipun Gupta eth_dev->rx_pkt_burst = dpaa2_dev_rx; 2626a3a997f0SHemant Agrawal else 2627e7b187dbSShreyansh Jain eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx; 2628e7b187dbSShreyansh Jain eth_dev->tx_pkt_burst = dpaa2_dev_tx; 2629c147eae0SHemant Agrawal return 0; 2630e7b187dbSShreyansh Jain } 2631c147eae0SHemant Agrawal 26323e5a335dSHemant Agrawal dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device); 26333e5a335dSHemant Agrawal 26343e5a335dSHemant Agrawal hw_id = dpaa2_dev->object_id; 26353e5a335dSHemant Agrawal ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token); 26363e5a335dSHemant Agrawal if (ret) { 2637a10a988aSShreyansh Jain DPAA2_PMD_ERR( 2638a10a988aSShreyansh Jain "Failure in opening dpni@%d with err code %d", 2639d4984046SHemant Agrawal hw_id, ret); 2640d4984046SHemant Agrawal rte_free(dpni_dev); 26413e5a335dSHemant Agrawal return -1; 26423e5a335dSHemant Agrawal } 26433e5a335dSHemant Agrawal 2644f023d059SJun Yang if (eth_dev->data->dev_conf.lpbk_mode) 2645f023d059SJun Yang dpaa2_dev_recycle_deconfig(eth_dev); 2646f023d059SJun Yang 26473e5a335dSHemant Agrawal /* Clean the device first */ 26483e5a335dSHemant Agrawal ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token); 26493e5a335dSHemant Agrawal if (ret) { 2650a10a988aSShreyansh Jain DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d", 2651d4984046SHemant Agrawal hw_id, ret); 2652d4984046SHemant Agrawal goto init_err; 26533e5a335dSHemant Agrawal } 26543e5a335dSHemant Agrawal 26553e5a335dSHemant Agrawal ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr); 26563e5a335dSHemant Agrawal if (ret) { 2657a10a988aSShreyansh Jain DPAA2_PMD_ERR( 2658a10a988aSShreyansh Jain "Failure in get dpni@%d attribute, err code %d", 2659d4984046SHemant Agrawal hw_id, ret); 2660d4984046SHemant Agrawal goto init_err; 26613e5a335dSHemant Agrawal } 26623e5a335dSHemant Agrawal 266316bbc98aSShreyansh Jain priv->num_rx_tc = attr.num_rx_tcs; 266472100f0dSGagandeep Singh priv->num_tx_tc = attr.num_tx_tcs; 26654ce58f8aSJun Yang priv->qos_entries = attr.qos_entries; 26664ce58f8aSJun Yang priv->fs_entries = attr.fs_entries; 26674ce58f8aSJun Yang priv->dist_queues = attr.num_queues; 266872100f0dSGagandeep Singh priv->num_channels = attr.num_channels; 266972100f0dSGagandeep Singh priv->channel_inuse = 0; 2670f023d059SJun Yang rte_spinlock_init(&priv->lpbk_qp_lock); 26714ce58f8aSJun Yang 267213b856acSHemant Agrawal /* only if the custom CG is enabled */ 267313b856acSHemant Agrawal if (attr.options & DPNI_OPT_CUSTOM_CG) 267413b856acSHemant Agrawal priv->max_cgs = attr.num_cgs; 267513b856acSHemant Agrawal else 267613b856acSHemant Agrawal priv->max_cgs = 0; 267713b856acSHemant Agrawal 267813b856acSHemant Agrawal for (i = 0; i < priv->max_cgs; i++) 267913b856acSHemant Agrawal priv->cgid_in_use[i] = 0; 268089c2ea8fSHemant Agrawal 2681fe2b986aSSunil Kumar Kori for (i = 0; i < attr.num_rx_tcs; i++) 2682fe2b986aSSunil Kumar Kori priv->nb_rx_queues += attr.num_queues; 268389c2ea8fSHemant Agrawal 268472100f0dSGagandeep Singh priv->nb_tx_queues = attr.num_tx_tcs * attr.num_channels; 
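/*
 * Illustrative note (added for clarity; the numbers are hypothetical, not
 * read from any particular DPNI): the loop above adds attr.num_queues once
 * per Rx traffic class, so priv->nb_rx_queues ends up as
 * attr.num_rx_tcs * attr.num_queues (e.g. 1 TC * 8 queues = 8 Rx queues),
 * while priv->nb_tx_queues is attr.num_tx_tcs * attr.num_channels
 * (e.g. 1 * 1 = 1 Tx queue).
 */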
2685ef18dafeSHemant Agrawal 268613b856acSHemant Agrawal DPAA2_PMD_DEBUG("RX-TC= %d, rx_queues= %d, tx_queues=%d, max_cgs=%d", 2687a10a988aSShreyansh Jain priv->num_rx_tc, priv->nb_rx_queues, 268813b856acSHemant Agrawal priv->nb_tx_queues, priv->max_cgs); 26893e5a335dSHemant Agrawal 26903e5a335dSHemant Agrawal priv->hw = dpni_dev; 26913e5a335dSHemant Agrawal priv->hw_id = hw_id; 269233fad432SHemant Agrawal priv->options = attr.options; 269333fad432SHemant Agrawal priv->max_mac_filters = attr.mac_filter_entries; 269433fad432SHemant Agrawal priv->max_vlan_filters = attr.vlan_filter_entries; 26953e5a335dSHemant Agrawal priv->flags = 0; 2696e806bf87SPriyanka Jain #if defined(RTE_LIBRTE_IEEE1588) 26970fcdbde0SHemant Agrawal DPAA2_PMD_INFO("DPDK IEEE1588 is enabled"); 26988d21c563SHemant Agrawal priv->flags |= DPAA2_TX_CONF_ENABLE; 2699e806bf87SPriyanka Jain #endif 27008d21c563SHemant Agrawal /* Used with ``fslmc:dpni.1,drv_tx_conf=1`` */ 27018d21c563SHemant Agrawal if (dpaa2_get_devargs(dev->devargs, DRIVER_TX_CONF)) { 27028d21c563SHemant Agrawal priv->flags |= DPAA2_TX_CONF_ENABLE; 27038d21c563SHemant Agrawal DPAA2_PMD_INFO("TX_CONF Enabled"); 27048d21c563SHemant Agrawal } 27053e5a335dSHemant Agrawal 27064690a611SNipun Gupta if (dpaa2_get_devargs(dev->devargs, DRIVER_ERROR_QUEUE)) { 27074690a611SNipun Gupta dpaa2_enable_err_queue = 1; 27084690a611SNipun Gupta DPAA2_PMD_INFO("Enable error queue"); 27094690a611SNipun Gupta } 27104690a611SNipun Gupta 27113e5a335dSHemant Agrawal /* Allocate memory for hardware structure for queues */ 27123e5a335dSHemant Agrawal ret = dpaa2_alloc_rx_tx_queues(eth_dev); 27133e5a335dSHemant Agrawal if (ret) { 2714a10a988aSShreyansh Jain DPAA2_PMD_ERR("Queue allocation Failed"); 2715d4984046SHemant Agrawal goto init_err; 27163e5a335dSHemant Agrawal } 27173e5a335dSHemant Agrawal 2718c3e0a706SShreyansh Jain /* Allocate memory for storing MAC addresses. 2719c3e0a706SShreyansh Jain * Table of mac_filter_entries size is allocated so that RTE ether lib 2720c3e0a706SShreyansh Jain * can add MAC entries when rte_eth_dev_mac_addr_add is called. 2721c3e0a706SShreyansh Jain */ 272233fad432SHemant Agrawal eth_dev->data->mac_addrs = rte_zmalloc("dpni", 272335b2d13fSOlivier Matz RTE_ETHER_ADDR_LEN * attr.mac_filter_entries, 0); 272433fad432SHemant Agrawal if (eth_dev->data->mac_addrs == NULL) { 2725a10a988aSShreyansh Jain DPAA2_PMD_ERR( 2726d4984046SHemant Agrawal "Failed to allocate %d bytes needed to store MAC addresses", 272735b2d13fSOlivier Matz RTE_ETHER_ADDR_LEN * attr.mac_filter_entries); 2728d4984046SHemant Agrawal ret = -ENOMEM; 2729d4984046SHemant Agrawal goto init_err; 273033fad432SHemant Agrawal } 273133fad432SHemant Agrawal 2732c3e0a706SShreyansh Jain ret = populate_mac_addr(dpni_dev, priv, &eth_dev->data->mac_addrs[0]); 273333fad432SHemant Agrawal if (ret) { 2734c3e0a706SShreyansh Jain DPAA2_PMD_ERR("Unable to fetch MAC Address for device"); 2735c3e0a706SShreyansh Jain rte_free(eth_dev->data->mac_addrs); 2736c3e0a706SShreyansh Jain eth_dev->data->mac_addrs = NULL; 2737d4984046SHemant Agrawal goto init_err; 273833fad432SHemant Agrawal } 273933fad432SHemant Agrawal 2740bee61d86SHemant Agrawal /* ... tx buffer layout ...
*/ 2741bee61d86SHemant Agrawal memset(&layout, 0, sizeof(struct dpni_buffer_layout)); 27428d21c563SHemant Agrawal if (priv->flags & DPAA2_TX_CONF_ENABLE) { 27439ceacab7SPriyanka Jain layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | 27449ceacab7SPriyanka Jain DPNI_BUF_LAYOUT_OPT_TIMESTAMP; 27459ceacab7SPriyanka Jain layout.pass_timestamp = true; 27469ceacab7SPriyanka Jain } else { 2747bee61d86SHemant Agrawal layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; 27489ceacab7SPriyanka Jain } 2749bee61d86SHemant Agrawal layout.pass_frame_status = 1; 2750bee61d86SHemant Agrawal ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, 2751bee61d86SHemant Agrawal DPNI_QUEUE_TX, &layout); 2752bee61d86SHemant Agrawal if (ret) { 2753a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret); 2754d4984046SHemant Agrawal goto init_err; 2755bee61d86SHemant Agrawal } 2756bee61d86SHemant Agrawal 2757bee61d86SHemant Agrawal /* ... tx-conf and error buffer layout ... */ 2758bee61d86SHemant Agrawal memset(&layout, 0, sizeof(struct dpni_buffer_layout)); 27598d21c563SHemant Agrawal if (priv->flags & DPAA2_TX_CONF_ENABLE) { 27608d21c563SHemant Agrawal layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP; 27619ceacab7SPriyanka Jain layout.pass_timestamp = true; 27629ceacab7SPriyanka Jain } 27638d21c563SHemant Agrawal layout.options |= DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; 2764bee61d86SHemant Agrawal layout.pass_frame_status = 1; 2765bee61d86SHemant Agrawal ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, 2766bee61d86SHemant Agrawal DPNI_QUEUE_TX_CONFIRM, &layout); 2767bee61d86SHemant Agrawal if (ret) { 2768a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout", 2769d4984046SHemant Agrawal ret); 2770d4984046SHemant Agrawal goto init_err; 2771bee61d86SHemant Agrawal } 2772bee61d86SHemant Agrawal 27733e5a335dSHemant Agrawal eth_dev->dev_ops = &dpaa2_ethdev_ops; 2774c147eae0SHemant Agrawal 2775a3a997f0SHemant Agrawal if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) { 2776a3a997f0SHemant Agrawal eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx; 2777a3a997f0SHemant Agrawal DPAA2_PMD_INFO("Loopback mode"); 277820191ab3SNipun Gupta } else if (dpaa2_get_devargs(dev->devargs, DRIVER_NO_PREFETCH_MODE)) { 277920191ab3SNipun Gupta eth_dev->rx_pkt_burst = dpaa2_dev_rx; 278020191ab3SNipun Gupta DPAA2_PMD_INFO("No Prefetch mode"); 2781a3a997f0SHemant Agrawal } else { 27825c6942fdSHemant Agrawal eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx; 2783a3a997f0SHemant Agrawal } 2784cd9935ceSHemant Agrawal eth_dev->tx_pkt_burst = dpaa2_dev_tx; 27851261cd68SHemant Agrawal 27867be78d02SJosh Soref /* Init fields w.r.t. 
classification */ 27875f176728SJun Yang memset(&priv->extract.qos_key_extract, 0, 27885f176728SJun Yang sizeof(struct dpaa2_key_extract)); 2789fe2b986aSSunil Kumar Kori priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64); 2790fe2b986aSSunil Kumar Kori if (!priv->extract.qos_extract_param) { 2791fe2b986aSSunil Kumar Kori DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow " 27927be78d02SJosh Soref " classification ", ret); 2793fe2b986aSSunil Kumar Kori goto init_err; 2794fe2b986aSSunil Kumar Kori } 27955f176728SJun Yang priv->extract.qos_key_extract.key_info.ipv4_src_offset = 27965f176728SJun Yang IP_ADDRESS_OFFSET_INVALID; 27975f176728SJun Yang priv->extract.qos_key_extract.key_info.ipv4_dst_offset = 27985f176728SJun Yang IP_ADDRESS_OFFSET_INVALID; 27995f176728SJun Yang priv->extract.qos_key_extract.key_info.ipv6_src_offset = 28005f176728SJun Yang IP_ADDRESS_OFFSET_INVALID; 28015f176728SJun Yang priv->extract.qos_key_extract.key_info.ipv6_dst_offset = 28025f176728SJun Yang IP_ADDRESS_OFFSET_INVALID; 28035f176728SJun Yang 2804fe2b986aSSunil Kumar Kori for (i = 0; i < MAX_TCS; i++) { 28055f176728SJun Yang memset(&priv->extract.tc_key_extract[i], 0, 28065f176728SJun Yang sizeof(struct dpaa2_key_extract)); 28075f176728SJun Yang priv->extract.tc_extract_param[i] = 2808fe2b986aSSunil Kumar Kori (size_t)rte_malloc(NULL, 256, 64); 28095f176728SJun Yang if (!priv->extract.tc_extract_param[i]) { 28107be78d02SJosh Soref DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow classification", 2811fe2b986aSSunil Kumar Kori ret); 2812fe2b986aSSunil Kumar Kori goto init_err; 2813fe2b986aSSunil Kumar Kori } 28145f176728SJun Yang priv->extract.tc_key_extract[i].key_info.ipv4_src_offset = 28155f176728SJun Yang IP_ADDRESS_OFFSET_INVALID; 28165f176728SJun Yang priv->extract.tc_key_extract[i].key_info.ipv4_dst_offset = 28175f176728SJun Yang IP_ADDRESS_OFFSET_INVALID; 28185f176728SJun Yang priv->extract.tc_key_extract[i].key_info.ipv6_src_offset = 28195f176728SJun Yang IP_ADDRESS_OFFSET_INVALID; 28205f176728SJun Yang priv->extract.tc_key_extract[i].key_info.ipv6_dst_offset = 28215f176728SJun Yang IP_ADDRESS_OFFSET_INVALID; 2822fe2b986aSSunil Kumar Kori } 2823fe2b986aSSunil Kumar Kori 28246f8be0fbSHemant Agrawal ret = dpni_set_max_frame_length(dpni_dev, CMD_PRI_LOW, priv->token, 28256f8be0fbSHemant Agrawal RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN 28266f8be0fbSHemant Agrawal + VLAN_TAG_SIZE); 28276f8be0fbSHemant Agrawal if (ret) { 28286f8be0fbSHemant Agrawal DPAA2_PMD_ERR("Unable to set mtu. check config"); 28296f8be0fbSHemant Agrawal goto init_err; 28306f8be0fbSHemant Agrawal } 28316f8be0fbSHemant Agrawal 283272ec7a67SSunil Kumar Kori /*TODO To enable soft parser support DPAA2 driver needs to integrate 283372ec7a67SSunil Kumar Kori * with external entity to receive byte code for software sequence 283472ec7a67SSunil Kumar Kori * and same will be offload to the H/W using MC interface. 283572ec7a67SSunil Kumar Kori * Currently it is assumed that DPAA2 driver has byte code by some 283672ec7a67SSunil Kumar Kori * mean and same if offloaded to H/W. 
283772ec7a67SSunil Kumar Kori */ 283872ec7a67SSunil Kumar Kori if (getenv("DPAA2_ENABLE_SOFT_PARSER")) { 283972ec7a67SSunil Kumar Kori WRIOP_SS_INITIALIZER(priv); 284072ec7a67SSunil Kumar Kori ret = dpaa2_eth_load_wriop_soft_parser(priv, DPNI_SS_INGRESS); 284172ec7a67SSunil Kumar Kori if (ret < 0) { 2842*f665790aSDavid Marchand DPAA2_PMD_ERR(" Error(%d) in loading softparser", 284372ec7a67SSunil Kumar Kori ret); 284472ec7a67SSunil Kumar Kori return ret; 284572ec7a67SSunil Kumar Kori } 284672ec7a67SSunil Kumar Kori 284772ec7a67SSunil Kumar Kori ret = dpaa2_eth_enable_wriop_soft_parser(priv, 284872ec7a67SSunil Kumar Kori DPNI_SS_INGRESS); 284972ec7a67SSunil Kumar Kori if (ret < 0) { 2850*f665790aSDavid Marchand DPAA2_PMD_ERR(" Error(%d) in enabling softparser", 285172ec7a67SSunil Kumar Kori ret); 285272ec7a67SSunil Kumar Kori return ret; 285372ec7a67SSunil Kumar Kori } 285472ec7a67SSunil Kumar Kori } 2855a247fcd9SStephen Hemminger DPAA2_PMD_INFO("%s: netdev created, connected to %s", 2856f023d059SJun Yang eth_dev->data->name, dpaa2_dev->ep_name); 2857f023d059SJun Yang 2858c147eae0SHemant Agrawal return 0; 2859d4984046SHemant Agrawal init_err: 28603e5a335dSHemant Agrawal dpaa2_dev_close(eth_dev); 28613e5a335dSHemant Agrawal 28625964d36aSSachin Saxena return ret; 2863c147eae0SHemant Agrawal } 2864c147eae0SHemant Agrawal 2865028d1dfdSJun Yang int dpaa2_dev_is_dpaa2(struct rte_eth_dev *dev) 2866028d1dfdSJun Yang { 2867028d1dfdSJun Yang return dev->device->driver == &rte_dpaa2_pmd.driver; 2868028d1dfdSJun Yang } 2869028d1dfdSJun Yang 2870c147eae0SHemant Agrawal static int 287155fd2703SHemant Agrawal rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv, 2872c147eae0SHemant Agrawal struct rte_dpaa2_device *dpaa2_dev) 2873c147eae0SHemant Agrawal { 2874c147eae0SHemant Agrawal struct rte_eth_dev *eth_dev; 287581c42c84SShreyansh Jain struct dpaa2_dev_priv *dev_priv; 2876c147eae0SHemant Agrawal int diag; 2877c147eae0SHemant Agrawal 2878f4435e38SHemant Agrawal if ((DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) > 2879f4435e38SHemant Agrawal RTE_PKTMBUF_HEADROOM) { 2880f4435e38SHemant Agrawal DPAA2_PMD_ERR( 2881f4435e38SHemant Agrawal "RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA2 Annotation req(%d)", 2882f4435e38SHemant Agrawal RTE_PKTMBUF_HEADROOM, 2883f4435e38SHemant Agrawal DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE); 2884f4435e38SHemant Agrawal 2885f4435e38SHemant Agrawal return -1; 2886f4435e38SHemant Agrawal } 2887f4435e38SHemant Agrawal 2888c147eae0SHemant Agrawal if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 2889e729ec76SHemant Agrawal eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name); 2890e729ec76SHemant Agrawal if (!eth_dev) 2891e729ec76SHemant Agrawal return -ENODEV; 289281c42c84SShreyansh Jain dev_priv = rte_zmalloc("ethdev private structure", 2893c147eae0SHemant Agrawal sizeof(struct dpaa2_dev_priv), 2894c147eae0SHemant Agrawal RTE_CACHE_LINE_SIZE); 289581c42c84SShreyansh Jain if (dev_priv == NULL) { 2896a10a988aSShreyansh Jain DPAA2_PMD_CRIT( 2897a10a988aSShreyansh Jain "Unable to allocate memory for private data"); 2898c147eae0SHemant Agrawal rte_eth_dev_release_port(eth_dev); 2899c147eae0SHemant Agrawal return -ENOMEM; 2900c147eae0SHemant Agrawal } 290181c42c84SShreyansh Jain eth_dev->data->dev_private = (void *)dev_priv; 290281c42c84SShreyansh Jain /* Store a pointer to eth_dev in dev_private */ 290381c42c84SShreyansh Jain dev_priv->eth_dev = eth_dev; 2904e729ec76SHemant Agrawal } else { 2905e729ec76SHemant Agrawal eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name); 
290681c42c84SShreyansh Jain if (!eth_dev) { 290781c42c84SShreyansh Jain DPAA2_PMD_DEBUG("returning enodev"); 2908e729ec76SHemant Agrawal return -ENODEV; 2909c147eae0SHemant Agrawal } 291081c42c84SShreyansh Jain } 2911e729ec76SHemant Agrawal 2912c147eae0SHemant Agrawal eth_dev->device = &dpaa2_dev->device; 291355fd2703SHemant Agrawal 2914c147eae0SHemant Agrawal dpaa2_dev->eth_dev = eth_dev; 2915c147eae0SHemant Agrawal eth_dev->data->rx_mbuf_alloc_failed = 0; 2916c147eae0SHemant Agrawal 291792b7e33eSHemant Agrawal if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC) 291892b7e33eSHemant Agrawal eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC; 291992b7e33eSHemant Agrawal 2920f30e69b4SFerruh Yigit eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 2921f30e69b4SFerruh Yigit 2922c147eae0SHemant Agrawal /* Invoke PMD device initialization function */ 2923c147eae0SHemant Agrawal diag = dpaa2_dev_init(eth_dev); 2924fbe90cddSThomas Monjalon if (diag == 0) { 292575e2a1d4SGagandeep Singh if (!dpaa2_tx_sg_pool) { 292675e2a1d4SGagandeep Singh dpaa2_tx_sg_pool = 292775e2a1d4SGagandeep Singh rte_pktmbuf_pool_create("dpaa2_mbuf_tx_sg_pool", 292875e2a1d4SGagandeep Singh DPAA2_POOL_SIZE, 292975e2a1d4SGagandeep Singh DPAA2_POOL_CACHE_SIZE, 0, 293075e2a1d4SGagandeep Singh DPAA2_MAX_SGS * sizeof(struct qbman_sge), 293175e2a1d4SGagandeep Singh rte_socket_id()); 293275e2a1d4SGagandeep Singh if (dpaa2_tx_sg_pool == NULL) { 2933*f665790aSDavid Marchand DPAA2_PMD_ERR("SG pool creation failed"); 293475e2a1d4SGagandeep Singh return -ENOMEM; 293575e2a1d4SGagandeep Singh } 293675e2a1d4SGagandeep Singh } 2937fbe90cddSThomas Monjalon rte_eth_dev_probing_finish(eth_dev); 293875e2a1d4SGagandeep Singh dpaa2_valid_dev++; 2939c147eae0SHemant Agrawal return 0; 2940fbe90cddSThomas Monjalon } 2941c147eae0SHemant Agrawal 2942c147eae0SHemant Agrawal rte_eth_dev_release_port(eth_dev); 2943c147eae0SHemant Agrawal return diag; 2944c147eae0SHemant Agrawal } 2945c147eae0SHemant Agrawal 2946c147eae0SHemant Agrawal static int 2947c147eae0SHemant Agrawal rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev) 2948c147eae0SHemant Agrawal { 2949c147eae0SHemant Agrawal struct rte_eth_dev *eth_dev; 29505964d36aSSachin Saxena int ret; 2951c147eae0SHemant Agrawal 2952c147eae0SHemant Agrawal eth_dev = dpaa2_dev->eth_dev; 29535964d36aSSachin Saxena dpaa2_dev_close(eth_dev); 295475e2a1d4SGagandeep Singh dpaa2_valid_dev--; 295575e2a1d4SGagandeep Singh if (!dpaa2_valid_dev) 295675e2a1d4SGagandeep Singh rte_mempool_free(dpaa2_tx_sg_pool); 29575964d36aSSachin Saxena ret = rte_eth_dev_release_port(eth_dev); 2958c147eae0SHemant Agrawal 29595964d36aSSachin Saxena return ret; 2960c147eae0SHemant Agrawal } 2961c147eae0SHemant Agrawal 2962c147eae0SHemant Agrawal static struct rte_dpaa2_driver rte_dpaa2_pmd = { 296392b7e33eSHemant Agrawal .drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA, 2964bad555dfSShreyansh Jain .drv_type = DPAA2_ETH, 2965c147eae0SHemant Agrawal .probe = rte_dpaa2_probe, 2966c147eae0SHemant Agrawal .remove = rte_dpaa2_remove, 2967c147eae0SHemant Agrawal }; 2968c147eae0SHemant Agrawal 29694ed8a733SVanshika Shukla RTE_PMD_REGISTER_DPAA2(NET_DPAA2_PMD_DRIVER_NAME, rte_dpaa2_pmd); 29704ed8a733SVanshika Shukla RTE_PMD_REGISTER_PARAM_STRING(NET_DPAA2_PMD_DRIVER_NAME, 297120191ab3SNipun Gupta DRIVER_LOOPBACK_MODE "=<int> " 29728d21c563SHemant Agrawal DRIVER_NO_PREFETCH_MODE "=<int>" 29734690a611SNipun Gupta DRIVER_TX_CONF "=<int>" 29744690a611SNipun Gupta DRIVER_ERROR_QUEUE "=<int>"); 2975eeded204SDavid Marchand 
RTE_LOG_REGISTER_DEFAULT(dpaa2_logtype_pmd, NOTICE);
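/*
 * Usage sketch (illustrative only, not part of the driver): the devargs
 * registered above are parsed by dpaa2_get_devargs()/check_devargs_handler(),
 * which only accept the value "1". Assuming a DPNI object named dpni.1 on
 * the fslmc bus (the object name and argv layout below are example
 * assumptions, not taken from this file), loopback mode could be requested
 * from an application roughly as follows.
 */
#include <rte_common.h>
#include <rte_eal.h>

static int
example_eal_init_with_dpaa2_devargs(void)
{
	char *argv[] = {
		"app",
		/* Any of drv_loopback, drv_no_prefetch, drv_tx_conf or
		 * drv_err_queue can be passed the same way; only "1" is
		 * accepted as a value.
		 */
		"-a", "fslmc:dpni.1,drv_loopback=1",
	};

	/* rte_eal_init() hands the device string to the fslmc bus, which
	 * later exposes it to dpaa2_dev_init() via dev->devargs.
	 */
	return rte_eal_init(RTE_DIM(argv), argv);
}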