xref: /dpdk/drivers/net/dpaa/dpaa_ethdev.c (revision a0edbb8a8e521e0720d31fd910bd9dce41874d9c)
1d81734caSHemant Agrawal /* SPDX-License-Identifier: BSD-3-Clause
2ff9e112dSShreyansh Jain  *
3ff9e112dSShreyansh Jain  *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
458e0420fSVanshika Shukla  *   Copyright 2017-2020,2022-2024 NXP
5ff9e112dSShreyansh Jain  *
6ff9e112dSShreyansh Jain  */
7ff9e112dSShreyansh Jain /* System headers */
8ff9e112dSShreyansh Jain #include <stdio.h>
9ff9e112dSShreyansh Jain #include <inttypes.h>
10ff9e112dSShreyansh Jain #include <unistd.h>
11ff9e112dSShreyansh Jain #include <limits.h>
12ff9e112dSShreyansh Jain #include <sched.h>
13ff9e112dSShreyansh Jain #include <signal.h>
14ff9e112dSShreyansh Jain #include <pthread.h>
15ff9e112dSShreyansh Jain #include <sys/types.h>
16ff9e112dSShreyansh Jain #include <sys/syscall.h>
17ee0fa755SRohit Raj #include <sys/ioctl.h>
18ff9e112dSShreyansh Jain 
196723c0fcSBruce Richardson #include <rte_string_fns.h>
20ff9e112dSShreyansh Jain #include <rte_byteorder.h>
21ff9e112dSShreyansh Jain #include <rte_common.h>
22ff9e112dSShreyansh Jain #include <rte_interrupts.h>
23ff9e112dSShreyansh Jain #include <rte_log.h>
24ff9e112dSShreyansh Jain #include <rte_debug.h>
25ff9e112dSShreyansh Jain #include <rte_pci.h>
26ff9e112dSShreyansh Jain #include <rte_atomic.h>
27ff9e112dSShreyansh Jain #include <rte_branch_prediction.h>
28ff9e112dSShreyansh Jain #include <rte_memory.h>
29ff9e112dSShreyansh Jain #include <rte_tailq.h>
30ff9e112dSShreyansh Jain #include <rte_eal.h>
31ff9e112dSShreyansh Jain #include <rte_alarm.h>
32ff9e112dSShreyansh Jain #include <rte_ether.h>
3358e0420fSVanshika Shukla #include <rte_kvargs.h>
34df96fd0dSBruce Richardson #include <ethdev_driver.h>
35ff9e112dSShreyansh Jain #include <rte_malloc.h>
36ff9e112dSShreyansh Jain #include <rte_ring.h>
37ff9e112dSShreyansh Jain 
38a2f1da7dSDavid Marchand #include <bus_dpaa_driver.h>
39ff9e112dSShreyansh Jain #include <rte_dpaa_logs.h>
4037f9b54bSShreyansh Jain #include <dpaa_mempool.h>
41ff9e112dSShreyansh Jain 
42ff9e112dSShreyansh Jain #include <dpaa_ethdev.h>
4337f9b54bSShreyansh Jain #include <dpaa_rxtx.h>
444defbc8cSSachin Saxena #include <dpaa_flow.h>
458c3495f5SHemant Agrawal #include <rte_pmd_dpaa.h>
4637f9b54bSShreyansh Jain 
4737f9b54bSShreyansh Jain #include <fsl_usd.h>
4837f9b54bSShreyansh Jain #include <fsl_qman.h>
4937f9b54bSShreyansh Jain #include <fsl_bman.h>
5037f9b54bSShreyansh Jain #include <fsl_fman.h>
512aa10990SRohit Raj #include <process.h>
5277393f56SSachin Saxena #include <fmlib/fm_ext.h>
53ff9e112dSShreyansh Jain 
/* devargs key used to enable IEEE 1588 (PTP) support for this driver */
#define DRIVER_IEEE1588        "drv_ieee1588"
#define CHECK_INTERVAL         100  /* 100ms */
#define MAX_REPEAT_TIME        90   /* 9s (90 * 100ms) in total */

/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
		RTE_ETH_RX_OFFLOAD_SCATTER;

/* Rx offloads which cannot be disabled (always on in hardware) */
static uint64_t dev_rx_offloads_nodis =
		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_RSS_HASH;

/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
		RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

/* Tx offloads which cannot be disabled (always on in hardware) */
static uint64_t dev_tx_offloads_nodis =
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;

/* Keep track of whether QMAN and BMAN have been globally initialized */
static int is_global_init;
static int fmc_q = 1;	/* Indicates the use of static fmc for distribution */
static int default_q;	/* use default queue - FMC is not executed */
int dpaa_ieee_1588;	/* used to indicate if IEEE 1588 is enabled for the driver */
/* At present we only allow up to 4 push mode queues by default - as each of
 * these queues needs a dedicated portal and we are short of portals.
 */
#define DPAA_MAX_PUSH_MODE_QUEUE       8
#define DPAA_DEFAULT_PUSH_MODE_QUEUE   4

static int dpaa_push_mode_max_queue = DPAA_DEFAULT_PUSH_MODE_QUEUE;
static int dpaa_push_queue_idx; /* Queue index which are in push mode */


/* Per RX FQ Taildrop in frame count */
static unsigned int td_threshold = CGR_RX_PERFQ_THRESH;

/* Per TX FQ Taildrop in frame count, disabled by default */
static unsigned int td_tx_threshold;
1049124e65dSGagandeep Singh 
105b21ed3e2SHemant Agrawal struct rte_dpaa_xstats_name_off {
106b21ed3e2SHemant Agrawal 	char name[RTE_ETH_XSTATS_NAME_SIZE];
107b21ed3e2SHemant Agrawal 	uint32_t offset;
108b21ed3e2SHemant Agrawal };
109b21ed3e2SHemant Agrawal 
110b21ed3e2SHemant Agrawal static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
111b21ed3e2SHemant Agrawal 	{"rx_align_err",
112b21ed3e2SHemant Agrawal 		offsetof(struct dpaa_if_stats, raln)},
113b21ed3e2SHemant Agrawal 	{"rx_valid_pause",
114b21ed3e2SHemant Agrawal 		offsetof(struct dpaa_if_stats, rxpf)},
115b21ed3e2SHemant Agrawal 	{"rx_fcs_err",
116b21ed3e2SHemant Agrawal 		offsetof(struct dpaa_if_stats, rfcs)},
117b21ed3e2SHemant Agrawal 	{"rx_vlan_frame",
118b21ed3e2SHemant Agrawal 		offsetof(struct dpaa_if_stats, rvlan)},
119b21ed3e2SHemant Agrawal 	{"rx_frame_err",
120b21ed3e2SHemant Agrawal 		offsetof(struct dpaa_if_stats, rerr)},
121b21ed3e2SHemant Agrawal 	{"rx_drop_err",
122b21ed3e2SHemant Agrawal 		offsetof(struct dpaa_if_stats, rdrp)},
123b21ed3e2SHemant Agrawal 	{"rx_undersized",
124b21ed3e2SHemant Agrawal 		offsetof(struct dpaa_if_stats, rund)},
125b21ed3e2SHemant Agrawal 	{"rx_oversize_err",
126b21ed3e2SHemant Agrawal 		offsetof(struct dpaa_if_stats, rovr)},
127b21ed3e2SHemant Agrawal 	{"rx_fragment_pkt",
128b21ed3e2SHemant Agrawal 		offsetof(struct dpaa_if_stats, rfrg)},
129b21ed3e2SHemant Agrawal 	{"tx_valid_pause",
130b21ed3e2SHemant Agrawal 		offsetof(struct dpaa_if_stats, txpf)},
131b21ed3e2SHemant Agrawal 	{"tx_fcs_err",
132b21ed3e2SHemant Agrawal 		offsetof(struct dpaa_if_stats, terr)},
133b21ed3e2SHemant Agrawal 	{"tx_vlan_frame",
134b21ed3e2SHemant Agrawal 		offsetof(struct dpaa_if_stats, tvlan)},
135b21ed3e2SHemant Agrawal 	{"rx_undersized",
136b21ed3e2SHemant Agrawal 		offsetof(struct dpaa_if_stats, tund)},
137d2536b00SHemant Agrawal 	{"rx_frame_counter",
138d2536b00SHemant Agrawal 		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfrc)},
139d2536b00SHemant Agrawal 	{"rx_bad_frames_count",
140d2536b00SHemant Agrawal 		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfbc)},
141d2536b00SHemant Agrawal 	{"rx_large_frames_count",
142d2536b00SHemant Agrawal 		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rlfc)},
143d2536b00SHemant Agrawal 	{"rx_filter_frames_count",
144d2536b00SHemant Agrawal 		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rffc)},
145d2536b00SHemant Agrawal 	{"rx_frame_discrad_count",
146d2536b00SHemant Agrawal 		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfdc)},
147d2536b00SHemant Agrawal 	{"rx_frame_list_dma_err_count",
148d2536b00SHemant Agrawal 		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfldec)},
149d2536b00SHemant Agrawal 	{"rx_out_of_buffer_discard ",
150d2536b00SHemant Agrawal 		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rodc)},
151d2536b00SHemant Agrawal 	{"rx_buf_diallocate",
152d2536b00SHemant Agrawal 		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rbdc)},
153b21ed3e2SHemant Agrawal };
154b21ed3e2SHemant Agrawal 
static struct rte_dpaa_driver rte_dpaa_pmd;
/* Set non-zero once a DPAA device has been validated during probe —
 * presumably consumed by the Rx/Tx paths; TODO confirm against users.
 */
int dpaa_valid_dev;
/* Mempool shared by all ports — name suggests it backs Tx scatter-gather
 * tables; verify against the Tx path before relying on that.
 */
struct rte_mempool *dpaa_tx_sg_pool;

/* Forward declarations for ops referenced before their definitions */
static int
dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);

static int dpaa_eth_link_update(struct rte_eth_dev *dev,
				int wait_to_complete __rte_unused);

static void dpaa_interrupt_handler(void *param);
1662aa10990SRohit Raj 
1675e745593SSunil Kumar Kori static inline void
1685e745593SSunil Kumar Kori dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
1695e745593SSunil Kumar Kori {
1705e745593SSunil Kumar Kori 	memset(opts, 0, sizeof(struct qm_mcc_initfq));
1715e745593SSunil Kumar Kori 	opts->we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
1725e745593SSunil Kumar Kori 	opts->fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
1735e745593SSunil Kumar Kori 			   QM_FQCTRL_PREFERINCACHE;
1745e745593SSunil Kumar Kori 	opts->fqd.context_a.stashing.exclusive = 0;
1755e745593SSunil Kumar Kori 	if (dpaa_svr_family != SVR_LS1046A_FAMILY)
1765e745593SSunil Kumar Kori 		opts->fqd.context_a.stashing.annotation_cl =
1775e745593SSunil Kumar Kori 						DPAA_IF_RX_ANNOTATION_STASH;
1785e745593SSunil Kumar Kori 	opts->fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
1795e745593SSunil Kumar Kori 	opts->fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;
1805e745593SSunil Kumar Kori }
1815e745593SSunil Kumar Kori 
182ff9e112dSShreyansh Jain static int
1830cbec027SShreyansh Jain dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1840cbec027SShreyansh Jain {
18535b2d13fSOlivier Matz 	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
1869658ac3aSAshish Jain 				+ VLAN_TAG_SIZE;
18755576ac2SHemant Agrawal 	uint32_t buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
188ee0fa755SRohit Raj 	struct fman_if *fif = dev->process_private;
1890cbec027SShreyansh Jain 
1900cbec027SShreyansh Jain 	PMD_INIT_FUNC_TRACE();
1910cbec027SShreyansh Jain 
192ee0fa755SRohit Raj 	if (fif->is_shared_mac) {
193ee0fa755SRohit Raj 		DPAA_PMD_ERR("Cannot configure mtu from DPDK in VSP mode.");
194ee0fa755SRohit Raj 		return -ENOTSUP;
195ee0fa755SRohit Raj 	}
196ee0fa755SRohit Raj 
19755576ac2SHemant Agrawal 	/*
19855576ac2SHemant Agrawal 	 * Refuse mtu that requires the support of scattered packets
19955576ac2SHemant Agrawal 	 * when this feature has not been enabled before.
20055576ac2SHemant Agrawal 	 */
20155576ac2SHemant Agrawal 	if (dev->data->min_rx_buf_size &&
20255576ac2SHemant Agrawal 		!dev->data->scattered_rx && frame_size > buffsz) {
20355576ac2SHemant Agrawal 		DPAA_PMD_ERR("SG not enabled, will not fit in one buffer");
20455576ac2SHemant Agrawal 		return -EINVAL;
20555576ac2SHemant Agrawal 	}
20655576ac2SHemant Agrawal 
20755576ac2SHemant Agrawal 	/* check <seg size> * <max_seg>  >= max_frame */
20855576ac2SHemant Agrawal 	if (dev->data->min_rx_buf_size && dev->data->scattered_rx &&
20955576ac2SHemant Agrawal 		(frame_size > buffsz * DPAA_SGT_MAX_ENTRIES)) {
21055576ac2SHemant Agrawal 		DPAA_PMD_ERR("Too big to fit for Max SG list %d",
21155576ac2SHemant Agrawal 				buffsz * DPAA_SGT_MAX_ENTRIES);
21255576ac2SHemant Agrawal 		return -EINVAL;
21355576ac2SHemant Agrawal 	}
21455576ac2SHemant Agrawal 
2156b10d1f7SNipun Gupta 	fman_if_set_maxfrm(dev->process_private, frame_size);
2160cbec027SShreyansh Jain 
2170cbec027SShreyansh Jain 	return 0;
2180cbec027SShreyansh Jain }
2190cbec027SShreyansh Jain 
2200cbec027SShreyansh Jain static int
22116e2c27fSSunil Kumar Kori dpaa_eth_dev_configure(struct rte_eth_dev *dev)
222ff9e112dSShreyansh Jain {
22316e2c27fSSunil Kumar Kori 	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
22416e2c27fSSunil Kumar Kori 	uint64_t rx_offloads = eth_conf->rxmode.offloads;
22516e2c27fSSunil Kumar Kori 	uint64_t tx_offloads = eth_conf->txmode.offloads;
226953b6fedSNipun Gupta 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
2272aa10990SRohit Raj 	struct rte_device *rdev = dev->device;
2287a292619SRohit Raj 	struct rte_eth_link *link = &dev->data->dev_link;
2292aa10990SRohit Raj 	struct rte_dpaa_device *dpaa_dev;
2302aa10990SRohit Raj 	struct fman_if *fif = dev->process_private;
2312aa10990SRohit Raj 	struct __fman_if *__fif;
2322aa10990SRohit Raj 	struct rte_intr_handle *intr_handle;
2331bb4a528SFerruh Yigit 	uint32_t max_rx_pktlen;
2347a292619SRohit Raj 	int speed, duplex;
235ee0fa755SRohit Raj 	int ret, rx_status, socket_fd;
236ee0fa755SRohit Raj 	struct ifreq ifr;
2379658ac3aSAshish Jain 
238ff9e112dSShreyansh Jain 	PMD_INIT_FUNC_TRACE();
239ff9e112dSShreyansh Jain 
2402aa10990SRohit Raj 	dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
241d61138d4SHarman Kalra 	intr_handle = dpaa_dev->intr_handle;
2422aa10990SRohit Raj 	__fif = container_of(fif, struct __fman_if, __if);
2432aa10990SRohit Raj 
244953b6fedSNipun Gupta 	/* Check if interface is enabled in case of shared MAC */
245953b6fedSNipun Gupta 	if (fif->is_shared_mac) {
246953b6fedSNipun Gupta 		rx_status = fman_if_get_rx_status(fif);
247953b6fedSNipun Gupta 		if (!rx_status) {
248953b6fedSNipun Gupta 			DPAA_PMD_ERR("%s Interface not enabled in kernel!",
249953b6fedSNipun Gupta 				     dpaa_intf->name);
250953b6fedSNipun Gupta 			return -EHOSTDOWN;
251953b6fedSNipun Gupta 		}
252ee0fa755SRohit Raj 
253ee0fa755SRohit Raj 		socket_fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
254ee0fa755SRohit Raj 		if (socket_fd == -1) {
255ee0fa755SRohit Raj 			DPAA_PMD_ERR("Cannot open IF socket");
256ee0fa755SRohit Raj 			return -errno;
257ee0fa755SRohit Raj 		}
258ee0fa755SRohit Raj 		strncpy(ifr.ifr_name, dpaa_intf->name, IFNAMSIZ - 1);
259ee0fa755SRohit Raj 
260ee0fa755SRohit Raj 		if (ioctl(socket_fd, SIOCGIFMTU, &ifr) < 0) {
261ee0fa755SRohit Raj 			DPAA_PMD_ERR("Cannot get interface mtu");
262ee0fa755SRohit Raj 			close(socket_fd);
263ee0fa755SRohit Raj 			return -errno;
264ee0fa755SRohit Raj 		}
265ee0fa755SRohit Raj 
266ee0fa755SRohit Raj 		close(socket_fd);
267ee0fa755SRohit Raj 		DPAA_PMD_INFO("Using kernel configured mtu size(%u)",
268ee0fa755SRohit Raj 			     ifr.ifr_mtu);
269ee0fa755SRohit Raj 
270ee0fa755SRohit Raj 		eth_conf->rxmode.mtu = ifr.ifr_mtu;
271953b6fedSNipun Gupta 	}
272953b6fedSNipun Gupta 
2731cd8d4ceSHemant Agrawal 	/* Rx offloads which are enabled by default */
274c5836218SSunil Kumar Kori 	if (dev_rx_offloads_nodis & ~rx_offloads) {
2751cd8d4ceSHemant Agrawal 		DPAA_PMD_INFO(
2761cd8d4ceSHemant Agrawal 		"Some of rx offloads enabled by default - requested 0x%" PRIx64
2771cd8d4ceSHemant Agrawal 		" fixed are 0x%" PRIx64,
278c5836218SSunil Kumar Kori 		rx_offloads, dev_rx_offloads_nodis);
27916e2c27fSSunil Kumar Kori 	}
28016e2c27fSSunil Kumar Kori 
2811cd8d4ceSHemant Agrawal 	/* Tx offloads which are enabled by default */
282c5836218SSunil Kumar Kori 	if (dev_tx_offloads_nodis & ~tx_offloads) {
2831cd8d4ceSHemant Agrawal 		DPAA_PMD_INFO(
2841cd8d4ceSHemant Agrawal 		"Some of tx offloads enabled by default - requested 0x%" PRIx64
2851cd8d4ceSHemant Agrawal 		" fixed are 0x%" PRIx64,
286c5836218SSunil Kumar Kori 		tx_offloads, dev_tx_offloads_nodis);
28716e2c27fSSunil Kumar Kori 	}
28816e2c27fSSunil Kumar Kori 
2891bb4a528SFerruh Yigit 	max_rx_pktlen = eth_conf->rxmode.mtu + RTE_ETHER_HDR_LEN +
2901bb4a528SFerruh Yigit 			RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
2911bb4a528SFerruh Yigit 	if (max_rx_pktlen > DPAA_MAX_RX_PKT_LEN) {
292deeec8efSHemant Agrawal 		DPAA_PMD_INFO("enabling jumbo override conf max len=%d "
293deeec8efSHemant Agrawal 			"supported is %d",
2941bb4a528SFerruh Yigit 			max_rx_pktlen, DPAA_MAX_RX_PKT_LEN);
2951bb4a528SFerruh Yigit 		max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
29625f85419SShreyansh Jain 	}
297deeec8efSHemant Agrawal 
298*a0edbb8aSRohit Raj 	if (fif->mac_type != fman_offline)
2991bb4a528SFerruh Yigit 		fman_if_set_maxfrm(dev->process_private, max_rx_pktlen);
30055576ac2SHemant Agrawal 
301295968d1SFerruh Yigit 	if (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
30255576ac2SHemant Agrawal 		DPAA_PMD_DEBUG("enabling scatter mode");
3036b10d1f7SNipun Gupta 		fman_if_set_sg(dev->process_private, 1);
30455576ac2SHemant Agrawal 		dev->data->scattered_rx = 1;
30555576ac2SHemant Agrawal 	}
30655576ac2SHemant Agrawal 
307f5fe3eedSJun Yang 	if (!(default_q || fmc_q)) {
308f5fe3eedSJun Yang 		if (dpaa_fm_config(dev,
309f5fe3eedSJun Yang 			eth_conf->rx_adv_conf.rss_conf.rss_hf)) {
310f5fe3eedSJun Yang 			dpaa_write_fm_config_to_file();
3111ec9a3afSHemant Agrawal 			DPAA_PMD_ERR("FM port configuration: Failed");
312f5fe3eedSJun Yang 			return -1;
313f5fe3eedSJun Yang 		}
314f5fe3eedSJun Yang 		dpaa_write_fm_config_to_file();
315f5fe3eedSJun Yang 	}
316f5fe3eedSJun Yang 
317*a0edbb8aSRohit Raj 	/* Disable interrupt support on offline port*/
318*a0edbb8aSRohit Raj 	if (fif->mac_type == fman_offline)
319*a0edbb8aSRohit Raj 		return 0;
320*a0edbb8aSRohit Raj 
3212aa10990SRohit Raj 	/* if the interrupts were configured on this devices*/
322d61138d4SHarman Kalra 	if (intr_handle && rte_intr_fd_get(intr_handle)) {
3232aa10990SRohit Raj 		if (dev->data->dev_conf.intr_conf.lsc != 0)
3242aa10990SRohit Raj 			rte_intr_callback_register(intr_handle,
3252aa10990SRohit Raj 					   dpaa_interrupt_handler,
3262aa10990SRohit Raj 					   (void *)dev);
3272aa10990SRohit Raj 
328d61138d4SHarman Kalra 		ret = dpaa_intr_enable(__fif->node_name,
329d61138d4SHarman Kalra 				       rte_intr_fd_get(intr_handle));
3302aa10990SRohit Raj 		if (ret) {
3312aa10990SRohit Raj 			if (dev->data->dev_conf.intr_conf.lsc != 0) {
3322aa10990SRohit Raj 				rte_intr_callback_unregister(intr_handle,
3332aa10990SRohit Raj 					dpaa_interrupt_handler,
3342aa10990SRohit Raj 					(void *)dev);
3352aa10990SRohit Raj 				if (ret == EINVAL)
3360fcdbde0SHemant Agrawal 					DPAA_PMD_ERR("Failed to enable interrupt: Not Supported");
3372aa10990SRohit Raj 				else
3380fcdbde0SHemant Agrawal 					DPAA_PMD_ERR("Failed to enable interrupt");
3392aa10990SRohit Raj 			}
3402aa10990SRohit Raj 			dev->data->dev_conf.intr_conf.lsc = 0;
3412aa10990SRohit Raj 			dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
3422aa10990SRohit Raj 		}
3432aa10990SRohit Raj 	}
3447a292619SRohit Raj 
3457a292619SRohit Raj 	/* Wait for link status to get updated */
3467a292619SRohit Raj 	if (!link->link_status)
3477a292619SRohit Raj 		sleep(1);
3487a292619SRohit Raj 
3497a292619SRohit Raj 	/* Configure link only if link is UP*/
3507a292619SRohit Raj 	if (link->link_status) {
351295968d1SFerruh Yigit 		if (eth_conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
3527a292619SRohit Raj 			/* Start autoneg only if link is not in autoneg mode */
3537a292619SRohit Raj 			if (!link->link_autoneg)
3547a292619SRohit Raj 				dpaa_restart_link_autoneg(__fif->node_name);
355295968d1SFerruh Yigit 		} else if (eth_conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
356295968d1SFerruh Yigit 			switch (eth_conf->link_speeds &  RTE_ETH_LINK_SPEED_FIXED) {
357295968d1SFerruh Yigit 			case RTE_ETH_LINK_SPEED_10M_HD:
358295968d1SFerruh Yigit 				speed = RTE_ETH_SPEED_NUM_10M;
359295968d1SFerruh Yigit 				duplex = RTE_ETH_LINK_HALF_DUPLEX;
3607a292619SRohit Raj 				break;
361295968d1SFerruh Yigit 			case RTE_ETH_LINK_SPEED_10M:
362295968d1SFerruh Yigit 				speed = RTE_ETH_SPEED_NUM_10M;
363295968d1SFerruh Yigit 				duplex = RTE_ETH_LINK_FULL_DUPLEX;
3647a292619SRohit Raj 				break;
365295968d1SFerruh Yigit 			case RTE_ETH_LINK_SPEED_100M_HD:
366295968d1SFerruh Yigit 				speed = RTE_ETH_SPEED_NUM_100M;
367295968d1SFerruh Yigit 				duplex = RTE_ETH_LINK_HALF_DUPLEX;
3687a292619SRohit Raj 				break;
369295968d1SFerruh Yigit 			case RTE_ETH_LINK_SPEED_100M:
370295968d1SFerruh Yigit 				speed = RTE_ETH_SPEED_NUM_100M;
371295968d1SFerruh Yigit 				duplex = RTE_ETH_LINK_FULL_DUPLEX;
3727a292619SRohit Raj 				break;
373295968d1SFerruh Yigit 			case RTE_ETH_LINK_SPEED_1G:
374295968d1SFerruh Yigit 				speed = RTE_ETH_SPEED_NUM_1G;
375295968d1SFerruh Yigit 				duplex = RTE_ETH_LINK_FULL_DUPLEX;
3767a292619SRohit Raj 				break;
377295968d1SFerruh Yigit 			case RTE_ETH_LINK_SPEED_2_5G:
378295968d1SFerruh Yigit 				speed = RTE_ETH_SPEED_NUM_2_5G;
379295968d1SFerruh Yigit 				duplex = RTE_ETH_LINK_FULL_DUPLEX;
3807a292619SRohit Raj 				break;
381295968d1SFerruh Yigit 			case RTE_ETH_LINK_SPEED_10G:
382295968d1SFerruh Yigit 				speed = RTE_ETH_SPEED_NUM_10G;
383295968d1SFerruh Yigit 				duplex = RTE_ETH_LINK_FULL_DUPLEX;
3847a292619SRohit Raj 				break;
3857a292619SRohit Raj 			default:
386295968d1SFerruh Yigit 				speed = RTE_ETH_SPEED_NUM_NONE;
387295968d1SFerruh Yigit 				duplex = RTE_ETH_LINK_FULL_DUPLEX;
3887a292619SRohit Raj 				break;
3897a292619SRohit Raj 			}
3907a292619SRohit Raj 			/* Set link speed */
3917a292619SRohit Raj 			dpaa_update_link_speed(__fif->node_name, speed, duplex);
3927a292619SRohit Raj 		} else {
3937a292619SRohit Raj 			/* Manual autoneg - custom advertisement speed. */
3940fcdbde0SHemant Agrawal 			DPAA_PMD_ERR("Custom Advertisement speeds not supported");
3957a292619SRohit Raj 		}
3967a292619SRohit Raj 	}
3977a292619SRohit Raj 
398ff9e112dSShreyansh Jain 	return 0;
399ff9e112dSShreyansh Jain }
400ff9e112dSShreyansh Jain 
401a7bdc3bdSShreyansh Jain static const uint32_t *
402ba6a168aSSivaramakrishnan Venkat dpaa_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
403a7bdc3bdSShreyansh Jain {
404a7bdc3bdSShreyansh Jain 	static const uint32_t ptypes[] = {
405a7bdc3bdSShreyansh Jain 		RTE_PTYPE_L2_ETHER,
406ec503d8fSHemant Agrawal 		RTE_PTYPE_L2_ETHER_VLAN,
407ec503d8fSHemant Agrawal 		RTE_PTYPE_L2_ETHER_ARP,
408ec503d8fSHemant Agrawal 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
409ec503d8fSHemant Agrawal 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
410ec503d8fSHemant Agrawal 		RTE_PTYPE_L4_ICMP,
411ec503d8fSHemant Agrawal 		RTE_PTYPE_L4_TCP,
412ec503d8fSHemant Agrawal 		RTE_PTYPE_L4_UDP,
413ec503d8fSHemant Agrawal 		RTE_PTYPE_L4_FRAG,
414a7bdc3bdSShreyansh Jain 		RTE_PTYPE_L4_TCP,
415a7bdc3bdSShreyansh Jain 		RTE_PTYPE_L4_UDP,
416e7524271SGagandeep Singh 		RTE_PTYPE_L4_SCTP,
4172e3ddb56SSivaramakrishnan Venkat 		RTE_PTYPE_TUNNEL_ESP,
418a350a954SHemant Agrawal 		RTE_PTYPE_TUNNEL_GRE,
419a7bdc3bdSShreyansh Jain 	};
420a7bdc3bdSShreyansh Jain 
421a7bdc3bdSShreyansh Jain 	PMD_INIT_FUNC_TRACE();
422a7bdc3bdSShreyansh Jain 
423ba6a168aSSivaramakrishnan Venkat 	if (dev->rx_pkt_burst == dpaa_eth_queue_rx) {
424ba6a168aSSivaramakrishnan Venkat 		*no_of_elements = RTE_DIM(ptypes);
425a7bdc3bdSShreyansh Jain 		return ptypes;
426ba6a168aSSivaramakrishnan Venkat 	}
427a7bdc3bdSShreyansh Jain 	return NULL;
428a7bdc3bdSShreyansh Jain }
429a7bdc3bdSShreyansh Jain 
4302aa10990SRohit Raj static void dpaa_interrupt_handler(void *param)
4312aa10990SRohit Raj {
4322aa10990SRohit Raj 	struct rte_eth_dev *dev = param;
4332aa10990SRohit Raj 	struct rte_device *rdev = dev->device;
4342aa10990SRohit Raj 	struct rte_dpaa_device *dpaa_dev;
4352aa10990SRohit Raj 	struct rte_intr_handle *intr_handle;
4362aa10990SRohit Raj 	uint64_t buf;
4372aa10990SRohit Raj 	int bytes_read;
4382aa10990SRohit Raj 
4392aa10990SRohit Raj 	dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
440d61138d4SHarman Kalra 	intr_handle = dpaa_dev->intr_handle;
4412aa10990SRohit Raj 
442aedd054cSHarman Kalra 	if (rte_intr_fd_get(intr_handle) < 0)
443aedd054cSHarman Kalra 		return;
444aedd054cSHarman Kalra 
445d61138d4SHarman Kalra 	bytes_read = read(rte_intr_fd_get(intr_handle), &buf,
446d61138d4SHarman Kalra 			  sizeof(uint64_t));
4472aa10990SRohit Raj 	if (bytes_read < 0)
4481ec9a3afSHemant Agrawal 		DPAA_PMD_ERR("Error reading eventfd");
4492aa10990SRohit Raj 	dpaa_eth_link_update(dev, 0);
4505723fbedSFerruh Yigit 	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
4512aa10990SRohit Raj }
4522aa10990SRohit Raj 
453ff9e112dSShreyansh Jain static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
454ff9e112dSShreyansh Jain {
45537f9b54bSShreyansh Jain 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
456d2536b00SHemant Agrawal 	struct fman_if *fif = dev->process_private;
457f1d381b4SJie Hai 	uint16_t i;
45837f9b54bSShreyansh Jain 
459ff9e112dSShreyansh Jain 	PMD_INIT_FUNC_TRACE();
460ff9e112dSShreyansh Jain 
461f5fe3eedSJun Yang 	if (!(default_q || fmc_q))
462f5fe3eedSJun Yang 		dpaa_write_fm_config_to_file();
463f5fe3eedSJun Yang 
464ff9e112dSShreyansh Jain 	/* Change tx callback to the real one */
4659124e65dSGagandeep Singh 	if (dpaa_intf->cgr_tx)
4669124e65dSGagandeep Singh 		dev->tx_pkt_burst = dpaa_eth_queue_tx_slow;
4679124e65dSGagandeep Singh 	else
46837f9b54bSShreyansh Jain 		dev->tx_pkt_burst = dpaa_eth_queue_tx;
4699124e65dSGagandeep Singh 
470d2536b00SHemant Agrawal 	fman_if_bmi_stats_enable(fif);
471d2536b00SHemant Agrawal 	fman_if_bmi_stats_reset(fif);
472d2536b00SHemant Agrawal 	fman_if_enable_rx(fif);
473ff9e112dSShreyansh Jain 
474f1d381b4SJie Hai 	for (i = 0; i < dev->data->nb_rx_queues; i++)
475f1d381b4SJie Hai 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
476f1d381b4SJie Hai 	for (i = 0; i < dev->data->nb_tx_queues; i++)
477f1d381b4SJie Hai 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
478f1d381b4SJie Hai 
479ff9e112dSShreyansh Jain 	return 0;
480ff9e112dSShreyansh Jain }
481ff9e112dSShreyansh Jain 
48262024eb8SIvan Ilchenko static int dpaa_eth_dev_stop(struct rte_eth_dev *dev)
483ff9e112dSShreyansh Jain {
4846b10d1f7SNipun Gupta 	struct fman_if *fif = dev->process_private;
485f1d381b4SJie Hai 	uint16_t i;
48637f9b54bSShreyansh Jain 
48737f9b54bSShreyansh Jain 	PMD_INIT_FUNC_TRACE();
488b8f5d2aeSThomas Monjalon 	dev->data->dev_started = 0;
48937f9b54bSShreyansh Jain 
490d2536b00SHemant Agrawal 	if (!fif->is_shared_mac) {
491d2536b00SHemant Agrawal 		fman_if_bmi_stats_disable(fif);
4926b10d1f7SNipun Gupta 		fman_if_disable_rx(fif);
493d2536b00SHemant Agrawal 	}
49437f9b54bSShreyansh Jain 	dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
49562024eb8SIvan Ilchenko 
496f1d381b4SJie Hai 	for (i = 0; i < dev->data->nb_rx_queues; i++)
497f1d381b4SJie Hai 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
498f1d381b4SJie Hai 	for (i = 0; i < dev->data->nb_tx_queues; i++)
499f1d381b4SJie Hai 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
500f1d381b4SJie Hai 
50162024eb8SIvan Ilchenko 	return 0;
502ff9e112dSShreyansh Jain }
503ff9e112dSShreyansh Jain 
/* eth_dev op: close the port and release all per-port resources.
 *
 * Primary-process only.  Tears down in order: FM deconfig (dynamic FMC
 * mode), device stop, link/interrupt unwind (skipped for offline ports),
 * then frees flow-control config, Rx/Tx congestion groups and the queue
 * arrays.  Returns 0 on success (or early exit), -1 if already closed,
 * otherwise the result of dpaa_eth_dev_stop().
 */
static int dpaa_eth_dev_close(struct rte_eth_dev *dev)
{
	struct fman_if *fif = dev->process_private;
	struct __fman_if *__fif;
	struct rte_device *rdev = dev->device;
	struct rte_dpaa_device *dpaa_dev;
	struct rte_intr_handle *intr_handle;
	struct rte_eth_link *link = &dev->data->dev_link;
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	int loop;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Secondary processes must not free primary-owned resources */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (!dpaa_intf) {
		DPAA_PMD_WARN("Already closed or not started");
		return -1;
	}

	/* DPAA FM deconfig */
	if (!(default_q || fmc_q)) {
		if (dpaa_fm_deconfig(dpaa_intf, dev->process_private))
			DPAA_PMD_WARN("DPAA FM deconfig failed");
	}

	dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
	intr_handle = dpaa_dev->intr_handle;
	__fif = container_of(fif, struct __fman_if, __if);

	ret = dpaa_eth_dev_stop(dev);

	/* Offline ports have no link or interrupt state to unwind.
	 * NOTE(review): this early return skips the resource frees below —
	 * presumably offline ports never allocate them; confirm.
	 */
	if (fif->mac_type == fman_offline)
		return 0;

	/* Reset link to autoneg */
	if (link->link_status && !link->link_autoneg)
		dpaa_restart_link_autoneg(__fif->node_name);

	/* Unhook the LSC interrupt if it was enabled at configure time */
	if (intr_handle && rte_intr_fd_get(intr_handle) &&
	    dev->data->dev_conf.intr_conf.lsc != 0) {
		dpaa_intr_disable(__fif->node_name);
		rte_intr_callback_unregister(intr_handle,
					     dpaa_interrupt_handler,
					     (void *)dev);
	}

	/* release configuration memory */
	rte_free(dpaa_intf->fc_conf);

	/* Release RX congestion Groups */
	if (dpaa_intf->cgr_rx) {
		for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++)
			qman_delete_cgr(&dpaa_intf->cgr_rx[loop]);
	}

	rte_free(dpaa_intf->cgr_rx);
	dpaa_intf->cgr_rx = NULL;
	/* Release TX congestion Groups */
	if (dpaa_intf->cgr_tx) {
		for (loop = 0; loop < MAX_DPAA_CORES; loop++)
			qman_delete_cgr(&dpaa_intf->cgr_tx[loop]);
		rte_free(dpaa_intf->cgr_tx);
		dpaa_intf->cgr_tx = NULL;
	}

	rte_free(dpaa_intf->rx_queues);
	dpaa_intf->rx_queues = NULL;

	rte_free(dpaa_intf->tx_queues);
	dpaa_intf->tx_queues = NULL;

	return ret;
}
58037f9b54bSShreyansh Jain 
581cf0fab1dSHemant Agrawal static int
582cf0fab1dSHemant Agrawal dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused,
583cf0fab1dSHemant Agrawal 		     char *fw_version,
584cf0fab1dSHemant Agrawal 		     size_t fw_size)
585cf0fab1dSHemant Agrawal {
586cf0fab1dSHemant Agrawal 	int ret;
587cf0fab1dSHemant Agrawal 	FILE *svr_file = NULL;
588cf0fab1dSHemant Agrawal 	unsigned int svr_ver = 0;
589cf0fab1dSHemant Agrawal 
590cf0fab1dSHemant Agrawal 	PMD_INIT_FUNC_TRACE();
591cf0fab1dSHemant Agrawal 
592cf0fab1dSHemant Agrawal 	svr_file = fopen(DPAA_SOC_ID_FILE, "r");
593cf0fab1dSHemant Agrawal 	if (!svr_file) {
594cf0fab1dSHemant Agrawal 		DPAA_PMD_ERR("Unable to open SoC device");
595cf0fab1dSHemant Agrawal 		return -ENOTSUP; /* Not supported on this infra */
596cf0fab1dSHemant Agrawal 	}
5973b59b73dSHemant Agrawal 	if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
5983b59b73dSHemant Agrawal 		dpaa_svr_family = svr_ver & SVR_MASK;
5993b59b73dSHemant Agrawal 	else
600cf0fab1dSHemant Agrawal 		DPAA_PMD_ERR("Unable to read SoC device");
601cf0fab1dSHemant Agrawal 
602a8e78906SHemant Agrawal 	fclose(svr_file);
603cf0fab1dSHemant Agrawal 
604a8e78906SHemant Agrawal 	ret = snprintf(fw_version, fw_size, "SVR:%x-fman-v%x",
605a8e78906SHemant Agrawal 		       svr_ver, fman_ip_rev);
606d345d6c9SFerruh Yigit 	if (ret < 0)
607d345d6c9SFerruh Yigit 		return -EINVAL;
608a8e78906SHemant Agrawal 
609d345d6c9SFerruh Yigit 	ret += 1; /* add the size of '\0' */
610d345d6c9SFerruh Yigit 	if (fw_size < (size_t)ret)
611cf0fab1dSHemant Agrawal 		return ret;
612cf0fab1dSHemant Agrawal 	else
613cf0fab1dSHemant Agrawal 		return 0;
614cf0fab1dSHemant Agrawal }
615cf0fab1dSHemant Agrawal 
616bdad90d1SIvan Ilchenko static int dpaa_eth_dev_info(struct rte_eth_dev *dev,
617799db456SShreyansh Jain 			     struct rte_eth_dev_info *dev_info)
618799db456SShreyansh Jain {
619799db456SShreyansh Jain 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
6206b10d1f7SNipun Gupta 	struct fman_if *fif = dev->process_private;
621799db456SShreyansh Jain 
62236528452SHemant Agrawal 	DPAA_PMD_DEBUG(": %s", dpaa_intf->name);
623799db456SShreyansh Jain 
624799db456SShreyansh Jain 	dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
625799db456SShreyansh Jain 	dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
626799db456SShreyansh Jain 	dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
627799db456SShreyansh Jain 	dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
628799db456SShreyansh Jain 	dev_info->max_hash_mac_addrs = 0;
629799db456SShreyansh Jain 	dev_info->max_vfs = 0;
630295968d1SFerruh Yigit 	dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
6314fa5e0bbSShreyansh Jain 	dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
632c1752a36SSachin Saxena 
6336b10d1f7SNipun Gupta 	if (fif->mac_type == fman_mac_1g) {
634295968d1SFerruh Yigit 		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
635295968d1SFerruh Yigit 					| RTE_ETH_LINK_SPEED_10M
636295968d1SFerruh Yigit 					| RTE_ETH_LINK_SPEED_100M_HD
637295968d1SFerruh Yigit 					| RTE_ETH_LINK_SPEED_100M
638295968d1SFerruh Yigit 					| RTE_ETH_LINK_SPEED_1G;
6396b10d1f7SNipun Gupta 	} else if (fif->mac_type == fman_mac_2_5g) {
640295968d1SFerruh Yigit 		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
641295968d1SFerruh Yigit 					| RTE_ETH_LINK_SPEED_10M
642295968d1SFerruh Yigit 					| RTE_ETH_LINK_SPEED_100M_HD
643295968d1SFerruh Yigit 					| RTE_ETH_LINK_SPEED_100M
644295968d1SFerruh Yigit 					| RTE_ETH_LINK_SPEED_1G
645295968d1SFerruh Yigit 					| RTE_ETH_LINK_SPEED_2_5G;
6466b10d1f7SNipun Gupta 	} else if (fif->mac_type == fman_mac_10g) {
647295968d1SFerruh Yigit 		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
648295968d1SFerruh Yigit 					| RTE_ETH_LINK_SPEED_10M
649295968d1SFerruh Yigit 					| RTE_ETH_LINK_SPEED_100M_HD
650295968d1SFerruh Yigit 					| RTE_ETH_LINK_SPEED_100M
651295968d1SFerruh Yigit 					| RTE_ETH_LINK_SPEED_1G
652295968d1SFerruh Yigit 					| RTE_ETH_LINK_SPEED_2_5G
653295968d1SFerruh Yigit 					| RTE_ETH_LINK_SPEED_10G;
654*a0edbb8aSRohit Raj 	} else if (fif->mac_type == fman_offline) {
655*a0edbb8aSRohit Raj 		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
656*a0edbb8aSRohit Raj 					| RTE_ETH_LINK_SPEED_10M
657*a0edbb8aSRohit Raj 					| RTE_ETH_LINK_SPEED_100M_HD
658*a0edbb8aSRohit Raj 					| RTE_ETH_LINK_SPEED_100M;
659bdad90d1SIvan Ilchenko 	} else {
660c1752a36SSachin Saxena 		DPAA_PMD_ERR("invalid link_speed: %s, %d",
6616b10d1f7SNipun Gupta 			     dpaa_intf->name, fif->mac_type);
662bdad90d1SIvan Ilchenko 		return -EINVAL;
663bdad90d1SIvan Ilchenko 	}
664c1752a36SSachin Saxena 
665c5836218SSunil Kumar Kori 	dev_info->rx_offload_capa = dev_rx_offloads_sup |
666c5836218SSunil Kumar Kori 					dev_rx_offloads_nodis;
667c5836218SSunil Kumar Kori 	dev_info->tx_offload_capa = dev_tx_offloads_sup |
668c5836218SSunil Kumar Kori 					dev_tx_offloads_nodis;
6692c01a48aSShreyansh Jain 	dev_info->default_rxportconf.burst_size = DPAA_DEF_RX_BURST_SIZE;
6702c01a48aSShreyansh Jain 	dev_info->default_txportconf.burst_size = DPAA_DEF_TX_BURST_SIZE;
671e35ead33SHemant Agrawal 	dev_info->default_rxportconf.nb_queues = 1;
672e35ead33SHemant Agrawal 	dev_info->default_txportconf.nb_queues = 1;
673e35ead33SHemant Agrawal 	dev_info->default_txportconf.ring_size = CGR_TX_CGR_THRESH;
674e35ead33SHemant Agrawal 	dev_info->default_rxportconf.ring_size = CGR_RX_PERFQ_THRESH;
675bdad90d1SIvan Ilchenko 
676bdad90d1SIvan Ilchenko 	return 0;
677799db456SShreyansh Jain }
678799db456SShreyansh Jain 
6792e6f5657SApeksha Gupta static int
6802e6f5657SApeksha Gupta dpaa_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
6812e6f5657SApeksha Gupta 			__rte_unused uint16_t queue_id,
6822e6f5657SApeksha Gupta 			struct rte_eth_burst_mode *mode)
6832e6f5657SApeksha Gupta {
6842e6f5657SApeksha Gupta 	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
6852e6f5657SApeksha Gupta 	int ret = -EINVAL;
6862e6f5657SApeksha Gupta 	unsigned int i;
6872e6f5657SApeksha Gupta 	const struct burst_info {
6882e6f5657SApeksha Gupta 		uint64_t flags;
6892e6f5657SApeksha Gupta 		const char *output;
6902e6f5657SApeksha Gupta 	} rx_offload_map[] = {
691295968d1SFerruh Yigit 			{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"},
692295968d1SFerruh Yigit 			{RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
693295968d1SFerruh Yigit 			{RTE_ETH_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
694295968d1SFerruh Yigit 			{RTE_ETH_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
695295968d1SFerruh Yigit 			{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
696295968d1SFerruh Yigit 			{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"}
6972e6f5657SApeksha Gupta 	};
6982e6f5657SApeksha Gupta 
6992e6f5657SApeksha Gupta 	/* Update Rx offload info */
7002e6f5657SApeksha Gupta 	for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
7012e6f5657SApeksha Gupta 		if (eth_conf->rxmode.offloads & rx_offload_map[i].flags) {
7022e6f5657SApeksha Gupta 			snprintf(mode->info, sizeof(mode->info), "%s",
7032e6f5657SApeksha Gupta 				rx_offload_map[i].output);
7042e6f5657SApeksha Gupta 			ret = 0;
7052e6f5657SApeksha Gupta 			break;
7062e6f5657SApeksha Gupta 		}
7072e6f5657SApeksha Gupta 	}
7082e6f5657SApeksha Gupta 	return ret;
7092e6f5657SApeksha Gupta }
7102e6f5657SApeksha Gupta 
7112e6f5657SApeksha Gupta static int
7122e6f5657SApeksha Gupta dpaa_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
7132e6f5657SApeksha Gupta 			__rte_unused uint16_t queue_id,
7142e6f5657SApeksha Gupta 			struct rte_eth_burst_mode *mode)
7152e6f5657SApeksha Gupta {
7162e6f5657SApeksha Gupta 	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
7172e6f5657SApeksha Gupta 	int ret = -EINVAL;
7182e6f5657SApeksha Gupta 	unsigned int i;
7192e6f5657SApeksha Gupta 	const struct burst_info {
7202e6f5657SApeksha Gupta 		uint64_t flags;
7212e6f5657SApeksha Gupta 		const char *output;
7222e6f5657SApeksha Gupta 	} tx_offload_map[] = {
723295968d1SFerruh Yigit 			{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
724295968d1SFerruh Yigit 			{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
725295968d1SFerruh Yigit 			{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
726295968d1SFerruh Yigit 			{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
727295968d1SFerruh Yigit 			{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
728295968d1SFerruh Yigit 			{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
729295968d1SFerruh Yigit 			{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
730295968d1SFerruh Yigit 			{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
7312e6f5657SApeksha Gupta 	};
7322e6f5657SApeksha Gupta 
7332e6f5657SApeksha Gupta 	/* Update Tx offload info */
7342e6f5657SApeksha Gupta 	for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
7352e6f5657SApeksha Gupta 		if (eth_conf->txmode.offloads & tx_offload_map[i].flags) {
7362e6f5657SApeksha Gupta 			snprintf(mode->info, sizeof(mode->info), "%s",
7372e6f5657SApeksha Gupta 				tx_offload_map[i].output);
7382e6f5657SApeksha Gupta 			ret = 0;
7392e6f5657SApeksha Gupta 			break;
7402e6f5657SApeksha Gupta 		}
7412e6f5657SApeksha Gupta 	}
7422e6f5657SApeksha Gupta 	return ret;
7432e6f5657SApeksha Gupta }
7442e6f5657SApeksha Gupta 
/* ethdev link_update callback: refresh dev->data->dev_link for this port.
 *
 * Two paths:
 *  - If the port advertises link-state interrupts (and is not an offline
 *    port), the real status is queried via dpaa_get_link_status(), retrying
 *    while the link stays down when @wait_to_complete is set.
 *  - Otherwise the interface's "valid" flag is reported as the status; for
 *    offline ports a fixed speed/duplex is filled in.
 *
 * With ioctl interface versions older than 2, speed/duplex/autoneg are not
 * provided by the query, so they are synthesized from the MAC type.
 *
 * Returns 0 on success, or the error from dpaa_get_link_status().
 */
static int dpaa_eth_link_update(struct rte_eth_dev *dev,
				int wait_to_complete)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct rte_eth_link *link = &dev->data->dev_link;
	struct fman_if *fif = dev->process_private;
	struct __fman_if *__fif = container_of(fif, struct __fman_if, __if);
	int ret, ioctl_version;
	uint8_t count;

	PMD_INIT_FUNC_TRACE();

	ioctl_version = dpaa_get_ioctl_version_number();

	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC &&
	    fif->mac_type != fman_offline) {
		/* Poll the kernel; retry up to MAX_REPEAT_TIME times while
		 * the link is down if the caller asked us to wait.
		 */
		for (count = 0; count <= MAX_REPEAT_TIME; count++) {
			ret = dpaa_get_link_status(__fif->node_name, link);
			if (ret)
				return ret;
			if (link->link_status == RTE_ETH_LINK_DOWN &&
			    wait_to_complete)
				rte_delay_ms(CHECK_INTERVAL);
			else
				break;
		}
	} else {
		link->link_status = dpaa_intf->valid;
		if (fif->mac_type == fman_offline) {
			/* Max supported rate for O/H port is 3.75Mpps */
			link->link_speed = RTE_ETH_SPEED_NUM_2_5G;
			link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
		}
	}

	if (ioctl_version < 2) {
		/* Legacy ioctl: derive speed/duplex/autoneg from MAC type. */
		link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
		link->link_autoneg = RTE_ETH_LINK_AUTONEG;

		if (fif->mac_type == fman_mac_1g)
			link->link_speed = RTE_ETH_SPEED_NUM_1G;
		else if (fif->mac_type == fman_mac_2_5g)
			link->link_speed = RTE_ETH_SPEED_NUM_2_5G;
		else if (fif->mac_type == fman_mac_10g)
			link->link_speed = RTE_ETH_SPEED_NUM_10G;
		else
			DPAA_PMD_ERR("invalid link_speed: %s, %d",
				     dpaa_intf->name, fif->mac_type);
	}

	DPAA_PMD_INFO("Port %d Link is %s", dev->data->port_id,
		      link->link_status ? "Up" : "Down");
	return 0;
}
799e124a69fSShreyansh Jain 
800d5b0924bSMatan Azrad static int dpaa_eth_stats_get(struct rte_eth_dev *dev,
801e1ad3a05SShreyansh Jain 			       struct rte_eth_stats *stats)
802e1ad3a05SShreyansh Jain {
803e1ad3a05SShreyansh Jain 	PMD_INIT_FUNC_TRACE();
804e1ad3a05SShreyansh Jain 
8056b10d1f7SNipun Gupta 	fman_if_stats_get(dev->process_private, stats);
806d5b0924bSMatan Azrad 	return 0;
807e1ad3a05SShreyansh Jain }
808e1ad3a05SShreyansh Jain 
8099970a9adSIgor Romanov static int dpaa_eth_stats_reset(struct rte_eth_dev *dev)
810e1ad3a05SShreyansh Jain {
811e1ad3a05SShreyansh Jain 	PMD_INIT_FUNC_TRACE();
812e1ad3a05SShreyansh Jain 
8136b10d1f7SNipun Gupta 	fman_if_stats_reset(dev->process_private);
814d2536b00SHemant Agrawal 	fman_if_bmi_stats_reset(dev->process_private);
8159970a9adSIgor Romanov 
8169970a9adSIgor Romanov 	return 0;
817e1ad3a05SShreyansh Jain }
81895ef603dSShreyansh Jain 
/* ethdev xstats_get callback.
 *
 * The xstats table is laid out as the MAC/FMan statistics words followed by
 * the Rx BMI counters: the first loop fills the MAC entries using the
 * per-stat byte offsets from dpaa_xstats_strings, the second appends the
 * BMI counters (the index 'i' carries over between the loops).
 *
 * NOTE(review): the bound 'num - (bmi_count - 1)' implies exactly one of
 * the dpaa_xstats_strings entries covers the first BMI counter — confirm
 * against the table definition, which is outside this view.
 *
 * Returns the number of entries filled, 'num' if @n is too small, or 0
 * when @xstats is NULL.
 */
static int
dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		    unsigned int n)
{
	unsigned int i = 0, j, num = RTE_DIM(dpaa_xstats_strings);
	uint64_t values[sizeof(struct dpaa_if_stats) / 8];
	unsigned int bmi_count = sizeof(struct dpaa_if_rx_bmi_stats) / 4;

	if (n < num)
		return num;

	if (xstats == NULL)
		return 0;

	fman_if_stats_get_all(dev->process_private, values,
			      sizeof(struct dpaa_if_stats) / 8);

	/* MAC/FMan stats: offsets are in bytes, values[] holds 64-bit words. */
	for (i = 0; i < num - (bmi_count - 1); i++) {
		xstats[i].id = i;
		xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
	}
	/* Rx BMI counters: refetched into values[] (MAC words already
	 * consumed above) and appended after the MAC entries.
	 */
	fman_if_bmi_stats_get_all(dev->process_private, values);
	for (j = 0; i < num; i++, j++) {
		xstats[i].id = i;
		xstats[i].value = values[j];
	}

	return i;
}
848b21ed3e2SHemant Agrawal 
849b21ed3e2SHemant Agrawal static int
850b21ed3e2SHemant Agrawal dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
851b21ed3e2SHemant Agrawal 		      struct rte_eth_xstat_name *xstats_names,
8525c3fc73eSHemant Agrawal 		      unsigned int limit)
853b21ed3e2SHemant Agrawal {
854b21ed3e2SHemant Agrawal 	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
855b21ed3e2SHemant Agrawal 
8565c3fc73eSHemant Agrawal 	if (limit < stat_cnt)
8575c3fc73eSHemant Agrawal 		return stat_cnt;
8585c3fc73eSHemant Agrawal 
859b21ed3e2SHemant Agrawal 	if (xstats_names != NULL)
860b21ed3e2SHemant Agrawal 		for (i = 0; i < stat_cnt; i++)
8616723c0fcSBruce Richardson 			strlcpy(xstats_names[i].name,
8626723c0fcSBruce Richardson 				dpaa_xstats_strings[i].name,
8636723c0fcSBruce Richardson 				sizeof(xstats_names[i].name));
864b21ed3e2SHemant Agrawal 
865b21ed3e2SHemant Agrawal 	return stat_cnt;
866b21ed3e2SHemant Agrawal }
867b21ed3e2SHemant Agrawal 
868b21ed3e2SHemant Agrawal static int
869b21ed3e2SHemant Agrawal dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
870b21ed3e2SHemant Agrawal 		      uint64_t *values, unsigned int n)
871b21ed3e2SHemant Agrawal {
872d2536b00SHemant Agrawal 	unsigned int i, j, stat_cnt = RTE_DIM(dpaa_xstats_strings);
873b21ed3e2SHemant Agrawal 	uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8];
874d2536b00SHemant Agrawal 	unsigned int bmi_count = sizeof(struct dpaa_if_rx_bmi_stats) / 4;
875b21ed3e2SHemant Agrawal 
876b21ed3e2SHemant Agrawal 	if (!ids) {
877b21ed3e2SHemant Agrawal 		if (n < stat_cnt)
878b21ed3e2SHemant Agrawal 			return stat_cnt;
879b21ed3e2SHemant Agrawal 
880b21ed3e2SHemant Agrawal 		if (!values)
881b21ed3e2SHemant Agrawal 			return 0;
882b21ed3e2SHemant Agrawal 
8836b10d1f7SNipun Gupta 		fman_if_stats_get_all(dev->process_private, values_copy,
8845c3fc73eSHemant Agrawal 				      sizeof(struct dpaa_if_stats) / 8);
885b21ed3e2SHemant Agrawal 
886d2536b00SHemant Agrawal 		for (i = 0; i < stat_cnt - (bmi_count - 1); i++)
887b21ed3e2SHemant Agrawal 			values[i] =
888b21ed3e2SHemant Agrawal 				values_copy[dpaa_xstats_strings[i].offset / 8];
889b21ed3e2SHemant Agrawal 
890d2536b00SHemant Agrawal 		fman_if_bmi_stats_get_all(dev->process_private, values);
891d2536b00SHemant Agrawal 		for (j = 0; i < stat_cnt; i++, j++)
892d2536b00SHemant Agrawal 			values[i] = values_copy[j];
893d2536b00SHemant Agrawal 
894b21ed3e2SHemant Agrawal 		return stat_cnt;
895b21ed3e2SHemant Agrawal 	}
896b21ed3e2SHemant Agrawal 
897b21ed3e2SHemant Agrawal 	dpaa_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);
898b21ed3e2SHemant Agrawal 
899b21ed3e2SHemant Agrawal 	for (i = 0; i < n; i++) {
900b21ed3e2SHemant Agrawal 		if (ids[i] >= stat_cnt) {
901b21ed3e2SHemant Agrawal 			DPAA_PMD_ERR("id value isn't valid");
902b21ed3e2SHemant Agrawal 			return -1;
903b21ed3e2SHemant Agrawal 		}
904b21ed3e2SHemant Agrawal 		values[i] = values_copy[ids[i]];
905b21ed3e2SHemant Agrawal 	}
906b21ed3e2SHemant Agrawal 	return n;
907b21ed3e2SHemant Agrawal }
908b21ed3e2SHemant Agrawal 
909b21ed3e2SHemant Agrawal static int
910b21ed3e2SHemant Agrawal dpaa_xstats_get_names_by_id(
911b21ed3e2SHemant Agrawal 	struct rte_eth_dev *dev,
912b21ed3e2SHemant Agrawal 	const uint64_t *ids,
9138c9f976fSAndrew Rybchenko 	struct rte_eth_xstat_name *xstats_names,
914b21ed3e2SHemant Agrawal 	unsigned int limit)
915b21ed3e2SHemant Agrawal {
916b21ed3e2SHemant Agrawal 	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
917b21ed3e2SHemant Agrawal 	struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
918b21ed3e2SHemant Agrawal 
919b21ed3e2SHemant Agrawal 	if (!ids)
920b21ed3e2SHemant Agrawal 		return dpaa_xstats_get_names(dev, xstats_names, limit);
921b21ed3e2SHemant Agrawal 
922b21ed3e2SHemant Agrawal 	dpaa_xstats_get_names(dev, xstats_names_copy, limit);
923b21ed3e2SHemant Agrawal 
924b21ed3e2SHemant Agrawal 	for (i = 0; i < limit; i++) {
925b21ed3e2SHemant Agrawal 		if (ids[i] >= stat_cnt) {
926b21ed3e2SHemant Agrawal 			DPAA_PMD_ERR("id value isn't valid");
927b21ed3e2SHemant Agrawal 			return -1;
928b21ed3e2SHemant Agrawal 		}
929b21ed3e2SHemant Agrawal 		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
930b21ed3e2SHemant Agrawal 	}
931b21ed3e2SHemant Agrawal 	return limit;
932b21ed3e2SHemant Agrawal }
933b21ed3e2SHemant Agrawal 
9349039c812SAndrew Rybchenko static int dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
93595ef603dSShreyansh Jain {
93695ef603dSShreyansh Jain 	PMD_INIT_FUNC_TRACE();
93795ef603dSShreyansh Jain 
9386b10d1f7SNipun Gupta 	fman_if_promiscuous_enable(dev->process_private);
9399039c812SAndrew Rybchenko 
9409039c812SAndrew Rybchenko 	return 0;
94195ef603dSShreyansh Jain }
94295ef603dSShreyansh Jain 
9439039c812SAndrew Rybchenko static int dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
94495ef603dSShreyansh Jain {
94595ef603dSShreyansh Jain 	PMD_INIT_FUNC_TRACE();
94695ef603dSShreyansh Jain 
9476b10d1f7SNipun Gupta 	fman_if_promiscuous_disable(dev->process_private);
9489039c812SAndrew Rybchenko 
9499039c812SAndrew Rybchenko 	return 0;
95095ef603dSShreyansh Jain }
95195ef603dSShreyansh Jain 
952ca041cd4SIvan Ilchenko static int dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
95344dd70a3SShreyansh Jain {
95444dd70a3SShreyansh Jain 	PMD_INIT_FUNC_TRACE();
95544dd70a3SShreyansh Jain 
9566b10d1f7SNipun Gupta 	fman_if_set_mcast_filter_table(dev->process_private);
957ca041cd4SIvan Ilchenko 
958ca041cd4SIvan Ilchenko 	return 0;
95944dd70a3SShreyansh Jain }
96044dd70a3SShreyansh Jain 
961ca041cd4SIvan Ilchenko static int dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
96244dd70a3SShreyansh Jain {
96344dd70a3SShreyansh Jain 	PMD_INIT_FUNC_TRACE();
96444dd70a3SShreyansh Jain 
9656b10d1f7SNipun Gupta 	fman_if_reset_mcast_filter_table(dev->process_private);
966ca041cd4SIvan Ilchenko 
967ca041cd4SIvan Ilchenko 	return 0;
96844dd70a3SShreyansh Jain }
96944dd70a3SShreyansh Jain 
970e4abd4ffSJun Yang static void dpaa_fman_if_pool_setup(struct rte_eth_dev *dev)
971e4abd4ffSJun Yang {
972e4abd4ffSJun Yang 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
973e4abd4ffSJun Yang 	struct fman_if_ic_params icp;
974e4abd4ffSJun Yang 	uint32_t fd_offset;
975e4abd4ffSJun Yang 	uint32_t bp_size;
976e4abd4ffSJun Yang 
977e4abd4ffSJun Yang 	memset(&icp, 0, sizeof(icp));
978e4abd4ffSJun Yang 	/* set ICEOF for to the default value , which is 0*/
979e4abd4ffSJun Yang 	icp.iciof = DEFAULT_ICIOF;
980e4abd4ffSJun Yang 	icp.iceof = DEFAULT_RX_ICEOF;
981e4abd4ffSJun Yang 	icp.icsz = DEFAULT_ICSZ;
982e4abd4ffSJun Yang 	fman_if_set_ic_params(dev->process_private, &icp);
983e4abd4ffSJun Yang 
984e4abd4ffSJun Yang 	fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
985e4abd4ffSJun Yang 	fman_if_set_fdoff(dev->process_private, fd_offset);
986e4abd4ffSJun Yang 
987e4abd4ffSJun Yang 	/* Buffer pool size should be equal to Dataroom Size*/
988e4abd4ffSJun Yang 	bp_size = rte_pktmbuf_data_room_size(dpaa_intf->bp_info->mp);
989e4abd4ffSJun Yang 
990e4abd4ffSJun Yang 	fman_if_set_bp(dev->process_private,
991e4abd4ffSJun Yang 		       dpaa_intf->bp_info->mp->size,
992e4abd4ffSJun Yang 		       dpaa_intf->bp_info->bpid, bp_size);
993e4abd4ffSJun Yang }
994e4abd4ffSJun Yang 
995e4abd4ffSJun Yang static inline int dpaa_eth_rx_queue_bp_check(struct rte_eth_dev *dev,
996e4abd4ffSJun Yang 					     int8_t vsp_id, uint32_t bpid)
997e4abd4ffSJun Yang {
998e4abd4ffSJun Yang 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
999e4abd4ffSJun Yang 	struct fman_if *fif = dev->process_private;
1000e4abd4ffSJun Yang 
1001e4abd4ffSJun Yang 	if (fif->num_profiles) {
1002e4abd4ffSJun Yang 		if (vsp_id < 0)
1003e4abd4ffSJun Yang 			vsp_id = fif->base_profile_id;
1004e4abd4ffSJun Yang 	} else {
1005e4abd4ffSJun Yang 		if (vsp_id < 0)
1006e4abd4ffSJun Yang 			vsp_id = 0;
1007e4abd4ffSJun Yang 	}
1008e4abd4ffSJun Yang 
1009e4abd4ffSJun Yang 	if (dpaa_intf->vsp_bpid[vsp_id] &&
1010e4abd4ffSJun Yang 		bpid != dpaa_intf->vsp_bpid[vsp_id]) {
1011e4abd4ffSJun Yang 		DPAA_PMD_ERR("Various MPs are assigned to RXQs with same VSP");
1012e4abd4ffSJun Yang 
1013e4abd4ffSJun Yang 		return -1;
1014e4abd4ffSJun Yang 	}
1015e4abd4ffSJun Yang 
1016e4abd4ffSJun Yang 	return 0;
1017e4abd4ffSJun Yang }
1018e4abd4ffSJun Yang 
101937f9b54bSShreyansh Jain static
102037f9b54bSShreyansh Jain int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
102162f53995SHemant Agrawal 			    uint16_t nb_desc,
102237f9b54bSShreyansh Jain 			    unsigned int socket_id __rte_unused,
1023e335cce4SHemant Agrawal 			    const struct rte_eth_rxconf *rx_conf,
102437f9b54bSShreyansh Jain 			    struct rte_mempool *mp)
102537f9b54bSShreyansh Jain {
102637f9b54bSShreyansh Jain 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
10276b10d1f7SNipun Gupta 	struct fman_if *fif = dev->process_private;
102862f53995SHemant Agrawal 	struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx];
10290c504f69SHemant Agrawal 	struct qm_mcc_initfq opts = {0};
10305edc61eeSRohit Raj 	u32 ch_id, flags = 0;
10310c504f69SHemant Agrawal 	int ret;
103255576ac2SHemant Agrawal 	u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
10331bb4a528SFerruh Yigit 	uint32_t max_rx_pktlen;
103437f9b54bSShreyansh Jain 
103537f9b54bSShreyansh Jain 	PMD_INIT_FUNC_TRACE();
103637f9b54bSShreyansh Jain 
10376fd3639aSHemant Agrawal 	if (queue_idx >= dev->data->nb_rx_queues) {
10386fd3639aSHemant Agrawal 		rte_errno = EOVERFLOW;
10396fd3639aSHemant Agrawal 		DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
10406fd3639aSHemant Agrawal 		      (void *)dev, queue_idx, dev->data->nb_rx_queues);
10416fd3639aSHemant Agrawal 		return -rte_errno;
10426fd3639aSHemant Agrawal 	}
10436fd3639aSHemant Agrawal 
1044e335cce4SHemant Agrawal 	/* Rx deferred start is not supported */
1045e335cce4SHemant Agrawal 	if (rx_conf->rx_deferred_start) {
1046e335cce4SHemant Agrawal 		DPAA_PMD_ERR("%p:Rx deferred start not supported", (void *)dev);
1047e335cce4SHemant Agrawal 		return -EINVAL;
1048e335cce4SHemant Agrawal 	}
10492cf9264fSHemant Agrawal 	rxq->nb_desc = UINT16_MAX;
10502cf9264fSHemant Agrawal 	rxq->offloads = rx_conf->offloads;
1051e335cce4SHemant Agrawal 
10526fd3639aSHemant Agrawal 	DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)",
10536fd3639aSHemant Agrawal 			queue_idx, rxq->fqid);
105437f9b54bSShreyansh Jain 
1055e4abd4ffSJun Yang 	if (!fif->num_profiles) {
1056e4abd4ffSJun Yang 		if (dpaa_intf->bp_info && dpaa_intf->bp_info->bp &&
1057e4abd4ffSJun Yang 			dpaa_intf->bp_info->mp != mp) {
1058e4abd4ffSJun Yang 			DPAA_PMD_WARN("Multiple pools on same interface not"
1059e4abd4ffSJun Yang 				      " supported");
1060e4abd4ffSJun Yang 			return -EINVAL;
1061e4abd4ffSJun Yang 		}
1062e4abd4ffSJun Yang 	} else {
1063e4abd4ffSJun Yang 		if (dpaa_eth_rx_queue_bp_check(dev, rxq->vsp_id,
1064e4abd4ffSJun Yang 			DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid)) {
1065e4abd4ffSJun Yang 			return -EINVAL;
1066e4abd4ffSJun Yang 		}
1067e4abd4ffSJun Yang 	}
1068e4abd4ffSJun Yang 
1069376fb49eSNipun Gupta 	if (dpaa_intf->bp_info && dpaa_intf->bp_info->bp &&
1070376fb49eSNipun Gupta 	    dpaa_intf->bp_info->mp != mp) {
1071376fb49eSNipun Gupta 		DPAA_PMD_WARN("Multiple pools on same interface not supported");
1072376fb49eSNipun Gupta 		return -EINVAL;
1073376fb49eSNipun Gupta 	}
1074376fb49eSNipun Gupta 
10751bb4a528SFerruh Yigit 	max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
10761bb4a528SFerruh Yigit 		VLAN_TAG_SIZE;
107755576ac2SHemant Agrawal 	/* Max packet can fit in single buffer */
10781bb4a528SFerruh Yigit 	if (max_rx_pktlen <= buffsz) {
107955576ac2SHemant Agrawal 		;
108055576ac2SHemant Agrawal 	} else if (dev->data->dev_conf.rxmode.offloads &
1081295968d1SFerruh Yigit 			RTE_ETH_RX_OFFLOAD_SCATTER) {
10821bb4a528SFerruh Yigit 		if (max_rx_pktlen > buffsz * DPAA_SGT_MAX_ENTRIES) {
10831bb4a528SFerruh Yigit 			DPAA_PMD_ERR("Maximum Rx packet size %d too big to fit "
108455576ac2SHemant Agrawal 				"MaxSGlist %d",
10851bb4a528SFerruh Yigit 				max_rx_pktlen, buffsz * DPAA_SGT_MAX_ENTRIES);
108655576ac2SHemant Agrawal 			rte_errno = EOVERFLOW;
108755576ac2SHemant Agrawal 			return -rte_errno;
108855576ac2SHemant Agrawal 		}
108955576ac2SHemant Agrawal 	} else {
109055576ac2SHemant Agrawal 		DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is"
109155576ac2SHemant Agrawal 		     " larger than a single mbuf (%u) and scattered"
109265afdda0SRohit Raj 		     " mode has not been requested", max_rx_pktlen, buffsz);
109355576ac2SHemant Agrawal 	}
109455576ac2SHemant Agrawal 
109537f9b54bSShreyansh Jain 	dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
109637f9b54bSShreyansh Jain 
1097e4abd4ffSJun Yang 	/* For shared interface, it's done in kernel, skip.*/
1098*a0edbb8aSRohit Raj 	if (!fif->is_shared_mac && fif->mac_type != fman_offline)
1099e4abd4ffSJun Yang 		dpaa_fman_if_pool_setup(dev);
110037f9b54bSShreyansh Jain 
1101e4abd4ffSJun Yang 	if (fif->num_profiles) {
1102e4abd4ffSJun Yang 		int8_t vsp_id = rxq->vsp_id;
110337f9b54bSShreyansh Jain 
1104e4abd4ffSJun Yang 		if (vsp_id >= 0) {
1105e4abd4ffSJun Yang 			ret = dpaa_port_vsp_update(dpaa_intf, fmc_q, vsp_id,
1106e4abd4ffSJun Yang 					DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid,
110765afdda0SRohit Raj 					fif, buffsz + RTE_PKTMBUF_HEADROOM);
1108e4abd4ffSJun Yang 			if (ret) {
1109e4abd4ffSJun Yang 				DPAA_PMD_ERR("dpaa_port_vsp_update failed");
1110e4abd4ffSJun Yang 				return ret;
111137f9b54bSShreyansh Jain 			}
1112e4abd4ffSJun Yang 		} else {
1113e4abd4ffSJun Yang 			DPAA_PMD_INFO("Base profile is associated to"
11141ec9a3afSHemant Agrawal 				" RXQ fqid:%d", rxq->fqid);
1115e4abd4ffSJun Yang 			if (fif->is_shared_mac) {
1116e4abd4ffSJun Yang 				DPAA_PMD_ERR("Fatal: Base profile is associated"
1117e4abd4ffSJun Yang 					     " to shared interface on DPDK.");
1118e4abd4ffSJun Yang 				return -EINVAL;
1119e4abd4ffSJun Yang 			}
1120e4abd4ffSJun Yang 			dpaa_intf->vsp_bpid[fif->base_profile_id] =
1121e4abd4ffSJun Yang 				DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid;
1122e4abd4ffSJun Yang 		}
1123e4abd4ffSJun Yang 	} else {
1124e4abd4ffSJun Yang 		dpaa_intf->vsp_bpid[0] =
1125e4abd4ffSJun Yang 			DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid;
1126e4abd4ffSJun Yang 	}
1127e4abd4ffSJun Yang 
1128e4abd4ffSJun Yang 	dpaa_intf->valid = 1;
112955576ac2SHemant Agrawal 	DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name,
11301bb4a528SFerruh Yigit 		fman_if_get_sg_enable(fif), max_rx_pktlen);
11310c504f69SHemant Agrawal 	/* checking if push mode only, no error check for now */
1132a6a75240SNipun Gupta 	if (!rxq->is_static &&
1133a6a75240SNipun Gupta 	    dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
1134b9c94167SNipun Gupta 		struct qman_portal *qp;
1135a6a75240SNipun Gupta 		int q_fd;
1136b9c94167SNipun Gupta 
11370c504f69SHemant Agrawal 		dpaa_push_queue_idx++;
11380c504f69SHemant Agrawal 		opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
11390c504f69SHemant Agrawal 		opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK |
11400c504f69SHemant Agrawal 				   QM_FQCTRL_CTXASTASHING |
11410c504f69SHemant Agrawal 				   QM_FQCTRL_PREFERINCACHE;
11420c504f69SHemant Agrawal 		opts.fqd.context_a.stashing.exclusive = 0;
11437be78d02SJosh Soref 		/* In multicore scenario stashing becomes a bottleneck on LS1046.
1144b9083ea5SNipun Gupta 		 * So do not enable stashing in this case
1145b9083ea5SNipun Gupta 		 */
1146b9083ea5SNipun Gupta 		if (dpaa_svr_family != SVR_LS1046A_FAMILY)
11470c504f69SHemant Agrawal 			opts.fqd.context_a.stashing.annotation_cl =
11480c504f69SHemant Agrawal 						DPAA_IF_RX_ANNOTATION_STASH;
11490c504f69SHemant Agrawal 		opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
11500c504f69SHemant Agrawal 		opts.fqd.context_a.stashing.context_cl =
11510c504f69SHemant Agrawal 						DPAA_IF_RX_CONTEXT_STASH;
115262f53995SHemant Agrawal 
11530c504f69SHemant Agrawal 		/*Create a channel and associate given queue with the channel*/
11545edc61eeSRohit Raj 		qman_alloc_pool_range(&ch_id, 1, 1, 0);
11555edc61eeSRohit Raj 		rxq->ch_id = (u16)ch_id;
11565edc61eeSRohit Raj 
11570c504f69SHemant Agrawal 		opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
11580c504f69SHemant Agrawal 		opts.fqd.dest.channel = rxq->ch_id;
11590c504f69SHemant Agrawal 		opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
11600c504f69SHemant Agrawal 		flags = QMAN_INITFQ_FLAG_SCHED;
11610c504f69SHemant Agrawal 
11620c504f69SHemant Agrawal 		/* Configure tail drop */
11630c504f69SHemant Agrawal 		if (dpaa_intf->cgr_rx) {
11640c504f69SHemant Agrawal 			opts.we_mask |= QM_INITFQ_WE_CGID;
11650c504f69SHemant Agrawal 			opts.fqd.cgid = dpaa_intf->cgr_rx[queue_idx].cgrid;
11660c504f69SHemant Agrawal 			opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
11670c504f69SHemant Agrawal 		}
11680c504f69SHemant Agrawal 		ret = qman_init_fq(rxq, flags, &opts);
11696fd3639aSHemant Agrawal 		if (ret) {
11706fd3639aSHemant Agrawal 			DPAA_PMD_ERR("Channel/Q association failed. fqid 0x%x "
11716fd3639aSHemant Agrawal 				"ret:%d(%s)", rxq->fqid, ret, strerror(ret));
11726fd3639aSHemant Agrawal 			return ret;
11736fd3639aSHemant Agrawal 		}
117419b4aba2SHemant Agrawal 		if (dpaa_svr_family == SVR_LS1043A_FAMILY) {
117519b4aba2SHemant Agrawal 			rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb_no_prefetch;
117619b4aba2SHemant Agrawal 		} else {
1177b9083ea5SNipun Gupta 			rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
1178b9083ea5SNipun Gupta 			rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
117919b4aba2SHemant Agrawal 		}
118019b4aba2SHemant Agrawal 
11810c504f69SHemant Agrawal 		rxq->is_static = true;
1182b9c94167SNipun Gupta 
1183b9c94167SNipun Gupta 		/* Allocate qman specific portals */
1184a6a75240SNipun Gupta 		qp = fsl_qman_fq_portal_create(&q_fd);
1185b9c94167SNipun Gupta 		if (!qp) {
1186b9c94167SNipun Gupta 			DPAA_PMD_ERR("Unable to alloc fq portal");
1187b9c94167SNipun Gupta 			return -1;
1188b9c94167SNipun Gupta 		}
1189b9c94167SNipun Gupta 		rxq->qp = qp;
1190a6a75240SNipun Gupta 
1191a6a75240SNipun Gupta 		/* Set up the device interrupt handler */
1192d61138d4SHarman Kalra 		if (dev->intr_handle == NULL) {
1193a6a75240SNipun Gupta 			struct rte_dpaa_device *dpaa_dev;
1194a6a75240SNipun Gupta 			struct rte_device *rdev = dev->device;
1195a6a75240SNipun Gupta 
1196a6a75240SNipun Gupta 			dpaa_dev = container_of(rdev, struct rte_dpaa_device,
1197a6a75240SNipun Gupta 						device);
1198d61138d4SHarman Kalra 			dev->intr_handle = dpaa_dev->intr_handle;
1199d61138d4SHarman Kalra 			if (rte_intr_vec_list_alloc(dev->intr_handle,
1200d61138d4SHarman Kalra 					NULL, dpaa_push_mode_max_queue)) {
1201a6a75240SNipun Gupta 				DPAA_PMD_ERR("intr_vec alloc failed");
1202a6a75240SNipun Gupta 				return -ENOMEM;
1203a6a75240SNipun Gupta 			}
1204d61138d4SHarman Kalra 			if (rte_intr_nb_efd_set(dev->intr_handle,
1205d61138d4SHarman Kalra 					dpaa_push_mode_max_queue))
1206d61138d4SHarman Kalra 				return -rte_errno;
1207d61138d4SHarman Kalra 
1208d61138d4SHarman Kalra 			if (rte_intr_max_intr_set(dev->intr_handle,
1209d61138d4SHarman Kalra 					dpaa_push_mode_max_queue))
1210d61138d4SHarman Kalra 				return -rte_errno;
1211a6a75240SNipun Gupta 		}
1212a6a75240SNipun Gupta 
1213d61138d4SHarman Kalra 		if (rte_intr_type_set(dev->intr_handle, RTE_INTR_HANDLE_EXT))
1214d61138d4SHarman Kalra 			return -rte_errno;
1215d61138d4SHarman Kalra 
1216d61138d4SHarman Kalra 		if (rte_intr_vec_list_index_set(dev->intr_handle,
1217d61138d4SHarman Kalra 						queue_idx, queue_idx + 1))
1218d61138d4SHarman Kalra 			return -rte_errno;
1219d61138d4SHarman Kalra 
1220d61138d4SHarman Kalra 		if (rte_intr_efds_index_set(dev->intr_handle, queue_idx,
1221d61138d4SHarman Kalra 						   q_fd))
1222d61138d4SHarman Kalra 			return -rte_errno;
1223d61138d4SHarman Kalra 
1224a6a75240SNipun Gupta 		rxq->q_fd = q_fd;
12250c504f69SHemant Agrawal 	}
1226e1797f4bSAkhil Goyal 	rxq->bp_array = rte_dpaa_bpid_info;
122762f53995SHemant Agrawal 	dev->data->rx_queues[queue_idx] = rxq;
122862f53995SHemant Agrawal 
122962f53995SHemant Agrawal 	/* configure the CGR size as per the desc size */
123062f53995SHemant Agrawal 	if (dpaa_intf->cgr_rx) {
123162f53995SHemant Agrawal 		struct qm_mcc_initcgr cgr_opts = {0};
123262f53995SHemant Agrawal 
12332cf9264fSHemant Agrawal 		rxq->nb_desc = nb_desc;
123462f53995SHemant Agrawal 		/* Enable tail drop with cgr on this queue */
123562f53995SHemant Agrawal 		qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, nb_desc, 0);
123662f53995SHemant Agrawal 		ret = qman_modify_cgr(dpaa_intf->cgr_rx, 0, &cgr_opts);
123762f53995SHemant Agrawal 		if (ret) {
123862f53995SHemant Agrawal 			DPAA_PMD_WARN(
123962f53995SHemant Agrawal 				"rx taildrop modify fail on fqid %d (ret=%d)",
124062f53995SHemant Agrawal 				rxq->fqid, ret);
124162f53995SHemant Agrawal 		}
124262f53995SHemant Agrawal 	}
1243*a0edbb8aSRohit Raj 
124495d226f0SNipun Gupta 	/* Enable main queue to receive error packets also by default */
1245*a0edbb8aSRohit Raj 	if (fif->mac_type != fman_offline)
124695d226f0SNipun Gupta 		fman_if_set_err_fqid(fif, rxq->fqid);
1247*a0edbb8aSRohit Raj 
124837f9b54bSShreyansh Jain 	return 0;
124937f9b54bSShreyansh Jain }
125037f9b54bSShreyansh Jain 
12511e06b6dcSHemant Agrawal int
125277b7b81eSNeil Horman dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
12535e745593SSunil Kumar Kori 		int eth_rx_queue_id,
12545e745593SSunil Kumar Kori 		u16 ch_id,
12555e745593SSunil Kumar Kori 		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
12565e745593SSunil Kumar Kori {
12575e745593SSunil Kumar Kori 	int ret;
12585e745593SSunil Kumar Kori 	u32 flags = 0;
12595e745593SSunil Kumar Kori 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
12605e745593SSunil Kumar Kori 	struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
12615e745593SSunil Kumar Kori 	struct qm_mcc_initfq opts = {0};
12625e745593SSunil Kumar Kori 
12631af8b0b2SDavid Marchand 	if (dpaa_push_mode_max_queue) {
12641af8b0b2SDavid Marchand 		DPAA_PMD_WARN("PUSH mode q and EVENTDEV are not compatible");
12651af8b0b2SDavid Marchand 		DPAA_PMD_WARN("PUSH mode already enabled for first %d queues.",
12665e745593SSunil Kumar Kori 			      dpaa_push_mode_max_queue);
12671af8b0b2SDavid Marchand 		DPAA_PMD_WARN("To disable set DPAA_PUSH_QUEUES_NUMBER to 0");
12681af8b0b2SDavid Marchand 	}
12695e745593SSunil Kumar Kori 
12705e745593SSunil Kumar Kori 	dpaa_poll_queue_default_config(&opts);
12715e745593SSunil Kumar Kori 
12725e745593SSunil Kumar Kori 	switch (queue_conf->ev.sched_type) {
12735e745593SSunil Kumar Kori 	case RTE_SCHED_TYPE_ATOMIC:
12745e745593SSunil Kumar Kori 		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
12755e745593SSunil Kumar Kori 		/* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
12765e745593SSunil Kumar Kori 		 * configuration with HOLD_ACTIVE setting
12775e745593SSunil Kumar Kori 		 */
12785e745593SSunil Kumar Kori 		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
12795e745593SSunil Kumar Kori 		rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_atomic;
12805e745593SSunil Kumar Kori 		break;
12815e745593SSunil Kumar Kori 	case RTE_SCHED_TYPE_ORDERED:
12821ec9a3afSHemant Agrawal 		DPAA_PMD_ERR("Ordered queue schedule type is not supported");
12835e745593SSunil Kumar Kori 		return -1;
12845e745593SSunil Kumar Kori 	default:
12855e745593SSunil Kumar Kori 		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
12865e745593SSunil Kumar Kori 		rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_parallel;
12875e745593SSunil Kumar Kori 		break;
12885e745593SSunil Kumar Kori 	}
12895e745593SSunil Kumar Kori 
12905e745593SSunil Kumar Kori 	opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
12915e745593SSunil Kumar Kori 	opts.fqd.dest.channel = ch_id;
12925e745593SSunil Kumar Kori 	opts.fqd.dest.wq = queue_conf->ev.priority;
12935e745593SSunil Kumar Kori 
12945e745593SSunil Kumar Kori 	if (dpaa_intf->cgr_rx) {
12955e745593SSunil Kumar Kori 		opts.we_mask |= QM_INITFQ_WE_CGID;
12965e745593SSunil Kumar Kori 		opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
12975e745593SSunil Kumar Kori 		opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
12985e745593SSunil Kumar Kori 	}
12995e745593SSunil Kumar Kori 
13005e745593SSunil Kumar Kori 	flags = QMAN_INITFQ_FLAG_SCHED;
13015e745593SSunil Kumar Kori 
13025e745593SSunil Kumar Kori 	ret = qman_init_fq(rxq, flags, &opts);
13035e745593SSunil Kumar Kori 	if (ret) {
13046fd3639aSHemant Agrawal 		DPAA_PMD_ERR("Ev-Channel/Q association failed. fqid 0x%x "
13056fd3639aSHemant Agrawal 				"ret:%d(%s)", rxq->fqid, ret, strerror(ret));
13065e745593SSunil Kumar Kori 		return ret;
13075e745593SSunil Kumar Kori 	}
13085e745593SSunil Kumar Kori 
13095e745593SSunil Kumar Kori 	/* copy configuration which needs to be filled during dequeue */
13105e745593SSunil Kumar Kori 	memcpy(&rxq->ev, &queue_conf->ev, sizeof(struct rte_event));
13115e745593SSunil Kumar Kori 	dev->data->rx_queues[eth_rx_queue_id] = rxq;
13125e745593SSunil Kumar Kori 
13135e745593SSunil Kumar Kori 	return ret;
13145e745593SSunil Kumar Kori }
13155e745593SSunil Kumar Kori 
13161e06b6dcSHemant Agrawal int
131777b7b81eSNeil Horman dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
13185e745593SSunil Kumar Kori 		int eth_rx_queue_id)
13195e745593SSunil Kumar Kori {
1320ee6647e0SGagandeep Singh 	struct qm_mcc_initfq opts = {0};
13215e745593SSunil Kumar Kori 	int ret;
13225e745593SSunil Kumar Kori 	u32 flags = 0;
13235e745593SSunil Kumar Kori 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
13245e745593SSunil Kumar Kori 	struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
13255e745593SSunil Kumar Kori 
1326ee6647e0SGagandeep Singh 	qman_retire_fq(rxq, NULL);
1327ee6647e0SGagandeep Singh 	qman_oos_fq(rxq);
13285e745593SSunil Kumar Kori 	ret = qman_init_fq(rxq, flags, &opts);
13295e745593SSunil Kumar Kori 	if (ret) {
1330ee6647e0SGagandeep Singh 		DPAA_PMD_ERR("detach rx fqid %d failed with ret: %d",
13315e745593SSunil Kumar Kori 			     rxq->fqid, ret);
13325e745593SSunil Kumar Kori 	}
13335e745593SSunil Kumar Kori 
13345e745593SSunil Kumar Kori 	rxq->cb.dqrr_dpdk_cb = NULL;
13355e745593SSunil Kumar Kori 	dev->data->rx_queues[eth_rx_queue_id] = NULL;
13365e745593SSunil Kumar Kori 
13375e745593SSunil Kumar Kori 	return 0;
13385e745593SSunil Kumar Kori }
13395e745593SSunil Kumar Kori 
134037f9b54bSShreyansh Jain static
134137f9b54bSShreyansh Jain int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
134237f9b54bSShreyansh Jain 			    uint16_t nb_desc __rte_unused,
134337f9b54bSShreyansh Jain 		unsigned int socket_id __rte_unused,
1344e335cce4SHemant Agrawal 		const struct rte_eth_txconf *tx_conf)
134537f9b54bSShreyansh Jain {
134637f9b54bSShreyansh Jain 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
13472cf9264fSHemant Agrawal 	struct qman_fq *txq = &dpaa_intf->tx_queues[queue_idx];
134837f9b54bSShreyansh Jain 
134937f9b54bSShreyansh Jain 	PMD_INIT_FUNC_TRACE();
135037f9b54bSShreyansh Jain 
1351e335cce4SHemant Agrawal 	/* Tx deferred start is not supported */
1352e335cce4SHemant Agrawal 	if (tx_conf->tx_deferred_start) {
1353e335cce4SHemant Agrawal 		DPAA_PMD_ERR("%p:Tx deferred start not supported", (void *)dev);
1354e335cce4SHemant Agrawal 		return -EINVAL;
1355e335cce4SHemant Agrawal 	}
13562cf9264fSHemant Agrawal 	txq->nb_desc = UINT16_MAX;
13572cf9264fSHemant Agrawal 	txq->offloads = tx_conf->offloads;
13582cf9264fSHemant Agrawal 
13596fd3639aSHemant Agrawal 	if (queue_idx >= dev->data->nb_tx_queues) {
13606fd3639aSHemant Agrawal 		rte_errno = EOVERFLOW;
13616fd3639aSHemant Agrawal 		DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
13626fd3639aSHemant Agrawal 		      (void *)dev, queue_idx, dev->data->nb_tx_queues);
13636fd3639aSHemant Agrawal 		return -rte_errno;
13646fd3639aSHemant Agrawal 	}
13656fd3639aSHemant Agrawal 
13666fd3639aSHemant Agrawal 	DPAA_PMD_INFO("Tx queue setup for queue index: %d fq_id (0x%x)",
13672cf9264fSHemant Agrawal 			queue_idx, txq->fqid);
13682cf9264fSHemant Agrawal 	dev->data->tx_queues[queue_idx] = txq;
13699124e65dSGagandeep Singh 
137037f9b54bSShreyansh Jain 	return 0;
137137f9b54bSShreyansh Jain }
137237f9b54bSShreyansh Jain 
1373b005d729SHemant Agrawal static uint32_t
13748d7d4fcdSKonstantin Ananyev dpaa_dev_rx_queue_count(void *rx_queue)
1375b005d729SHemant Agrawal {
13768d7d4fcdSKonstantin Ananyev 	struct qman_fq *rxq = rx_queue;
1377b005d729SHemant Agrawal 	u32 frm_cnt = 0;
1378b005d729SHemant Agrawal 
1379b005d729SHemant Agrawal 	PMD_INIT_FUNC_TRACE();
1380b005d729SHemant Agrawal 
1381b005d729SHemant Agrawal 	if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) {
13828d7d4fcdSKonstantin Ananyev 		DPAA_PMD_DEBUG("RX frame count for q(%p) is %u",
13838d7d4fcdSKonstantin Ananyev 			       rx_queue, frm_cnt);
1384b005d729SHemant Agrawal 	}
1385b005d729SHemant Agrawal 	return frm_cnt;
1386b005d729SHemant Agrawal }
1387b005d729SHemant Agrawal 
1388e124a69fSShreyansh Jain static int dpaa_link_down(struct rte_eth_dev *dev)
1389e124a69fSShreyansh Jain {
1390f231d48dSRohit Raj 	struct fman_if *fif = dev->process_private;
1391f231d48dSRohit Raj 	struct __fman_if *__fif;
1392f231d48dSRohit Raj 
1393e124a69fSShreyansh Jain 	PMD_INIT_FUNC_TRACE();
1394e124a69fSShreyansh Jain 
1395f231d48dSRohit Raj 	__fif = container_of(fif, struct __fman_if, __if);
1396f231d48dSRohit Raj 
1397*a0edbb8aSRohit Raj 	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC &&
1398*a0edbb8aSRohit Raj 	    fif->mac_type != fman_offline)
1399295968d1SFerruh Yigit 		dpaa_update_link_status(__fif->node_name, RTE_ETH_LINK_DOWN);
1400f231d48dSRohit Raj 	else
140162024eb8SIvan Ilchenko 		return dpaa_eth_dev_stop(dev);
1402e124a69fSShreyansh Jain 	return 0;
1403e124a69fSShreyansh Jain }
1404e124a69fSShreyansh Jain 
1405e124a69fSShreyansh Jain static int dpaa_link_up(struct rte_eth_dev *dev)
1406e124a69fSShreyansh Jain {
1407f231d48dSRohit Raj 	struct fman_if *fif = dev->process_private;
1408f231d48dSRohit Raj 	struct __fman_if *__fif;
1409f231d48dSRohit Raj 
1410e124a69fSShreyansh Jain 	PMD_INIT_FUNC_TRACE();
1411e124a69fSShreyansh Jain 
1412f231d48dSRohit Raj 	__fif = container_of(fif, struct __fman_if, __if);
1413f231d48dSRohit Raj 
1414*a0edbb8aSRohit Raj 	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC &&
1415*a0edbb8aSRohit Raj 	    fif->mac_type != fman_offline)
1416295968d1SFerruh Yigit 		dpaa_update_link_status(__fif->node_name, RTE_ETH_LINK_UP);
1417f231d48dSRohit Raj 	else
1418e124a69fSShreyansh Jain 		dpaa_eth_dev_start(dev);
1419e124a69fSShreyansh Jain 	return 0;
1420e124a69fSShreyansh Jain }
1421e124a69fSShreyansh Jain 
1422fe6c6032SShreyansh Jain static int
142312a4678aSShreyansh Jain dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
142412a4678aSShreyansh Jain 		   struct rte_eth_fc_conf *fc_conf)
142512a4678aSShreyansh Jain {
142612a4678aSShreyansh Jain 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
142712a4678aSShreyansh Jain 	struct rte_eth_fc_conf *net_fc;
142812a4678aSShreyansh Jain 
142912a4678aSShreyansh Jain 	PMD_INIT_FUNC_TRACE();
143012a4678aSShreyansh Jain 
143112a4678aSShreyansh Jain 	if (!(dpaa_intf->fc_conf)) {
143212a4678aSShreyansh Jain 		dpaa_intf->fc_conf = rte_zmalloc(NULL,
143312a4678aSShreyansh Jain 			sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
143412a4678aSShreyansh Jain 		if (!dpaa_intf->fc_conf) {
143512a4678aSShreyansh Jain 			DPAA_PMD_ERR("unable to save flow control info");
143612a4678aSShreyansh Jain 			return -ENOMEM;
143712a4678aSShreyansh Jain 		}
143812a4678aSShreyansh Jain 	}
143912a4678aSShreyansh Jain 	net_fc = dpaa_intf->fc_conf;
144012a4678aSShreyansh Jain 
144112a4678aSShreyansh Jain 	if (fc_conf->high_water < fc_conf->low_water) {
144212a4678aSShreyansh Jain 		DPAA_PMD_ERR("Incorrect Flow Control Configuration");
144312a4678aSShreyansh Jain 		return -EINVAL;
144412a4678aSShreyansh Jain 	}
144512a4678aSShreyansh Jain 
1446295968d1SFerruh Yigit 	if (fc_conf->mode == RTE_ETH_FC_NONE) {
144712a4678aSShreyansh Jain 		return 0;
1448295968d1SFerruh Yigit 	} else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE ||
1449295968d1SFerruh Yigit 		 fc_conf->mode == RTE_ETH_FC_FULL) {
14506b10d1f7SNipun Gupta 		fman_if_set_fc_threshold(dev->process_private,
14516b10d1f7SNipun Gupta 					 fc_conf->high_water,
145212a4678aSShreyansh Jain 					 fc_conf->low_water,
145312a4678aSShreyansh Jain 					 dpaa_intf->bp_info->bpid);
145412a4678aSShreyansh Jain 		if (fc_conf->pause_time)
14556b10d1f7SNipun Gupta 			fman_if_set_fc_quanta(dev->process_private,
145612a4678aSShreyansh Jain 					      fc_conf->pause_time);
145712a4678aSShreyansh Jain 	}
145812a4678aSShreyansh Jain 
145912a4678aSShreyansh Jain 	/* Save the information in dpaa device */
146012a4678aSShreyansh Jain 	net_fc->pause_time = fc_conf->pause_time;
146112a4678aSShreyansh Jain 	net_fc->high_water = fc_conf->high_water;
146212a4678aSShreyansh Jain 	net_fc->low_water = fc_conf->low_water;
146312a4678aSShreyansh Jain 	net_fc->send_xon = fc_conf->send_xon;
146412a4678aSShreyansh Jain 	net_fc->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd;
146512a4678aSShreyansh Jain 	net_fc->mode = fc_conf->mode;
146612a4678aSShreyansh Jain 	net_fc->autoneg = fc_conf->autoneg;
146712a4678aSShreyansh Jain 
146812a4678aSShreyansh Jain 	return 0;
146912a4678aSShreyansh Jain }
147012a4678aSShreyansh Jain 
147112a4678aSShreyansh Jain static int
147212a4678aSShreyansh Jain dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
147312a4678aSShreyansh Jain 		   struct rte_eth_fc_conf *fc_conf)
147412a4678aSShreyansh Jain {
147512a4678aSShreyansh Jain 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
147612a4678aSShreyansh Jain 	struct rte_eth_fc_conf *net_fc = dpaa_intf->fc_conf;
147712a4678aSShreyansh Jain 	int ret;
147812a4678aSShreyansh Jain 
147912a4678aSShreyansh Jain 	PMD_INIT_FUNC_TRACE();
148012a4678aSShreyansh Jain 
148112a4678aSShreyansh Jain 	if (net_fc) {
148212a4678aSShreyansh Jain 		fc_conf->pause_time = net_fc->pause_time;
148312a4678aSShreyansh Jain 		fc_conf->high_water = net_fc->high_water;
148412a4678aSShreyansh Jain 		fc_conf->low_water = net_fc->low_water;
148512a4678aSShreyansh Jain 		fc_conf->send_xon = net_fc->send_xon;
148612a4678aSShreyansh Jain 		fc_conf->mac_ctrl_frame_fwd = net_fc->mac_ctrl_frame_fwd;
148712a4678aSShreyansh Jain 		fc_conf->mode = net_fc->mode;
148812a4678aSShreyansh Jain 		fc_conf->autoneg = net_fc->autoneg;
148912a4678aSShreyansh Jain 		return 0;
149012a4678aSShreyansh Jain 	}
14916b10d1f7SNipun Gupta 	ret = fman_if_get_fc_threshold(dev->process_private);
149212a4678aSShreyansh Jain 	if (ret) {
1493295968d1SFerruh Yigit 		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
14946b10d1f7SNipun Gupta 		fc_conf->pause_time =
14956b10d1f7SNipun Gupta 			fman_if_get_fc_quanta(dev->process_private);
149612a4678aSShreyansh Jain 	} else {
1497295968d1SFerruh Yigit 		fc_conf->mode = RTE_ETH_FC_NONE;
149812a4678aSShreyansh Jain 	}
149912a4678aSShreyansh Jain 
150012a4678aSShreyansh Jain 	return 0;
150112a4678aSShreyansh Jain }
150212a4678aSShreyansh Jain 
150312a4678aSShreyansh Jain static int
1504fe6c6032SShreyansh Jain dpaa_dev_add_mac_addr(struct rte_eth_dev *dev,
15056d13ea8eSOlivier Matz 			     struct rte_ether_addr *addr,
1506fe6c6032SShreyansh Jain 			     uint32_t index,
1507fe6c6032SShreyansh Jain 			     __rte_unused uint32_t pool)
1508fe6c6032SShreyansh Jain {
1509fe6c6032SShreyansh Jain 	int ret;
1510*a0edbb8aSRohit Raj 	struct fman_if *fif = dev->process_private;
1511fe6c6032SShreyansh Jain 
1512fe6c6032SShreyansh Jain 	PMD_INIT_FUNC_TRACE();
1513fe6c6032SShreyansh Jain 
1514*a0edbb8aSRohit Raj 	if (fif->mac_type == fman_offline) {
1515*a0edbb8aSRohit Raj 		DPAA_PMD_DEBUG("Add MAC Address not supported on O/H port");
1516*a0edbb8aSRohit Raj 		return 0;
1517*a0edbb8aSRohit Raj 	}
1518*a0edbb8aSRohit Raj 
15196b10d1f7SNipun Gupta 	ret = fman_if_add_mac_addr(dev->process_private,
15206b10d1f7SNipun Gupta 				   addr->addr_bytes, index);
1521fe6c6032SShreyansh Jain 
1522fe6c6032SShreyansh Jain 	if (ret)
1523b7c7ff6eSStephen Hemminger 		DPAA_PMD_ERR("Adding the MAC ADDR failed: err = %d", ret);
1524fe6c6032SShreyansh Jain 	return 0;
1525fe6c6032SShreyansh Jain }
1526fe6c6032SShreyansh Jain 
1527fe6c6032SShreyansh Jain static void
1528fe6c6032SShreyansh Jain dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev,
1529fe6c6032SShreyansh Jain 			  uint32_t index)
1530fe6c6032SShreyansh Jain {
1531*a0edbb8aSRohit Raj 	struct fman_if *fif = dev->process_private;
1532*a0edbb8aSRohit Raj 
1533fe6c6032SShreyansh Jain 	PMD_INIT_FUNC_TRACE();
1534fe6c6032SShreyansh Jain 
1535*a0edbb8aSRohit Raj 	if (fif->mac_type == fman_offline) {
1536*a0edbb8aSRohit Raj 		DPAA_PMD_DEBUG("Remove MAC Address not supported on O/H port");
1537*a0edbb8aSRohit Raj 		return;
1538*a0edbb8aSRohit Raj 	}
1539*a0edbb8aSRohit Raj 
15406b10d1f7SNipun Gupta 	fman_if_clear_mac_addr(dev->process_private, index);
1541fe6c6032SShreyansh Jain }
1542fe6c6032SShreyansh Jain 
1543caccf8b3SOlivier Matz static int
1544fe6c6032SShreyansh Jain dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
15456d13ea8eSOlivier Matz 		       struct rte_ether_addr *addr)
1546fe6c6032SShreyansh Jain {
1547fe6c6032SShreyansh Jain 	int ret;
1548*a0edbb8aSRohit Raj 	struct fman_if *fif = dev->process_private;
1549fe6c6032SShreyansh Jain 
1550fe6c6032SShreyansh Jain 	PMD_INIT_FUNC_TRACE();
1551fe6c6032SShreyansh Jain 
1552*a0edbb8aSRohit Raj 	if (fif->mac_type == fman_offline) {
1553*a0edbb8aSRohit Raj 		DPAA_PMD_DEBUG("Set MAC Address not supported on O/H port");
1554*a0edbb8aSRohit Raj 		return 0;
1555*a0edbb8aSRohit Raj 	}
1556*a0edbb8aSRohit Raj 
15576b10d1f7SNipun Gupta 	ret = fman_if_add_mac_addr(dev->process_private, addr->addr_bytes, 0);
1558fe6c6032SShreyansh Jain 	if (ret)
1559b7c7ff6eSStephen Hemminger 		DPAA_PMD_ERR("Setting the MAC ADDR failed %d", ret);
1560caccf8b3SOlivier Matz 
1561caccf8b3SOlivier Matz 	return ret;
1562fe6c6032SShreyansh Jain }
1563fe6c6032SShreyansh Jain 
1564627e677dSSachin Saxena static int
1565627e677dSSachin Saxena dpaa_dev_rss_hash_update(struct rte_eth_dev *dev,
1566627e677dSSachin Saxena 			 struct rte_eth_rss_conf *rss_conf)
1567627e677dSSachin Saxena {
1568627e677dSSachin Saxena 	struct rte_eth_dev_data *data = dev->data;
1569627e677dSSachin Saxena 	struct rte_eth_conf *eth_conf = &data->dev_conf;
1570627e677dSSachin Saxena 
1571627e677dSSachin Saxena 	PMD_INIT_FUNC_TRACE();
1572627e677dSSachin Saxena 
1573627e677dSSachin Saxena 	if (!(default_q || fmc_q)) {
1574627e677dSSachin Saxena 		if (dpaa_fm_config(dev, rss_conf->rss_hf)) {
15751ec9a3afSHemant Agrawal 			DPAA_PMD_ERR("FM port configuration: Failed");
1576627e677dSSachin Saxena 			return -1;
1577627e677dSSachin Saxena 		}
1578627e677dSSachin Saxena 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
1579627e677dSSachin Saxena 	} else {
15801ec9a3afSHemant Agrawal 		DPAA_PMD_ERR("Function not supported");
1581627e677dSSachin Saxena 		return -ENOTSUP;
1582627e677dSSachin Saxena 	}
1583627e677dSSachin Saxena 	return 0;
1584627e677dSSachin Saxena }
1585627e677dSSachin Saxena 
1586627e677dSSachin Saxena static int
1587627e677dSSachin Saxena dpaa_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1588627e677dSSachin Saxena 			   struct rte_eth_rss_conf *rss_conf)
1589627e677dSSachin Saxena {
1590627e677dSSachin Saxena 	struct rte_eth_dev_data *data = dev->data;
1591627e677dSSachin Saxena 	struct rte_eth_conf *eth_conf = &data->dev_conf;
1592627e677dSSachin Saxena 
1593627e677dSSachin Saxena 	/* dpaa does not support rss_key, so length should be 0*/
1594627e677dSSachin Saxena 	rss_conf->rss_key_len = 0;
1595627e677dSSachin Saxena 	rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
1596627e677dSSachin Saxena 	return 0;
1597627e677dSSachin Saxena }
1598627e677dSSachin Saxena 
1599b1b5d6c9SNipun Gupta static int dpaa_dev_queue_intr_enable(struct rte_eth_dev *dev,
1600b1b5d6c9SNipun Gupta 				      uint16_t queue_id)
1601b1b5d6c9SNipun Gupta {
1602b1b5d6c9SNipun Gupta 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
1603b1b5d6c9SNipun Gupta 	struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id];
1604b1b5d6c9SNipun Gupta 
1605b1b5d6c9SNipun Gupta 	if (!rxq->is_static)
1606b1b5d6c9SNipun Gupta 		return -EINVAL;
1607b1b5d6c9SNipun Gupta 
1608b1b5d6c9SNipun Gupta 	return qman_fq_portal_irqsource_add(rxq->qp, QM_PIRQ_DQRI);
1609b1b5d6c9SNipun Gupta }
1610b1b5d6c9SNipun Gupta 
1611b1b5d6c9SNipun Gupta static int dpaa_dev_queue_intr_disable(struct rte_eth_dev *dev,
1612b1b5d6c9SNipun Gupta 				       uint16_t queue_id)
1613b1b5d6c9SNipun Gupta {
1614b1b5d6c9SNipun Gupta 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
1615b1b5d6c9SNipun Gupta 	struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id];
1616b1b5d6c9SNipun Gupta 	uint32_t temp;
1617b1b5d6c9SNipun Gupta 	ssize_t temp1;
1618b1b5d6c9SNipun Gupta 
1619b1b5d6c9SNipun Gupta 	if (!rxq->is_static)
1620b1b5d6c9SNipun Gupta 		return -EINVAL;
1621b1b5d6c9SNipun Gupta 
1622b1b5d6c9SNipun Gupta 	qman_fq_portal_irqsource_remove(rxq->qp, ~0);
1623b1b5d6c9SNipun Gupta 
1624b1b5d6c9SNipun Gupta 	temp1 = read(rxq->q_fd, &temp, sizeof(temp));
1625b1b5d6c9SNipun Gupta 	if (temp1 != sizeof(temp))
162605500852SVanshika Shukla 		DPAA_PMD_DEBUG("read did not return anything");
1627b1b5d6c9SNipun Gupta 
1628b1b5d6c9SNipun Gupta 	qman_fq_portal_thread_irq(rxq->qp);
1629b1b5d6c9SNipun Gupta 
1630b1b5d6c9SNipun Gupta 	return 0;
1631b1b5d6c9SNipun Gupta }
1632b1b5d6c9SNipun Gupta 
16332cf9264fSHemant Agrawal static void
16342cf9264fSHemant Agrawal dpaa_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
16352cf9264fSHemant Agrawal 	struct rte_eth_rxq_info *qinfo)
16362cf9264fSHemant Agrawal {
16372cf9264fSHemant Agrawal 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
16382cf9264fSHemant Agrawal 	struct qman_fq *rxq;
1639378cd488SHemant Agrawal 	int ret;
16402cf9264fSHemant Agrawal 
16412cf9264fSHemant Agrawal 	rxq = dev->data->rx_queues[queue_id];
16422cf9264fSHemant Agrawal 
16432cf9264fSHemant Agrawal 	qinfo->mp = dpaa_intf->bp_info->mp;
16442cf9264fSHemant Agrawal 	qinfo->scattered_rx = dev->data->scattered_rx;
16452cf9264fSHemant Agrawal 	qinfo->nb_desc = rxq->nb_desc;
1646378cd488SHemant Agrawal 
1647378cd488SHemant Agrawal 	/* Report the HW Rx buffer length to user */
1648378cd488SHemant Agrawal 	ret = fman_if_get_maxfrm(dev->process_private);
1649378cd488SHemant Agrawal 	if (ret > 0)
1650378cd488SHemant Agrawal 		qinfo->rx_buf_size = ret;
1651378cd488SHemant Agrawal 
16522cf9264fSHemant Agrawal 	qinfo->conf.rx_free_thresh = 1;
16532cf9264fSHemant Agrawal 	qinfo->conf.rx_drop_en = 1;
16542cf9264fSHemant Agrawal 	qinfo->conf.rx_deferred_start = 0;
16552cf9264fSHemant Agrawal 	qinfo->conf.offloads = rxq->offloads;
16562cf9264fSHemant Agrawal }
16572cf9264fSHemant Agrawal 
16582cf9264fSHemant Agrawal static void
16592cf9264fSHemant Agrawal dpaa_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
16602cf9264fSHemant Agrawal 	struct rte_eth_txq_info *qinfo)
16612cf9264fSHemant Agrawal {
16622cf9264fSHemant Agrawal 	struct qman_fq *txq;
16632cf9264fSHemant Agrawal 
16642cf9264fSHemant Agrawal 	txq = dev->data->tx_queues[queue_id];
16652cf9264fSHemant Agrawal 
16662cf9264fSHemant Agrawal 	qinfo->nb_desc = txq->nb_desc;
16672cf9264fSHemant Agrawal 	qinfo->conf.tx_thresh.pthresh = 0;
16682cf9264fSHemant Agrawal 	qinfo->conf.tx_thresh.hthresh = 0;
16692cf9264fSHemant Agrawal 	qinfo->conf.tx_thresh.wthresh = 0;
16702cf9264fSHemant Agrawal 
16712cf9264fSHemant Agrawal 	qinfo->conf.tx_free_thresh = 0;
16722cf9264fSHemant Agrawal 	qinfo->conf.tx_rs_thresh = 0;
16732cf9264fSHemant Agrawal 	qinfo->conf.offloads = txq->offloads;
16742cf9264fSHemant Agrawal 	qinfo->conf.tx_deferred_start = 0;
16752cf9264fSHemant Agrawal }
16762cf9264fSHemant Agrawal 
1677ff9e112dSShreyansh Jain static struct eth_dev_ops dpaa_devops = {
1678ff9e112dSShreyansh Jain 	.dev_configure		  = dpaa_eth_dev_configure,
1679ff9e112dSShreyansh Jain 	.dev_start		  = dpaa_eth_dev_start,
1680ff9e112dSShreyansh Jain 	.dev_stop		  = dpaa_eth_dev_stop,
1681ff9e112dSShreyansh Jain 	.dev_close		  = dpaa_eth_dev_close,
1682799db456SShreyansh Jain 	.dev_infos_get		  = dpaa_eth_dev_info,
1683a7bdc3bdSShreyansh Jain 	.dev_supported_ptypes_get = dpaa_supported_ptypes_get,
168437f9b54bSShreyansh Jain 
168537f9b54bSShreyansh Jain 	.rx_queue_setup		  = dpaa_eth_rx_queue_setup,
168637f9b54bSShreyansh Jain 	.tx_queue_setup		  = dpaa_eth_tx_queue_setup,
16872e6f5657SApeksha Gupta 	.rx_burst_mode_get	  = dpaa_dev_rx_burst_mode_get,
16882e6f5657SApeksha Gupta 	.tx_burst_mode_get	  = dpaa_dev_tx_burst_mode_get,
16892cf9264fSHemant Agrawal 	.rxq_info_get		  = dpaa_rxq_info_get,
16902cf9264fSHemant Agrawal 	.txq_info_get		  = dpaa_txq_info_get,
16912cf9264fSHemant Agrawal 
169212a4678aSShreyansh Jain 	.flow_ctrl_get		  = dpaa_flow_ctrl_get,
169312a4678aSShreyansh Jain 	.flow_ctrl_set		  = dpaa_flow_ctrl_set,
169412a4678aSShreyansh Jain 
1695e124a69fSShreyansh Jain 	.link_update		  = dpaa_eth_link_update,
1696e1ad3a05SShreyansh Jain 	.stats_get		  = dpaa_eth_stats_get,
1697b21ed3e2SHemant Agrawal 	.xstats_get		  = dpaa_dev_xstats_get,
1698b21ed3e2SHemant Agrawal 	.xstats_get_by_id	  = dpaa_xstats_get_by_id,
1699b21ed3e2SHemant Agrawal 	.xstats_get_names_by_id	  = dpaa_xstats_get_names_by_id,
1700b21ed3e2SHemant Agrawal 	.xstats_get_names	  = dpaa_xstats_get_names,
1701b21ed3e2SHemant Agrawal 	.xstats_reset		  = dpaa_eth_stats_reset,
1702e1ad3a05SShreyansh Jain 	.stats_reset		  = dpaa_eth_stats_reset,
170395ef603dSShreyansh Jain 	.promiscuous_enable	  = dpaa_eth_promiscuous_enable,
170495ef603dSShreyansh Jain 	.promiscuous_disable	  = dpaa_eth_promiscuous_disable,
170544dd70a3SShreyansh Jain 	.allmulticast_enable	  = dpaa_eth_multicast_enable,
170644dd70a3SShreyansh Jain 	.allmulticast_disable	  = dpaa_eth_multicast_disable,
17070cbec027SShreyansh Jain 	.mtu_set		  = dpaa_mtu_set,
1708e124a69fSShreyansh Jain 	.dev_set_link_down	  = dpaa_link_down,
1709e124a69fSShreyansh Jain 	.dev_set_link_up	  = dpaa_link_up,
1710fe6c6032SShreyansh Jain 	.mac_addr_add		  = dpaa_dev_add_mac_addr,
1711fe6c6032SShreyansh Jain 	.mac_addr_remove	  = dpaa_dev_remove_mac_addr,
1712fe6c6032SShreyansh Jain 	.mac_addr_set		  = dpaa_dev_set_mac_addr,
1713fe6c6032SShreyansh Jain 
1714cf0fab1dSHemant Agrawal 	.fw_version_get		  = dpaa_fw_version_get,
1715b1b5d6c9SNipun Gupta 
1716b1b5d6c9SNipun Gupta 	.rx_queue_intr_enable	  = dpaa_dev_queue_intr_enable,
1717b1b5d6c9SNipun Gupta 	.rx_queue_intr_disable	  = dpaa_dev_queue_intr_disable,
1718627e677dSSachin Saxena 	.rss_hash_update	  = dpaa_dev_rss_hash_update,
1719627e677dSSachin Saxena 	.rss_hash_conf_get        = dpaa_dev_rss_hash_conf_get,
172073585446SVanshika Shukla 	.timesync_enable	  = dpaa_timesync_enable,
172173585446SVanshika Shukla 	.timesync_disable	  = dpaa_timesync_disable,
172273585446SVanshika Shukla 	.timesync_read_time	  = dpaa_timesync_read_time,
172373585446SVanshika Shukla 	.timesync_write_time	  = dpaa_timesync_write_time,
172473585446SVanshika Shukla 	.timesync_adjust_time	  = dpaa_timesync_adjust_time,
1725615352f5SVanshika Shukla 	.timesync_read_rx_timestamp = dpaa_timesync_read_rx_timestamp,
1726615352f5SVanshika Shukla 	.timesync_read_tx_timestamp = dpaa_timesync_read_tx_timestamp,
1727ff9e112dSShreyansh Jain };
1728ff9e112dSShreyansh Jain 
17298c3495f5SHemant Agrawal static bool
17308c3495f5SHemant Agrawal is_device_supported(struct rte_eth_dev *dev, struct rte_dpaa_driver *drv)
17318c3495f5SHemant Agrawal {
17328c3495f5SHemant Agrawal 	if (strcmp(dev->device->driver->name,
17338c3495f5SHemant Agrawal 		   drv->driver.name))
17348c3495f5SHemant Agrawal 		return false;
17358c3495f5SHemant Agrawal 
17368c3495f5SHemant Agrawal 	return true;
17378c3495f5SHemant Agrawal }
17388c3495f5SHemant Agrawal 
17398c3495f5SHemant Agrawal static bool
17408c3495f5SHemant Agrawal is_dpaa_supported(struct rte_eth_dev *dev)
17418c3495f5SHemant Agrawal {
17428c3495f5SHemant Agrawal 	return is_device_supported(dev, &rte_dpaa_pmd);
17438c3495f5SHemant Agrawal }
17448c3495f5SHemant Agrawal 
17451e06b6dcSHemant Agrawal int
1746ae8f4cf3SFerruh Yigit rte_pmd_dpaa_set_tx_loopback(uint16_t port, uint8_t on)
17478c3495f5SHemant Agrawal {
17488c3495f5SHemant Agrawal 	struct rte_eth_dev *dev;
17498c3495f5SHemant Agrawal 
17508c3495f5SHemant Agrawal 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
17518c3495f5SHemant Agrawal 
17528c3495f5SHemant Agrawal 	dev = &rte_eth_devices[port];
17538c3495f5SHemant Agrawal 
17548c3495f5SHemant Agrawal 	if (!is_dpaa_supported(dev))
17558c3495f5SHemant Agrawal 		return -ENOTSUP;
17568c3495f5SHemant Agrawal 
17578c3495f5SHemant Agrawal 	if (on)
17586b10d1f7SNipun Gupta 		fman_if_loopback_enable(dev->process_private);
17598c3495f5SHemant Agrawal 	else
17606b10d1f7SNipun Gupta 		fman_if_loopback_disable(dev->process_private);
17618c3495f5SHemant Agrawal 
17628c3495f5SHemant Agrawal 	return 0;
17638c3495f5SHemant Agrawal }
17648c3495f5SHemant Agrawal 
17656b10d1f7SNipun Gupta static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf,
17666b10d1f7SNipun Gupta 			       struct fman_if *fman_intf)
176712a4678aSShreyansh Jain {
176812a4678aSShreyansh Jain 	struct rte_eth_fc_conf *fc_conf;
176912a4678aSShreyansh Jain 	int ret;
177012a4678aSShreyansh Jain 
177112a4678aSShreyansh Jain 	PMD_INIT_FUNC_TRACE();
177212a4678aSShreyansh Jain 
177312a4678aSShreyansh Jain 	if (!(dpaa_intf->fc_conf)) {
177412a4678aSShreyansh Jain 		dpaa_intf->fc_conf = rte_zmalloc(NULL,
177512a4678aSShreyansh Jain 			sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
177612a4678aSShreyansh Jain 		if (!dpaa_intf->fc_conf) {
177712a4678aSShreyansh Jain 			DPAA_PMD_ERR("unable to save flow control info");
177812a4678aSShreyansh Jain 			return -ENOMEM;
177912a4678aSShreyansh Jain 		}
178012a4678aSShreyansh Jain 	}
178112a4678aSShreyansh Jain 	fc_conf = dpaa_intf->fc_conf;
17826b10d1f7SNipun Gupta 	ret = fman_if_get_fc_threshold(fman_intf);
178312a4678aSShreyansh Jain 	if (ret) {
1784295968d1SFerruh Yigit 		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
17856b10d1f7SNipun Gupta 		fc_conf->pause_time = fman_if_get_fc_quanta(fman_intf);
178612a4678aSShreyansh Jain 	} else {
1787295968d1SFerruh Yigit 		fc_conf->mode = RTE_ETH_FC_NONE;
178812a4678aSShreyansh Jain 	}
178912a4678aSShreyansh Jain 
179012a4678aSShreyansh Jain 	return 0;
179112a4678aSShreyansh Jain }
179212a4678aSShreyansh Jain 
179337f9b54bSShreyansh Jain /* Initialise an Rx FQ */
179462f53995SHemant Agrawal static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
179537f9b54bSShreyansh Jain 			      uint32_t fqid)
179637f9b54bSShreyansh Jain {
17978d804cf1SHemant Agrawal 	struct qm_mcc_initfq opts = {0};
179837f9b54bSShreyansh Jain 	int ret;
1799f04e7139SHemant Agrawal 	u32 flags = QMAN_FQ_FLAG_NO_ENQUEUE;
180062f53995SHemant Agrawal 	struct qm_mcc_initcgr cgr_opts = {
180162f53995SHemant Agrawal 		.we_mask = QM_CGR_WE_CS_THRES |
180262f53995SHemant Agrawal 				QM_CGR_WE_CSTD_EN |
180362f53995SHemant Agrawal 				QM_CGR_WE_MODE,
180462f53995SHemant Agrawal 		.cgr = {
180562f53995SHemant Agrawal 			.cstd_en = QM_CGR_EN,
180662f53995SHemant Agrawal 			.mode = QMAN_CGR_MODE_FRAME
180762f53995SHemant Agrawal 		}
180862f53995SHemant Agrawal 	};
180937f9b54bSShreyansh Jain 
18104defbc8cSSachin Saxena 	if (fmc_q || default_q) {
181137f9b54bSShreyansh Jain 		ret = qman_reserve_fqid(fqid);
181237f9b54bSShreyansh Jain 		if (ret) {
18134defbc8cSSachin Saxena 			DPAA_PMD_ERR("reserve rx fqid 0x%x failed, ret: %d",
181437f9b54bSShreyansh Jain 				     fqid, ret);
181537f9b54bSShreyansh Jain 			return -EINVAL;
181637f9b54bSShreyansh Jain 		}
1817f04e7139SHemant Agrawal 	}
18184defbc8cSSachin Saxena 
18198d6fc8b6SHemant Agrawal 	DPAA_PMD_DEBUG("creating rx fq %p, fqid 0x%x", fq, fqid);
1820f04e7139SHemant Agrawal 	ret = qman_create_fq(fqid, flags, fq);
182137f9b54bSShreyansh Jain 	if (ret) {
18226fd3639aSHemant Agrawal 		DPAA_PMD_ERR("create rx fqid 0x%x failed with ret: %d",
182337f9b54bSShreyansh Jain 			fqid, ret);
182437f9b54bSShreyansh Jain 		return ret;
182537f9b54bSShreyansh Jain 	}
18260c504f69SHemant Agrawal 	fq->is_static = false;
18275e745593SSunil Kumar Kori 
18285e745593SSunil Kumar Kori 	dpaa_poll_queue_default_config(&opts);
182937f9b54bSShreyansh Jain 
183062f53995SHemant Agrawal 	if (cgr_rx) {
183162f53995SHemant Agrawal 		/* Enable tail drop with cgr on this queue */
183262f53995SHemant Agrawal 		qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, td_threshold, 0);
183362f53995SHemant Agrawal 		cgr_rx->cb = NULL;
183462f53995SHemant Agrawal 		ret = qman_create_cgr(cgr_rx, QMAN_CGR_FLAG_USE_INIT,
183562f53995SHemant Agrawal 				      &cgr_opts);
183662f53995SHemant Agrawal 		if (ret) {
183762f53995SHemant Agrawal 			DPAA_PMD_WARN(
18388d6fc8b6SHemant Agrawal 				"rx taildrop init fail on rx fqid 0x%x(ret=%d)",
1839f04e7139SHemant Agrawal 				fq->fqid, ret);
184062f53995SHemant Agrawal 			goto without_cgr;
184162f53995SHemant Agrawal 		}
184262f53995SHemant Agrawal 		opts.we_mask |= QM_INITFQ_WE_CGID;
184362f53995SHemant Agrawal 		opts.fqd.cgid = cgr_rx->cgrid;
184462f53995SHemant Agrawal 		opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
184562f53995SHemant Agrawal 	}
184662f53995SHemant Agrawal without_cgr:
1847f04e7139SHemant Agrawal 	ret = qman_init_fq(fq, 0, &opts);
184837f9b54bSShreyansh Jain 	if (ret)
18498d6fc8b6SHemant Agrawal 		DPAA_PMD_ERR("init rx fqid 0x%x failed with ret:%d", fqid, ret);
185037f9b54bSShreyansh Jain 	return ret;
185137f9b54bSShreyansh Jain }
185237f9b54bSShreyansh Jain 
1853*a0edbb8aSRohit Raj uint8_t fm_default_vsp_id(struct fman_if *fif)
1854*a0edbb8aSRohit Raj {
1855*a0edbb8aSRohit Raj 	/* Avoid being same as base profile which could be used
1856*a0edbb8aSRohit Raj 	 * for kernel interface of shared mac.
1857*a0edbb8aSRohit Raj 	 */
1858*a0edbb8aSRohit Raj 	if (fif->base_profile_id)
1859*a0edbb8aSRohit Raj 		return 0;
1860*a0edbb8aSRohit Raj 	else
1861*a0edbb8aSRohit Raj 		return DPAA_DEFAULT_RXQ_VSP_ID;
1862*a0edbb8aSRohit Raj }
1863*a0edbb8aSRohit Raj 
186437f9b54bSShreyansh Jain /* Initialise a Tx FQ */
186537f9b54bSShreyansh Jain static int dpaa_tx_queue_init(struct qman_fq *fq,
18669124e65dSGagandeep Singh 			      struct fman_if *fman_intf,
18679124e65dSGagandeep Singh 			      struct qman_cgr *cgr_tx)
186837f9b54bSShreyansh Jain {
18698d804cf1SHemant Agrawal 	struct qm_mcc_initfq opts = {0};
18709124e65dSGagandeep Singh 	struct qm_mcc_initcgr cgr_opts = {
18719124e65dSGagandeep Singh 		.we_mask = QM_CGR_WE_CS_THRES |
18729124e65dSGagandeep Singh 				QM_CGR_WE_CSTD_EN |
18739124e65dSGagandeep Singh 				QM_CGR_WE_MODE,
18749124e65dSGagandeep Singh 		.cgr = {
18759124e65dSGagandeep Singh 			.cstd_en = QM_CGR_EN,
18769124e65dSGagandeep Singh 			.mode = QMAN_CGR_MODE_FRAME
18779124e65dSGagandeep Singh 		}
18789124e65dSGagandeep Singh 	};
187937f9b54bSShreyansh Jain 	int ret;
188037f9b54bSShreyansh Jain 
188137f9b54bSShreyansh Jain 	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
188237f9b54bSShreyansh Jain 			     QMAN_FQ_FLAG_TO_DCPORTAL, fq);
188337f9b54bSShreyansh Jain 	if (ret) {
188437f9b54bSShreyansh Jain 		DPAA_PMD_ERR("create tx fq failed with ret: %d", ret);
188537f9b54bSShreyansh Jain 		return ret;
188637f9b54bSShreyansh Jain 	}
188737f9b54bSShreyansh Jain 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
188837f9b54bSShreyansh Jain 		       QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;
188937f9b54bSShreyansh Jain 	opts.fqd.dest.channel = fman_intf->tx_channel_id;
189037f9b54bSShreyansh Jain 	opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY;
189137f9b54bSShreyansh Jain 	opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
189237f9b54bSShreyansh Jain 	opts.fqd.context_b = 0;
189358e0420fSVanshika Shukla 	if (dpaa_ieee_1588) {
189458e0420fSVanshika Shukla 		opts.fqd.context_a.lo = 0;
189558e0420fSVanshika Shukla 		opts.fqd.context_a.hi = fman_dealloc_bufs_mask_hi;
189658e0420fSVanshika Shukla 	} else {
189737f9b54bSShreyansh Jain 		/* no tx-confirmation */
189858e0420fSVanshika Shukla 		opts.fqd.context_a.lo = fman_dealloc_bufs_mask_lo;
1899*a0edbb8aSRohit Raj 		opts.fqd.context_a.hi = DPAA_FQD_CTX_A_OVERRIDE_FQ |
1900*a0edbb8aSRohit Raj 					fman_dealloc_bufs_mask_hi;
190158e0420fSVanshika Shukla 	}
190258e0420fSVanshika Shukla 
1903*a0edbb8aSRohit Raj 	if (fman_ip_rev >= FMAN_V3)
190472e9e0c9SNipun Gupta 		/* Set B0V bit in contextA to set ASPID to 0 */
1905*a0edbb8aSRohit Raj 		opts.fqd.context_a.hi |= DPAA_FQD_CTX_A_B0_FIELD_VALID;
1906*a0edbb8aSRohit Raj 
1907*a0edbb8aSRohit Raj 	if (fman_intf->mac_type == fman_offline) {
1908*a0edbb8aSRohit Raj 		opts.fqd.context_a.lo |= DPAA_FQD_CTX_A2_VSPE_BIT;
1909*a0edbb8aSRohit Raj 		opts.fqd.context_b = fm_default_vsp_id(fman_intf) <<
1910*a0edbb8aSRohit Raj 				     DPAA_FQD_CTX_B_SHIFT_BITS;
191172e9e0c9SNipun Gupta 	}
1912*a0edbb8aSRohit Raj 
19138d6fc8b6SHemant Agrawal 	DPAA_PMD_DEBUG("init tx fq %p, fqid 0x%x", fq, fq->fqid);
19149124e65dSGagandeep Singh 
19159124e65dSGagandeep Singh 	if (cgr_tx) {
19169124e65dSGagandeep Singh 		/* Enable tail drop with cgr on this queue */
19179124e65dSGagandeep Singh 		qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres,
19189124e65dSGagandeep Singh 				      td_tx_threshold, 0);
19199124e65dSGagandeep Singh 		cgr_tx->cb = NULL;
19209124e65dSGagandeep Singh 		ret = qman_create_cgr(cgr_tx, QMAN_CGR_FLAG_USE_INIT,
19219124e65dSGagandeep Singh 				      &cgr_opts);
19229124e65dSGagandeep Singh 		if (ret) {
19239124e65dSGagandeep Singh 			DPAA_PMD_WARN(
19249124e65dSGagandeep Singh 				"rx taildrop init fail on rx fqid 0x%x(ret=%d)",
19259124e65dSGagandeep Singh 				fq->fqid, ret);
19269124e65dSGagandeep Singh 			goto without_cgr;
19279124e65dSGagandeep Singh 		}
19289124e65dSGagandeep Singh 		opts.we_mask |= QM_INITFQ_WE_CGID;
19299124e65dSGagandeep Singh 		opts.fqd.cgid = cgr_tx->cgrid;
19309124e65dSGagandeep Singh 		opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
19311ec9a3afSHemant Agrawal 		DPAA_PMD_DEBUG("Tx FQ tail drop enabled, threshold = %d",
19329124e65dSGagandeep Singh 				td_tx_threshold);
19339124e65dSGagandeep Singh 	}
19349124e65dSGagandeep Singh without_cgr:
193537f9b54bSShreyansh Jain 	ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
193637f9b54bSShreyansh Jain 	if (ret)
19378d6fc8b6SHemant Agrawal 		DPAA_PMD_ERR("init tx fqid 0x%x failed %d", fq->fqid, ret);
193837f9b54bSShreyansh Jain 	return ret;
193937f9b54bSShreyansh Jain }
194037f9b54bSShreyansh Jain 
1941d11482d9SVanshika Shukla static int
1942d11482d9SVanshika Shukla dpaa_tx_conf_queue_init(struct qman_fq *fq)
1943d11482d9SVanshika Shukla {
1944d11482d9SVanshika Shukla 	struct qm_mcc_initfq opts = {0};
1945d11482d9SVanshika Shukla 	int ret;
1946d11482d9SVanshika Shukla 
1947d11482d9SVanshika Shukla 	PMD_INIT_FUNC_TRACE();
1948d11482d9SVanshika Shukla 
1949d11482d9SVanshika Shukla 	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
1950d11482d9SVanshika Shukla 	if (ret) {
1951d11482d9SVanshika Shukla 		DPAA_PMD_ERR("create Tx_conf failed with ret: %d", ret);
1952d11482d9SVanshika Shukla 		return ret;
1953d11482d9SVanshika Shukla 	}
1954d11482d9SVanshika Shukla 
1955d11482d9SVanshika Shukla 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
1956d11482d9SVanshika Shukla 	opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY;
1957d11482d9SVanshika Shukla 	ret = qman_init_fq(fq, 0, &opts);
1958d11482d9SVanshika Shukla 	if (ret)
1959d11482d9SVanshika Shukla 		DPAA_PMD_ERR("init Tx_conf fqid %d failed with ret: %d",
1960d11482d9SVanshika Shukla 			fq->fqid, ret);
1961d11482d9SVanshika Shukla 	return ret;
1962d11482d9SVanshika Shukla }
1963d11482d9SVanshika Shukla 
19649e97abf2SJun Yang #if defined(RTE_LIBRTE_DPAA_DEBUG_DRIVER)
1965d11482d9SVanshika Shukla /* Initialise a DEBUG FQ ([rt]x_error, rx_default) */
196658e0420fSVanshika Shukla static int dpaa_def_queue_init(struct qman_fq *fq, uint32_t fqid)
196705ba55bcSShreyansh Jain {
19688d804cf1SHemant Agrawal 	struct qm_mcc_initfq opts = {0};
196905ba55bcSShreyansh Jain 	int ret;
197005ba55bcSShreyansh Jain 
197105ba55bcSShreyansh Jain 	PMD_INIT_FUNC_TRACE();
197205ba55bcSShreyansh Jain 
197305ba55bcSShreyansh Jain 	ret = qman_reserve_fqid(fqid);
197405ba55bcSShreyansh Jain 	if (ret) {
197558e0420fSVanshika Shukla 		DPAA_PMD_ERR("Reserve fqid %d failed with ret: %d",
197605ba55bcSShreyansh Jain 			fqid, ret);
197705ba55bcSShreyansh Jain 		return -EINVAL;
197805ba55bcSShreyansh Jain 	}
197905ba55bcSShreyansh Jain 	/* "map" this Rx FQ to one of the interfaces Tx FQID */
198058e0420fSVanshika Shukla 	DPAA_PMD_DEBUG("Creating fq %p, fqid %d", fq, fqid);
198105ba55bcSShreyansh Jain 	ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
198205ba55bcSShreyansh Jain 	if (ret) {
198358e0420fSVanshika Shukla 		DPAA_PMD_ERR("create fqid %d failed with ret: %d",
198405ba55bcSShreyansh Jain 			fqid, ret);
198505ba55bcSShreyansh Jain 		return ret;
198605ba55bcSShreyansh Jain 	}
198705ba55bcSShreyansh Jain 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
198805ba55bcSShreyansh Jain 	opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY;
198905ba55bcSShreyansh Jain 	ret = qman_init_fq(fq, 0, &opts);
199005ba55bcSShreyansh Jain 	if (ret)
199158e0420fSVanshika Shukla 		DPAA_PMD_ERR("init fqid %d failed with ret: %d",
199205ba55bcSShreyansh Jain 			    fqid, ret);
199305ba55bcSShreyansh Jain 	return ret;
199405ba55bcSShreyansh Jain }
19959e97abf2SJun Yang #endif
199605ba55bcSShreyansh Jain 
1997ff9e112dSShreyansh Jain /* Initialise a network interface */
1998ff9e112dSShreyansh Jain static int
19996b10d1f7SNipun Gupta dpaa_dev_init_secondary(struct rte_eth_dev *eth_dev)
20006b10d1f7SNipun Gupta {
20016b10d1f7SNipun Gupta 	struct rte_dpaa_device *dpaa_device;
20026b10d1f7SNipun Gupta 	struct fm_eth_port_cfg *cfg;
20036b10d1f7SNipun Gupta 	struct dpaa_if *dpaa_intf;
20046b10d1f7SNipun Gupta 	struct fman_if *fman_intf;
20056b10d1f7SNipun Gupta 	int dev_id;
20066b10d1f7SNipun Gupta 
20076b10d1f7SNipun Gupta 	PMD_INIT_FUNC_TRACE();
20086b10d1f7SNipun Gupta 
20096b10d1f7SNipun Gupta 	dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
20106b10d1f7SNipun Gupta 	dev_id = dpaa_device->id.dev_id;
20116b10d1f7SNipun Gupta 	cfg = dpaa_get_eth_port_cfg(dev_id);
20126b10d1f7SNipun Gupta 	fman_intf = cfg->fman_if;
20136b10d1f7SNipun Gupta 	eth_dev->process_private = fman_intf;
20146b10d1f7SNipun Gupta 
20156b10d1f7SNipun Gupta 	/* Plugging of UCODE burst API not supported in Secondary */
20166b10d1f7SNipun Gupta 	dpaa_intf = eth_dev->data->dev_private;
20176b10d1f7SNipun Gupta 	eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
20186b10d1f7SNipun Gupta 	if (dpaa_intf->cgr_tx)
20196b10d1f7SNipun Gupta 		eth_dev->tx_pkt_burst = dpaa_eth_queue_tx_slow;
20206b10d1f7SNipun Gupta 	else
20216b10d1f7SNipun Gupta 		eth_dev->tx_pkt_burst = dpaa_eth_queue_tx;
20226b10d1f7SNipun Gupta #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
20236b10d1f7SNipun Gupta 	qman_set_fq_lookup_table(
20246b10d1f7SNipun Gupta 		dpaa_intf->rx_queues->qman_fq_lookup_table);
20256b10d1f7SNipun Gupta #endif
20266b10d1f7SNipun Gupta 
20276b10d1f7SNipun Gupta 	return 0;
20286b10d1f7SNipun Gupta }
20296b10d1f7SNipun Gupta 
20309e97abf2SJun Yang #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
20319e97abf2SJun Yang static int
20329e97abf2SJun Yang dpaa_error_queue_init(struct dpaa_if *dpaa_intf,
20339e97abf2SJun Yang 	struct fman_if *fman_intf)
20349e97abf2SJun Yang {
20359e97abf2SJun Yang 	int i, ret;
20369e97abf2SJun Yang 	struct qman_fq *err_queues = dpaa_intf->debug_queues;
20379e97abf2SJun Yang 	uint32_t err_fqid = 0;
20389e97abf2SJun Yang 
20399e97abf2SJun Yang 	if (fman_intf->is_shared_mac) {
20409e97abf2SJun Yang 		DPAA_PMD_DEBUG("Shared MAC's err queues are handled in kernel");
20419e97abf2SJun Yang 		return 0;
20429e97abf2SJun Yang 	}
20439e97abf2SJun Yang 
20449e97abf2SJun Yang 	for (i = 0; i < DPAA_DEBUG_FQ_MAX_NUM; i++) {
20459e97abf2SJun Yang 		if (i == DPAA_DEBUG_FQ_RX_ERROR)
20469e97abf2SJun Yang 			err_fqid = fman_intf->fqid_rx_err;
20479e97abf2SJun Yang 		else if (i == DPAA_DEBUG_FQ_TX_ERROR)
20489e97abf2SJun Yang 			err_fqid = fman_intf->fqid_tx_err;
20499e97abf2SJun Yang 		else
20509e97abf2SJun Yang 			continue;
20519e97abf2SJun Yang 		ret = dpaa_def_queue_init(&err_queues[i], err_fqid);
20529e97abf2SJun Yang 		if (ret) {
20539e97abf2SJun Yang 			DPAA_PMD_ERR("DPAA %s ERROR queue init failed!",
20549e97abf2SJun Yang 				i == DPAA_DEBUG_FQ_RX_ERROR ?
20559e97abf2SJun Yang 				"RX" : "TX");
20569e97abf2SJun Yang 			return ret;
20579e97abf2SJun Yang 		}
20589e97abf2SJun Yang 		err_queues[i].dpaa_intf = dpaa_intf;
20599e97abf2SJun Yang 	}
20609e97abf2SJun Yang 
20619e97abf2SJun Yang 	return 0;
20629e97abf2SJun Yang }
20639e97abf2SJun Yang #endif
20649e97abf2SJun Yang 
206558e0420fSVanshika Shukla static int
206658e0420fSVanshika Shukla check_devargs_handler(__rte_unused const char *key, const char *value,
206758e0420fSVanshika Shukla 		      __rte_unused void *opaque)
206858e0420fSVanshika Shukla {
206958e0420fSVanshika Shukla 	if (strcmp(value, "1"))
207058e0420fSVanshika Shukla 		return -1;
207158e0420fSVanshika Shukla 
207258e0420fSVanshika Shukla 	return 0;
207358e0420fSVanshika Shukla }
207458e0420fSVanshika Shukla 
207558e0420fSVanshika Shukla static int
207658e0420fSVanshika Shukla dpaa_get_devargs(struct rte_devargs *devargs, const char *key)
207758e0420fSVanshika Shukla {
207858e0420fSVanshika Shukla 	struct rte_kvargs *kvlist;
207958e0420fSVanshika Shukla 
208058e0420fSVanshika Shukla 	if (!devargs)
208158e0420fSVanshika Shukla 		return 0;
208258e0420fSVanshika Shukla 
208358e0420fSVanshika Shukla 	kvlist = rte_kvargs_parse(devargs->args, NULL);
208458e0420fSVanshika Shukla 	if (!kvlist)
208558e0420fSVanshika Shukla 		return 0;
208658e0420fSVanshika Shukla 
208758e0420fSVanshika Shukla 	if (!rte_kvargs_count(kvlist, key)) {
208858e0420fSVanshika Shukla 		rte_kvargs_free(kvlist);
208958e0420fSVanshika Shukla 		return 0;
209058e0420fSVanshika Shukla 	}
209158e0420fSVanshika Shukla 
209258e0420fSVanshika Shukla 	if (rte_kvargs_process(kvlist, key,
209358e0420fSVanshika Shukla 			       check_devargs_handler, NULL) < 0) {
209458e0420fSVanshika Shukla 		rte_kvargs_free(kvlist);
209558e0420fSVanshika Shukla 		return 0;
209658e0420fSVanshika Shukla 	}
209758e0420fSVanshika Shukla 	rte_kvargs_free(kvlist);
209858e0420fSVanshika Shukla 
209958e0420fSVanshika Shukla 	return 1;
210058e0420fSVanshika Shukla }
210158e0420fSVanshika Shukla 
21026b10d1f7SNipun Gupta /* Initialise a network interface */
21036b10d1f7SNipun Gupta static int
2104ff9e112dSShreyansh Jain dpaa_dev_init(struct rte_eth_dev *eth_dev)
2105ff9e112dSShreyansh Jain {
2106af2828cfSAkhil Goyal 	int num_rx_fqs, fqid;
210737f9b54bSShreyansh Jain 	int loop, ret = 0;
2108ff9e112dSShreyansh Jain 	int dev_id;
2109ff9e112dSShreyansh Jain 	struct rte_dpaa_device *dpaa_device;
2110ff9e112dSShreyansh Jain 	struct dpaa_if *dpaa_intf;
211137f9b54bSShreyansh Jain 	struct fm_eth_port_cfg *cfg;
211237f9b54bSShreyansh Jain 	struct fman_if *fman_intf;
211337f9b54bSShreyansh Jain 	struct fman_if_bpool *bp, *tmp_bp;
211462f53995SHemant Agrawal 	uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES];
21159124e65dSGagandeep Singh 	uint32_t cgrid_tx[MAX_DPAA_CORES];
21164defbc8cSSachin Saxena 	uint32_t dev_rx_fqids[DPAA_MAX_NUM_PCD_QUEUES];
2117e4abd4ffSJun Yang 	int8_t dev_vspids[DPAA_MAX_NUM_PCD_QUEUES];
2118e4abd4ffSJun Yang 	int8_t vsp_id = -1;
211958e0420fSVanshika Shukla 	struct rte_device *dev = eth_dev->device;
2120480ec5b4SHemant Agrawal #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
2121480ec5b4SHemant Agrawal 	char *penv;
2122480ec5b4SHemant Agrawal #endif
2123ff9e112dSShreyansh Jain 
2124ff9e112dSShreyansh Jain 	PMD_INIT_FUNC_TRACE();
2125ff9e112dSShreyansh Jain 
2126ff9e112dSShreyansh Jain 	dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
2127ff9e112dSShreyansh Jain 	dev_id = dpaa_device->id.dev_id;
2128ff9e112dSShreyansh Jain 	dpaa_intf = eth_dev->data->dev_private;
2129051ae3afSHemant Agrawal 	cfg = dpaa_get_eth_port_cfg(dev_id);
213037f9b54bSShreyansh Jain 	fman_intf = cfg->fman_if;
2131ff9e112dSShreyansh Jain 
2132ff9e112dSShreyansh Jain 	dpaa_intf->name = dpaa_device->name;
2133ff9e112dSShreyansh Jain 
21347be78d02SJosh Soref 	/* save fman_if & cfg in the interface structure */
21356b10d1f7SNipun Gupta 	eth_dev->process_private = fman_intf;
2136ff9e112dSShreyansh Jain 	dpaa_intf->ifid = dev_id;
213737f9b54bSShreyansh Jain 	dpaa_intf->cfg = cfg;
2138ff9e112dSShreyansh Jain 
213958e0420fSVanshika Shukla 	if (dpaa_get_devargs(dev->devargs, DRIVER_IEEE1588))
214058e0420fSVanshika Shukla 		dpaa_ieee_1588 = 1;
214158e0420fSVanshika Shukla 
21424defbc8cSSachin Saxena 	memset((char *)dev_rx_fqids, 0,
21434defbc8cSSachin Saxena 		sizeof(uint32_t) * DPAA_MAX_NUM_PCD_QUEUES);
21444defbc8cSSachin Saxena 
2145e4abd4ffSJun Yang 	memset(dev_vspids, -1, DPAA_MAX_NUM_PCD_QUEUES);
2146e4abd4ffSJun Yang 
214737f9b54bSShreyansh Jain 	/* Initialize Rx FQ's */
21488d6fc8b6SHemant Agrawal 	if (default_q) {
21498d6fc8b6SHemant Agrawal 		num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
21504defbc8cSSachin Saxena 	} else if (fmc_q) {
2151f5fe3eedSJun Yang 		num_rx_fqs = dpaa_port_fmc_init(fman_intf, dev_rx_fqids,
2152f5fe3eedSJun Yang 						dev_vspids,
2153f5fe3eedSJun Yang 						DPAA_MAX_NUM_PCD_QUEUES);
2154f5fe3eedSJun Yang 		if (num_rx_fqs < 0) {
2155f5fe3eedSJun Yang 			DPAA_PMD_ERR("%s FMC initializes failed!",
2156f5fe3eedSJun Yang 				dpaa_intf->name);
2157f5fe3eedSJun Yang 			goto free_rx;
2158f5fe3eedSJun Yang 		}
2159f5fe3eedSJun Yang 		if (!num_rx_fqs) {
2160f5fe3eedSJun Yang 			DPAA_PMD_WARN("%s is not configured by FMC.",
2161f5fe3eedSJun Yang 				dpaa_intf->name);
2162f5fe3eedSJun Yang 		}
21638d6fc8b6SHemant Agrawal 	} else {
21644defbc8cSSachin Saxena 		/* FMCLESS mode, load balance to multiple cores.*/
21654defbc8cSSachin Saxena 		num_rx_fqs = rte_lcore_count();
21668d6fc8b6SHemant Agrawal 	}
21678d6fc8b6SHemant Agrawal 
2168e4f931ccSHemant Agrawal 	/* Each device can not have more than DPAA_MAX_NUM_PCD_QUEUES RX
216937f9b54bSShreyansh Jain 	 * queues.
217037f9b54bSShreyansh Jain 	 */
21714defbc8cSSachin Saxena 	if (num_rx_fqs < 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) {
21721ec9a3afSHemant Agrawal 		DPAA_PMD_ERR("Invalid number of RX queues");
217337f9b54bSShreyansh Jain 		return -EINVAL;
217437f9b54bSShreyansh Jain 	}
217537f9b54bSShreyansh Jain 
21764defbc8cSSachin Saxena 	if (num_rx_fqs > 0) {
217737f9b54bSShreyansh Jain 		dpaa_intf->rx_queues = rte_zmalloc(NULL,
217837f9b54bSShreyansh Jain 			sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
21790ff76833SYong Wang 		if (!dpaa_intf->rx_queues) {
21801ec9a3afSHemant Agrawal 			DPAA_PMD_ERR("Failed to alloc mem for RX queues");
21810ff76833SYong Wang 			return -ENOMEM;
21820ff76833SYong Wang 		}
21834defbc8cSSachin Saxena 	} else {
21844defbc8cSSachin Saxena 		dpaa_intf->rx_queues = NULL;
21854defbc8cSSachin Saxena 	}
218662f53995SHemant Agrawal 
21879124e65dSGagandeep Singh 	memset(cgrid, 0, sizeof(cgrid));
21889124e65dSGagandeep Singh 	memset(cgrid_tx, 0, sizeof(cgrid_tx));
21899124e65dSGagandeep Singh 
21909124e65dSGagandeep Singh 	/* if DPAA_TX_TAILDROP_THRESHOLD is set, use that value; if 0, it means
21919124e65dSGagandeep Singh 	 * Tx tail drop is disabled.
21929124e65dSGagandeep Singh 	 */
21939124e65dSGagandeep Singh 	if (getenv("DPAA_TX_TAILDROP_THRESHOLD")) {
21949124e65dSGagandeep Singh 		td_tx_threshold = atoi(getenv("DPAA_TX_TAILDROP_THRESHOLD"));
21959124e65dSGagandeep Singh 		DPAA_PMD_DEBUG("Tail drop threshold env configured: %u",
21969124e65dSGagandeep Singh 			       td_tx_threshold);
21979124e65dSGagandeep Singh 		/* if a very large value is being configured */
21989124e65dSGagandeep Singh 		if (td_tx_threshold > UINT16_MAX)
21999124e65dSGagandeep Singh 			td_tx_threshold = CGR_RX_PERFQ_THRESH;
22009124e65dSGagandeep Singh 	}
22019124e65dSGagandeep Singh 
2202480ec5b4SHemant Agrawal #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
2203480ec5b4SHemant Agrawal 	penv = getenv("DPAA_DISPLAY_FRAME_AND_PARSER_RESULT");
2204480ec5b4SHemant Agrawal 	if (penv)
2205480ec5b4SHemant Agrawal 		dpaa_force_display_frame_set(atoi(penv));
2206480ec5b4SHemant Agrawal #endif
2207480ec5b4SHemant Agrawal 
220862f53995SHemant Agrawal 	/* If congestion control is enabled globally*/
22094defbc8cSSachin Saxena 	if (num_rx_fqs > 0 && td_threshold) {
221062f53995SHemant Agrawal 		dpaa_intf->cgr_rx = rte_zmalloc(NULL,
221162f53995SHemant Agrawal 			sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE);
22120ff76833SYong Wang 		if (!dpaa_intf->cgr_rx) {
22131ec9a3afSHemant Agrawal 			DPAA_PMD_ERR("Failed to alloc mem for cgr_rx");
22140ff76833SYong Wang 			ret = -ENOMEM;
22150ff76833SYong Wang 			goto free_rx;
22160ff76833SYong Wang 		}
221762f53995SHemant Agrawal 
221862f53995SHemant Agrawal 		ret = qman_alloc_cgrid_range(&cgrid[0], num_rx_fqs, 1, 0);
221962f53995SHemant Agrawal 		if (ret != num_rx_fqs) {
222062f53995SHemant Agrawal 			DPAA_PMD_WARN("insufficient CGRIDs available");
22210ff76833SYong Wang 			ret = -EINVAL;
22220ff76833SYong Wang 			goto free_rx;
222362f53995SHemant Agrawal 		}
222462f53995SHemant Agrawal 	} else {
222562f53995SHemant Agrawal 		dpaa_intf->cgr_rx = NULL;
222662f53995SHemant Agrawal 	}
222762f53995SHemant Agrawal 
22284defbc8cSSachin Saxena 	if (!fmc_q && !default_q) {
22294defbc8cSSachin Saxena 		ret = qman_alloc_fqid_range(dev_rx_fqids, num_rx_fqs,
22304defbc8cSSachin Saxena 					    num_rx_fqs, 0);
22314defbc8cSSachin Saxena 		if (ret < 0) {
22321ec9a3afSHemant Agrawal 			DPAA_PMD_ERR("Failed to alloc rx fqid's");
22334defbc8cSSachin Saxena 			goto free_rx;
22344defbc8cSSachin Saxena 		}
22354defbc8cSSachin Saxena 	}
22364defbc8cSSachin Saxena 
223737f9b54bSShreyansh Jain 	for (loop = 0; loop < num_rx_fqs; loop++) {
22388d6fc8b6SHemant Agrawal 		if (default_q)
22398d6fc8b6SHemant Agrawal 			fqid = cfg->rx_def;
22408d6fc8b6SHemant Agrawal 		else
22414defbc8cSSachin Saxena 			fqid = dev_rx_fqids[loop];
224262f53995SHemant Agrawal 
2243e4abd4ffSJun Yang 		vsp_id = dev_vspids[loop];
2244e4abd4ffSJun Yang 
224562f53995SHemant Agrawal 		if (dpaa_intf->cgr_rx)
224662f53995SHemant Agrawal 			dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];
224762f53995SHemant Agrawal 
224862f53995SHemant Agrawal 		ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop],
224962f53995SHemant Agrawal 			dpaa_intf->cgr_rx ? &dpaa_intf->cgr_rx[loop] : NULL,
225062f53995SHemant Agrawal 			fqid);
225137f9b54bSShreyansh Jain 		if (ret)
22520ff76833SYong Wang 			goto free_rx;
2253e4abd4ffSJun Yang 		dpaa_intf->rx_queues[loop].vsp_id = vsp_id;
225437f9b54bSShreyansh Jain 		dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
225537f9b54bSShreyansh Jain 	}
225637f9b54bSShreyansh Jain 	dpaa_intf->nb_rx_queues = num_rx_fqs;
225737f9b54bSShreyansh Jain 
22580ff76833SYong Wang 	/* Initialise Tx FQs.free_rx Have as many Tx FQ's as number of cores */
225937f9b54bSShreyansh Jain 	dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
2260af2828cfSAkhil Goyal 		MAX_DPAA_CORES, MAX_CACHELINE);
22610ff76833SYong Wang 	if (!dpaa_intf->tx_queues) {
22621ec9a3afSHemant Agrawal 		DPAA_PMD_ERR("Failed to alloc mem for TX queues");
22630ff76833SYong Wang 		ret = -ENOMEM;
22640ff76833SYong Wang 		goto free_rx;
22650ff76833SYong Wang 	}
226637f9b54bSShreyansh Jain 
226758e0420fSVanshika Shukla 	dpaa_intf->tx_conf_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
226858e0420fSVanshika Shukla 		MAX_DPAA_CORES, MAX_CACHELINE);
226958e0420fSVanshika Shukla 	if (!dpaa_intf->tx_conf_queues) {
227058e0420fSVanshika Shukla 		DPAA_PMD_ERR("Failed to alloc mem for TX conf queues");
227158e0420fSVanshika Shukla 		ret = -ENOMEM;
227258e0420fSVanshika Shukla 		goto free_rx;
227358e0420fSVanshika Shukla 	}
227458e0420fSVanshika Shukla 
22759124e65dSGagandeep Singh 	/* If congestion control is enabled globally*/
22769124e65dSGagandeep Singh 	if (td_tx_threshold) {
22779124e65dSGagandeep Singh 		dpaa_intf->cgr_tx = rte_zmalloc(NULL,
22789124e65dSGagandeep Singh 			sizeof(struct qman_cgr) * MAX_DPAA_CORES,
22799124e65dSGagandeep Singh 			MAX_CACHELINE);
22809124e65dSGagandeep Singh 		if (!dpaa_intf->cgr_tx) {
22811ec9a3afSHemant Agrawal 			DPAA_PMD_ERR("Failed to alloc mem for cgr_tx");
22829124e65dSGagandeep Singh 			ret = -ENOMEM;
22839124e65dSGagandeep Singh 			goto free_rx;
22849124e65dSGagandeep Singh 		}
22859124e65dSGagandeep Singh 
22869124e65dSGagandeep Singh 		ret = qman_alloc_cgrid_range(&cgrid_tx[0], MAX_DPAA_CORES,
22879124e65dSGagandeep Singh 					     1, 0);
22889124e65dSGagandeep Singh 		if (ret != MAX_DPAA_CORES) {
22899124e65dSGagandeep Singh 			DPAA_PMD_WARN("insufficient CGRIDs available");
22909124e65dSGagandeep Singh 			ret = -EINVAL;
22919124e65dSGagandeep Singh 			goto free_rx;
22929124e65dSGagandeep Singh 		}
22939124e65dSGagandeep Singh 	} else {
22949124e65dSGagandeep Singh 		dpaa_intf->cgr_tx = NULL;
22959124e65dSGagandeep Singh 	}
22969124e65dSGagandeep Singh 
22979124e65dSGagandeep Singh 
2298af2828cfSAkhil Goyal 	for (loop = 0; loop < MAX_DPAA_CORES; loop++) {
22999124e65dSGagandeep Singh 		if (dpaa_intf->cgr_tx)
23009124e65dSGagandeep Singh 			dpaa_intf->cgr_tx[loop].cgrid = cgrid_tx[loop];
23019124e65dSGagandeep Singh 
230237f9b54bSShreyansh Jain 		ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
23039124e65dSGagandeep Singh 			fman_intf,
23049124e65dSGagandeep Singh 			dpaa_intf->cgr_tx ? &dpaa_intf->cgr_tx[loop] : NULL);
230537f9b54bSShreyansh Jain 		if (ret)
23060ff76833SYong Wang 			goto free_tx;
230737f9b54bSShreyansh Jain 		dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
2308d11482d9SVanshika Shukla 
2309d11482d9SVanshika Shukla 		if (dpaa_ieee_1588) {
2310d11482d9SVanshika Shukla 			ret = dpaa_tx_conf_queue_init(&dpaa_intf->tx_conf_queues[loop]);
2311d11482d9SVanshika Shukla 			if (ret)
2312d11482d9SVanshika Shukla 				goto free_tx;
2313d11482d9SVanshika Shukla 
2314d11482d9SVanshika Shukla 			dpaa_intf->tx_conf_queues[loop].dpaa_intf = dpaa_intf;
2315d11482d9SVanshika Shukla 			dpaa_intf->tx_queues[loop].tx_conf_queue = &dpaa_intf->tx_conf_queues[loop];
2316d11482d9SVanshika Shukla 		}
231737f9b54bSShreyansh Jain 	}
2318af2828cfSAkhil Goyal 	dpaa_intf->nb_tx_queues = MAX_DPAA_CORES;
23199e97abf2SJun Yang #if defined(RTE_LIBRTE_DPAA_DEBUG_DRIVER)
23209e97abf2SJun Yang 	ret = dpaa_error_queue_init(dpaa_intf, fman_intf);
23219e97abf2SJun Yang 	if (ret)
23229e97abf2SJun Yang 		goto free_tx;
232358e0420fSVanshika Shukla #endif
232437f9b54bSShreyansh Jain 	DPAA_PMD_DEBUG("All frame queues created");
232537f9b54bSShreyansh Jain 
232612a4678aSShreyansh Jain 	/* Get the initial configuration for flow control */
2327*a0edbb8aSRohit Raj 	if (fman_intf->mac_type != fman_offline)
23286b10d1f7SNipun Gupta 		dpaa_fc_set_default(dpaa_intf, fman_intf);
232912a4678aSShreyansh Jain 
233037f9b54bSShreyansh Jain 	/* reset bpool list, initialize bpool dynamically */
233137f9b54bSShreyansh Jain 	list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
233237f9b54bSShreyansh Jain 		list_del(&bp->node);
23334762b3d4SHemant Agrawal 		rte_free(bp);
233437f9b54bSShreyansh Jain 	}
233537f9b54bSShreyansh Jain 
233637f9b54bSShreyansh Jain 	/* Populate ethdev structure */
2337ff9e112dSShreyansh Jain 	eth_dev->dev_ops = &dpaa_devops;
2338cbfc6111SFerruh Yigit 	eth_dev->rx_queue_count = dpaa_dev_rx_queue_count;
233937f9b54bSShreyansh Jain 	eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
234037f9b54bSShreyansh Jain 	eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
234137f9b54bSShreyansh Jain 
234237f9b54bSShreyansh Jain 	/* Allocate memory for storing MAC addresses */
234337f9b54bSShreyansh Jain 	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
234435b2d13fSOlivier Matz 		RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
234537f9b54bSShreyansh Jain 	if (eth_dev->data->mac_addrs == NULL) {
234637f9b54bSShreyansh Jain 		DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
234737f9b54bSShreyansh Jain 						"store MAC addresses",
234835b2d13fSOlivier Matz 				RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
23490ff76833SYong Wang 		ret = -ENOMEM;
23500ff76833SYong Wang 		goto free_tx;
235137f9b54bSShreyansh Jain 	}
235237f9b54bSShreyansh Jain 
235337f9b54bSShreyansh Jain 	/* copy the primary mac address */
2354538da7a1SOlivier Matz 	rte_ether_addr_copy(&fman_intf->mac_addr, &eth_dev->data->mac_addrs[0]);
235537f9b54bSShreyansh Jain 
2356a247fcd9SStephen Hemminger 	DPAA_PMD_INFO("net: dpaa: %s: " RTE_ETHER_ADDR_PRT_FMT,
2357a7db3afcSAman Deep Singh 		      dpaa_device->name, RTE_ETHER_ADDR_BYTES(&fman_intf->mac_addr));
23584defbc8cSSachin Saxena 
2359*a0edbb8aSRohit Raj 	if (!fman_intf->is_shared_mac && fman_intf->mac_type != fman_offline) {
236095d226f0SNipun Gupta 		/* Configure error packet handling */
236177393f56SSachin Saxena 		fman_if_receive_rx_errors(fman_intf,
236277393f56SSachin Saxena 					  FM_FD_RX_STATUS_ERR_MASK);
236395d226f0SNipun Gupta 		/* Disable RX mode */
236437f9b54bSShreyansh Jain 		fman_if_disable_rx(fman_intf);
236537f9b54bSShreyansh Jain 		/* Disable promiscuous mode */
236637f9b54bSShreyansh Jain 		fman_if_promiscuous_disable(fman_intf);
236737f9b54bSShreyansh Jain 		/* Disable multicast */
236837f9b54bSShreyansh Jain 		fman_if_reset_mcast_filter_table(fman_intf);
236937f9b54bSShreyansh Jain 		/* Reset interface statistics */
237037f9b54bSShreyansh Jain 		fman_if_stats_reset(fman_intf);
237155576ac2SHemant Agrawal 		/* Disable SG by default */
237255576ac2SHemant Agrawal 		fman_if_set_sg(fman_intf, 0);
2373133332f0SRadu Bulie 		fman_if_set_maxfrm(fman_intf,
2374133332f0SRadu Bulie 				   RTE_ETHER_MAX_LEN + VLAN_TAG_SIZE);
2375133332f0SRadu Bulie 	}
2376ff9e112dSShreyansh Jain 
2377ff9e112dSShreyansh Jain 	return 0;
23780ff76833SYong Wang 
23790ff76833SYong Wang free_tx:
23800ff76833SYong Wang 	rte_free(dpaa_intf->tx_queues);
23810ff76833SYong Wang 	dpaa_intf->tx_queues = NULL;
23820ff76833SYong Wang 	dpaa_intf->nb_tx_queues = 0;
23830ff76833SYong Wang 
23840ff76833SYong Wang free_rx:
23850ff76833SYong Wang 	rte_free(dpaa_intf->cgr_rx);
23869124e65dSGagandeep Singh 	rte_free(dpaa_intf->cgr_tx);
23870ff76833SYong Wang 	rte_free(dpaa_intf->rx_queues);
23880ff76833SYong Wang 	dpaa_intf->rx_queues = NULL;
23890ff76833SYong Wang 	dpaa_intf->nb_rx_queues = 0;
23900ff76833SYong Wang 	return ret;
2391ff9e112dSShreyansh Jain }
2392ff9e112dSShreyansh Jain 
2393ff9e112dSShreyansh Jain static int
23944defbc8cSSachin Saxena rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
2395ff9e112dSShreyansh Jain 	       struct rte_dpaa_device *dpaa_dev)
2396ff9e112dSShreyansh Jain {
2397ff9e112dSShreyansh Jain 	int diag;
2398ff9e112dSShreyansh Jain 	int ret;
2399ff9e112dSShreyansh Jain 	struct rte_eth_dev *eth_dev;
2400ff9e112dSShreyansh Jain 
2401ff9e112dSShreyansh Jain 	PMD_INIT_FUNC_TRACE();
2402ff9e112dSShreyansh Jain 
240347854c18SHemant Agrawal 	if ((DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE) >
240447854c18SHemant Agrawal 		RTE_PKTMBUF_HEADROOM) {
240547854c18SHemant Agrawal 		DPAA_PMD_ERR(
240647854c18SHemant Agrawal 		"RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA Annotation req(%d)",
240747854c18SHemant Agrawal 		RTE_PKTMBUF_HEADROOM,
240847854c18SHemant Agrawal 		DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE);
240947854c18SHemant Agrawal 
241047854c18SHemant Agrawal 		return -1;
241147854c18SHemant Agrawal 	}
241247854c18SHemant Agrawal 
2413ff9e112dSShreyansh Jain 	/* In case of secondary process, the device is already configured
2414ff9e112dSShreyansh Jain 	 * and no further action is required, except portal initialization
2415ff9e112dSShreyansh Jain 	 * and verifying secondary attachment to port name.
2416ff9e112dSShreyansh Jain 	 */
2417ff9e112dSShreyansh Jain 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2418ff9e112dSShreyansh Jain 		eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
2419ff9e112dSShreyansh Jain 		if (!eth_dev)
2420ff9e112dSShreyansh Jain 			return -ENOMEM;
2421d1c3ab22SFerruh Yigit 		eth_dev->device = &dpaa_dev->device;
2422d1c3ab22SFerruh Yigit 		eth_dev->dev_ops = &dpaa_devops;
24236b10d1f7SNipun Gupta 
24246b10d1f7SNipun Gupta 		ret = dpaa_dev_init_secondary(eth_dev);
24256b10d1f7SNipun Gupta 		if (ret != 0) {
2426a247fcd9SStephen Hemminger 			DPAA_PMD_ERR("secondary dev init failed");
24276b10d1f7SNipun Gupta 			return ret;
24286b10d1f7SNipun Gupta 		}
24296b10d1f7SNipun Gupta 
2430fbe90cddSThomas Monjalon 		rte_eth_dev_probing_finish(eth_dev);
2431ff9e112dSShreyansh Jain 		return 0;
2432ff9e112dSShreyansh Jain 	}
2433ff9e112dSShreyansh Jain 
2434af2828cfSAkhil Goyal 	if (!is_global_init && (rte_eal_process_type() == RTE_PROC_PRIMARY)) {
24358d6fc8b6SHemant Agrawal 		if (access("/tmp/fmc.bin", F_OK) == -1) {
2436b7c7ff6eSStephen Hemminger 			DPAA_PMD_INFO("* FMC not configured.Enabling default mode");
24378d6fc8b6SHemant Agrawal 			default_q = 1;
24388d6fc8b6SHemant Agrawal 		}
24398d6fc8b6SHemant Agrawal 
24404defbc8cSSachin Saxena 		if (!(default_q || fmc_q)) {
24414defbc8cSSachin Saxena 			if (dpaa_fm_init()) {
24421ec9a3afSHemant Agrawal 				DPAA_PMD_ERR("FM init failed");
24434defbc8cSSachin Saxena 				return -1;
24444defbc8cSSachin Saxena 			}
24454defbc8cSSachin Saxena 		}
24464defbc8cSSachin Saxena 
2447e507498dSHemant Agrawal 		/* disabling the default push mode for LS1043 */
2448e507498dSHemant Agrawal 		if (dpaa_svr_family == SVR_LS1043A_FAMILY)
2449e507498dSHemant Agrawal 			dpaa_push_mode_max_queue = 0;
2450e507498dSHemant Agrawal 
24517be78d02SJosh Soref 		/* if push mode queues to be enabled. Currently we are allowing
2452e507498dSHemant Agrawal 		 * only one queue per thread.
2453e507498dSHemant Agrawal 		 */
2454e507498dSHemant Agrawal 		if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
2455e507498dSHemant Agrawal 			dpaa_push_mode_max_queue =
2456e507498dSHemant Agrawal 					atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
2457e507498dSHemant Agrawal 			if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
2458e507498dSHemant Agrawal 			    dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
2459e507498dSHemant Agrawal 		}
2460e507498dSHemant Agrawal 
2461ff9e112dSShreyansh Jain 		is_global_init = 1;
2462ff9e112dSShreyansh Jain 	}
2463ff9e112dSShreyansh Jain 
2464e5872221SRohit Raj 	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
2465ff9e112dSShreyansh Jain 		ret = rte_dpaa_portal_init((void *)1);
2466ff9e112dSShreyansh Jain 		if (ret) {
2467ff9e112dSShreyansh Jain 			DPAA_PMD_ERR("Unable to initialize portal");
2468ff9e112dSShreyansh Jain 			return ret;
2469ff9e112dSShreyansh Jain 		}
24705d944582SNipun Gupta 	}
2471ff9e112dSShreyansh Jain 
24726b10d1f7SNipun Gupta 	eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
2473af2828cfSAkhil Goyal 	if (!eth_dev)
2474af2828cfSAkhil Goyal 		return -ENOMEM;
2475ff9e112dSShreyansh Jain 
24766b10d1f7SNipun Gupta 	eth_dev->data->dev_private =
24776b10d1f7SNipun Gupta 			rte_zmalloc("ethdev private structure",
2478ff9e112dSShreyansh Jain 					sizeof(struct dpaa_if),
2479ff9e112dSShreyansh Jain 					RTE_CACHE_LINE_SIZE);
2480ff9e112dSShreyansh Jain 	if (!eth_dev->data->dev_private) {
2481ff9e112dSShreyansh Jain 		DPAA_PMD_ERR("Cannot allocate memzone for port data");
2482ff9e112dSShreyansh Jain 		rte_eth_dev_release_port(eth_dev);
2483ff9e112dSShreyansh Jain 		return -ENOMEM;
2484ff9e112dSShreyansh Jain 	}
24856b10d1f7SNipun Gupta 
2486ff9e112dSShreyansh Jain 	eth_dev->device = &dpaa_dev->device;
2487ff9e112dSShreyansh Jain 	dpaa_dev->eth_dev = eth_dev;
2488ff9e112dSShreyansh Jain 
24899124e65dSGagandeep Singh 	qman_ern_register_cb(dpaa_free_mbuf);
24909124e65dSGagandeep Singh 
24912aa10990SRohit Raj 	if (dpaa_drv->drv_flags & RTE_DPAA_DRV_INTR_LSC)
24922aa10990SRohit Raj 		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
24932aa10990SRohit Raj 
2494ff9e112dSShreyansh Jain 	/* Invoke PMD device initialization function */
2495ff9e112dSShreyansh Jain 	diag = dpaa_dev_init(eth_dev);
2496fbe90cddSThomas Monjalon 	if (diag == 0) {
2497533c31ccSGagandeep Singh 		if (!dpaa_tx_sg_pool) {
2498533c31ccSGagandeep Singh 			dpaa_tx_sg_pool =
2499533c31ccSGagandeep Singh 				rte_pktmbuf_pool_create("dpaa_mbuf_tx_sg_pool",
2500533c31ccSGagandeep Singh 				DPAA_POOL_SIZE,
2501533c31ccSGagandeep Singh 				DPAA_POOL_CACHE_SIZE, 0,
2502533c31ccSGagandeep Singh 				DPAA_MAX_SGS * sizeof(struct qm_sg_entry),
2503533c31ccSGagandeep Singh 				rte_socket_id());
2504533c31ccSGagandeep Singh 			if (dpaa_tx_sg_pool == NULL) {
25051ec9a3afSHemant Agrawal 				DPAA_PMD_ERR("SG pool creation failed");
2506533c31ccSGagandeep Singh 				return -ENOMEM;
2507533c31ccSGagandeep Singh 			}
2508533c31ccSGagandeep Singh 		}
2509fbe90cddSThomas Monjalon 		rte_eth_dev_probing_finish(eth_dev);
2510533c31ccSGagandeep Singh 		dpaa_valid_dev++;
2511ff9e112dSShreyansh Jain 		return 0;
2512fbe90cddSThomas Monjalon 	}
2513ff9e112dSShreyansh Jain 
2514ff9e112dSShreyansh Jain 	rte_eth_dev_release_port(eth_dev);
2515ff9e112dSShreyansh Jain 	return diag;
2516ff9e112dSShreyansh Jain }
2517ff9e112dSShreyansh Jain 
2518ff9e112dSShreyansh Jain static int
2519ff9e112dSShreyansh Jain rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
2520ff9e112dSShreyansh Jain {
2521ff9e112dSShreyansh Jain 	struct rte_eth_dev *eth_dev;
25222defb114SSachin Saxena 	int ret;
2523ff9e112dSShreyansh Jain 
2524ff9e112dSShreyansh Jain 	PMD_INIT_FUNC_TRACE();
2525ff9e112dSShreyansh Jain 
2526ff9e112dSShreyansh Jain 	eth_dev = dpaa_dev->eth_dev;
25272defb114SSachin Saxena 	dpaa_eth_dev_close(eth_dev);
2528533c31ccSGagandeep Singh 	dpaa_valid_dev--;
2529533c31ccSGagandeep Singh 	if (!dpaa_valid_dev)
2530533c31ccSGagandeep Singh 		rte_mempool_free(dpaa_tx_sg_pool);
25312defb114SSachin Saxena 	ret = rte_eth_dev_release_port(eth_dev);
2532ff9e112dSShreyansh Jain 
25332defb114SSachin Saxena 	return ret;
2534ff9e112dSShreyansh Jain }
2535ff9e112dSShreyansh Jain 
25364defbc8cSSachin Saxena static void __attribute__((destructor(102))) dpaa_finish(void)
25374defbc8cSSachin Saxena {
25384defbc8cSSachin Saxena 	/* For secondary, primary will do all the cleanup */
25394defbc8cSSachin Saxena 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
25404defbc8cSSachin Saxena 		return;
25414defbc8cSSachin Saxena 
25424defbc8cSSachin Saxena 	if (!(default_q || fmc_q)) {
25434defbc8cSSachin Saxena 		unsigned int i;
25444defbc8cSSachin Saxena 
25454defbc8cSSachin Saxena 		for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
25464defbc8cSSachin Saxena 			if (rte_eth_devices[i].dev_ops == &dpaa_devops) {
25474defbc8cSSachin Saxena 				struct rte_eth_dev *dev = &rte_eth_devices[i];
25484defbc8cSSachin Saxena 				struct dpaa_if *dpaa_intf =
25494defbc8cSSachin Saxena 					dev->data->dev_private;
25504defbc8cSSachin Saxena 				struct fman_if *fif =
25514defbc8cSSachin Saxena 					dev->process_private;
25524defbc8cSSachin Saxena 				if (dpaa_intf->port_handle)
25534defbc8cSSachin Saxena 					if (dpaa_fm_deconfig(dpaa_intf, fif))
25544defbc8cSSachin Saxena 						DPAA_PMD_WARN("DPAA FM "
25551ec9a3afSHemant Agrawal 							"deconfig failed");
2556e4abd4ffSJun Yang 				if (fif->num_profiles) {
2557e4abd4ffSJun Yang 					if (dpaa_port_vsp_cleanup(dpaa_intf,
2558e4abd4ffSJun Yang 								  fif))
25591ec9a3afSHemant Agrawal 						DPAA_PMD_WARN("DPAA FM vsp cleanup failed");
2560e4abd4ffSJun Yang 				}
25614defbc8cSSachin Saxena 			}
25624defbc8cSSachin Saxena 		}
25634defbc8cSSachin Saxena 		if (is_global_init)
25644defbc8cSSachin Saxena 			if (dpaa_fm_term())
25651ec9a3afSHemant Agrawal 				DPAA_PMD_WARN("DPAA FM term failed");
25664defbc8cSSachin Saxena 
25674defbc8cSSachin Saxena 		is_global_init = 0;
25684defbc8cSSachin Saxena 
25694defbc8cSSachin Saxena 		DPAA_PMD_INFO("DPAA fman cleaned up");
25704defbc8cSSachin Saxena 	}
25714defbc8cSSachin Saxena }
25724defbc8cSSachin Saxena 
/* DPAA bus driver descriptor: advertises link-status-change interrupt
 * support and wires the probe/remove hooks into the DPAA bus.
 */
static struct rte_dpaa_driver rte_dpaa_pmd = {
	.drv_flags = RTE_DPAA_DRV_INTR_LSC,
	.drv_type = FSL_DPAA_ETH,
	.probe = rte_dpaa_probe,
	.remove = rte_dpaa_remove,
};

RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);
/* Devarg named by DRIVER_IEEE1588, taking an integer value —
 * NOTE(review): presumably toggles IEEE 1588 timestamping; confirm
 * against the devargs parsing earlier in this file.
 */
RTE_PMD_REGISTER_PARAM_STRING(net_dpaa,
		DRIVER_IEEE1588 "=<int>");
RTE_LOG_REGISTER_DEFAULT(dpaa_logtype_pmd, NOTICE);
2584