xref: /dpdk/drivers/net/dpaa2/dpaa2_ethdev.c (revision f30a1bbd63f494f5ba623582d7e9166c817794a4)
1 /* * SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2024 NXP
5  *
6  */
7 
8 #include <time.h>
9 #include <net/if.h>
10 
11 #include <rte_mbuf.h>
12 #include <ethdev_driver.h>
13 #include <rte_malloc.h>
14 #include <rte_memcpy.h>
15 #include <rte_string_fns.h>
16 #include <rte_cycles.h>
17 #include <rte_kvargs.h>
18 #include <dev_driver.h>
19 #include <bus_fslmc_driver.h>
20 #include <rte_flow_driver.h>
21 #include "rte_dpaa2_mempool.h"
22 
23 #include "dpaa2_pmd_logs.h"
24 #include <fslmc_vfio.h>
25 #include <dpaa2_hw_pvt.h>
26 #include <dpaa2_hw_mempool.h>
27 #include <dpaa2_hw_dpio.h>
28 #include <mc/fsl_dpmng.h>
29 #include "dpaa2_ethdev.h"
30 #include "dpaa2_sparser.h"
31 #include <fsl_qbman_debug.h>
32 
33 #define DRIVER_LOOPBACK_MODE "drv_loopback"
34 #define DRIVER_NO_PREFETCH_MODE "drv_no_prefetch"
35 #define DRIVER_TX_CONF "drv_tx_conf"
36 #define DRIVER_ERROR_QUEUE  "drv_err_queue"
37 #define CHECK_INTERVAL         100  /* 100ms */
38 #define MAX_REPEAT_TIME        90   /* 9s (90 * 100ms) in total */
39 
40 /* Supported Rx offloads */
41 static uint64_t dev_rx_offloads_sup =
42 		RTE_ETH_RX_OFFLOAD_CHECKSUM |
43 		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
44 		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
45 		RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
46 		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
47 		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
48 		RTE_ETH_RX_OFFLOAD_TIMESTAMP;
49 
50 /* Rx offloads which cannot be disabled */
51 static uint64_t dev_rx_offloads_nodis =
52 		RTE_ETH_RX_OFFLOAD_RSS_HASH |
53 		RTE_ETH_RX_OFFLOAD_SCATTER;
54 
55 /* Supported Tx offloads */
56 static uint64_t dev_tx_offloads_sup =
57 		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
58 		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
59 		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
60 		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
61 		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
62 		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
63 		RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
64 		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
65 
66 /* Tx offloads which cannot be disabled */
67 static uint64_t dev_tx_offloads_nodis =
68 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
69 
70 /* enable timestamp in mbuf */
71 bool dpaa2_enable_ts[RTE_MAX_ETHPORTS];
72 uint64_t dpaa2_timestamp_rx_dynflag;
73 int dpaa2_timestamp_dynfield_offset = -1;
74 
75 /* Enable error queue */
76 bool dpaa2_enable_err_queue;
77 
78 bool dpaa2_print_parser_result;
79 
80 #define MAX_NB_RX_DESC		11264
81 int total_nb_rx_desc;
82 
83 int dpaa2_valid_dev;
84 struct rte_mempool *dpaa2_tx_sg_pool;
85 
86 struct rte_dpaa2_xstats_name_off {
87 	char name[RTE_ETH_XSTATS_NAME_SIZE];
88 	uint8_t page_id; /* dpni statistics page id */
89 	uint8_t stats_id; /* stats id in the given page */
90 };
91 
92 static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
93 	{"ingress_multicast_frames", 0, 2},
94 	{"ingress_multicast_bytes", 0, 3},
95 	{"ingress_broadcast_frames", 0, 4},
96 	{"ingress_broadcast_bytes", 0, 5},
97 	{"egress_multicast_frames", 1, 2},
98 	{"egress_multicast_bytes", 1, 3},
99 	{"egress_broadcast_frames", 1, 4},
100 	{"egress_broadcast_bytes", 1, 5},
101 	{"ingress_filtered_frames", 2, 0},
102 	{"ingress_discarded_frames", 2, 1},
103 	{"ingress_nobuffer_discards", 2, 2},
104 	{"egress_discarded_frames", 2, 3},
105 	{"egress_confirmed_frames", 2, 4},
106 	{"cgr_reject_frames", 4, 0},
107 	{"cgr_reject_bytes", 4, 1},
108 };
109 
110 static struct rte_dpaa2_driver rte_dpaa2_pmd;
111 static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
112 				 int wait_to_complete);
113 static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
114 static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
115 static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
116 
117 static int
118 dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
119 {
120 	int ret;
121 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
122 	struct fsl_mc_io *dpni = dev->process_private;
123 
124 	PMD_INIT_FUNC_TRACE();
125 
126 	if (!dpni) {
127 		DPAA2_PMD_ERR("dpni is NULL");
128 		return -EINVAL;
129 	}
130 
131 	if (on)
132 		ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW, priv->token,
133 				       vlan_id, 0, 0, 0);
134 	else
135 		ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
136 					  priv->token, vlan_id);
137 
138 	if (ret < 0)
139 		DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid =%d",
140 			      ret, vlan_id, priv->hw_id);
141 
142 	return ret;
143 }
144 
145 static int
146 dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
147 {
148 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
149 	struct fsl_mc_io *dpni = dev->process_private;
150 	int ret = 0;
151 
152 	PMD_INIT_FUNC_TRACE();
153 
154 	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
155 		/* VLAN Filter not available */
156 		if (!priv->max_vlan_filters) {
157 			DPAA2_PMD_INFO("VLAN filter not available");
158 			return -ENOTSUP;
159 		}
160 
161 		if (dev->data->dev_conf.rxmode.offloads &
162 			RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
163 			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
164 						      priv->token, true);
165 		else
166 			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
167 						      priv->token, false);
168 		if (ret < 0)
169 			DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret);
170 	}
171 
172 	return ret;
173 }
174 
175 static int
176 dpaa2_vlan_tpid_set(struct rte_eth_dev *dev,
177 	enum rte_vlan_type vlan_type __rte_unused,
178 	uint16_t tpid)
179 {
180 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
181 	struct fsl_mc_io *dpni = dev->process_private;
182 	int ret = -ENOTSUP;
183 
184 	PMD_INIT_FUNC_TRACE();
185 
186 	/* nothing to be done for standard vlan tpids */
187 	if (tpid == 0x8100 || tpid == 0x88A8)
188 		return 0;
189 
190 	ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
191 				   priv->token, tpid);
192 	if (ret < 0)
193 		DPAA2_PMD_INFO("Unable to set vlan tpid = %d", ret);
194 	/* if already configured tpids, remove them first */
195 	if (ret == -EBUSY) {
196 		struct dpni_custom_tpid_cfg tpid_list = {0};
197 
198 		ret = dpni_get_custom_tpid(dpni, CMD_PRI_LOW,
199 				   priv->token, &tpid_list);
200 		if (ret < 0)
201 			goto fail;
202 		ret = dpni_remove_custom_tpid(dpni, CMD_PRI_LOW,
203 				   priv->token, tpid_list.tpid1);
204 		if (ret < 0)
205 			goto fail;
206 		ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
207 					   priv->token, tpid);
208 	}
209 fail:
210 	return ret;
211 }
212 
213 static int
214 dpaa2_fw_version_get(struct rte_eth_dev *dev,
215 	char *fw_version, size_t fw_size)
216 {
217 	int ret;
218 	struct fsl_mc_io *dpni = dev->process_private;
219 	struct mc_soc_version mc_plat_info = {0};
220 	struct mc_version mc_ver_info = {0};
221 
222 	PMD_INIT_FUNC_TRACE();
223 
224 	if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
225 		DPAA2_PMD_WARN("\tmc_get_soc_version failed");
226 
227 	if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
228 		DPAA2_PMD_WARN("\tmc_get_version failed");
229 
230 	ret = snprintf(fw_version, fw_size,
231 		       "%x-%d.%d.%d",
232 		       mc_plat_info.svr,
233 		       mc_ver_info.major,
234 		       mc_ver_info.minor,
235 		       mc_ver_info.revision);
236 	if (ret < 0)
237 		return -EINVAL;
238 
239 	ret += 1; /* add the size of '\0' */
240 	if (fw_size < (size_t)ret)
241 		return ret;
242 	else
243 		return 0;
244 }
245 
246 static int
247 dpaa2_dev_info_get(struct rte_eth_dev *dev,
248 	struct rte_eth_dev_info *dev_info)
249 {
250 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
251 
252 	PMD_INIT_FUNC_TRACE();
253 
254 	dev_info->max_mac_addrs = priv->max_mac_filters;
255 	dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
256 	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
257 	dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
258 	dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
259 	dev_info->rx_offload_capa = dev_rx_offloads_sup |
260 					dev_rx_offloads_nodis;
261 	dev_info->tx_offload_capa = dev_tx_offloads_sup |
262 					dev_tx_offloads_nodis;
263 	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G |
264 			RTE_ETH_LINK_SPEED_2_5G |
265 			RTE_ETH_LINK_SPEED_10G;
266 	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
267 
268 	dev_info->max_hash_mac_addrs = 0;
269 	dev_info->max_vfs = 0;
270 	dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
271 	dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
272 
273 	dev_info->default_rxportconf.burst_size = dpaa2_dqrr_size;
274 	/* same is rx size for best perf */
275 	dev_info->default_txportconf.burst_size = dpaa2_dqrr_size;
276 
277 	dev_info->default_rxportconf.nb_queues = 1;
278 	dev_info->default_txportconf.nb_queues = 1;
279 	dev_info->default_txportconf.ring_size = CONG_ENTER_TX_THRESHOLD;
280 	dev_info->default_rxportconf.ring_size = DPAA2_RX_DEFAULT_NBDESC;
281 
282 	if (dpaa2_svr_family == SVR_LX2160A) {
283 		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_25G |
284 				RTE_ETH_LINK_SPEED_40G |
285 				RTE_ETH_LINK_SPEED_50G |
286 				RTE_ETH_LINK_SPEED_100G;
287 	}
288 
289 	return 0;
290 }
291 
292 static int
293 dpaa2_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
294 	__rte_unused uint16_t queue_id,
295 	struct rte_eth_burst_mode *mode)
296 {
297 	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
298 	int ret = -EINVAL;
299 	unsigned int i;
300 	const struct burst_info {
301 		uint64_t flags;
302 		const char *output;
303 	} rx_offload_map[] = {
304 			{RTE_ETH_RX_OFFLOAD_CHECKSUM, " Checksum,"},
305 			{RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
306 			{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
307 			{RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
308 			{RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
309 			{RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
310 			{RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
311 			{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"},
312 			{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"}
313 	};
314 
315 	/* Update Rx offload info */
316 	for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
317 		if (eth_conf->rxmode.offloads & rx_offload_map[i].flags) {
318 			snprintf(mode->info, sizeof(mode->info), "%s",
319 				rx_offload_map[i].output);
320 			ret = 0;
321 			break;
322 		}
323 	}
324 	return ret;
325 }
326 
327 static int
328 dpaa2_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
329 			__rte_unused uint16_t queue_id,
330 			struct rte_eth_burst_mode *mode)
331 {
332 	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
333 	int ret = -EINVAL;
334 	unsigned int i;
335 	const struct burst_info {
336 		uint64_t flags;
337 		const char *output;
338 	} tx_offload_map[] = {
339 			{RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
340 			{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
341 			{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
342 			{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
343 			{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
344 			{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
345 			{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
346 			{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
347 			{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
348 	};
349 
350 	/* Update Tx offload info */
351 	for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
352 		if (eth_conf->txmode.offloads & tx_offload_map[i].flags) {
353 			snprintf(mode->info, sizeof(mode->info), "%s",
354 				tx_offload_map[i].output);
355 			ret = 0;
356 			break;
357 		}
358 	}
359 	return ret;
360 }
361 
362 static int
363 dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
364 {
365 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
366 	uint16_t dist_idx;
367 	uint32_t vq_id;
368 	uint8_t num_rxqueue_per_tc;
369 	struct dpaa2_queue *mc_q, *mcq;
370 	uint32_t tot_queues;
371 	int i, ret = 0;
372 	struct dpaa2_queue *dpaa2_q;
373 
374 	PMD_INIT_FUNC_TRACE();
375 
376 	num_rxqueue_per_tc = (priv->nb_rx_queues / priv->num_rx_tc);
377 	if (priv->flags & DPAA2_TX_CONF_ENABLE)
378 		tot_queues = priv->nb_rx_queues + 2 * priv->nb_tx_queues;
379 	else
380 		tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
381 	mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
382 			  RTE_CACHE_LINE_SIZE);
383 	if (!mc_q) {
384 		DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues");
385 		return -ENOBUFS;
386 	}
387 
388 	for (i = 0; i < priv->nb_rx_queues; i++) {
389 		mc_q->eth_data = dev->data;
390 		priv->rx_vq[i] = mc_q++;
391 		dpaa2_q = priv->rx_vq[i];
392 		ret = dpaa2_queue_storage_alloc(dpaa2_q,
393 			RTE_MAX_LCORE);
394 		if (ret)
395 			goto fail;
396 	}
397 
398 	if (dpaa2_enable_err_queue) {
399 		priv->rx_err_vq = rte_zmalloc("dpni_rx_err",
400 			sizeof(struct dpaa2_queue), 0);
401 		if (!priv->rx_err_vq) {
402 			ret = -ENOBUFS;
403 			goto fail;
404 		}
405 
406 		dpaa2_q = priv->rx_err_vq;
407 		ret = dpaa2_queue_storage_alloc(dpaa2_q,
408 			RTE_MAX_LCORE);
409 		if (ret)
410 			goto fail;
411 	}
412 
413 	for (i = 0; i < priv->nb_tx_queues; i++) {
414 		mc_q->eth_data = dev->data;
415 		mc_q->flow_id = DPAA2_INVALID_FLOW_ID;
416 		priv->tx_vq[i] = mc_q++;
417 		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
418 		dpaa2_q->cscn = rte_malloc(NULL,
419 					   sizeof(struct qbman_result), 16);
420 		if (!dpaa2_q->cscn) {
421 			ret = -ENOBUFS;
422 			goto fail_tx;
423 		}
424 	}
425 
426 	if (priv->flags & DPAA2_TX_CONF_ENABLE) {
427 		/*Setup tx confirmation queues*/
428 		for (i = 0; i < priv->nb_tx_queues; i++) {
429 			mc_q->eth_data = dev->data;
430 			mc_q->tc_index = i;
431 			mc_q->flow_id = 0;
432 			priv->tx_conf_vq[i] = mc_q++;
433 			dpaa2_q = priv->tx_conf_vq[i];
434 			ret = dpaa2_queue_storage_alloc(dpaa2_q,
435 					RTE_MAX_LCORE);
436 			if (ret)
437 				goto fail_tx_conf;
438 		}
439 	}
440 
441 	vq_id = 0;
442 	for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
443 		mcq = priv->rx_vq[vq_id];
444 		mcq->tc_index = dist_idx / num_rxqueue_per_tc;
445 		mcq->flow_id = dist_idx % num_rxqueue_per_tc;
446 		vq_id++;
447 	}
448 
449 	return 0;
450 fail_tx_conf:
451 	i -= 1;
452 	while (i >= 0) {
453 		dpaa2_q = priv->tx_conf_vq[i];
454 		dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
455 		priv->tx_conf_vq[i--] = NULL;
456 	}
457 	i = priv->nb_tx_queues;
458 fail_tx:
459 	i -= 1;
460 	while (i >= 0) {
461 		dpaa2_q = priv->tx_vq[i];
462 		rte_free(dpaa2_q->cscn);
463 		priv->tx_vq[i--] = NULL;
464 	}
465 	i = priv->nb_rx_queues;
466 fail:
467 	i -= 1;
468 	mc_q = priv->rx_vq[0];
469 	while (i >= 0) {
470 		dpaa2_q = priv->rx_vq[i];
471 		dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
472 		priv->rx_vq[i--] = NULL;
473 	}
474 
475 	if (dpaa2_enable_err_queue) {
476 		dpaa2_q = priv->rx_err_vq;
477 		dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
478 	}
479 
480 	rte_free(mc_q);
481 	return ret;
482 }
483 
484 static void
485 dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
486 {
487 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
488 	struct dpaa2_queue *dpaa2_q;
489 	int i;
490 
491 	PMD_INIT_FUNC_TRACE();
492 
493 	/* Queue allocation base */
494 	if (priv->rx_vq[0]) {
495 		/* cleaning up queue storage */
496 		for (i = 0; i < priv->nb_rx_queues; i++) {
497 			dpaa2_q = priv->rx_vq[i];
498 			dpaa2_queue_storage_free(dpaa2_q,
499 				RTE_MAX_LCORE);
500 		}
501 		/* cleanup tx queue cscn */
502 		for (i = 0; i < priv->nb_tx_queues; i++) {
503 			dpaa2_q = priv->tx_vq[i];
504 			rte_free(dpaa2_q->cscn);
505 		}
506 		if (priv->flags & DPAA2_TX_CONF_ENABLE) {
507 			/* cleanup tx conf queue storage */
508 			for (i = 0; i < priv->nb_tx_queues; i++) {
509 				dpaa2_q = priv->tx_conf_vq[i];
510 				dpaa2_queue_storage_free(dpaa2_q,
511 					RTE_MAX_LCORE);
512 			}
513 		}
514 		/*free memory for all queues (RX+TX) */
515 		rte_free(priv->rx_vq[0]);
516 		priv->rx_vq[0] = NULL;
517 	}
518 }
519 
520 static int
521 dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
522 {
523 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
524 	struct fsl_mc_io *dpni = dev->process_private;
525 	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
526 	uint64_t rx_offloads = eth_conf->rxmode.offloads;
527 	uint64_t tx_offloads = eth_conf->txmode.offloads;
528 	int rx_l3_csum_offload = false;
529 	int rx_l4_csum_offload = false;
530 	int tx_l3_csum_offload = false;
531 	int tx_l4_csum_offload = false;
532 	int ret, tc_index;
533 	uint32_t max_rx_pktlen;
534 #if defined(RTE_LIBRTE_IEEE1588)
535 	uint16_t ptp_correction_offset;
536 #endif
537 
538 	PMD_INIT_FUNC_TRACE();
539 
540 	/* Rx offloads which are enabled by default */
541 	if (dev_rx_offloads_nodis & ~rx_offloads) {
542 		DPAA2_PMD_INFO(
543 		"Some of rx offloads enabled by default - requested 0x%" PRIx64
544 		" fixed are 0x%" PRIx64,
545 		rx_offloads, dev_rx_offloads_nodis);
546 	}
547 
548 	/* Tx offloads which are enabled by default */
549 	if (dev_tx_offloads_nodis & ~tx_offloads) {
550 		DPAA2_PMD_INFO(
551 		"Some of tx offloads enabled by default - requested 0x%" PRIx64
552 		" fixed are 0x%" PRIx64,
553 		tx_offloads, dev_tx_offloads_nodis);
554 	}
555 
556 	max_rx_pktlen = eth_conf->rxmode.mtu + RTE_ETHER_HDR_LEN +
557 				RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
558 	if (max_rx_pktlen <= DPAA2_MAX_RX_PKT_LEN) {
559 		ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
560 			priv->token, max_rx_pktlen - RTE_ETHER_CRC_LEN);
561 		if (ret != 0) {
562 			DPAA2_PMD_ERR("Unable to set mtu. check config");
563 			return ret;
564 		}
565 		DPAA2_PMD_DEBUG("MTU configured for the device: %d",
566 				dev->data->mtu);
567 	} else {
568 		DPAA2_PMD_ERR("Configured mtu %d and calculated max-pkt-len is %d which should be <= %d",
569 			eth_conf->rxmode.mtu, max_rx_pktlen, DPAA2_MAX_RX_PKT_LEN);
570 		return -1;
571 	}
572 
573 	if (eth_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
574 		for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
575 			ret = dpaa2_setup_flow_dist(dev,
576 					eth_conf->rx_adv_conf.rss_conf.rss_hf,
577 					tc_index);
578 			if (ret) {
579 				DPAA2_PMD_ERR(
580 					"Unable to set flow distribution on tc%d."
581 					"Check queue config", tc_index);
582 				return ret;
583 			}
584 		}
585 	}
586 
587 	if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
588 		rx_l3_csum_offload = true;
589 
590 	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) ||
591 		(rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) ||
592 		(rx_offloads & RTE_ETH_RX_OFFLOAD_SCTP_CKSUM))
593 		rx_l4_csum_offload = true;
594 
595 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
596 			       DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload);
597 	if (ret) {
598 		DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret);
599 		return ret;
600 	}
601 
602 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
603 			       DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload);
604 	if (ret) {
605 		DPAA2_PMD_ERR("Error to get RX l4 csum:Error = %d", ret);
606 		return ret;
607 	}
608 
609 #if !defined(RTE_LIBRTE_IEEE1588)
610 	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
611 #endif
612 	{
613 		ret = rte_mbuf_dyn_rx_timestamp_register(
614 				&dpaa2_timestamp_dynfield_offset,
615 				&dpaa2_timestamp_rx_dynflag);
616 		if (ret != 0) {
617 			DPAA2_PMD_ERR("Error to register timestamp field/flag");
618 			return -rte_errno;
619 		}
620 		dpaa2_enable_ts[dev->data->port_id] = true;
621 	}
622 
623 #if defined(RTE_LIBRTE_IEEE1588)
624 	/* By default setting ptp correction offset for Ethernet SYNC packets */
625 	ptp_correction_offset = RTE_ETHER_HDR_LEN + 8;
626 	rte_pmd_dpaa2_set_one_step_ts(dev->data->port_id, ptp_correction_offset, 0);
627 #endif
628 	if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
629 		tx_l3_csum_offload = true;
630 
631 	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) ||
632 		(tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ||
633 		(tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM))
634 		tx_l4_csum_offload = true;
635 
636 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
637 			       DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload);
638 	if (ret) {
639 		DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret);
640 		return ret;
641 	}
642 
643 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
644 			       DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload);
645 	if (ret) {
646 		DPAA2_PMD_ERR("Error to get TX l4 csum:Error = %d", ret);
647 		return ret;
648 	}
649 
650 	/* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in
651 	 * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC]
652 	 * to 0 for LS2 in the hardware thus disabling data/annotation
653 	 * stashing. For LX2 this is fixed in hardware and thus hash result and
654 	 * parse results can be received in FD using this option.
655 	 */
656 	if (dpaa2_svr_family == SVR_LX2160A) {
657 		ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
658 				       DPNI_FLCTYPE_HASH, true);
659 		if (ret) {
660 			DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret);
661 			return ret;
662 		}
663 	}
664 
665 	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
666 		dpaa2_vlan_offload_set(dev, RTE_ETH_VLAN_FILTER_MASK);
667 
668 	if (eth_conf->lpbk_mode) {
669 		ret = dpaa2_dev_recycle_config(dev);
670 		if (ret) {
671 			DPAA2_PMD_ERR("Error to configure %s to recycle port.",
672 				dev->data->name);
673 
674 			return ret;
675 		}
676 	} else {
677 		/** User may disable loopback mode by calling
678 		 * "dev_configure" with lpbk_mode cleared.
679 		 * No matter the port was configured recycle or not,
680 		 * recycle de-configure is called here.
681 		 * If port is not recycled, the de-configure will return directly.
682 		 */
683 		ret = dpaa2_dev_recycle_deconfig(dev);
684 		if (ret) {
685 			DPAA2_PMD_ERR("Error to de-configure recycle port %s.",
686 				dev->data->name);
687 
688 			return ret;
689 		}
690 	}
691 
692 	dpaa2_tm_init(dev);
693 
694 	return 0;
695 }
696 
697 /* Function to setup RX flow information. It contains traffic class ID,
698  * flow ID, destination configuration etc.
699  */
700 static int
701 dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
702 	uint16_t rx_queue_id,
703 	uint16_t nb_rx_desc,
704 	unsigned int socket_id __rte_unused,
705 	const struct rte_eth_rxconf *rx_conf,
706 	struct rte_mempool *mb_pool)
707 {
708 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
709 	struct fsl_mc_io *dpni = dev->process_private;
710 	struct dpaa2_queue *dpaa2_q;
711 	struct dpni_queue cfg;
712 	uint8_t options = 0;
713 	uint8_t flow_id;
714 	uint32_t bpid;
715 	int i, ret;
716 
717 	PMD_INIT_FUNC_TRACE();
718 
719 	DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
720 			dev, rx_queue_id, mb_pool, rx_conf);
721 
722 	total_nb_rx_desc += nb_rx_desc;
723 	if (total_nb_rx_desc > MAX_NB_RX_DESC) {
724 		DPAA2_PMD_WARN("Total nb_rx_desc exceeds %d limit. Please use Normal buffers",
725 			       MAX_NB_RX_DESC);
726 		DPAA2_PMD_WARN("To use Normal buffers, run 'export DPNI_NORMAL_BUF=1' before running dynamic_dpl.sh script");
727 	}
728 
729 	/* Rx deferred start is not supported */
730 	if (rx_conf->rx_deferred_start) {
731 		DPAA2_PMD_ERR("%s:Rx deferred start not supported",
732 			dev->data->name);
733 		return -EINVAL;
734 	}
735 
736 	if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
737 		if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
738 			ret = rte_dpaa2_bpid_info_init(mb_pool);
739 			if (ret)
740 				return ret;
741 		}
742 		bpid = mempool_to_bpid(mb_pool);
743 		ret = dpaa2_attach_bp_list(priv, dpni,
744 				rte_dpaa2_bpid_info[bpid].bp_list);
745 		if (ret)
746 			return ret;
747 	}
748 	dpaa2_q = priv->rx_vq[rx_queue_id];
749 	dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
750 	dpaa2_q->bp_array = rte_dpaa2_bpid_info;
751 	dpaa2_q->nb_desc = UINT16_MAX;
752 	dpaa2_q->offloads = rx_conf->offloads;
753 
754 	/*Get the flow id from given VQ id*/
755 	flow_id = dpaa2_q->flow_id;
756 	memset(&cfg, 0, sizeof(struct dpni_queue));
757 
758 	options = options | DPNI_QUEUE_OPT_USER_CTX;
759 	cfg.user_context = (size_t)(dpaa2_q);
760 
761 	/* check if a private cgr available. */
762 	for (i = 0; i < priv->max_cgs; i++) {
763 		if (!priv->cgid_in_use[i]) {
764 			priv->cgid_in_use[i] = 1;
765 			break;
766 		}
767 	}
768 
769 	if (i < priv->max_cgs) {
770 		options |= DPNI_QUEUE_OPT_SET_CGID;
771 		cfg.cgid = i;
772 		dpaa2_q->cgid = cfg.cgid;
773 	} else {
774 		dpaa2_q->cgid = DPAA2_INVALID_CGID;
775 	}
776 
777 	/*if ls2088 or rev2 device, enable the stashing */
778 
779 	if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) {
780 		options |= DPNI_QUEUE_OPT_FLC;
781 		cfg.flc.stash_control = true;
782 		dpaa2_flc_stashing_clear_all(&cfg.flc.value);
783 		if (getenv("DPAA2_DATA_STASHING_OFF")) {
784 			dpaa2_flc_stashing_set(DPAA2_FLC_DATA_STASHING, 0,
785 				&cfg.flc.value);
786 			dpaa2_q->data_stashing_off = 1;
787 		} else {
788 			dpaa2_flc_stashing_set(DPAA2_FLC_DATA_STASHING, 1,
789 				&cfg.flc.value);
790 			dpaa2_q->data_stashing_off = 0;
791 		}
792 		if ((dpaa2_svr_family & 0xffff0000) != SVR_LX2160A) {
793 			dpaa2_flc_stashing_set(DPAA2_FLC_ANNO_STASHING, 1,
794 				&cfg.flc.value);
795 		}
796 	}
797 	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
798 			dpaa2_q->tc_index, flow_id, options, &cfg);
799 	if (ret) {
800 		DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret);
801 		return ret;
802 	}
803 
804 	if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
805 		struct dpni_taildrop taildrop;
806 
807 		taildrop.enable = 1;
808 		dpaa2_q->nb_desc = nb_rx_desc;
809 		/* Private CGR will use tail drop length as nb_rx_desc.
810 		 * for rest cases we can use standard byte based tail drop.
811 		 * There is no HW restriction, but number of CGRs are limited,
812 		 * hence this restriction is placed.
813 		 */
814 		if (dpaa2_q->cgid != DPAA2_INVALID_CGID) {
815 			/*enabling per rx queue congestion control */
816 			taildrop.threshold = nb_rx_desc;
817 			taildrop.units = DPNI_CONGESTION_UNIT_FRAMES;
818 			taildrop.oal = 0;
819 			DPAA2_PMD_DEBUG("Enabling CG Tail Drop on queue = %d",
820 					rx_queue_id);
821 			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
822 						DPNI_CP_CONGESTION_GROUP,
823 						DPNI_QUEUE_RX,
824 						dpaa2_q->tc_index,
825 						dpaa2_q->cgid, &taildrop);
826 		} else {
827 			/*enabling per rx queue congestion control */
828 			taildrop.threshold = CONG_THRESHOLD_RX_BYTES_Q;
829 			taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
830 			taildrop.oal = CONG_RX_OAL;
831 			DPAA2_PMD_DEBUG("Enabling Byte based Drop on queue= %d",
832 					rx_queue_id);
833 			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
834 						DPNI_CP_QUEUE, DPNI_QUEUE_RX,
835 						dpaa2_q->tc_index, flow_id,
836 						&taildrop);
837 		}
838 		if (ret) {
839 			DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
840 				ret);
841 			return ret;
842 		}
843 	} else { /* Disable tail Drop */
844 		struct dpni_taildrop taildrop = {0};
845 		DPAA2_PMD_INFO("Tail drop is disabled on queue");
846 
847 		taildrop.enable = 0;
848 		if (dpaa2_q->cgid != DPAA2_INVALID_CGID) {
849 			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
850 					DPNI_CP_CONGESTION_GROUP, DPNI_QUEUE_RX,
851 					dpaa2_q->tc_index,
852 					dpaa2_q->cgid, &taildrop);
853 		} else {
854 			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
855 					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
856 					dpaa2_q->tc_index, flow_id, &taildrop);
857 		}
858 		if (ret) {
859 			DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
860 				ret);
861 			return ret;
862 		}
863 	}
864 
865 	dev->data->rx_queues[rx_queue_id] = dpaa2_q;
866 	return 0;
867 }
868 
869 static int
870 dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
871 	uint16_t tx_queue_id,
872 	uint16_t nb_tx_desc,
873 	unsigned int socket_id __rte_unused,
874 	const struct rte_eth_txconf *tx_conf)
875 {
876 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
877 	struct dpaa2_queue *dpaa2_q = priv->tx_vq[tx_queue_id];
878 	struct dpaa2_queue *dpaa2_tx_conf_q = priv->tx_conf_vq[tx_queue_id];
879 	struct fsl_mc_io *dpni = dev->process_private;
880 	struct dpni_queue tx_conf_cfg;
881 	struct dpni_queue tx_flow_cfg;
882 	uint8_t options = 0, flow_id;
883 	uint8_t ceetm_ch_idx;
884 	uint16_t channel_id;
885 	struct dpni_queue_id qid;
886 	uint32_t tc_id;
887 	int ret;
888 	uint64_t iova;
889 
890 	PMD_INIT_FUNC_TRACE();
891 
892 	/* Tx deferred start is not supported */
893 	if (tx_conf->tx_deferred_start) {
894 		DPAA2_PMD_ERR("%s:Tx deferred start not supported",
895 			dev->data->name);
896 		return -EINVAL;
897 	}
898 
899 	dpaa2_q->nb_desc = UINT16_MAX;
900 	dpaa2_q->offloads = tx_conf->offloads;
901 
902 	/* Return if queue already configured */
903 	if (dpaa2_q->flow_id != DPAA2_INVALID_FLOW_ID) {
904 		dev->data->tx_queues[tx_queue_id] = dpaa2_q;
905 		return 0;
906 	}
907 
908 	memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
909 	memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
910 
911 	if (!tx_queue_id) {
912 		for (ceetm_ch_idx = 0;
913 			ceetm_ch_idx <= (priv->num_channels - 1);
914 			ceetm_ch_idx++) {
915 			/*Set tx-conf and error configuration*/
916 			if (priv->flags & DPAA2_TX_CONF_ENABLE) {
917 				ret = dpni_set_tx_confirmation_mode(dpni,
918 						CMD_PRI_LOW, priv->token,
919 						ceetm_ch_idx,
920 						DPNI_CONF_AFFINE);
921 			} else {
922 				ret = dpni_set_tx_confirmation_mode(dpni,
923 						CMD_PRI_LOW, priv->token,
924 						ceetm_ch_idx,
925 						DPNI_CONF_DISABLE);
926 			}
927 			if (ret) {
928 				DPAA2_PMD_ERR("Error(%d) in tx conf setting",
929 					ret);
930 				return ret;
931 			}
932 		}
933 	}
934 
935 	tc_id = tx_queue_id % priv->num_tx_tc;
936 	channel_id = (uint8_t)(tx_queue_id / priv->num_tx_tc) % priv->num_channels;
937 	flow_id = 0;
938 
939 	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
940 			((channel_id << 8) | tc_id), flow_id, options, &tx_flow_cfg);
941 	if (ret) {
942 		DPAA2_PMD_ERR("Error in setting the tx flow: "
943 			"tc_id=%d, flow=%d err=%d",
944 			tc_id, flow_id, ret);
945 			return ret;
946 	}
947 
948 	dpaa2_q->flow_id = flow_id;
949 
950 	dpaa2_q->tc_index = tc_id;
951 
952 	ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
953 			DPNI_QUEUE_TX, ((channel_id << 8) | dpaa2_q->tc_index),
954 			dpaa2_q->flow_id, &tx_flow_cfg, &qid);
955 	if (ret) {
956 		DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
957 		return ret;
958 	}
959 	dpaa2_q->fqid = qid.fqid;
960 
961 	if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
962 		struct dpni_congestion_notification_cfg cong_notif_cfg = {0};
963 
964 		dpaa2_q->nb_desc = nb_tx_desc;
965 
966 		cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
967 		cong_notif_cfg.threshold_entry = nb_tx_desc;
968 		/* Notify that the queue is not congested when the data in
969 		 * the queue is below this threshold.(90% of value)
970 		 */
971 		cong_notif_cfg.threshold_exit = (nb_tx_desc * 9) / 10;
972 		cong_notif_cfg.message_ctx = 0;
973 
974 		iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(dpaa2_q->cscn,
975 			sizeof(struct qbman_result));
976 		if (iova == RTE_BAD_IOVA) {
977 			DPAA2_PMD_ERR("No IOMMU map for cscn(%p)(size=%x)",
978 				dpaa2_q->cscn, (uint32_t)sizeof(struct qbman_result));
979 
980 			return -ENOBUFS;
981 		}
982 
983 		cong_notif_cfg.message_iova = iova;
984 		cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
985 		cong_notif_cfg.notification_mode =
986 					 DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
987 					 DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
988 					 DPNI_CONG_OPT_COHERENT_WRITE;
989 		cong_notif_cfg.cg_point = DPNI_CP_QUEUE;
990 
991 		ret = dpni_set_congestion_notification(dpni,
992 				CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
993 				((channel_id << 8) | tc_id), &cong_notif_cfg);
994 		if (ret) {
995 			DPAA2_PMD_ERR("Set TX congestion notification err=%d",
996 			   ret);
997 			return ret;
998 		}
999 	}
1000 	dpaa2_q->cb_eqresp_free = dpaa2_dev_free_eqresp_buf;
1001 	dev->data->tx_queues[tx_queue_id] = dpaa2_q;
1002 
1003 	if (priv->flags & DPAA2_TX_CONF_ENABLE) {
1004 		dpaa2_q->tx_conf_queue = dpaa2_tx_conf_q;
1005 		options = options | DPNI_QUEUE_OPT_USER_CTX;
1006 		tx_conf_cfg.user_context = (size_t)(dpaa2_q);
1007 		ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
1008 				DPNI_QUEUE_TX_CONFIRM,
1009 				((channel_id << 8) | dpaa2_tx_conf_q->tc_index),
1010 				dpaa2_tx_conf_q->flow_id,
1011 				options, &tx_conf_cfg);
1012 		if (ret) {
1013 			DPAA2_PMD_ERR("Set TC[%d].TX[%d] conf flow err=%d",
1014 				dpaa2_tx_conf_q->tc_index,
1015 				dpaa2_tx_conf_q->flow_id, ret);
1016 			return ret;
1017 		}
1018 
1019 		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
1020 				DPNI_QUEUE_TX_CONFIRM,
1021 				((channel_id << 8) | dpaa2_tx_conf_q->tc_index),
1022 				dpaa2_tx_conf_q->flow_id, &tx_conf_cfg, &qid);
1023 		if (ret) {
1024 			DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
1025 			return ret;
1026 		}
1027 		dpaa2_tx_conf_q->fqid = qid.fqid;
1028 	}
1029 	return 0;
1030 }
1031 
1032 static void
1033 dpaa2_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1034 {
1035 	struct dpaa2_queue *dpaa2_q = dev->data->rx_queues[rx_queue_id];
1036 	struct dpaa2_dev_priv *priv = dpaa2_q->eth_data->dev_private;
1037 	struct fsl_mc_io *dpni = priv->eth_dev->process_private;
1038 	uint8_t options = 0;
1039 	int ret;
1040 	struct dpni_queue cfg;
1041 
1042 	memset(&cfg, 0, sizeof(struct dpni_queue));
1043 	PMD_INIT_FUNC_TRACE();
1044 
1045 	total_nb_rx_desc -= dpaa2_q->nb_desc;
1046 
1047 	if (dpaa2_q->cgid != DPAA2_INVALID_CGID) {
1048 		options = DPNI_QUEUE_OPT_CLEAR_CGID;
1049 		cfg.cgid = dpaa2_q->cgid;
1050 
1051 		ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
1052 				     DPNI_QUEUE_RX,
1053 				     dpaa2_q->tc_index, dpaa2_q->flow_id,
1054 				     options, &cfg);
1055 		if (ret)
1056 			DPAA2_PMD_ERR("Unable to clear CGR from q=%u err=%d",
1057 					dpaa2_q->fqid, ret);
1058 		priv->cgid_in_use[dpaa2_q->cgid] = 0;
1059 		dpaa2_q->cgid = DPAA2_INVALID_CGID;
1060 	}
1061 }
1062 
1063 static uint32_t
1064 dpaa2_dev_rx_queue_count(void *rx_queue)
1065 {
1066 	int32_t ret;
1067 	struct dpaa2_queue *dpaa2_q;
1068 	struct qbman_swp *swp;
1069 	struct qbman_fq_query_np_rslt state;
1070 	uint32_t frame_cnt = 0;
1071 
1072 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1073 		ret = dpaa2_affine_qbman_swp();
1074 		if (ret) {
1075 			DPAA2_PMD_ERR(
1076 				"Failed to allocate IO portal, tid: %d",
1077 				rte_gettid());
1078 			return -EINVAL;
1079 		}
1080 	}
1081 	swp = DPAA2_PER_LCORE_PORTAL;
1082 
1083 	dpaa2_q = rx_queue;
1084 
1085 	if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
1086 		frame_cnt = qbman_fq_state_frame_count(&state);
1087 		DPAA2_PMD_DP_DEBUG("RX frame count for q(%p) is %u",
1088 				rx_queue, frame_cnt);
1089 	}
1090 	return frame_cnt;
1091 }
1092 
1093 static const uint32_t *
1094 dpaa2_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
1095 {
1096 	static const uint32_t ptypes[] = {
1097 		/*todo -= add more types */
1098 		RTE_PTYPE_L2_ETHER,
1099 		RTE_PTYPE_L3_IPV4,
1100 		RTE_PTYPE_L3_IPV4_EXT,
1101 		RTE_PTYPE_L3_IPV6,
1102 		RTE_PTYPE_L3_IPV6_EXT,
1103 		RTE_PTYPE_L4_TCP,
1104 		RTE_PTYPE_L4_UDP,
1105 		RTE_PTYPE_L4_SCTP,
1106 		RTE_PTYPE_L4_ICMP,
1107 	};
1108 
1109 	if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx ||
1110 		dev->rx_pkt_burst == dpaa2_dev_rx ||
1111 		dev->rx_pkt_burst == dpaa2_dev_loopback_rx) {
1112 		*no_of_elements = RTE_DIM(ptypes);
1113 		return ptypes;
1114 	}
1115 	return NULL;
1116 }
1117 
1118 /**
1119  * Dpaa2 link Interrupt handler
1120  *
1121  * @param param
1122  *  The address of parameter (struct rte_eth_dev *) registered before.
1123  *
1124  * @return
1125  *  void
1126  */
1127 static void
1128 dpaa2_interrupt_handler(void *param)
1129 {
1130 	struct rte_eth_dev *dev = param;
1131 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1132 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1133 	int ret;
1134 	int irq_index = DPNI_IRQ_INDEX;
1135 	unsigned int status = 0, clear = 0;
1136 
1137 	PMD_INIT_FUNC_TRACE();
1138 
1139 	if (dpni == NULL) {
1140 		DPAA2_PMD_ERR("dpni is NULL");
1141 		return;
1142 	}
1143 
1144 	ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token,
1145 				  irq_index, &status);
1146 	if (unlikely(ret)) {
1147 		DPAA2_PMD_ERR("Can't get irq status (err %d)", ret);
1148 		clear = 0xffffffff;
1149 		goto out;
1150 	}
1151 
1152 	if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
1153 		clear = DPNI_IRQ_EVENT_LINK_CHANGED;
1154 		dpaa2_dev_link_update(dev, 0);
1155 		/* calling all the apps registered for link status event */
1156 		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1157 	}
1158 out:
1159 	ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
1160 				    irq_index, clear);
1161 	if (unlikely(ret))
1162 		DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret);
1163 }
1164 
1165 static int
1166 dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable)
1167 {
1168 	int err = 0;
1169 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1170 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1171 	int irq_index = DPNI_IRQ_INDEX;
1172 	unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED;
1173 
1174 	PMD_INIT_FUNC_TRACE();
1175 
1176 	err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token,
1177 				irq_index, mask);
1178 	if (err < 0) {
1179 		DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err,
1180 			      strerror(-err));
1181 		return err;
1182 	}
1183 
1184 	err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token,
1185 				  irq_index, enable);
1186 	if (err < 0)
1187 		DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err,
1188 			      strerror(-err));
1189 
1190 	return err;
1191 }
1192 
1193 static int
1194 dpaa2_dev_start(struct rte_eth_dev *dev)
1195 {
1196 	struct rte_device *rdev = dev->device;
1197 	struct rte_dpaa2_device *dpaa2_dev;
1198 	struct rte_eth_dev_data *data = dev->data;
1199 	struct dpaa2_dev_priv *priv = data->dev_private;
1200 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1201 	struct dpni_queue cfg;
1202 	struct dpni_error_cfg	err_cfg;
1203 	struct dpni_queue_id qid;
1204 	struct dpaa2_queue *dpaa2_q;
1205 	int ret, i;
1206 	struct rte_intr_handle *intr_handle;
1207 
1208 	dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
1209 	intr_handle = dpaa2_dev->intr_handle;
1210 
1211 	PMD_INIT_FUNC_TRACE();
1212 	ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
1213 	if (ret) {
1214 		DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d",
1215 			      priv->hw_id, ret);
1216 		return ret;
1217 	}
1218 
1219 	/* Power up the phy. Needed to make the link go UP */
1220 	dpaa2_dev_set_link_up(dev);
1221 
1222 	for (i = 0; i < data->nb_rx_queues; i++) {
1223 		dpaa2_q = data->rx_queues[i];
1224 		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
1225 				DPNI_QUEUE_RX, dpaa2_q->tc_index,
1226 				dpaa2_q->flow_id, &cfg, &qid);
1227 		if (ret) {
1228 			DPAA2_PMD_ERR("Error in getting flow information: "
1229 				      "err=%d", ret);
1230 			return ret;
1231 		}
1232 		dpaa2_q->fqid = qid.fqid;
1233 	}
1234 
1235 	if (dpaa2_enable_err_queue) {
1236 		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
1237 				     DPNI_QUEUE_RX_ERR, 0, 0, &cfg, &qid);
1238 		if (ret) {
1239 			DPAA2_PMD_ERR("Error getting rx err flow information: err=%d",
1240 						ret);
1241 			return ret;
1242 		}
1243 		dpaa2_q = priv->rx_err_vq;
1244 		dpaa2_q->fqid = qid.fqid;
1245 		dpaa2_q->eth_data = dev->data;
1246 
1247 		err_cfg.errors =  DPNI_ERROR_DISC;
1248 		err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
1249 	} else {
1250 		/* checksum errors, send them to normal path
1251 		 * and set it in annotation
1252 		 */
1253 		err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
1254 
1255 		/* if packet with parse error are not to be dropped */
1256 		err_cfg.errors |= DPNI_ERROR_PHE;
1257 
1258 		err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
1259 	}
1260 	err_cfg.set_frame_annotation = true;
1261 
1262 	ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
1263 				       priv->token, &err_cfg);
1264 	if (ret) {
1265 		DPAA2_PMD_ERR("Error to dpni_set_errors_behavior: code = %d",
1266 			      ret);
1267 		return ret;
1268 	}
1269 
1270 	/* if the interrupts were configured on this devices*/
1271 	if (intr_handle && rte_intr_fd_get(intr_handle) &&
1272 	    dev->data->dev_conf.intr_conf.lsc != 0) {
1273 		/* Registering LSC interrupt handler */
1274 		rte_intr_callback_register(intr_handle,
1275 					   dpaa2_interrupt_handler,
1276 					   (void *)dev);
1277 
1278 		/* enable vfio intr/eventfd mapping
1279 		 * Interrupt index 0 is required, so we can not use
1280 		 * rte_intr_enable.
1281 		 */
1282 		rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX);
1283 
1284 		/* enable dpni_irqs */
1285 		dpaa2_eth_setup_irqs(dev, 1);
1286 	}
1287 
1288 	/* Change the tx burst function if ordered queues are used */
1289 	if (priv->en_ordered)
1290 		dev->tx_pkt_burst = dpaa2_dev_tx_ordered;
1291 
1292 	for (i = 0; i < dev->data->nb_rx_queues; i++)
1293 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
1294 	for (i = 0; i < dev->data->nb_tx_queues; i++)
1295 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
1296 
1297 	return 0;
1298 }
1299 
1300 /**
1301  *  This routine disables all traffic on the adapter by issuing a
1302  *  global reset on the MAC.
1303  */
1304 static int
1305 dpaa2_dev_stop(struct rte_eth_dev *dev)
1306 {
1307 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1308 	struct fsl_mc_io *dpni = dev->process_private;
1309 	int ret;
1310 	struct rte_eth_link link;
1311 	struct rte_device *rdev = dev->device;
1312 	struct rte_intr_handle *intr_handle;
1313 	struct rte_dpaa2_device *dpaa2_dev;
1314 	uint16_t i;
1315 
1316 	dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
1317 	intr_handle = dpaa2_dev->intr_handle;
1318 
1319 	PMD_INIT_FUNC_TRACE();
1320 
1321 	/* reset interrupt callback  */
1322 	if (intr_handle && rte_intr_fd_get(intr_handle) &&
1323 	    dev->data->dev_conf.intr_conf.lsc != 0) {
1324 		/*disable dpni irqs */
1325 		dpaa2_eth_setup_irqs(dev, 0);
1326 
1327 		/* disable vfio intr before callback unregister */
1328 		rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX);
1329 
1330 		/* Unregistering LSC interrupt handler */
1331 		rte_intr_callback_unregister(intr_handle,
1332 					     dpaa2_interrupt_handler,
1333 					     (void *)dev);
1334 	}
1335 
1336 	dpaa2_dev_set_link_down(dev);
1337 
1338 	ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
1339 	if (ret) {
1340 		DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev",
1341 			      ret, priv->hw_id);
1342 		return ret;
1343 	}
1344 
1345 	/* clear the recorded link status */
1346 	memset(&link, 0, sizeof(link));
1347 	rte_eth_linkstatus_set(dev, &link);
1348 
1349 	for (i = 0; i < dev->data->nb_rx_queues; i++)
1350 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1351 	for (i = 0; i < dev->data->nb_tx_queues; i++)
1352 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1353 
1354 	return 0;
1355 }
1356 
1357 static int
1358 dpaa2_dev_close(struct rte_eth_dev *dev)
1359 {
1360 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1361 	struct fsl_mc_io *dpni = dev->process_private;
1362 	int i, ret;
1363 	struct rte_eth_link link;
1364 
1365 	PMD_INIT_FUNC_TRACE();
1366 
1367 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1368 		return 0;
1369 
1370 	if (!dpni) {
1371 		DPAA2_PMD_WARN("Already closed or not started");
1372 		return -EINVAL;
1373 	}
1374 
1375 	dpaa2_tm_deinit(dev);
1376 	dpaa2_flow_clean(dev);
1377 	/* Clean the device first */
1378 	ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
1379 	if (ret) {
1380 		DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret);
1381 		return ret;
1382 	}
1383 
1384 	memset(&link, 0, sizeof(link));
1385 	rte_eth_linkstatus_set(dev, &link);
1386 
1387 	/* Free private queues memory */
1388 	dpaa2_free_rx_tx_queues(dev);
1389 	/* Close the device at underlying layer*/
1390 	ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
1391 	if (ret) {
1392 		DPAA2_PMD_ERR("Failure closing dpni device with err code %d",
1393 			ret);
1394 	}
1395 
1396 	/* Free the allocated memory for ethernet private data and dpni*/
1397 	priv->hw = NULL;
1398 	dev->process_private = NULL;
1399 	rte_free(dpni);
1400 
1401 	for (i = 0; i < MAX_TCS; i++)
1402 		rte_free(priv->extract.tc_extract_param[i]);
1403 
1404 	if (priv->extract.qos_extract_param)
1405 		rte_free(priv->extract.qos_extract_param);
1406 
1407 	DPAA2_PMD_INFO("%s: netdev deleted", dev->data->name);
1408 	return 0;
1409 }
1410 
1411 static int
1412 dpaa2_dev_promiscuous_enable(struct rte_eth_dev *dev)
1413 {
1414 	int ret;
1415 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1416 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1417 
1418 	PMD_INIT_FUNC_TRACE();
1419 
1420 	if (dpni == NULL) {
1421 		DPAA2_PMD_ERR("dpni is NULL");
1422 		return -ENODEV;
1423 	}
1424 
1425 	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
1426 	if (ret < 0)
1427 		DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret);
1428 
1429 	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
1430 	if (ret < 0)
1431 		DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret);
1432 
1433 	return ret;
1434 }
1435 
1436 static int
1437 dpaa2_dev_promiscuous_disable(
1438 		struct rte_eth_dev *dev)
1439 {
1440 	int ret;
1441 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1442 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1443 
1444 	PMD_INIT_FUNC_TRACE();
1445 
1446 	if (dpni == NULL) {
1447 		DPAA2_PMD_ERR("dpni is NULL");
1448 		return -ENODEV;
1449 	}
1450 
1451 	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
1452 	if (ret < 0)
1453 		DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret);
1454 
1455 	if (dev->data->all_multicast == 0) {
1456 		ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
1457 						 priv->token, false);
1458 		if (ret < 0)
1459 			DPAA2_PMD_ERR("Unable to disable M promisc mode %d",
1460 				      ret);
1461 	}
1462 
1463 	return ret;
1464 }
1465 
1466 static int
1467 dpaa2_dev_allmulticast_enable(
1468 		struct rte_eth_dev *dev)
1469 {
1470 	int ret;
1471 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1472 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1473 
1474 	PMD_INIT_FUNC_TRACE();
1475 
1476 	if (dpni == NULL) {
1477 		DPAA2_PMD_ERR("dpni is NULL");
1478 		return -ENODEV;
1479 	}
1480 
1481 	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
1482 	if (ret < 0)
1483 		DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret);
1484 
1485 	return ret;
1486 }
1487 
1488 static int
1489 dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
1490 {
1491 	int ret;
1492 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1493 	struct fsl_mc_io *dpni = dev->process_private;
1494 
1495 	PMD_INIT_FUNC_TRACE();
1496 
1497 	if (dpni == NULL) {
1498 		DPAA2_PMD_ERR("dpni is NULL");
1499 		return -ENODEV;
1500 	}
1501 
1502 	/* must remain on for all promiscuous */
1503 	if (dev->data->promiscuous == 1)
1504 		return 0;
1505 
1506 	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
1507 	if (ret < 0)
1508 		DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret);
1509 
1510 	return ret;
1511 }
1512 
1513 static int
1514 dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1515 {
1516 	int ret;
1517 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1518 	struct fsl_mc_io *dpni = dev->process_private;
1519 	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
1520 				+ VLAN_TAG_SIZE;
1521 
1522 	PMD_INIT_FUNC_TRACE();
1523 
1524 	if (!dpni) {
1525 		DPAA2_PMD_ERR("dpni is NULL");
1526 		return -EINVAL;
1527 	}
1528 
1529 	/* Set the Max Rx frame length as 'mtu' +
1530 	 * Maximum Ethernet header length
1531 	 */
1532 	ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
1533 					frame_size - RTE_ETHER_CRC_LEN);
1534 	if (ret) {
1535 		DPAA2_PMD_ERR("Setting the max frame length failed");
1536 		return ret;
1537 	}
1538 	dev->data->mtu = mtu;
1539 	DPAA2_PMD_INFO("MTU configured for the device: %d", mtu);
1540 	return 0;
1541 }
1542 
1543 static int
1544 dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
1545 	struct rte_ether_addr *addr,
1546 	__rte_unused uint32_t index,
1547 	__rte_unused uint32_t pool)
1548 {
1549 	int ret;
1550 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1551 	struct fsl_mc_io *dpni = dev->process_private;
1552 
1553 	PMD_INIT_FUNC_TRACE();
1554 
1555 	if (dpni == NULL) {
1556 		DPAA2_PMD_ERR("dpni is NULL");
1557 		return -EINVAL;
1558 	}
1559 
1560 	ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, priv->token,
1561 				addr->addr_bytes, 0, 0, 0);
1562 	if (ret)
1563 		DPAA2_PMD_ERR("ERR(%d) Adding the MAC ADDR failed", ret);
1564 	return ret;
1565 }
1566 
1567 static void
1568 dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
1569 	uint32_t index)
1570 {
1571 	int ret;
1572 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1573 	struct fsl_mc_io *dpni = dev->process_private;
1574 	struct rte_eth_dev_data *data = dev->data;
1575 	struct rte_ether_addr *macaddr;
1576 
1577 	PMD_INIT_FUNC_TRACE();
1578 
1579 	macaddr = &data->mac_addrs[index];
1580 
1581 	if (!dpni) {
1582 		DPAA2_PMD_ERR("dpni is NULL");
1583 		return;
1584 	}
1585 
1586 	ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
1587 				   priv->token, macaddr->addr_bytes);
1588 	if (ret)
1589 		DPAA2_PMD_ERR(
1590 			"error: Removing the MAC ADDR failed: err = %d", ret);
1591 }
1592 
1593 static int
1594 dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
1595 	struct rte_ether_addr *addr)
1596 {
1597 	int ret;
1598 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1599 	struct fsl_mc_io *dpni = dev->process_private;
1600 
1601 	PMD_INIT_FUNC_TRACE();
1602 
1603 	if (!dpni) {
1604 		DPAA2_PMD_ERR("dpni is NULL");
1605 		return -EINVAL;
1606 	}
1607 
1608 	ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
1609 					priv->token, addr->addr_bytes);
1610 
1611 	if (ret)
1612 		DPAA2_PMD_ERR("ERR(%d) Setting the MAC ADDR failed", ret);
1613 
1614 	return ret;
1615 }
1616 
1617 static int
1618 dpaa2_dev_stats_get(struct rte_eth_dev *dev,
1619 	struct rte_eth_stats *stats)
1620 {
1621 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1622 	struct fsl_mc_io *dpni = dev->process_private;
1623 	int32_t retcode;
1624 	uint8_t page0 = 0, page1 = 1, page2 = 2;
1625 	union dpni_statistics value;
1626 	int i;
1627 	struct dpaa2_queue *dpaa2_rxq, *dpaa2_txq;
1628 
1629 	memset(&value, 0, sizeof(union dpni_statistics));
1630 
1631 	PMD_INIT_FUNC_TRACE();
1632 
1633 	if (!dpni) {
1634 		DPAA2_PMD_ERR("dpni is NULL");
1635 		return -EINVAL;
1636 	}
1637 
1638 	if (!stats) {
1639 		DPAA2_PMD_ERR("stats is NULL");
1640 		return -EINVAL;
1641 	}
1642 
1643 	/*Get Counters from page_0*/
1644 	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1645 				      page0, 0, &value);
1646 	if (retcode)
1647 		goto err;
1648 
1649 	stats->ipackets = value.page_0.ingress_all_frames;
1650 	stats->ibytes = value.page_0.ingress_all_bytes;
1651 
1652 	/*Get Counters from page_1*/
1653 	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1654 				      page1, 0, &value);
1655 	if (retcode)
1656 		goto err;
1657 
1658 	stats->opackets = value.page_1.egress_all_frames;
1659 	stats->obytes = value.page_1.egress_all_bytes;
1660 
1661 	/*Get Counters from page_2*/
1662 	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1663 				      page2, 0, &value);
1664 	if (retcode)
1665 		goto err;
1666 
1667 	/* Ingress drop frame count due to configured rules */
1668 	stats->ierrors = value.page_2.ingress_filtered_frames;
1669 	/* Ingress drop frame count due to error */
1670 	stats->ierrors += value.page_2.ingress_discarded_frames;
1671 
1672 	stats->oerrors = value.page_2.egress_discarded_frames;
1673 	stats->imissed = value.page_2.ingress_nobuffer_discards;
1674 
1675 	/* Fill in per queue stats */
1676 	for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
1677 		(i < priv->nb_rx_queues || i < priv->nb_tx_queues); ++i) {
1678 		dpaa2_rxq = priv->rx_vq[i];
1679 		dpaa2_txq = priv->tx_vq[i];
1680 		if (dpaa2_rxq)
1681 			stats->q_ipackets[i] = dpaa2_rxq->rx_pkts;
1682 		if (dpaa2_txq)
1683 			stats->q_opackets[i] = dpaa2_txq->tx_pkts;
1684 
1685 		/* Byte counting is not implemented */
1686 		stats->q_ibytes[i]   = 0;
1687 		stats->q_obytes[i]   = 0;
1688 	}
1689 
1690 	return 0;
1691 
1692 err:
1693 	DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode);
1694 	return retcode;
1695 };
1696 
1697 static int
1698 dpaa2_dev_xstats_get(struct rte_eth_dev *dev,
1699 	struct rte_eth_xstat *xstats, unsigned int n)
1700 {
1701 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1702 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1703 	int32_t retcode;
1704 	union dpni_statistics value[5] = {};
1705 	unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);
1706 	uint8_t page_id, stats_id;
1707 
1708 	if (n < num)
1709 		return num;
1710 
1711 	if (!xstats)
1712 		return 0;
1713 
1714 	/* Get Counters from page_0*/
1715 	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1716 				      0, 0, &value[0]);
1717 	if (retcode)
1718 		goto err;
1719 
1720 	/* Get Counters from page_1*/
1721 	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1722 				      1, 0, &value[1]);
1723 	if (retcode)
1724 		goto err;
1725 
1726 	/* Get Counters from page_2*/
1727 	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1728 				      2, 0, &value[2]);
1729 	if (retcode)
1730 		goto err;
1731 
1732 	for (i = 0; i < priv->max_cgs; i++) {
1733 		if (!priv->cgid_in_use[i]) {
1734 			/* Get Counters from page_4*/
1735 			retcode = dpni_get_statistics(dpni, CMD_PRI_LOW,
1736 						      priv->token,
1737 						      4, 0, &value[4]);
1738 			if (retcode)
1739 				goto err;
1740 			break;
1741 		}
1742 	}
1743 
1744 	for (i = 0; i < num; i++) {
1745 		xstats[i].id = i;
1746 		page_id = dpaa2_xstats_strings[i].page_id;
1747 		stats_id = dpaa2_xstats_strings[i].stats_id;
1748 		xstats[i].value = value[page_id].raw.counter[stats_id];
1749 	}
1750 	return i;
1751 err:
1752 	DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode);
1753 	return retcode;
1754 }
1755 
1756 static int
1757 dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1758 	struct rte_eth_xstat_name *xstats_names,
1759 	unsigned int limit)
1760 {
1761 	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1762 
1763 	if (limit < stat_cnt)
1764 		return stat_cnt;
1765 
1766 	if (xstats_names != NULL)
1767 		for (i = 0; i < stat_cnt; i++)
1768 			strlcpy(xstats_names[i].name,
1769 				dpaa2_xstats_strings[i].name,
1770 				sizeof(xstats_names[i].name));
1771 
1772 	return stat_cnt;
1773 }
1774 
1775 static int
1776 dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1777 	uint64_t *values, unsigned int n)
1778 {
1779 	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1780 	uint64_t values_copy[stat_cnt];
1781 	uint8_t page_id, stats_id;
1782 
1783 	if (!ids) {
1784 		struct dpaa2_dev_priv *priv = dev->data->dev_private;
1785 		struct fsl_mc_io *dpni = dev->process_private;
1786 		int32_t retcode;
1787 		union dpni_statistics value[5] = {};
1788 
1789 		if (n < stat_cnt)
1790 			return stat_cnt;
1791 
1792 		if (!values)
1793 			return 0;
1794 
1795 		/* Get Counters from page_0*/
1796 		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1797 					      0, 0, &value[0]);
1798 		if (retcode)
1799 			return 0;
1800 
1801 		/* Get Counters from page_1*/
1802 		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1803 					      1, 0, &value[1]);
1804 		if (retcode)
1805 			return 0;
1806 
1807 		/* Get Counters from page_2*/
1808 		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1809 					      2, 0, &value[2]);
1810 		if (retcode)
1811 			return 0;
1812 
1813 		/* Get Counters from page_4*/
1814 		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1815 					      4, 0, &value[4]);
1816 		if (retcode)
1817 			return 0;
1818 
1819 		for (i = 0; i < stat_cnt; i++) {
1820 			page_id = dpaa2_xstats_strings[i].page_id;
1821 			stats_id = dpaa2_xstats_strings[i].stats_id;
1822 			values[i] = value[page_id].raw.counter[stats_id];
1823 		}
1824 		return stat_cnt;
1825 	}
1826 
1827 	dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);
1828 
1829 	for (i = 0; i < n; i++) {
1830 		if (ids[i] >= stat_cnt) {
1831 			DPAA2_PMD_ERR("xstats id value isn't valid");
1832 			return -EINVAL;
1833 		}
1834 		values[i] = values_copy[ids[i]];
1835 	}
1836 	return n;
1837 }
1838 
1839 static int
1840 dpaa2_xstats_get_names_by_id(struct rte_eth_dev *dev,
1841 	const uint64_t *ids,
1842 	struct rte_eth_xstat_name *xstats_names,
1843 	unsigned int limit)
1844 {
1845 	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1846 	struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
1847 
1848 	if (!ids)
1849 		return dpaa2_xstats_get_names(dev, xstats_names, limit);
1850 
1851 	dpaa2_xstats_get_names(dev, xstats_names_copy, limit);
1852 
1853 	for (i = 0; i < limit; i++) {
1854 		if (ids[i] >= stat_cnt) {
1855 			DPAA2_PMD_ERR("xstats id value isn't valid");
1856 			return -1;
1857 		}
1858 		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
1859 	}
1860 	return limit;
1861 }
1862 
1863 static int
1864 dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
1865 {
1866 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1867 	struct fsl_mc_io *dpni = dev->process_private;
1868 	int retcode;
1869 	int i;
1870 	struct dpaa2_queue *dpaa2_q;
1871 
1872 	PMD_INIT_FUNC_TRACE();
1873 
1874 	if (!dpni) {
1875 		DPAA2_PMD_ERR("dpni is NULL");
1876 		return -EINVAL;
1877 	}
1878 
1879 	retcode =  dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
1880 	if (retcode)
1881 		goto error;
1882 
1883 	/* Reset the per queue stats in dpaa2_queue structure */
1884 	for (i = 0; i < priv->nb_rx_queues; i++) {
1885 		dpaa2_q = priv->rx_vq[i];
1886 		if (dpaa2_q)
1887 			dpaa2_q->rx_pkts = 0;
1888 	}
1889 
1890 	for (i = 0; i < priv->nb_tx_queues; i++) {
1891 		dpaa2_q = priv->tx_vq[i];
1892 		if (dpaa2_q)
1893 			dpaa2_q->tx_pkts = 0;
1894 	}
1895 
1896 	return 0;
1897 
1898 error:
1899 	DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode);
1900 	return retcode;
1901 };
1902 
1903 /* return 0 means link status changed, -1 means not changed */
1904 static int
1905 dpaa2_dev_link_update(struct rte_eth_dev *dev,
1906 		      int wait_to_complete)
1907 {
1908 	int ret;
1909 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1910 	struct fsl_mc_io *dpni = dev->process_private;
1911 	struct rte_eth_link link;
1912 	struct dpni_link_state state = {0};
1913 	uint8_t count;
1914 
1915 	if (!dpni) {
1916 		DPAA2_PMD_ERR("dpni is NULL");
1917 		return 0;
1918 	}
1919 
1920 	for (count = 0; count <= MAX_REPEAT_TIME; count++) {
1921 		ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token,
1922 					  &state);
1923 		if (ret < 0) {
1924 			DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret);
1925 			return ret;
1926 		}
1927 		if (state.up == RTE_ETH_LINK_DOWN &&
1928 		    wait_to_complete)
1929 			rte_delay_ms(CHECK_INTERVAL);
1930 		else
1931 			break;
1932 	}
1933 
1934 	memset(&link, 0, sizeof(struct rte_eth_link));
1935 	link.link_status = state.up;
1936 	link.link_speed = state.rate;
1937 
1938 	if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
1939 		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
1940 	else
1941 		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1942 
1943 	ret = rte_eth_linkstatus_set(dev, &link);
1944 	if (ret < 0)
1945 		DPAA2_PMD_DEBUG("No change in status");
1946 	else
1947 		DPAA2_PMD_INFO("Port %d Link is %s", dev->data->port_id,
1948 			       link.link_status ? "Up" : "Down");
1949 
1950 	return ret;
1951 }
1952 
1953 /**
1954  * Toggle the DPNI to enable, if not already enabled.
1955  * This is not strictly PHY up/down - it is more of logical toggling.
1956  */
1957 static int
1958 dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
1959 {
1960 	int ret = -EINVAL;
1961 	struct dpaa2_dev_priv *priv;
1962 	struct fsl_mc_io *dpni;
1963 	int en = 0;
1964 	struct dpni_link_state state = {0};
1965 
1966 	priv = dev->data->dev_private;
1967 	dpni = dev->process_private;
1968 
1969 	if (!dpni) {
1970 		DPAA2_PMD_ERR("dpni is NULL");
1971 		return ret;
1972 	}
1973 
1974 	/* Check if DPNI is currently enabled */
1975 	ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
1976 	if (ret) {
1977 		/* Unable to obtain dpni status; Not continuing */
1978 		DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
1979 		return ret;
1980 	}
1981 
1982 	/* Enable link if not already enabled */
1983 	if (!en) {
1984 		ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
1985 		if (ret) {
1986 			DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
1987 			return ret;
1988 		}
1989 	}
1990 	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1991 	if (ret < 0) {
1992 		DPAA2_PMD_DEBUG("Unable to get link state (%d)", ret);
1993 		return ret;
1994 	}
1995 
1996 	/* changing tx burst function to start enqueues */
1997 	dev->tx_pkt_burst = dpaa2_dev_tx;
1998 	dev->data->dev_link.link_status = state.up;
1999 	dev->data->dev_link.link_speed = state.rate;
2000 
2001 	if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
2002 		dev->data->dev_link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
2003 	else
2004 		dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
2005 
2006 	if (state.up)
2007 		DPAA2_PMD_DEBUG("Port %d Link is Up", dev->data->port_id);
2008 	else
2009 		DPAA2_PMD_DEBUG("Port %d Link is Down", dev->data->port_id);
2010 	return ret;
2011 }
2012 
2013 /**
2014  * Toggle the DPNI to disable, if not already disabled.
2015  * This is not strictly PHY up/down - it is more of logical toggling.
2016  */
2017 static int
2018 dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
2019 {
2020 	int ret = -EINVAL;
2021 	struct dpaa2_dev_priv *priv;
2022 	struct fsl_mc_io *dpni;
2023 	int dpni_enabled = 0;
2024 	int retries = 10;
2025 
2026 	PMD_INIT_FUNC_TRACE();
2027 
2028 	priv = dev->data->dev_private;
2029 	dpni = dev->process_private;
2030 
2031 	if (!dpni) {
2032 		DPAA2_PMD_ERR("Device has not yet been configured");
2033 		return ret;
2034 	}
2035 
2036 	/*changing  tx burst function to avoid any more enqueues */
2037 	dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
2038 
2039 	/* Loop while dpni_disable() attempts to drain the egress FQs
2040 	 * and confirm them back to us.
2041 	 */
2042 	do {
2043 		ret = dpni_disable(dpni, 0, priv->token);
2044 		if (ret) {
2045 			DPAA2_PMD_ERR("dpni disable failed (%d)", ret);
2046 			return ret;
2047 		}
2048 		ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
2049 		if (ret) {
2050 			DPAA2_PMD_ERR("dpni enable check failed (%d)", ret);
2051 			return ret;
2052 		}
2053 		if (dpni_enabled)
2054 			/* Allow the MC some slack */
2055 			rte_delay_us(100 * 1000);
2056 	} while (dpni_enabled && --retries);
2057 
2058 	if (!retries) {
2059 		DPAA2_PMD_WARN("Retry count exceeded disabling dpni");
2060 		/* todo- we may have to manually cleanup queues.
2061 		 */
2062 	} else {
2063 		DPAA2_PMD_INFO("Port %d Link DOWN successful",
2064 			       dev->data->port_id);
2065 	}
2066 
2067 	dev->data->dev_link.link_status = 0;
2068 
2069 	return ret;
2070 }
2071 
2072 static int
2073 dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2074 {
2075 	int ret = -EINVAL;
2076 	struct dpaa2_dev_priv *priv;
2077 	struct fsl_mc_io *dpni;
2078 	struct dpni_link_cfg cfg = {0};
2079 
2080 	PMD_INIT_FUNC_TRACE();
2081 
2082 	priv = dev->data->dev_private;
2083 	dpni = dev->process_private;
2084 
2085 	if (!dpni || !fc_conf) {
2086 		DPAA2_PMD_ERR("device not configured");
2087 		return ret;
2088 	}
2089 
2090 	ret = dpni_get_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
2091 	if (ret) {
2092 		DPAA2_PMD_ERR("error: dpni_get_link_cfg %d", ret);
2093 		return ret;
2094 	}
2095 
2096 	memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
2097 	if (cfg.options & DPNI_LINK_OPT_PAUSE) {
2098 		/* DPNI_LINK_OPT_PAUSE set
2099 		 *  if ASYM_PAUSE not set,
2100 		 *	RX Side flow control (handle received Pause frame)
2101 		 *	TX side flow control (send Pause frame)
2102 		 *  if ASYM_PAUSE set,
2103 		 *	RX Side flow control (handle received Pause frame)
2104 		 *	No TX side flow control (send Pause frame disabled)
2105 		 */
2106 		if (!(cfg.options & DPNI_LINK_OPT_ASYM_PAUSE))
2107 			fc_conf->mode = RTE_ETH_FC_FULL;
2108 		else
2109 			fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
2110 	} else {
2111 		/* DPNI_LINK_OPT_PAUSE not set
2112 		 *  if ASYM_PAUSE set,
2113 		 *	TX side flow control (send Pause frame)
2114 		 *	No RX side flow control (No action on pause frame rx)
2115 		 *  if ASYM_PAUSE not set,
2116 		 *	Flow control disabled
2117 		 */
2118 		if (cfg.options & DPNI_LINK_OPT_ASYM_PAUSE)
2119 			fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
2120 		else
2121 			fc_conf->mode = RTE_ETH_FC_NONE;
2122 	}
2123 
2124 	return ret;
2125 }
2126 
2127 static int
2128 dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2129 {
2130 	int ret = -EINVAL;
2131 	struct dpaa2_dev_priv *priv;
2132 	struct fsl_mc_io *dpni;
2133 	struct dpni_link_cfg cfg = {0};
2134 
2135 	PMD_INIT_FUNC_TRACE();
2136 
2137 	priv = dev->data->dev_private;
2138 	dpni = dev->process_private;
2139 
2140 	if (!dpni) {
2141 		DPAA2_PMD_ERR("dpni is NULL");
2142 		return ret;
2143 	}
2144 
2145 	/* It is necessary to obtain the current cfg before setting fc_conf
2146 	 * as MC would return error in case rate, autoneg or duplex values are
2147 	 * different.
2148 	 */
2149 	ret = dpni_get_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
2150 	if (ret) {
2151 		DPAA2_PMD_ERR("Unable to get link cfg (err=%d)", ret);
2152 		return ret;
2153 	}
2154 
2155 	/* Disable link before setting configuration */
2156 	dpaa2_dev_set_link_down(dev);
2157 
2158 	/* update cfg with fc_conf */
2159 	switch (fc_conf->mode) {
2160 	case RTE_ETH_FC_FULL:
2161 		/* Full flow control;
2162 		 * OPT_PAUSE set, ASYM_PAUSE not set
2163 		 */
2164 		cfg.options |= DPNI_LINK_OPT_PAUSE;
2165 		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
2166 		break;
2167 	case RTE_ETH_FC_TX_PAUSE:
2168 		/* Enable RX flow control
2169 		 * OPT_PAUSE not set;
2170 		 * ASYM_PAUSE set;
2171 		 */
2172 		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
2173 		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
2174 		break;
2175 	case RTE_ETH_FC_RX_PAUSE:
2176 		/* Enable TX Flow control
2177 		 * OPT_PAUSE set
2178 		 * ASYM_PAUSE set
2179 		 */
2180 		cfg.options |= DPNI_LINK_OPT_PAUSE;
2181 		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
2182 		break;
2183 	case RTE_ETH_FC_NONE:
2184 		/* Disable Flow control
2185 		 * OPT_PAUSE not set
2186 		 * ASYM_PAUSE not set
2187 		 */
2188 		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
2189 		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
2190 		break;
2191 	default:
2192 		DPAA2_PMD_ERR("Incorrect Flow control flag (%d)",
2193 			      fc_conf->mode);
2194 		return -EINVAL;
2195 	}
2196 
2197 	ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
2198 	if (ret)
2199 		DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)",
2200 			      ret);
2201 
2202 	/* Enable link */
2203 	dpaa2_dev_set_link_up(dev);
2204 
2205 	return ret;
2206 }
2207 
2208 static int
2209 dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
2210 			  struct rte_eth_rss_conf *rss_conf)
2211 {
2212 	struct rte_eth_dev_data *data = dev->data;
2213 	struct dpaa2_dev_priv *priv = data->dev_private;
2214 	struct rte_eth_conf *eth_conf = &data->dev_conf;
2215 	int ret, tc_index;
2216 
2217 	PMD_INIT_FUNC_TRACE();
2218 
2219 	if (rss_conf->rss_hf) {
2220 		for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
2221 			ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf,
2222 				tc_index);
2223 			if (ret) {
2224 				DPAA2_PMD_ERR("Unable to set flow dist on tc%d",
2225 					tc_index);
2226 				return ret;
2227 			}
2228 		}
2229 	} else {
2230 		for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
2231 			ret = dpaa2_remove_flow_dist(dev, tc_index);
2232 			if (ret) {
2233 				DPAA2_PMD_ERR(
2234 					"Unable to remove flow dist on tc%d",
2235 					tc_index);
2236 				return ret;
2237 			}
2238 		}
2239 	}
2240 	eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
2241 	return 0;
2242 }
2243 
2244 static int
2245 dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
2246 			    struct rte_eth_rss_conf *rss_conf)
2247 {
2248 	struct rte_eth_dev_data *data = dev->data;
2249 	struct rte_eth_conf *eth_conf = &data->dev_conf;
2250 
2251 	/* dpaa2 does not support rss_key, so length should be 0*/
2252 	rss_conf->rss_key_len = 0;
2253 	rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
2254 	return 0;
2255 }
2256 
2257 int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
2258 		int eth_rx_queue_id,
2259 		struct dpaa2_dpcon_dev *dpcon,
2260 		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
2261 {
2262 	struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
2263 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
2264 	struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
2265 	uint8_t flow_id = dpaa2_ethq->flow_id;
2266 	struct dpni_queue cfg;
2267 	uint8_t options, priority;
2268 	int ret;
2269 
2270 	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
2271 		dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
2272 	else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC)
2273 		dpaa2_ethq->cb = dpaa2_dev_process_atomic_event;
2274 	else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED)
2275 		dpaa2_ethq->cb = dpaa2_dev_process_ordered_event;
2276 	else
2277 		return -EINVAL;
2278 
2279 	priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / queue_conf->ev.priority) *
2280 		   (dpcon->num_priorities - 1);
2281 
2282 	memset(&cfg, 0, sizeof(struct dpni_queue));
2283 	options = DPNI_QUEUE_OPT_DEST;
2284 	cfg.destination.type = DPNI_DEST_DPCON;
2285 	cfg.destination.id = dpcon->dpcon_id;
2286 	cfg.destination.priority = priority;
2287 
2288 	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
2289 		options |= DPNI_QUEUE_OPT_HOLD_ACTIVE;
2290 		cfg.destination.hold_active = 1;
2291 	}
2292 
2293 	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED &&
2294 			!eth_priv->en_ordered) {
2295 		struct opr_cfg ocfg;
2296 
2297 		/* Restoration window size = 256 frames */
2298 		ocfg.oprrws = 3;
2299 		/* Restoration window size = 512 frames for LX2 */
2300 		if (dpaa2_svr_family == SVR_LX2160A)
2301 			ocfg.oprrws = 4;
2302 		/* Auto advance NESN window enabled */
2303 		ocfg.oa = 1;
2304 		/* Late arrival window size disabled */
2305 		ocfg.olws = 0;
2306 		/* ORL resource exhaustion advance NESN disabled */
2307 		ocfg.oeane = 0;
2308 		/* Loose ordering enabled */
2309 		ocfg.oloe = 1;
2310 		eth_priv->en_loose_ordered = 1;
2311 		/* Strict ordering enabled if explicitly set */
2312 		if (getenv("DPAA2_STRICT_ORDERING_ENABLE")) {
2313 			ocfg.oloe = 0;
2314 			eth_priv->en_loose_ordered = 0;
2315 		}
2316 
2317 		ret = dpni_set_opr(dpni, CMD_PRI_LOW, eth_priv->token,
2318 				   dpaa2_ethq->tc_index, flow_id,
2319 				   OPR_OPT_CREATE, &ocfg, 0);
2320 		if (ret) {
2321 			DPAA2_PMD_ERR("Error setting opr: ret: %d", ret);
2322 			return ret;
2323 		}
2324 
2325 		eth_priv->en_ordered = 1;
2326 	}
2327 
2328 	options |= DPNI_QUEUE_OPT_USER_CTX;
2329 	cfg.user_context = (size_t)(dpaa2_ethq);
2330 
2331 	ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
2332 			     dpaa2_ethq->tc_index, flow_id, options, &cfg);
2333 	if (ret) {
2334 		DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
2335 		return ret;
2336 	}
2337 
2338 	memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event));
2339 
2340 	return 0;
2341 }
2342 
2343 int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
2344 		int eth_rx_queue_id)
2345 {
2346 	struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
2347 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
2348 	struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
2349 	uint8_t flow_id = dpaa2_ethq->flow_id;
2350 	struct dpni_queue cfg;
2351 	uint8_t options;
2352 	int ret;
2353 
2354 	memset(&cfg, 0, sizeof(struct dpni_queue));
2355 	options = DPNI_QUEUE_OPT_DEST;
2356 	cfg.destination.type = DPNI_DEST_NONE;
2357 
2358 	ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
2359 			     dpaa2_ethq->tc_index, flow_id, options, &cfg);
2360 	if (ret)
2361 		DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
2362 
2363 	return ret;
2364 }
2365 
2366 static int
2367 dpaa2_dev_flow_ops_get(struct rte_eth_dev *dev,
2368 		       const struct rte_flow_ops **ops)
2369 {
2370 	if (!dev)
2371 		return -ENODEV;
2372 
2373 	*ops = &dpaa2_flow_ops;
2374 	return 0;
2375 }
2376 
2377 static void
2378 dpaa2_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2379 	struct rte_eth_rxq_info *qinfo)
2380 {
2381 	struct dpaa2_queue *rxq;
2382 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2383 	struct fsl_mc_io *dpni = dev->process_private;
2384 	uint16_t max_frame_length;
2385 
2386 	rxq = dev->data->rx_queues[queue_id];
2387 
2388 	qinfo->mp = rxq->mb_pool;
2389 	qinfo->scattered_rx = dev->data->scattered_rx;
2390 	qinfo->nb_desc = rxq->nb_desc;
2391 	if (dpni_get_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
2392 				&max_frame_length) == 0)
2393 		qinfo->rx_buf_size = max_frame_length;
2394 
2395 	qinfo->conf.rx_free_thresh = 1;
2396 	qinfo->conf.rx_drop_en = 1;
2397 	qinfo->conf.rx_deferred_start = 0;
2398 	qinfo->conf.offloads = rxq->offloads;
2399 }
2400 
2401 static void
2402 dpaa2_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2403 	struct rte_eth_txq_info *qinfo)
2404 {
2405 	struct dpaa2_queue *txq;
2406 
2407 	txq = dev->data->tx_queues[queue_id];
2408 
2409 	qinfo->nb_desc = txq->nb_desc;
2410 	qinfo->conf.tx_thresh.pthresh = 0;
2411 	qinfo->conf.tx_thresh.hthresh = 0;
2412 	qinfo->conf.tx_thresh.wthresh = 0;
2413 
2414 	qinfo->conf.tx_free_thresh = 0;
2415 	qinfo->conf.tx_rs_thresh = 0;
2416 	qinfo->conf.offloads = txq->offloads;
2417 	qinfo->conf.tx_deferred_start = 0;
2418 }
2419 
2420 static int
2421 dpaa2_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops)
2422 {
2423 	*(const void **)ops = &dpaa2_tm_ops;
2424 
2425 	return 0;
2426 }
2427 
2428 void
2429 rte_pmd_dpaa2_thread_init(void)
2430 {
2431 	int ret;
2432 
2433 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
2434 		ret = dpaa2_affine_qbman_swp();
2435 		if (ret) {
2436 			DPAA2_PMD_ERR(
2437 				"Failed to allocate IO portal, tid: %d",
2438 				rte_gettid());
2439 			return;
2440 		}
2441 	}
2442 }
2443 
2444 static struct eth_dev_ops dpaa2_ethdev_ops = {
2445 	.dev_configure	  = dpaa2_eth_dev_configure,
2446 	.dev_start	      = dpaa2_dev_start,
2447 	.dev_stop	      = dpaa2_dev_stop,
2448 	.dev_close	      = dpaa2_dev_close,
2449 	.promiscuous_enable   = dpaa2_dev_promiscuous_enable,
2450 	.promiscuous_disable  = dpaa2_dev_promiscuous_disable,
2451 	.allmulticast_enable  = dpaa2_dev_allmulticast_enable,
2452 	.allmulticast_disable = dpaa2_dev_allmulticast_disable,
2453 	.dev_set_link_up      = dpaa2_dev_set_link_up,
2454 	.dev_set_link_down    = dpaa2_dev_set_link_down,
2455 	.link_update	   = dpaa2_dev_link_update,
2456 	.stats_get	       = dpaa2_dev_stats_get,
2457 	.xstats_get	       = dpaa2_dev_xstats_get,
2458 	.xstats_get_by_id     = dpaa2_xstats_get_by_id,
2459 	.xstats_get_names_by_id = dpaa2_xstats_get_names_by_id,
2460 	.xstats_get_names      = dpaa2_xstats_get_names,
2461 	.stats_reset	   = dpaa2_dev_stats_reset,
2462 	.xstats_reset	      = dpaa2_dev_stats_reset,
2463 	.fw_version_get	   = dpaa2_fw_version_get,
2464 	.dev_infos_get	   = dpaa2_dev_info_get,
2465 	.dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
2466 	.mtu_set           = dpaa2_dev_mtu_set,
2467 	.vlan_filter_set      = dpaa2_vlan_filter_set,
2468 	.vlan_offload_set     = dpaa2_vlan_offload_set,
2469 	.vlan_tpid_set	      = dpaa2_vlan_tpid_set,
2470 	.rx_queue_setup    = dpaa2_dev_rx_queue_setup,
2471 	.rx_queue_release  = dpaa2_dev_rx_queue_release,
2472 	.tx_queue_setup    = dpaa2_dev_tx_queue_setup,
2473 	.rx_burst_mode_get = dpaa2_dev_rx_burst_mode_get,
2474 	.tx_burst_mode_get = dpaa2_dev_tx_burst_mode_get,
2475 	.flow_ctrl_get	      = dpaa2_flow_ctrl_get,
2476 	.flow_ctrl_set	      = dpaa2_flow_ctrl_set,
2477 	.mac_addr_add         = dpaa2_dev_add_mac_addr,
2478 	.mac_addr_remove      = dpaa2_dev_remove_mac_addr,
2479 	.mac_addr_set         = dpaa2_dev_set_mac_addr,
2480 	.rss_hash_update      = dpaa2_dev_rss_hash_update,
2481 	.rss_hash_conf_get    = dpaa2_dev_rss_hash_conf_get,
2482 	.flow_ops_get         = dpaa2_dev_flow_ops_get,
2483 	.rxq_info_get	      = dpaa2_rxq_info_get,
2484 	.txq_info_get	      = dpaa2_txq_info_get,
2485 	.tm_ops_get	      = dpaa2_tm_ops_get,
2486 #if defined(RTE_LIBRTE_IEEE1588)
2487 	.timesync_enable      = dpaa2_timesync_enable,
2488 	.timesync_disable     = dpaa2_timesync_disable,
2489 	.timesync_read_time   = dpaa2_timesync_read_time,
2490 	.timesync_write_time  = dpaa2_timesync_write_time,
2491 	.timesync_adjust_time = dpaa2_timesync_adjust_time,
2492 	.timesync_read_rx_timestamp = dpaa2_timesync_read_rx_timestamp,
2493 	.timesync_read_tx_timestamp = dpaa2_timesync_read_tx_timestamp,
2494 #endif
2495 };
2496 
2497 /* Populate the mac address from physically available (u-boot/firmware) and/or
2498  * one set by higher layers like MC (restool) etc.
2499  * Returns the table of MAC entries (multiple entries)
2500  */
2501 static int
2502 populate_mac_addr(struct fsl_mc_io *dpni_dev,
2503 	struct dpaa2_dev_priv *priv, struct rte_ether_addr *mac_entry)
2504 {
2505 	int ret = 0;
2506 	struct rte_ether_addr phy_mac, prime_mac;
2507 
2508 	memset(&phy_mac, 0, sizeof(struct rte_ether_addr));
2509 	memset(&prime_mac, 0, sizeof(struct rte_ether_addr));
2510 
2511 	/* Get the physical device MAC address */
2512 	ret = dpni_get_port_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
2513 				     phy_mac.addr_bytes);
2514 	if (ret) {
2515 		DPAA2_PMD_ERR("DPNI get physical port MAC failed: %d", ret);
2516 		goto cleanup;
2517 	}
2518 
2519 	ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
2520 					prime_mac.addr_bytes);
2521 	if (ret) {
2522 		DPAA2_PMD_ERR("DPNI get Prime port MAC failed: %d", ret);
2523 		goto cleanup;
2524 	}
2525 
2526 	/* Now that both MAC have been obtained, do:
2527 	 *  if not_empty_mac(phy) && phy != Prime, overwrite prime with Phy
2528 	 *     and return phy
2529 	 *  If empty_mac(phy), return prime.
2530 	 *  if both are empty, create random MAC, set as prime and return
2531 	 */
2532 	if (!rte_is_zero_ether_addr(&phy_mac)) {
2533 		/* If the addresses are not same, overwrite prime */
2534 		if (!rte_is_same_ether_addr(&phy_mac, &prime_mac)) {
2535 			ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
2536 							priv->token,
2537 							phy_mac.addr_bytes);
2538 			if (ret) {
2539 				DPAA2_PMD_ERR("Unable to set MAC Address: %d",
2540 					      ret);
2541 				goto cleanup;
2542 			}
2543 			memcpy(&prime_mac, &phy_mac,
2544 				sizeof(struct rte_ether_addr));
2545 		}
2546 	} else if (rte_is_zero_ether_addr(&prime_mac)) {
2547 		/* In case phys and prime, both are zero, create random MAC */
2548 		rte_eth_random_addr(prime_mac.addr_bytes);
2549 		ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
2550 						priv->token,
2551 						prime_mac.addr_bytes);
2552 		if (ret) {
2553 			DPAA2_PMD_ERR("Unable to set MAC Address: %d", ret);
2554 			goto cleanup;
2555 		}
2556 	}
2557 
2558 	/* prime_mac the final MAC address */
2559 	memcpy(mac_entry, &prime_mac, sizeof(struct rte_ether_addr));
2560 	return 0;
2561 
2562 cleanup:
2563 	return ret;
2564 }
2565 
2566 static int
2567 check_devargs_handler(__rte_unused const char *key, const char *value,
2568 		      __rte_unused void *opaque)
2569 {
2570 	if (strcmp(value, "1"))
2571 		return -1;
2572 
2573 	return 0;
2574 }
2575 
2576 static int
2577 dpaa2_get_devargs(struct rte_devargs *devargs, const char *key)
2578 {
2579 	struct rte_kvargs *kvlist;
2580 
2581 	if (!devargs)
2582 		return 0;
2583 
2584 	kvlist = rte_kvargs_parse(devargs->args, NULL);
2585 	if (!kvlist)
2586 		return 0;
2587 
2588 	if (!rte_kvargs_count(kvlist, key)) {
2589 		rte_kvargs_free(kvlist);
2590 		return 0;
2591 	}
2592 
2593 	if (rte_kvargs_process(kvlist, key,
2594 			       check_devargs_handler, NULL) < 0) {
2595 		rte_kvargs_free(kvlist);
2596 		return 0;
2597 	}
2598 	rte_kvargs_free(kvlist);
2599 
2600 	return 1;
2601 }
2602 
2603 static int
2604 dpaa2_dev_init(struct rte_eth_dev *eth_dev)
2605 {
2606 	struct rte_device *dev = eth_dev->device;
2607 	struct rte_dpaa2_device *dpaa2_dev;
2608 	struct fsl_mc_io *dpni_dev;
2609 	struct dpni_attr attr;
2610 	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
2611 	struct dpni_buffer_layout layout;
2612 	int ret, hw_id, i;
2613 
2614 	PMD_INIT_FUNC_TRACE();
2615 
2616 	dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
2617 	if (!dpni_dev) {
2618 		DPAA2_PMD_ERR("Memory allocation failed for dpni device");
2619 		return -1;
2620 	}
2621 	dpni_dev->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
2622 	eth_dev->process_private = dpni_dev;
2623 
2624 	/* For secondary processes, the primary has done all the work */
2625 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2626 		/* In case of secondary, only burst and ops API need to be
2627 		 * plugged.
2628 		 */
2629 		eth_dev->dev_ops = &dpaa2_ethdev_ops;
2630 		eth_dev->rx_queue_count = dpaa2_dev_rx_queue_count;
2631 		if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE))
2632 			eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
2633 		else if (dpaa2_get_devargs(dev->devargs,
2634 					DRIVER_NO_PREFETCH_MODE))
2635 			eth_dev->rx_pkt_burst = dpaa2_dev_rx;
2636 		else
2637 			eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
2638 		eth_dev->tx_pkt_burst = dpaa2_dev_tx;
2639 		return 0;
2640 	}
2641 
2642 	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
2643 
2644 	hw_id = dpaa2_dev->object_id;
2645 	ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
2646 	if (ret) {
2647 		DPAA2_PMD_ERR(
2648 			     "Failure in opening dpni@%d with err code %d",
2649 			     hw_id, ret);
2650 		rte_free(dpni_dev);
2651 		return ret;
2652 	}
2653 
2654 	if (eth_dev->data->dev_conf.lpbk_mode)
2655 		dpaa2_dev_recycle_deconfig(eth_dev);
2656 
2657 	/* Clean the device first */
2658 	ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
2659 	if (ret) {
2660 		DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d",
2661 			      hw_id, ret);
2662 		goto init_err;
2663 	}
2664 
2665 	ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
2666 	if (ret) {
2667 		DPAA2_PMD_ERR(
2668 			     "Failure in get dpni@%d attribute, err code %d",
2669 			     hw_id, ret);
2670 		goto init_err;
2671 	}
2672 
2673 	priv->num_rx_tc = attr.num_rx_tcs;
2674 	priv->num_tx_tc = attr.num_tx_tcs;
2675 	priv->qos_entries = attr.qos_entries;
2676 	priv->fs_entries = attr.fs_entries;
2677 	priv->dist_queues = attr.num_queues;
2678 	priv->num_channels = attr.num_channels;
2679 	priv->channel_inuse = 0;
2680 	rte_spinlock_init(&priv->lpbk_qp_lock);
2681 
2682 	/* only if the custom CG is enabled */
2683 	if (attr.options & DPNI_OPT_CUSTOM_CG)
2684 		priv->max_cgs = attr.num_cgs;
2685 	else
2686 		priv->max_cgs = 0;
2687 
2688 	for (i = 0; i < priv->max_cgs; i++)
2689 		priv->cgid_in_use[i] = 0;
2690 
2691 	for (i = 0; i < attr.num_rx_tcs; i++)
2692 		priv->nb_rx_queues += attr.num_queues;
2693 
2694 	priv->nb_tx_queues = attr.num_tx_tcs * attr.num_channels;
2695 
2696 	DPAA2_PMD_DEBUG("RX-TC= %d, rx_queues= %d, tx_queues=%d, max_cgs=%d",
2697 			priv->num_rx_tc, priv->nb_rx_queues,
2698 			priv->nb_tx_queues, priv->max_cgs);
2699 
2700 	priv->hw = dpni_dev;
2701 	priv->hw_id = hw_id;
2702 	priv->options = attr.options;
2703 	priv->max_mac_filters = attr.mac_filter_entries;
2704 	priv->max_vlan_filters = attr.vlan_filter_entries;
2705 	priv->flags = 0;
2706 #if defined(RTE_LIBRTE_IEEE1588)
2707 	DPAA2_PMD_INFO("DPDK IEEE1588 is enabled");
2708 	priv->flags |= DPAA2_TX_CONF_ENABLE;
2709 #endif
2710 	/* Used with ``fslmc:dpni.1,drv_tx_conf=1`` */
2711 	if (dpaa2_get_devargs(dev->devargs, DRIVER_TX_CONF)) {
2712 		priv->flags |= DPAA2_TX_CONF_ENABLE;
2713 		DPAA2_PMD_INFO("TX_CONF Enabled");
2714 	}
2715 
2716 	if (dpaa2_get_devargs(dev->devargs, DRIVER_ERROR_QUEUE)) {
2717 		dpaa2_enable_err_queue = 1;
2718 		DPAA2_PMD_INFO("Enable DMA error checks");
2719 	}
2720 
2721 	if (getenv("DPAA2_PRINT_RX_PARSER_RESULT"))
2722 		dpaa2_print_parser_result = 1;
2723 
2724 	/* Allocate memory for hardware structure for queues */
2725 	ret = dpaa2_alloc_rx_tx_queues(eth_dev);
2726 	if (ret) {
2727 		DPAA2_PMD_ERR("Queue allocation Failed");
2728 		goto init_err;
2729 	}
2730 
2731 	/* Allocate memory for storing MAC addresses.
2732 	 * Table of mac_filter_entries size is allocated so that RTE ether lib
2733 	 * can add MAC entries when rte_eth_dev_mac_addr_add is called.
2734 	 */
2735 	eth_dev->data->mac_addrs = rte_zmalloc("dpni",
2736 		RTE_ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
2737 	if (eth_dev->data->mac_addrs == NULL) {
2738 		DPAA2_PMD_ERR(
2739 		   "Failed to allocate %d bytes needed to store MAC addresses",
2740 		   RTE_ETHER_ADDR_LEN * attr.mac_filter_entries);
2741 		ret = -ENOMEM;
2742 		goto init_err;
2743 	}
2744 
2745 	ret = populate_mac_addr(dpni_dev, priv, &eth_dev->data->mac_addrs[0]);
2746 	if (ret) {
2747 		DPAA2_PMD_ERR("Unable to fetch MAC Address for device");
2748 		rte_free(eth_dev->data->mac_addrs);
2749 		eth_dev->data->mac_addrs = NULL;
2750 		goto init_err;
2751 	}
2752 
2753 	/* ... tx buffer layout ... */
2754 	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
2755 	if (priv->flags & DPAA2_TX_CONF_ENABLE) {
2756 		layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
2757 				 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2758 		layout.pass_timestamp = true;
2759 	} else {
2760 		layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
2761 	}
2762 	layout.pass_frame_status = 1;
2763 	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
2764 				     DPNI_QUEUE_TX, &layout);
2765 	if (ret) {
2766 		DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret);
2767 		goto init_err;
2768 	}
2769 
2770 	/* ... tx-conf and error buffer layout ... */
2771 	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
2772 	if (priv->flags & DPAA2_TX_CONF_ENABLE) {
2773 		layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2774 		layout.pass_timestamp = true;
2775 	}
2776 	layout.options |= DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
2777 	layout.pass_frame_status = 1;
2778 	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
2779 				     DPNI_QUEUE_TX_CONFIRM, &layout);
2780 	if (ret) {
2781 		DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout",
2782 			     ret);
2783 		goto init_err;
2784 	}
2785 
2786 	eth_dev->dev_ops = &dpaa2_ethdev_ops;
2787 
2788 	if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) {
2789 		eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
2790 		DPAA2_PMD_INFO("Loopback mode");
2791 	} else if (dpaa2_get_devargs(dev->devargs, DRIVER_NO_PREFETCH_MODE)) {
2792 		eth_dev->rx_pkt_burst = dpaa2_dev_rx;
2793 		DPAA2_PMD_INFO("No Prefetch mode");
2794 	} else {
2795 		eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
2796 	}
2797 	eth_dev->tx_pkt_burst = dpaa2_dev_tx;
2798 
2799 	/* Init fields w.r.t. classification */
2800 	memset(&priv->extract.qos_key_extract, 0,
2801 		sizeof(struct dpaa2_key_extract));
2802 	priv->extract.qos_extract_param = rte_malloc(NULL,
2803 		DPAA2_EXTRACT_PARAM_MAX_SIZE,
2804 		RTE_CACHE_LINE_SIZE);
2805 	if (!priv->extract.qos_extract_param) {
2806 		DPAA2_PMD_ERR("Memory alloc failed");
2807 		goto init_err;
2808 	}
2809 
2810 	for (i = 0; i < MAX_TCS; i++) {
2811 		memset(&priv->extract.tc_key_extract[i], 0,
2812 			sizeof(struct dpaa2_key_extract));
2813 		priv->extract.tc_extract_param[i] = rte_malloc(NULL,
2814 			DPAA2_EXTRACT_PARAM_MAX_SIZE,
2815 			RTE_CACHE_LINE_SIZE);
2816 		if (!priv->extract.tc_extract_param[i]) {
2817 			DPAA2_PMD_ERR("Memory alloc failed");
2818 			goto init_err;
2819 		}
2820 	}
2821 
2822 	ret = dpni_set_max_frame_length(dpni_dev, CMD_PRI_LOW, priv->token,
2823 					RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN
2824 					+ VLAN_TAG_SIZE);
2825 	if (ret) {
2826 		DPAA2_PMD_ERR("Unable to set mtu. check config");
2827 		goto init_err;
2828 	}
2829 	eth_dev->data->mtu = RTE_ETHER_MTU;
2830 
2831 	/*TODO To enable soft parser support DPAA2 driver needs to integrate
2832 	 * with external entity to receive byte code for software sequence
2833 	 * and same will be offload to the H/W using MC interface.
2834 	 * Currently it is assumed that DPAA2 driver has byte code by some
2835 	 * mean and same if offloaded to H/W.
2836 	 */
2837 	if (getenv("DPAA2_ENABLE_SOFT_PARSER")) {
2838 		WRIOP_SS_INITIALIZER(priv);
2839 		ret = dpaa2_eth_load_wriop_soft_parser(priv, DPNI_SS_INGRESS);
2840 		if (ret < 0) {
2841 			DPAA2_PMD_ERR(" Error(%d) in loading softparser",
2842 				      ret);
2843 			return ret;
2844 		}
2845 
2846 		ret = dpaa2_eth_enable_wriop_soft_parser(priv,
2847 							 DPNI_SS_INGRESS);
2848 		if (ret < 0) {
2849 			DPAA2_PMD_ERR(" Error(%d) in enabling softparser",
2850 				      ret);
2851 			return ret;
2852 		}
2853 	}
2854 
2855 	ret = dpaa2_soft_parser_loaded();
2856 	if (ret > 0)
2857 		DPAA2_PMD_INFO("soft parser is loaded");
2858 	DPAA2_PMD_INFO("%s: netdev created, connected to %s",
2859 		eth_dev->data->name, dpaa2_dev->ep_name);
2860 
2861 	return 0;
2862 init_err:
2863 	dpaa2_dev_close(eth_dev);
2864 
2865 	return ret;
2866 }
2867 
2868 int
2869 rte_pmd_dpaa2_dev_is_dpaa2(uint32_t eth_id)
2870 {
2871 	struct rte_eth_dev *dev;
2872 
2873 	if (eth_id >= RTE_MAX_ETHPORTS)
2874 		return false;
2875 
2876 	dev = &rte_eth_devices[eth_id];
2877 	if (!dev->device)
2878 		return false;
2879 
2880 	return dev->device->driver == &rte_dpaa2_pmd.driver;
2881 }
2882 
2883 const char *
2884 rte_pmd_dpaa2_ep_name(uint32_t eth_id)
2885 {
2886 	struct rte_eth_dev *dev;
2887 	struct dpaa2_dev_priv *priv;
2888 
2889 	if (eth_id >= RTE_MAX_ETHPORTS)
2890 		return NULL;
2891 
2892 	if (!rte_pmd_dpaa2_dev_is_dpaa2(eth_id))
2893 		return NULL;
2894 
2895 	dev = &rte_eth_devices[eth_id];
2896 	if (!dev->data)
2897 		return NULL;
2898 
2899 	if (!dev->data->dev_private)
2900 		return NULL;
2901 
2902 	priv = dev->data->dev_private;
2903 
2904 	return priv->ep_name;
2905 }
2906 
2907 #if defined(RTE_LIBRTE_IEEE1588)
2908 int
2909 rte_pmd_dpaa2_get_one_step_ts(uint16_t port_id, bool mc_query)
2910 {
2911 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2912 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2913 	struct fsl_mc_io *dpni = priv->eth_dev->process_private;
2914 	struct dpni_single_step_cfg ptp_cfg;
2915 	int err;
2916 
2917 	if (!mc_query)
2918 		return priv->ptp_correction_offset;
2919 
2920 	err = dpni_get_single_step_cfg(dpni, CMD_PRI_LOW, priv->token, &ptp_cfg);
2921 	if (err) {
2922 		DPAA2_PMD_ERR("Failed to retrieve onestep configuration");
2923 		return err;
2924 	}
2925 
2926 	if (!ptp_cfg.ptp_onestep_reg_base) {
2927 		DPAA2_PMD_ERR("1588 onestep reg not available");
2928 		return -1;
2929 	}
2930 
2931 	priv->ptp_correction_offset = ptp_cfg.offset;
2932 
2933 	return priv->ptp_correction_offset;
2934 }
2935 
2936 int
2937 rte_pmd_dpaa2_set_one_step_ts(uint16_t port_id, uint16_t offset, uint8_t ch_update)
2938 {
2939 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2940 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2941 	struct fsl_mc_io *dpni = dev->process_private;
2942 	struct dpni_single_step_cfg cfg;
2943 	int err;
2944 
2945 	cfg.en = 1;
2946 	cfg.ch_update = ch_update;
2947 	cfg.offset = offset;
2948 	cfg.peer_delay = 0;
2949 
2950 	err = dpni_set_single_step_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
2951 	if (err)
2952 		return err;
2953 
2954 	priv->ptp_correction_offset = offset;
2955 
2956 	return 0;
2957 }
2958 #endif
2959 
2960 static int dpaa2_tx_sg_pool_init(void)
2961 {
2962 	char name[RTE_MEMZONE_NAMESIZE];
2963 
2964 	if (dpaa2_tx_sg_pool)
2965 		return 0;
2966 
2967 	sprintf(name, "dpaa2_mbuf_tx_sg_pool");
2968 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2969 		dpaa2_tx_sg_pool = rte_pktmbuf_pool_create(name,
2970 			DPAA2_POOL_SIZE,
2971 			DPAA2_POOL_CACHE_SIZE, 0,
2972 			DPAA2_MAX_SGS * sizeof(struct qbman_sge),
2973 			rte_socket_id());
2974 		if (!dpaa2_tx_sg_pool) {
2975 			DPAA2_PMD_ERR("SG pool creation failed");
2976 			return -ENOMEM;
2977 		}
2978 	} else {
2979 		dpaa2_tx_sg_pool = rte_mempool_lookup(name);
2980 		if (!dpaa2_tx_sg_pool) {
2981 			DPAA2_PMD_ERR("SG pool lookup failed");
2982 			return -ENOMEM;
2983 		}
2984 	}
2985 
2986 	return 0;
2987 }
2988 
2989 static int
2990 rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
2991 		struct rte_dpaa2_device *dpaa2_dev)
2992 {
2993 	struct rte_eth_dev *eth_dev;
2994 	struct dpaa2_dev_priv *dev_priv;
2995 	int diag;
2996 
2997 	if ((DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) >
2998 		RTE_PKTMBUF_HEADROOM) {
2999 		DPAA2_PMD_ERR("RTE_PKTMBUF_HEADROOM(%d) < DPAA2 Annotation(%d)",
3000 			RTE_PKTMBUF_HEADROOM,
3001 			DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE);
3002 
3003 		return -EINVAL;
3004 	}
3005 
3006 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3007 		eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
3008 		if (!eth_dev)
3009 			return -ENODEV;
3010 		dev_priv = rte_zmalloc("ethdev private structure",
3011 				       sizeof(struct dpaa2_dev_priv),
3012 				       RTE_CACHE_LINE_SIZE);
3013 		if (dev_priv == NULL) {
3014 			DPAA2_PMD_CRIT(
3015 				"Unable to allocate memory for private data");
3016 			rte_eth_dev_release_port(eth_dev);
3017 			return -ENOMEM;
3018 		}
3019 		eth_dev->data->dev_private = (void *)dev_priv;
3020 		/* Store a pointer to eth_dev in dev_private */
3021 		dev_priv->eth_dev = eth_dev;
3022 	} else {
3023 		eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
3024 		if (!eth_dev) {
3025 			DPAA2_PMD_DEBUG("returning enodev");
3026 			return -ENODEV;
3027 		}
3028 	}
3029 
3030 	eth_dev->device = &dpaa2_dev->device;
3031 
3032 	dpaa2_dev->eth_dev = eth_dev;
3033 	eth_dev->data->rx_mbuf_alloc_failed = 0;
3034 
3035 	if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC)
3036 		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
3037 
3038 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
3039 
3040 	/* Invoke PMD device initialization function */
3041 	diag = dpaa2_dev_init(eth_dev);
3042 	if (!diag) {
3043 		diag = dpaa2_tx_sg_pool_init();
3044 		if (diag)
3045 			return diag;
3046 		rte_eth_dev_probing_finish(eth_dev);
3047 		dpaa2_valid_dev++;
3048 		return 0;
3049 	}
3050 
3051 	rte_eth_dev_release_port(eth_dev);
3052 	return diag;
3053 }
3054 
3055 static int
3056 rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
3057 {
3058 	struct rte_eth_dev *eth_dev;
3059 	int ret;
3060 
3061 	eth_dev = dpaa2_dev->eth_dev;
3062 	dpaa2_dev_close(eth_dev);
3063 	dpaa2_valid_dev--;
3064 	if (!dpaa2_valid_dev)
3065 		rte_mempool_free(dpaa2_tx_sg_pool);
3066 	ret = rte_eth_dev_release_port(eth_dev);
3067 
3068 	return ret;
3069 }
3070 
3071 static struct rte_dpaa2_driver rte_dpaa2_pmd = {
3072 	.drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA,
3073 	.drv_type = DPAA2_ETH,
3074 	.probe = rte_dpaa2_probe,
3075 	.remove = rte_dpaa2_remove,
3076 };
3077 
3078 RTE_PMD_REGISTER_DPAA2(NET_DPAA2_PMD_DRIVER_NAME, rte_dpaa2_pmd);
3079 RTE_PMD_REGISTER_PARAM_STRING(NET_DPAA2_PMD_DRIVER_NAME,
3080 		DRIVER_LOOPBACK_MODE "=<int> "
3081 		DRIVER_NO_PREFETCH_MODE "=<int>"
3082 		DRIVER_TX_CONF "=<int>"
3083 		DRIVER_ERROR_QUEUE "=<int>");
3084 RTE_LOG_REGISTER_DEFAULT(dpaa2_logtype_pmd, NOTICE);
3085