xref: /dpdk/drivers/net/dpaa2/dpaa2_ethdev.c (revision f9dfb59edbccae50e7c5508348aa2b4b84413048)
1 /* * SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2021 NXP
5  *
6  */
7 
8 #include <time.h>
9 #include <net/if.h>
10 
11 #include <rte_mbuf.h>
12 #include <ethdev_driver.h>
13 #include <rte_malloc.h>
14 #include <rte_memcpy.h>
15 #include <rte_string_fns.h>
16 #include <rte_cycles.h>
17 #include <rte_kvargs.h>
18 #include <dev_driver.h>
19 #include <bus_fslmc_driver.h>
20 #include <rte_flow_driver.h>
21 #include "rte_dpaa2_mempool.h"
22 
23 #include "dpaa2_pmd_logs.h"
24 #include <fslmc_vfio.h>
25 #include <dpaa2_hw_pvt.h>
26 #include <dpaa2_hw_mempool.h>
27 #include <dpaa2_hw_dpio.h>
28 #include <mc/fsl_dpmng.h>
29 #include "dpaa2_ethdev.h"
30 #include "dpaa2_sparser.h"
31 #include <fsl_qbman_debug.h>
32 
33 #define DRIVER_LOOPBACK_MODE "drv_loopback"
34 #define DRIVER_NO_PREFETCH_MODE "drv_no_prefetch"
35 #define DRIVER_TX_CONF "drv_tx_conf"
36 #define DRIVER_ERROR_QUEUE  "drv_err_queue"
37 #define CHECK_INTERVAL         100  /* 100ms */
38 #define MAX_REPEAT_TIME        90   /* 9s (90 * 100ms) in total */
39 
40 /* Supported Rx offloads */
41 static uint64_t dev_rx_offloads_sup =
42 		RTE_ETH_RX_OFFLOAD_CHECKSUM |
43 		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
44 		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
45 		RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
46 		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
47 		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
48 		RTE_ETH_RX_OFFLOAD_TIMESTAMP;
49 
50 /* Rx offloads which cannot be disabled */
51 static uint64_t dev_rx_offloads_nodis =
52 		RTE_ETH_RX_OFFLOAD_RSS_HASH |
53 		RTE_ETH_RX_OFFLOAD_SCATTER;
54 
55 /* Supported Tx offloads */
56 static uint64_t dev_tx_offloads_sup =
57 		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
58 		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
59 		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
60 		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
61 		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
62 		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
63 		RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
64 		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
65 
66 /* Tx offloads which cannot be disabled */
67 static uint64_t dev_tx_offloads_nodis =
68 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
69 
70 /* enable timestamp in mbuf */
71 bool dpaa2_enable_ts[RTE_MAX_ETHPORTS];
72 uint64_t dpaa2_timestamp_rx_dynflag;
73 int dpaa2_timestamp_dynfield_offset = -1;
74 
75 /* Enable error queue */
76 bool dpaa2_enable_err_queue;
77 
78 #define MAX_NB_RX_DESC		11264
79 int total_nb_rx_desc;
80 
81 int dpaa2_valid_dev;
82 struct rte_mempool *dpaa2_tx_sg_pool;
83 
84 struct rte_dpaa2_xstats_name_off {
85 	char name[RTE_ETH_XSTATS_NAME_SIZE];
86 	uint8_t page_id; /* dpni statistics page id */
87 	uint8_t stats_id; /* stats id in the given page */
88 };
89 
90 static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
91 	{"ingress_multicast_frames", 0, 2},
92 	{"ingress_multicast_bytes", 0, 3},
93 	{"ingress_broadcast_frames", 0, 4},
94 	{"ingress_broadcast_bytes", 0, 5},
95 	{"egress_multicast_frames", 1, 2},
96 	{"egress_multicast_bytes", 1, 3},
97 	{"egress_broadcast_frames", 1, 4},
98 	{"egress_broadcast_bytes", 1, 5},
99 	{"ingress_filtered_frames", 2, 0},
100 	{"ingress_discarded_frames", 2, 1},
101 	{"ingress_nobuffer_discards", 2, 2},
102 	{"egress_discarded_frames", 2, 3},
103 	{"egress_confirmed_frames", 2, 4},
104 	{"cgr_reject_frames", 4, 0},
105 	{"cgr_reject_bytes", 4, 1},
106 };
107 
108 static struct rte_dpaa2_driver rte_dpaa2_pmd;
109 static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
110 				 int wait_to_complete);
111 static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
112 static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
113 static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
114 
115 static int
116 dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
117 {
118 	int ret;
119 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
120 	struct fsl_mc_io *dpni = dev->process_private;
121 
122 	PMD_INIT_FUNC_TRACE();
123 
124 	if (dpni == NULL) {
125 		DPAA2_PMD_ERR("dpni is NULL");
126 		return -1;
127 	}
128 
129 	if (on)
130 		ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW, priv->token,
131 				       vlan_id, 0, 0, 0);
132 	else
133 		ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
134 					  priv->token, vlan_id);
135 
136 	if (ret < 0)
137 		DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid =%d",
138 			      ret, vlan_id, priv->hw_id);
139 
140 	return ret;
141 }
142 
143 static int
144 dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
145 {
146 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
147 	struct fsl_mc_io *dpni = dev->process_private;
148 	int ret = 0;
149 
150 	PMD_INIT_FUNC_TRACE();
151 
152 	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
153 		/* VLAN Filter not available */
154 		if (!priv->max_vlan_filters) {
155 			DPAA2_PMD_INFO("VLAN filter not available");
156 			return -ENOTSUP;
157 		}
158 
159 		if (dev->data->dev_conf.rxmode.offloads &
160 			RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
161 			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
162 						      priv->token, true);
163 		else
164 			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
165 						      priv->token, false);
166 		if (ret < 0)
167 			DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret);
168 	}
169 
170 	return ret;
171 }
172 
173 static int
174 dpaa2_vlan_tpid_set(struct rte_eth_dev *dev,
175 		      enum rte_vlan_type vlan_type __rte_unused,
176 		      uint16_t tpid)
177 {
178 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
179 	struct fsl_mc_io *dpni = dev->process_private;
180 	int ret = -ENOTSUP;
181 
182 	PMD_INIT_FUNC_TRACE();
183 
184 	/* nothing to be done for standard vlan tpids */
185 	if (tpid == 0x8100 || tpid == 0x88A8)
186 		return 0;
187 
188 	ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
189 				   priv->token, tpid);
190 	if (ret < 0)
191 		DPAA2_PMD_INFO("Unable to set vlan tpid = %d", ret);
192 	/* if already configured tpids, remove them first */
193 	if (ret == -EBUSY) {
194 		struct dpni_custom_tpid_cfg tpid_list = {0};
195 
196 		ret = dpni_get_custom_tpid(dpni, CMD_PRI_LOW,
197 				   priv->token, &tpid_list);
198 		if (ret < 0)
199 			goto fail;
200 		ret = dpni_remove_custom_tpid(dpni, CMD_PRI_LOW,
201 				   priv->token, tpid_list.tpid1);
202 		if (ret < 0)
203 			goto fail;
204 		ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
205 					   priv->token, tpid);
206 	}
207 fail:
208 	return ret;
209 }
210 
211 static int
212 dpaa2_fw_version_get(struct rte_eth_dev *dev,
213 		     char *fw_version,
214 		     size_t fw_size)
215 {
216 	int ret;
217 	struct fsl_mc_io *dpni = dev->process_private;
218 	struct mc_soc_version mc_plat_info = {0};
219 	struct mc_version mc_ver_info = {0};
220 
221 	PMD_INIT_FUNC_TRACE();
222 
223 	if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
224 		DPAA2_PMD_WARN("\tmc_get_soc_version failed");
225 
226 	if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
227 		DPAA2_PMD_WARN("\tmc_get_version failed");
228 
229 	ret = snprintf(fw_version, fw_size,
230 		       "%x-%d.%d.%d",
231 		       mc_plat_info.svr,
232 		       mc_ver_info.major,
233 		       mc_ver_info.minor,
234 		       mc_ver_info.revision);
235 	if (ret < 0)
236 		return -EINVAL;
237 
238 	ret += 1; /* add the size of '\0' */
239 	if (fw_size < (size_t)ret)
240 		return ret;
241 	else
242 		return 0;
243 }
244 
245 static int
246 dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
247 {
248 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
249 
250 	PMD_INIT_FUNC_TRACE();
251 
252 	dev_info->max_mac_addrs = priv->max_mac_filters;
253 	dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
254 	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
255 	dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
256 	dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
257 	dev_info->rx_offload_capa = dev_rx_offloads_sup |
258 					dev_rx_offloads_nodis;
259 	dev_info->tx_offload_capa = dev_tx_offloads_sup |
260 					dev_tx_offloads_nodis;
261 	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G |
262 			RTE_ETH_LINK_SPEED_2_5G |
263 			RTE_ETH_LINK_SPEED_10G;
264 	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
265 
266 	dev_info->max_hash_mac_addrs = 0;
267 	dev_info->max_vfs = 0;
268 	dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
269 	dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
270 
271 	dev_info->default_rxportconf.burst_size = dpaa2_dqrr_size;
272 	/* same is rx size for best perf */
273 	dev_info->default_txportconf.burst_size = dpaa2_dqrr_size;
274 
275 	dev_info->default_rxportconf.nb_queues = 1;
276 	dev_info->default_txportconf.nb_queues = 1;
277 	dev_info->default_txportconf.ring_size = CONG_ENTER_TX_THRESHOLD;
278 	dev_info->default_rxportconf.ring_size = DPAA2_RX_DEFAULT_NBDESC;
279 
280 	if (dpaa2_svr_family == SVR_LX2160A) {
281 		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_25G |
282 				RTE_ETH_LINK_SPEED_40G |
283 				RTE_ETH_LINK_SPEED_50G |
284 				RTE_ETH_LINK_SPEED_100G;
285 	}
286 
287 	return 0;
288 }
289 
290 static int
291 dpaa2_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
292 			__rte_unused uint16_t queue_id,
293 			struct rte_eth_burst_mode *mode)
294 {
295 	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
296 	int ret = -EINVAL;
297 	unsigned int i;
298 	const struct burst_info {
299 		uint64_t flags;
300 		const char *output;
301 	} rx_offload_map[] = {
302 			{RTE_ETH_RX_OFFLOAD_CHECKSUM, " Checksum,"},
303 			{RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
304 			{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
305 			{RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
306 			{RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
307 			{RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
308 			{RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
309 			{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"},
310 			{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"}
311 	};
312 
313 	/* Update Rx offload info */
314 	for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
315 		if (eth_conf->rxmode.offloads & rx_offload_map[i].flags) {
316 			snprintf(mode->info, sizeof(mode->info), "%s",
317 				rx_offload_map[i].output);
318 			ret = 0;
319 			break;
320 		}
321 	}
322 	return ret;
323 }
324 
325 static int
326 dpaa2_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
327 			__rte_unused uint16_t queue_id,
328 			struct rte_eth_burst_mode *mode)
329 {
330 	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
331 	int ret = -EINVAL;
332 	unsigned int i;
333 	const struct burst_info {
334 		uint64_t flags;
335 		const char *output;
336 	} tx_offload_map[] = {
337 			{RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
338 			{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
339 			{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
340 			{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
341 			{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
342 			{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
343 			{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
344 			{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
345 			{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
346 	};
347 
348 	/* Update Tx offload info */
349 	for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
350 		if (eth_conf->txmode.offloads & tx_offload_map[i].flags) {
351 			snprintf(mode->info, sizeof(mode->info), "%s",
352 				tx_offload_map[i].output);
353 			ret = 0;
354 			break;
355 		}
356 	}
357 	return ret;
358 }
359 
360 static int
361 dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
362 {
363 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
364 	uint16_t dist_idx;
365 	uint32_t vq_id;
366 	uint8_t num_rxqueue_per_tc;
367 	struct dpaa2_queue *mc_q, *mcq;
368 	uint32_t tot_queues;
369 	int i;
370 	struct dpaa2_queue *dpaa2_q;
371 
372 	PMD_INIT_FUNC_TRACE();
373 
374 	num_rxqueue_per_tc = (priv->nb_rx_queues / priv->num_rx_tc);
375 	if (priv->flags & DPAA2_TX_CONF_ENABLE)
376 		tot_queues = priv->nb_rx_queues + 2 * priv->nb_tx_queues;
377 	else
378 		tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
379 	mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
380 			  RTE_CACHE_LINE_SIZE);
381 	if (!mc_q) {
382 		DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues");
383 		return -1;
384 	}
385 
386 	for (i = 0; i < priv->nb_rx_queues; i++) {
387 		mc_q->eth_data = dev->data;
388 		priv->rx_vq[i] = mc_q++;
389 		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
390 		dpaa2_q->q_storage = rte_malloc("dq_storage",
391 					sizeof(struct queue_storage_info_t),
392 					RTE_CACHE_LINE_SIZE);
393 		if (!dpaa2_q->q_storage)
394 			goto fail;
395 
396 		memset(dpaa2_q->q_storage, 0,
397 		       sizeof(struct queue_storage_info_t));
398 		if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
399 			goto fail;
400 	}
401 
402 	if (dpaa2_enable_err_queue) {
403 		priv->rx_err_vq = rte_zmalloc("dpni_rx_err",
404 			sizeof(struct dpaa2_queue), 0);
405 		if (!priv->rx_err_vq)
406 			goto fail;
407 
408 		dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
409 		dpaa2_q->q_storage = rte_malloc("err_dq_storage",
410 					sizeof(struct queue_storage_info_t) *
411 					RTE_MAX_LCORE,
412 					RTE_CACHE_LINE_SIZE);
413 		if (!dpaa2_q->q_storage)
414 			goto fail;
415 
416 		memset(dpaa2_q->q_storage, 0,
417 		       sizeof(struct queue_storage_info_t));
418 		for (i = 0; i < RTE_MAX_LCORE; i++)
419 			if (dpaa2_alloc_dq_storage(&dpaa2_q->q_storage[i]))
420 				goto fail;
421 	}
422 
423 	for (i = 0; i < priv->nb_tx_queues; i++) {
424 		mc_q->eth_data = dev->data;
425 		mc_q->flow_id = 0xffff;
426 		priv->tx_vq[i] = mc_q++;
427 		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
428 		dpaa2_q->cscn = rte_malloc(NULL,
429 					   sizeof(struct qbman_result), 16);
430 		if (!dpaa2_q->cscn)
431 			goto fail_tx;
432 	}
433 
434 	if (priv->flags & DPAA2_TX_CONF_ENABLE) {
435 		/*Setup tx confirmation queues*/
436 		for (i = 0; i < priv->nb_tx_queues; i++) {
437 			mc_q->eth_data = dev->data;
438 			mc_q->tc_index = i;
439 			mc_q->flow_id = 0;
440 			priv->tx_conf_vq[i] = mc_q++;
441 			dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
442 			dpaa2_q->q_storage =
443 				rte_malloc("dq_storage",
444 					sizeof(struct queue_storage_info_t),
445 					RTE_CACHE_LINE_SIZE);
446 			if (!dpaa2_q->q_storage)
447 				goto fail_tx_conf;
448 
449 			memset(dpaa2_q->q_storage, 0,
450 			       sizeof(struct queue_storage_info_t));
451 			if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
452 				goto fail_tx_conf;
453 		}
454 	}
455 
456 	vq_id = 0;
457 	for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
458 		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
459 		mcq->tc_index = dist_idx / num_rxqueue_per_tc;
460 		mcq->flow_id = dist_idx % num_rxqueue_per_tc;
461 		vq_id++;
462 	}
463 
464 	return 0;
465 fail_tx_conf:
466 	i -= 1;
467 	while (i >= 0) {
468 		dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
469 		rte_free(dpaa2_q->q_storage);
470 		priv->tx_conf_vq[i--] = NULL;
471 	}
472 	i = priv->nb_tx_queues;
473 fail_tx:
474 	i -= 1;
475 	while (i >= 0) {
476 		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
477 		rte_free(dpaa2_q->cscn);
478 		priv->tx_vq[i--] = NULL;
479 	}
480 	i = priv->nb_rx_queues;
481 fail:
482 	i -= 1;
483 	mc_q = priv->rx_vq[0];
484 	while (i >= 0) {
485 		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
486 		dpaa2_free_dq_storage(dpaa2_q->q_storage);
487 		rte_free(dpaa2_q->q_storage);
488 		priv->rx_vq[i--] = NULL;
489 	}
490 
491 	if (dpaa2_enable_err_queue) {
492 		dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
493 		if (dpaa2_q->q_storage)
494 			dpaa2_free_dq_storage(dpaa2_q->q_storage);
495 		rte_free(dpaa2_q->q_storage);
496 	}
497 
498 	rte_free(mc_q);
499 	return -1;
500 }
501 
502 static void
503 dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
504 {
505 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
506 	struct dpaa2_queue *dpaa2_q;
507 	int i;
508 
509 	PMD_INIT_FUNC_TRACE();
510 
511 	/* Queue allocation base */
512 	if (priv->rx_vq[0]) {
513 		/* cleaning up queue storage */
514 		for (i = 0; i < priv->nb_rx_queues; i++) {
515 			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
516 			rte_free(dpaa2_q->q_storage);
517 		}
518 		/* cleanup tx queue cscn */
519 		for (i = 0; i < priv->nb_tx_queues; i++) {
520 			dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
521 			rte_free(dpaa2_q->cscn);
522 		}
523 		if (priv->flags & DPAA2_TX_CONF_ENABLE) {
524 			/* cleanup tx conf queue storage */
525 			for (i = 0; i < priv->nb_tx_queues; i++) {
526 				dpaa2_q = (struct dpaa2_queue *)
527 						priv->tx_conf_vq[i];
528 				rte_free(dpaa2_q->q_storage);
529 			}
530 		}
531 		/*free memory for all queues (RX+TX) */
532 		rte_free(priv->rx_vq[0]);
533 		priv->rx_vq[0] = NULL;
534 	}
535 }
536 
537 static int
538 dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
539 {
540 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
541 	struct fsl_mc_io *dpni = dev->process_private;
542 	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
543 	uint64_t rx_offloads = eth_conf->rxmode.offloads;
544 	uint64_t tx_offloads = eth_conf->txmode.offloads;
545 	int rx_l3_csum_offload = false;
546 	int rx_l4_csum_offload = false;
547 	int tx_l3_csum_offload = false;
548 	int tx_l4_csum_offload = false;
549 	int ret, tc_index;
550 	uint32_t max_rx_pktlen;
551 
552 	PMD_INIT_FUNC_TRACE();
553 
554 	/* Rx offloads which are enabled by default */
555 	if (dev_rx_offloads_nodis & ~rx_offloads) {
556 		DPAA2_PMD_INFO(
557 		"Some of rx offloads enabled by default - requested 0x%" PRIx64
558 		" fixed are 0x%" PRIx64,
559 		rx_offloads, dev_rx_offloads_nodis);
560 	}
561 
562 	/* Tx offloads which are enabled by default */
563 	if (dev_tx_offloads_nodis & ~tx_offloads) {
564 		DPAA2_PMD_INFO(
565 		"Some of tx offloads enabled by default - requested 0x%" PRIx64
566 		" fixed are 0x%" PRIx64,
567 		tx_offloads, dev_tx_offloads_nodis);
568 	}
569 
570 	max_rx_pktlen = eth_conf->rxmode.mtu + RTE_ETHER_HDR_LEN +
571 				RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
572 	if (max_rx_pktlen <= DPAA2_MAX_RX_PKT_LEN) {
573 		ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
574 			priv->token, max_rx_pktlen - RTE_ETHER_CRC_LEN);
575 		if (ret != 0) {
576 			DPAA2_PMD_ERR("Unable to set mtu. check config");
577 			return ret;
578 		}
579 		DPAA2_PMD_INFO("MTU configured for the device: %d",
580 				dev->data->mtu);
581 	} else {
582 		return -1;
583 	}
584 
585 	if (eth_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
586 		for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
587 			ret = dpaa2_setup_flow_dist(dev,
588 					eth_conf->rx_adv_conf.rss_conf.rss_hf,
589 					tc_index);
590 			if (ret) {
591 				DPAA2_PMD_ERR(
592 					"Unable to set flow distribution on tc%d."
593 					"Check queue config", tc_index);
594 				return ret;
595 			}
596 		}
597 	}
598 
599 	if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
600 		rx_l3_csum_offload = true;
601 
602 	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) ||
603 		(rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) ||
604 		(rx_offloads & RTE_ETH_RX_OFFLOAD_SCTP_CKSUM))
605 		rx_l4_csum_offload = true;
606 
607 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
608 			       DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload);
609 	if (ret) {
610 		DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret);
611 		return ret;
612 	}
613 
614 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
615 			       DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload);
616 	if (ret) {
617 		DPAA2_PMD_ERR("Error to get RX l4 csum:Error = %d", ret);
618 		return ret;
619 	}
620 
621 #if !defined(RTE_LIBRTE_IEEE1588)
622 	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
623 #endif
624 	{
625 		ret = rte_mbuf_dyn_rx_timestamp_register(
626 				&dpaa2_timestamp_dynfield_offset,
627 				&dpaa2_timestamp_rx_dynflag);
628 		if (ret != 0) {
629 			DPAA2_PMD_ERR("Error to register timestamp field/flag");
630 			return -rte_errno;
631 		}
632 		dpaa2_enable_ts[dev->data->port_id] = true;
633 	}
634 
635 	if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
636 		tx_l3_csum_offload = true;
637 
638 	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) ||
639 		(tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ||
640 		(tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM))
641 		tx_l4_csum_offload = true;
642 
643 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
644 			       DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload);
645 	if (ret) {
646 		DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret);
647 		return ret;
648 	}
649 
650 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
651 			       DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload);
652 	if (ret) {
653 		DPAA2_PMD_ERR("Error to get TX l4 csum:Error = %d", ret);
654 		return ret;
655 	}
656 
657 	/* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in
658 	 * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC]
659 	 * to 0 for LS2 in the hardware thus disabling data/annotation
660 	 * stashing. For LX2 this is fixed in hardware and thus hash result and
661 	 * parse results can be received in FD using this option.
662 	 */
663 	if (dpaa2_svr_family == SVR_LX2160A) {
664 		ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
665 				       DPNI_FLCTYPE_HASH, true);
666 		if (ret) {
667 			DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret);
668 			return ret;
669 		}
670 	}
671 
672 	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
673 		dpaa2_vlan_offload_set(dev, RTE_ETH_VLAN_FILTER_MASK);
674 
675 	if (eth_conf->lpbk_mode) {
676 		ret = dpaa2_dev_recycle_config(dev);
677 		if (ret) {
678 			DPAA2_PMD_ERR("Error to configure %s to recycle port.",
679 				dev->data->name);
680 
681 			return ret;
682 		}
683 	} else {
684 		/** User may disable loopback mode by calling
685 		 * "dev_configure" with lpbk_mode cleared.
686 		 * No matter the port was configured recycle or not,
687 		 * recycle de-configure is called here.
688 		 * If port is not recycled, the de-configure will return directly.
689 		 */
690 		ret = dpaa2_dev_recycle_deconfig(dev);
691 		if (ret) {
692 			DPAA2_PMD_ERR("Error to de-configure recycle port %s.",
693 				dev->data->name);
694 
695 			return ret;
696 		}
697 	}
698 
699 	dpaa2_tm_init(dev);
700 
701 	return 0;
702 }
703 
704 /* Function to setup RX flow information. It contains traffic class ID,
705  * flow ID, destination configuration etc.
706  */
707 static int
708 dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
709 			 uint16_t rx_queue_id,
710 			 uint16_t nb_rx_desc,
711 			 unsigned int socket_id __rte_unused,
712 			 const struct rte_eth_rxconf *rx_conf,
713 			 struct rte_mempool *mb_pool)
714 {
715 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
716 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
717 	struct dpaa2_queue *dpaa2_q;
718 	struct dpni_queue cfg;
719 	uint8_t options = 0;
720 	uint8_t flow_id;
721 	uint32_t bpid;
722 	int i, ret;
723 
724 	PMD_INIT_FUNC_TRACE();
725 
726 	DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
727 			dev, rx_queue_id, mb_pool, rx_conf);
728 
729 	total_nb_rx_desc += nb_rx_desc;
730 	if (total_nb_rx_desc > MAX_NB_RX_DESC) {
731 		DPAA2_PMD_WARN("\nTotal nb_rx_desc exceeds %d limit. Please use Normal buffers",
732 			       MAX_NB_RX_DESC);
733 		DPAA2_PMD_WARN("To use Normal buffers, run 'export DPNI_NORMAL_BUF=1' before running dynamic_dpl.sh script");
734 	}
735 
736 	/* Rx deferred start is not supported */
737 	if (rx_conf->rx_deferred_start) {
738 		DPAA2_PMD_ERR("%p:Rx deferred start not supported",
739 				(void *)dev);
740 		return -EINVAL;
741 	}
742 
743 	if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
744 		if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
745 			ret = rte_dpaa2_bpid_info_init(mb_pool);
746 			if (ret)
747 				return ret;
748 		}
749 		bpid = mempool_to_bpid(mb_pool);
750 		ret = dpaa2_attach_bp_list(priv, dpni,
751 				rte_dpaa2_bpid_info[bpid].bp_list);
752 		if (ret)
753 			return ret;
754 	}
755 	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
756 	dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
757 	dpaa2_q->bp_array = rte_dpaa2_bpid_info;
758 	dpaa2_q->nb_desc = UINT16_MAX;
759 	dpaa2_q->offloads = rx_conf->offloads;
760 
761 	/*Get the flow id from given VQ id*/
762 	flow_id = dpaa2_q->flow_id;
763 	memset(&cfg, 0, sizeof(struct dpni_queue));
764 
765 	options = options | DPNI_QUEUE_OPT_USER_CTX;
766 	cfg.user_context = (size_t)(dpaa2_q);
767 
768 	/* check if a private cgr available. */
769 	for (i = 0; i < priv->max_cgs; i++) {
770 		if (!priv->cgid_in_use[i]) {
771 			priv->cgid_in_use[i] = 1;
772 			break;
773 		}
774 	}
775 
776 	if (i < priv->max_cgs) {
777 		options |= DPNI_QUEUE_OPT_SET_CGID;
778 		cfg.cgid = i;
779 		dpaa2_q->cgid = cfg.cgid;
780 	} else {
781 		dpaa2_q->cgid = 0xff;
782 	}
783 
784 	/*if ls2088 or rev2 device, enable the stashing */
785 
786 	if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) {
787 		options |= DPNI_QUEUE_OPT_FLC;
788 		cfg.flc.stash_control = true;
789 		cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
790 		/* 00 00 00 - last 6 bit represent annotation, context stashing,
791 		 * data stashing setting 01 01 00 (0x14)
792 		 * (in following order ->DS AS CS)
793 		 * to enable 1 line data, 1 line annotation.
794 		 * For LX2, this setting should be 01 00 00 (0x10)
795 		 */
796 		if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A)
797 			cfg.flc.value |= 0x10;
798 		else
799 			cfg.flc.value |= 0x14;
800 	}
801 	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
802 			     dpaa2_q->tc_index, flow_id, options, &cfg);
803 	if (ret) {
804 		DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret);
805 		return -1;
806 	}
807 
808 	if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
809 		struct dpni_taildrop taildrop;
810 
811 		taildrop.enable = 1;
812 		dpaa2_q->nb_desc = nb_rx_desc;
813 		/* Private CGR will use tail drop length as nb_rx_desc.
814 		 * for rest cases we can use standard byte based tail drop.
815 		 * There is no HW restriction, but number of CGRs are limited,
816 		 * hence this restriction is placed.
817 		 */
818 		if (dpaa2_q->cgid != 0xff) {
819 			/*enabling per rx queue congestion control */
820 			taildrop.threshold = nb_rx_desc;
821 			taildrop.units = DPNI_CONGESTION_UNIT_FRAMES;
822 			taildrop.oal = 0;
823 			DPAA2_PMD_DEBUG("Enabling CG Tail Drop on queue = %d",
824 					rx_queue_id);
825 			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
826 						DPNI_CP_CONGESTION_GROUP,
827 						DPNI_QUEUE_RX,
828 						dpaa2_q->tc_index,
829 						dpaa2_q->cgid, &taildrop);
830 		} else {
831 			/*enabling per rx queue congestion control */
832 			taildrop.threshold = CONG_THRESHOLD_RX_BYTES_Q;
833 			taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
834 			taildrop.oal = CONG_RX_OAL;
835 			DPAA2_PMD_DEBUG("Enabling Byte based Drop on queue= %d",
836 					rx_queue_id);
837 			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
838 						DPNI_CP_QUEUE, DPNI_QUEUE_RX,
839 						dpaa2_q->tc_index, flow_id,
840 						&taildrop);
841 		}
842 		if (ret) {
843 			DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
844 				      ret);
845 			return -1;
846 		}
847 	} else { /* Disable tail Drop */
848 		struct dpni_taildrop taildrop = {0};
849 		DPAA2_PMD_INFO("Tail drop is disabled on queue");
850 
851 		taildrop.enable = 0;
852 		if (dpaa2_q->cgid != 0xff) {
853 			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
854 					DPNI_CP_CONGESTION_GROUP, DPNI_QUEUE_RX,
855 					dpaa2_q->tc_index,
856 					dpaa2_q->cgid, &taildrop);
857 		} else {
858 			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
859 					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
860 					dpaa2_q->tc_index, flow_id, &taildrop);
861 		}
862 		if (ret) {
863 			DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
864 				      ret);
865 			return -1;
866 		}
867 	}
868 
869 	dev->data->rx_queues[rx_queue_id] = dpaa2_q;
870 	return 0;
871 }
872 
873 static int
874 dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
875 			 uint16_t tx_queue_id,
876 			 uint16_t nb_tx_desc,
877 			 unsigned int socket_id __rte_unused,
878 			 const struct rte_eth_txconf *tx_conf)
879 {
880 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
881 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
882 		priv->tx_vq[tx_queue_id];
883 	struct dpaa2_queue *dpaa2_tx_conf_q = (struct dpaa2_queue *)
884 		priv->tx_conf_vq[tx_queue_id];
885 	struct fsl_mc_io *dpni = dev->process_private;
886 	struct dpni_queue tx_conf_cfg;
887 	struct dpni_queue tx_flow_cfg;
888 	uint8_t options = 0, flow_id;
889 	uint16_t channel_id;
890 	struct dpni_queue_id qid;
891 	uint32_t tc_id;
892 	int ret;
893 
894 	PMD_INIT_FUNC_TRACE();
895 
896 	/* Tx deferred start is not supported */
897 	if (tx_conf->tx_deferred_start) {
898 		DPAA2_PMD_ERR("%p:Tx deferred start not supported",
899 				(void *)dev);
900 		return -EINVAL;
901 	}
902 
903 	dpaa2_q->nb_desc = UINT16_MAX;
904 	dpaa2_q->offloads = tx_conf->offloads;
905 
906 	/* Return if queue already configured */
907 	if (dpaa2_q->flow_id != 0xffff) {
908 		dev->data->tx_queues[tx_queue_id] = dpaa2_q;
909 		return 0;
910 	}
911 
912 	memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
913 	memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
914 
915 	if (tx_queue_id == 0) {
916 		/*Set tx-conf and error configuration*/
917 		if (priv->flags & DPAA2_TX_CONF_ENABLE)
918 			ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
919 							    priv->token,
920 							    DPNI_CONF_AFFINE);
921 		else
922 			ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
923 							    priv->token,
924 							    DPNI_CONF_DISABLE);
925 		if (ret) {
926 			DPAA2_PMD_ERR("Error in set tx conf mode settings: "
927 				      "err=%d", ret);
928 			return -1;
929 		}
930 	}
931 
932 	tc_id = tx_queue_id % priv->num_tx_tc;
933 	channel_id = (uint8_t)(tx_queue_id / priv->num_tx_tc) % priv->num_channels;
934 	flow_id = 0;
935 
936 	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
937 			((channel_id << 8) | tc_id), flow_id, options, &tx_flow_cfg);
938 	if (ret) {
939 		DPAA2_PMD_ERR("Error in setting the tx flow: "
940 			"tc_id=%d, flow=%d err=%d",
941 			tc_id, flow_id, ret);
942 			return -1;
943 	}
944 
945 	dpaa2_q->flow_id = flow_id;
946 
947 	dpaa2_q->tc_index = tc_id;
948 
949 	ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
950 			     DPNI_QUEUE_TX, ((channel_id << 8) | dpaa2_q->tc_index),
951 			     dpaa2_q->flow_id, &tx_flow_cfg, &qid);
952 	if (ret) {
953 		DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
954 		return -1;
955 	}
956 	dpaa2_q->fqid = qid.fqid;
957 
958 	if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
959 		struct dpni_congestion_notification_cfg cong_notif_cfg = {0};
960 
961 		dpaa2_q->nb_desc = nb_tx_desc;
962 
963 		cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
964 		cong_notif_cfg.threshold_entry = nb_tx_desc;
965 		/* Notify that the queue is not congested when the data in
966 		 * the queue is below this threshold.(90% of value)
967 		 */
968 		cong_notif_cfg.threshold_exit = (nb_tx_desc * 9) / 10;
969 		cong_notif_cfg.message_ctx = 0;
970 		cong_notif_cfg.message_iova =
971 				(size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
972 		cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
973 		cong_notif_cfg.notification_mode =
974 					 DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
975 					 DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
976 					 DPNI_CONG_OPT_COHERENT_WRITE;
977 		cong_notif_cfg.cg_point = DPNI_CP_QUEUE;
978 
979 		ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
980 						       priv->token,
981 						       DPNI_QUEUE_TX,
982 						       ((channel_id << 8) | tc_id),
983 						       &cong_notif_cfg);
984 		if (ret) {
985 			DPAA2_PMD_ERR(
986 			   "Error in setting tx congestion notification: "
987 			   "err=%d", ret);
988 			return -ret;
989 		}
990 	}
991 	dpaa2_q->cb_eqresp_free = dpaa2_dev_free_eqresp_buf;
992 	dev->data->tx_queues[tx_queue_id] = dpaa2_q;
993 
994 	if (priv->flags & DPAA2_TX_CONF_ENABLE) {
995 		dpaa2_q->tx_conf_queue = dpaa2_tx_conf_q;
996 		options = options | DPNI_QUEUE_OPT_USER_CTX;
997 		tx_conf_cfg.user_context = (size_t)(dpaa2_q);
998 		ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
999 			     DPNI_QUEUE_TX_CONFIRM, ((channel_id << 8) | dpaa2_tx_conf_q->tc_index),
1000 			     dpaa2_tx_conf_q->flow_id, options, &tx_conf_cfg);
1001 		if (ret) {
1002 			DPAA2_PMD_ERR("Error in setting the tx conf flow: "
1003 			      "tc_index=%d, flow=%d err=%d",
1004 			      dpaa2_tx_conf_q->tc_index,
1005 			      dpaa2_tx_conf_q->flow_id, ret);
1006 			return -1;
1007 		}
1008 
1009 		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
1010 			     DPNI_QUEUE_TX_CONFIRM, ((channel_id << 8) | dpaa2_tx_conf_q->tc_index),
1011 			     dpaa2_tx_conf_q->flow_id, &tx_conf_cfg, &qid);
1012 		if (ret) {
1013 			DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
1014 			return -1;
1015 		}
1016 		dpaa2_tx_conf_q->fqid = qid.fqid;
1017 	}
1018 	return 0;
1019 }
1020 
1021 static void
1022 dpaa2_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1023 {
1024 	struct dpaa2_queue *dpaa2_q = dev->data->rx_queues[rx_queue_id];
1025 	struct dpaa2_dev_priv *priv = dpaa2_q->eth_data->dev_private;
1026 	struct fsl_mc_io *dpni =
1027 		(struct fsl_mc_io *)priv->eth_dev->process_private;
1028 	uint8_t options = 0;
1029 	int ret;
1030 	struct dpni_queue cfg;
1031 
1032 	memset(&cfg, 0, sizeof(struct dpni_queue));
1033 	PMD_INIT_FUNC_TRACE();
1034 
1035 	total_nb_rx_desc -= dpaa2_q->nb_desc;
1036 
1037 	if (dpaa2_q->cgid != 0xff) {
1038 		options = DPNI_QUEUE_OPT_CLEAR_CGID;
1039 		cfg.cgid = dpaa2_q->cgid;
1040 
1041 		ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
1042 				     DPNI_QUEUE_RX,
1043 				     dpaa2_q->tc_index, dpaa2_q->flow_id,
1044 				     options, &cfg);
1045 		if (ret)
1046 			DPAA2_PMD_ERR("Unable to clear CGR from q=%u err=%d",
1047 					dpaa2_q->fqid, ret);
1048 		priv->cgid_in_use[dpaa2_q->cgid] = 0;
1049 		dpaa2_q->cgid = 0xff;
1050 	}
1051 }
1052 
1053 static uint32_t
1054 dpaa2_dev_rx_queue_count(void *rx_queue)
1055 {
1056 	int32_t ret;
1057 	struct dpaa2_queue *dpaa2_q;
1058 	struct qbman_swp *swp;
1059 	struct qbman_fq_query_np_rslt state;
1060 	uint32_t frame_cnt = 0;
1061 
1062 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1063 		ret = dpaa2_affine_qbman_swp();
1064 		if (ret) {
1065 			DPAA2_PMD_ERR(
1066 				"Failed to allocate IO portal, tid: %d\n",
1067 				rte_gettid());
1068 			return -EINVAL;
1069 		}
1070 	}
1071 	swp = DPAA2_PER_LCORE_PORTAL;
1072 
1073 	dpaa2_q = rx_queue;
1074 
1075 	if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
1076 		frame_cnt = qbman_fq_state_frame_count(&state);
1077 		DPAA2_PMD_DP_DEBUG("RX frame count for q(%p) is %u",
1078 				rx_queue, frame_cnt);
1079 	}
1080 	return frame_cnt;
1081 }
1082 
1083 static const uint32_t *
1084 dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
1085 {
1086 	static const uint32_t ptypes[] = {
1087 		/*todo -= add more types */
1088 		RTE_PTYPE_L2_ETHER,
1089 		RTE_PTYPE_L3_IPV4,
1090 		RTE_PTYPE_L3_IPV4_EXT,
1091 		RTE_PTYPE_L3_IPV6,
1092 		RTE_PTYPE_L3_IPV6_EXT,
1093 		RTE_PTYPE_L4_TCP,
1094 		RTE_PTYPE_L4_UDP,
1095 		RTE_PTYPE_L4_SCTP,
1096 		RTE_PTYPE_L4_ICMP,
1097 		RTE_PTYPE_UNKNOWN
1098 	};
1099 
1100 	if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx ||
1101 		dev->rx_pkt_burst == dpaa2_dev_rx ||
1102 		dev->rx_pkt_burst == dpaa2_dev_loopback_rx)
1103 		return ptypes;
1104 	return NULL;
1105 }
1106 
1107 /**
1108  * Dpaa2 link Interrupt handler
1109  *
1110  * @param param
1111  *  The address of parameter (struct rte_eth_dev *) registered before.
1112  *
1113  * @return
1114  *  void
1115  */
1116 static void
1117 dpaa2_interrupt_handler(void *param)
1118 {
1119 	struct rte_eth_dev *dev = param;
1120 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1121 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1122 	int ret;
1123 	int irq_index = DPNI_IRQ_INDEX;
1124 	unsigned int status = 0, clear = 0;
1125 
1126 	PMD_INIT_FUNC_TRACE();
1127 
1128 	if (dpni == NULL) {
1129 		DPAA2_PMD_ERR("dpni is NULL");
1130 		return;
1131 	}
1132 
1133 	ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token,
1134 				  irq_index, &status);
1135 	if (unlikely(ret)) {
1136 		DPAA2_PMD_ERR("Can't get irq status (err %d)", ret);
1137 		clear = 0xffffffff;
1138 		goto out;
1139 	}
1140 
1141 	if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
1142 		clear = DPNI_IRQ_EVENT_LINK_CHANGED;
1143 		dpaa2_dev_link_update(dev, 0);
1144 		/* calling all the apps registered for link status event */
1145 		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1146 	}
1147 out:
1148 	ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
1149 				    irq_index, clear);
1150 	if (unlikely(ret))
1151 		DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret);
1152 }
1153 
1154 static int
1155 dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable)
1156 {
1157 	int err = 0;
1158 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1159 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1160 	int irq_index = DPNI_IRQ_INDEX;
1161 	unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED;
1162 
1163 	PMD_INIT_FUNC_TRACE();
1164 
1165 	err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token,
1166 				irq_index, mask);
1167 	if (err < 0) {
1168 		DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err,
1169 			      strerror(-err));
1170 		return err;
1171 	}
1172 
1173 	err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token,
1174 				  irq_index, enable);
1175 	if (err < 0)
1176 		DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err,
1177 			      strerror(-err));
1178 
1179 	return err;
1180 }
1181 
1182 static int
1183 dpaa2_dev_start(struct rte_eth_dev *dev)
1184 {
1185 	struct rte_device *rdev = dev->device;
1186 	struct rte_dpaa2_device *dpaa2_dev;
1187 	struct rte_eth_dev_data *data = dev->data;
1188 	struct dpaa2_dev_priv *priv = data->dev_private;
1189 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1190 	struct dpni_queue cfg;
1191 	struct dpni_error_cfg	err_cfg;
1192 	struct dpni_queue_id qid;
1193 	struct dpaa2_queue *dpaa2_q;
1194 	int ret, i;
1195 	struct rte_intr_handle *intr_handle;
1196 
1197 	dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
1198 	intr_handle = dpaa2_dev->intr_handle;
1199 
1200 	PMD_INIT_FUNC_TRACE();
1201 	ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
1202 	if (ret) {
1203 		DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d",
1204 			      priv->hw_id, ret);
1205 		return ret;
1206 	}
1207 
1208 	/* Power up the phy. Needed to make the link go UP */
1209 	dpaa2_dev_set_link_up(dev);
1210 
1211 	for (i = 0; i < data->nb_rx_queues; i++) {
1212 		dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
1213 		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
1214 				     DPNI_QUEUE_RX, dpaa2_q->tc_index,
1215 				       dpaa2_q->flow_id, &cfg, &qid);
1216 		if (ret) {
1217 			DPAA2_PMD_ERR("Error in getting flow information: "
1218 				      "err=%d", ret);
1219 			return ret;
1220 		}
1221 		dpaa2_q->fqid = qid.fqid;
1222 	}
1223 
1224 	if (dpaa2_enable_err_queue) {
1225 		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
1226 				     DPNI_QUEUE_RX_ERR, 0, 0, &cfg, &qid);
1227 		if (ret) {
1228 			DPAA2_PMD_ERR("Error getting rx err flow information: err=%d",
1229 						ret);
1230 			return ret;
1231 		}
1232 		dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
1233 		dpaa2_q->fqid = qid.fqid;
1234 		dpaa2_q->eth_data = dev->data;
1235 
1236 		err_cfg.errors =  DPNI_ERROR_DISC;
1237 		err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
1238 	} else {
1239 		/* checksum errors, send them to normal path
1240 		 * and set it in annotation
1241 		 */
1242 		err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
1243 
1244 		/* if packet with parse error are not to be dropped */
1245 		err_cfg.errors |= DPNI_ERROR_PHE;
1246 
1247 		err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
1248 	}
1249 	err_cfg.set_frame_annotation = true;
1250 
1251 	ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
1252 				       priv->token, &err_cfg);
1253 	if (ret) {
1254 		DPAA2_PMD_ERR("Error to dpni_set_errors_behavior: code = %d",
1255 			      ret);
1256 		return ret;
1257 	}
1258 
1259 	/* if the interrupts were configured on this devices*/
1260 	if (intr_handle && rte_intr_fd_get(intr_handle) &&
1261 	    dev->data->dev_conf.intr_conf.lsc != 0) {
1262 		/* Registering LSC interrupt handler */
1263 		rte_intr_callback_register(intr_handle,
1264 					   dpaa2_interrupt_handler,
1265 					   (void *)dev);
1266 
1267 		/* enable vfio intr/eventfd mapping
1268 		 * Interrupt index 0 is required, so we can not use
1269 		 * rte_intr_enable.
1270 		 */
1271 		rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX);
1272 
1273 		/* enable dpni_irqs */
1274 		dpaa2_eth_setup_irqs(dev, 1);
1275 	}
1276 
1277 	/* Change the tx burst function if ordered queues are used */
1278 	if (priv->en_ordered)
1279 		dev->tx_pkt_burst = dpaa2_dev_tx_ordered;
1280 
1281 	return 0;
1282 }
1283 
1284 /**
1285  *  This routine disables all traffic on the adapter by issuing a
1286  *  global reset on the MAC.
1287  */
1288 static int
1289 dpaa2_dev_stop(struct rte_eth_dev *dev)
1290 {
1291 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1292 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1293 	int ret;
1294 	struct rte_eth_link link;
1295 	struct rte_device *rdev = dev->device;
1296 	struct rte_intr_handle *intr_handle;
1297 	struct rte_dpaa2_device *dpaa2_dev;
1298 
1299 	dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
1300 	intr_handle = dpaa2_dev->intr_handle;
1301 
1302 	PMD_INIT_FUNC_TRACE();
1303 
1304 	/* reset interrupt callback  */
1305 	if (intr_handle && rte_intr_fd_get(intr_handle) &&
1306 	    dev->data->dev_conf.intr_conf.lsc != 0) {
1307 		/*disable dpni irqs */
1308 		dpaa2_eth_setup_irqs(dev, 0);
1309 
1310 		/* disable vfio intr before callback unregister */
1311 		rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX);
1312 
1313 		/* Unregistering LSC interrupt handler */
1314 		rte_intr_callback_unregister(intr_handle,
1315 					     dpaa2_interrupt_handler,
1316 					     (void *)dev);
1317 	}
1318 
1319 	dpaa2_dev_set_link_down(dev);
1320 
1321 	ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
1322 	if (ret) {
1323 		DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev",
1324 			      ret, priv->hw_id);
1325 		return ret;
1326 	}
1327 
1328 	/* clear the recorded link status */
1329 	memset(&link, 0, sizeof(link));
1330 	rte_eth_linkstatus_set(dev, &link);
1331 
1332 	return 0;
1333 }
1334 
1335 static int
1336 dpaa2_dev_close(struct rte_eth_dev *dev)
1337 {
1338 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1339 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1340 	int i, ret;
1341 	struct rte_eth_link link;
1342 
1343 	PMD_INIT_FUNC_TRACE();
1344 
1345 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1346 		return 0;
1347 
1348 	if (!dpni) {
1349 		DPAA2_PMD_WARN("Already closed or not started");
1350 		return -1;
1351 	}
1352 
1353 	dpaa2_tm_deinit(dev);
1354 	dpaa2_flow_clean(dev);
1355 	/* Clean the device first */
1356 	ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
1357 	if (ret) {
1358 		DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret);
1359 		return -1;
1360 	}
1361 
1362 	memset(&link, 0, sizeof(link));
1363 	rte_eth_linkstatus_set(dev, &link);
1364 
1365 	/* Free private queues memory */
1366 	dpaa2_free_rx_tx_queues(dev);
1367 	/* Close the device at underlying layer*/
1368 	ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
1369 	if (ret) {
1370 		DPAA2_PMD_ERR("Failure closing dpni device with err code %d",
1371 			      ret);
1372 	}
1373 
1374 	/* Free the allocated memory for ethernet private data and dpni*/
1375 	priv->hw = NULL;
1376 	dev->process_private = NULL;
1377 	rte_free(dpni);
1378 
1379 	for (i = 0; i < MAX_TCS; i++)
1380 		rte_free((void *)(size_t)priv->extract.tc_extract_param[i]);
1381 
1382 	if (priv->extract.qos_extract_param)
1383 		rte_free((void *)(size_t)priv->extract.qos_extract_param);
1384 
1385 	DPAA2_PMD_INFO("%s: netdev deleted", dev->data->name);
1386 	return 0;
1387 }
1388 
1389 static int
1390 dpaa2_dev_promiscuous_enable(
1391 		struct rte_eth_dev *dev)
1392 {
1393 	int ret;
1394 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1395 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1396 
1397 	PMD_INIT_FUNC_TRACE();
1398 
1399 	if (dpni == NULL) {
1400 		DPAA2_PMD_ERR("dpni is NULL");
1401 		return -ENODEV;
1402 	}
1403 
1404 	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
1405 	if (ret < 0)
1406 		DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret);
1407 
1408 	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
1409 	if (ret < 0)
1410 		DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret);
1411 
1412 	return ret;
1413 }
1414 
1415 static int
1416 dpaa2_dev_promiscuous_disable(
1417 		struct rte_eth_dev *dev)
1418 {
1419 	int ret;
1420 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1421 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1422 
1423 	PMD_INIT_FUNC_TRACE();
1424 
1425 	if (dpni == NULL) {
1426 		DPAA2_PMD_ERR("dpni is NULL");
1427 		return -ENODEV;
1428 	}
1429 
1430 	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
1431 	if (ret < 0)
1432 		DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret);
1433 
1434 	if (dev->data->all_multicast == 0) {
1435 		ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
1436 						 priv->token, false);
1437 		if (ret < 0)
1438 			DPAA2_PMD_ERR("Unable to disable M promisc mode %d",
1439 				      ret);
1440 	}
1441 
1442 	return ret;
1443 }
1444 
1445 static int
1446 dpaa2_dev_allmulticast_enable(
1447 		struct rte_eth_dev *dev)
1448 {
1449 	int ret;
1450 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1451 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1452 
1453 	PMD_INIT_FUNC_TRACE();
1454 
1455 	if (dpni == NULL) {
1456 		DPAA2_PMD_ERR("dpni is NULL");
1457 		return -ENODEV;
1458 	}
1459 
1460 	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
1461 	if (ret < 0)
1462 		DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret);
1463 
1464 	return ret;
1465 }
1466 
1467 static int
1468 dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
1469 {
1470 	int ret;
1471 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1472 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1473 
1474 	PMD_INIT_FUNC_TRACE();
1475 
1476 	if (dpni == NULL) {
1477 		DPAA2_PMD_ERR("dpni is NULL");
1478 		return -ENODEV;
1479 	}
1480 
1481 	/* must remain on for all promiscuous */
1482 	if (dev->data->promiscuous == 1)
1483 		return 0;
1484 
1485 	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
1486 	if (ret < 0)
1487 		DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret);
1488 
1489 	return ret;
1490 }
1491 
1492 static int
1493 dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1494 {
1495 	int ret;
1496 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1497 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1498 	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
1499 				+ VLAN_TAG_SIZE;
1500 
1501 	PMD_INIT_FUNC_TRACE();
1502 
1503 	if (dpni == NULL) {
1504 		DPAA2_PMD_ERR("dpni is NULL");
1505 		return -EINVAL;
1506 	}
1507 
1508 	/* Set the Max Rx frame length as 'mtu' +
1509 	 * Maximum Ethernet header length
1510 	 */
1511 	ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
1512 					frame_size - RTE_ETHER_CRC_LEN);
1513 	if (ret) {
1514 		DPAA2_PMD_ERR("Setting the max frame length failed");
1515 		return -1;
1516 	}
1517 	DPAA2_PMD_INFO("MTU configured for the device: %d", mtu);
1518 	return 0;
1519 }
1520 
1521 static int
1522 dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
1523 		       struct rte_ether_addr *addr,
1524 		       __rte_unused uint32_t index,
1525 		       __rte_unused uint32_t pool)
1526 {
1527 	int ret;
1528 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1529 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1530 
1531 	PMD_INIT_FUNC_TRACE();
1532 
1533 	if (dpni == NULL) {
1534 		DPAA2_PMD_ERR("dpni is NULL");
1535 		return -1;
1536 	}
1537 
1538 	ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, priv->token,
1539 				addr->addr_bytes, 0, 0, 0);
1540 	if (ret)
1541 		DPAA2_PMD_ERR(
1542 			"error: Adding the MAC ADDR failed: err = %d", ret);
1543 	return 0;
1544 }
1545 
1546 static void
1547 dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
1548 			  uint32_t index)
1549 {
1550 	int ret;
1551 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1552 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1553 	struct rte_eth_dev_data *data = dev->data;
1554 	struct rte_ether_addr *macaddr;
1555 
1556 	PMD_INIT_FUNC_TRACE();
1557 
1558 	macaddr = &data->mac_addrs[index];
1559 
1560 	if (dpni == NULL) {
1561 		DPAA2_PMD_ERR("dpni is NULL");
1562 		return;
1563 	}
1564 
1565 	ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
1566 				   priv->token, macaddr->addr_bytes);
1567 	if (ret)
1568 		DPAA2_PMD_ERR(
1569 			"error: Removing the MAC ADDR failed: err = %d", ret);
1570 }
1571 
1572 static int
1573 dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
1574 		       struct rte_ether_addr *addr)
1575 {
1576 	int ret;
1577 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1578 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1579 
1580 	PMD_INIT_FUNC_TRACE();
1581 
1582 	if (dpni == NULL) {
1583 		DPAA2_PMD_ERR("dpni is NULL");
1584 		return -EINVAL;
1585 	}
1586 
1587 	ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
1588 					priv->token, addr->addr_bytes);
1589 
1590 	if (ret)
1591 		DPAA2_PMD_ERR(
1592 			"error: Setting the MAC ADDR failed %d", ret);
1593 
1594 	return ret;
1595 }
1596 
1597 static
1598 int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
1599 			 struct rte_eth_stats *stats)
1600 {
1601 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1602 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1603 	int32_t  retcode;
1604 	uint8_t page0 = 0, page1 = 1, page2 = 2;
1605 	union dpni_statistics value;
1606 	int i;
1607 	struct dpaa2_queue *dpaa2_rxq, *dpaa2_txq;
1608 
1609 	memset(&value, 0, sizeof(union dpni_statistics));
1610 
1611 	PMD_INIT_FUNC_TRACE();
1612 
1613 	if (!dpni) {
1614 		DPAA2_PMD_ERR("dpni is NULL");
1615 		return -EINVAL;
1616 	}
1617 
1618 	if (!stats) {
1619 		DPAA2_PMD_ERR("stats is NULL");
1620 		return -EINVAL;
1621 	}
1622 
1623 	/*Get Counters from page_0*/
1624 	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1625 				      page0, 0, &value);
1626 	if (retcode)
1627 		goto err;
1628 
1629 	stats->ipackets = value.page_0.ingress_all_frames;
1630 	stats->ibytes = value.page_0.ingress_all_bytes;
1631 
1632 	/*Get Counters from page_1*/
1633 	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1634 				      page1, 0, &value);
1635 	if (retcode)
1636 		goto err;
1637 
1638 	stats->opackets = value.page_1.egress_all_frames;
1639 	stats->obytes = value.page_1.egress_all_bytes;
1640 
1641 	/*Get Counters from page_2*/
1642 	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1643 				      page2, 0, &value);
1644 	if (retcode)
1645 		goto err;
1646 
1647 	/* Ingress drop frame count due to configured rules */
1648 	stats->ierrors = value.page_2.ingress_filtered_frames;
1649 	/* Ingress drop frame count due to error */
1650 	stats->ierrors += value.page_2.ingress_discarded_frames;
1651 
1652 	stats->oerrors = value.page_2.egress_discarded_frames;
1653 	stats->imissed = value.page_2.ingress_nobuffer_discards;
1654 
1655 	/* Fill in per queue stats */
1656 	for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
1657 		(i < priv->nb_rx_queues || i < priv->nb_tx_queues); ++i) {
1658 		dpaa2_rxq = (struct dpaa2_queue *)priv->rx_vq[i];
1659 		dpaa2_txq = (struct dpaa2_queue *)priv->tx_vq[i];
1660 		if (dpaa2_rxq)
1661 			stats->q_ipackets[i] = dpaa2_rxq->rx_pkts;
1662 		if (dpaa2_txq)
1663 			stats->q_opackets[i] = dpaa2_txq->tx_pkts;
1664 
1665 		/* Byte counting is not implemented */
1666 		stats->q_ibytes[i]   = 0;
1667 		stats->q_obytes[i]   = 0;
1668 	}
1669 
1670 	return 0;
1671 
1672 err:
1673 	DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode);
1674 	return retcode;
1675 };
1676 
1677 static int
1678 dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1679 		     unsigned int n)
1680 {
1681 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1682 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1683 	int32_t  retcode;
1684 	union dpni_statistics value[5] = {};
1685 	unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);
1686 
1687 	if (n < num)
1688 		return num;
1689 
1690 	if (xstats == NULL)
1691 		return 0;
1692 
1693 	/* Get Counters from page_0*/
1694 	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1695 				      0, 0, &value[0]);
1696 	if (retcode)
1697 		goto err;
1698 
1699 	/* Get Counters from page_1*/
1700 	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1701 				      1, 0, &value[1]);
1702 	if (retcode)
1703 		goto err;
1704 
1705 	/* Get Counters from page_2*/
1706 	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1707 				      2, 0, &value[2]);
1708 	if (retcode)
1709 		goto err;
1710 
1711 	for (i = 0; i < priv->max_cgs; i++) {
1712 		if (!priv->cgid_in_use[i]) {
1713 			/* Get Counters from page_4*/
1714 			retcode = dpni_get_statistics(dpni, CMD_PRI_LOW,
1715 						      priv->token,
1716 						      4, 0, &value[4]);
1717 			if (retcode)
1718 				goto err;
1719 			break;
1720 		}
1721 	}
1722 
1723 	for (i = 0; i < num; i++) {
1724 		xstats[i].id = i;
1725 		xstats[i].value = value[dpaa2_xstats_strings[i].page_id].
1726 			raw.counter[dpaa2_xstats_strings[i].stats_id];
1727 	}
1728 	return i;
1729 err:
1730 	DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode);
1731 	return retcode;
1732 }
1733 
1734 static int
1735 dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1736 		       struct rte_eth_xstat_name *xstats_names,
1737 		       unsigned int limit)
1738 {
1739 	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1740 
1741 	if (limit < stat_cnt)
1742 		return stat_cnt;
1743 
1744 	if (xstats_names != NULL)
1745 		for (i = 0; i < stat_cnt; i++)
1746 			strlcpy(xstats_names[i].name,
1747 				dpaa2_xstats_strings[i].name,
1748 				sizeof(xstats_names[i].name));
1749 
1750 	return stat_cnt;
1751 }
1752 
1753 static int
1754 dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1755 		       uint64_t *values, unsigned int n)
1756 {
1757 	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1758 	uint64_t values_copy[stat_cnt];
1759 
1760 	if (!ids) {
1761 		struct dpaa2_dev_priv *priv = dev->data->dev_private;
1762 		struct fsl_mc_io *dpni =
1763 			(struct fsl_mc_io *)dev->process_private;
1764 		int32_t  retcode;
1765 		union dpni_statistics value[5] = {};
1766 
1767 		if (n < stat_cnt)
1768 			return stat_cnt;
1769 
1770 		if (!values)
1771 			return 0;
1772 
1773 		/* Get Counters from page_0*/
1774 		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1775 					      0, 0, &value[0]);
1776 		if (retcode)
1777 			return 0;
1778 
1779 		/* Get Counters from page_1*/
1780 		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1781 					      1, 0, &value[1]);
1782 		if (retcode)
1783 			return 0;
1784 
1785 		/* Get Counters from page_2*/
1786 		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1787 					      2, 0, &value[2]);
1788 		if (retcode)
1789 			return 0;
1790 
1791 		/* Get Counters from page_4*/
1792 		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1793 					      4, 0, &value[4]);
1794 		if (retcode)
1795 			return 0;
1796 
1797 		for (i = 0; i < stat_cnt; i++) {
1798 			values[i] = value[dpaa2_xstats_strings[i].page_id].
1799 				raw.counter[dpaa2_xstats_strings[i].stats_id];
1800 		}
1801 		return stat_cnt;
1802 	}
1803 
1804 	dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);
1805 
1806 	for (i = 0; i < n; i++) {
1807 		if (ids[i] >= stat_cnt) {
1808 			DPAA2_PMD_ERR("xstats id value isn't valid");
1809 			return -1;
1810 		}
1811 		values[i] = values_copy[ids[i]];
1812 	}
1813 	return n;
1814 }
1815 
1816 static int
1817 dpaa2_xstats_get_names_by_id(
1818 	struct rte_eth_dev *dev,
1819 	const uint64_t *ids,
1820 	struct rte_eth_xstat_name *xstats_names,
1821 	unsigned int limit)
1822 {
1823 	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1824 	struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
1825 
1826 	if (!ids)
1827 		return dpaa2_xstats_get_names(dev, xstats_names, limit);
1828 
1829 	dpaa2_xstats_get_names(dev, xstats_names_copy, limit);
1830 
1831 	for (i = 0; i < limit; i++) {
1832 		if (ids[i] >= stat_cnt) {
1833 			DPAA2_PMD_ERR("xstats id value isn't valid");
1834 			return -1;
1835 		}
1836 		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
1837 	}
1838 	return limit;
1839 }
1840 
1841 static int
1842 dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
1843 {
1844 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1845 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1846 	int retcode;
1847 	int i;
1848 	struct dpaa2_queue *dpaa2_q;
1849 
1850 	PMD_INIT_FUNC_TRACE();
1851 
1852 	if (dpni == NULL) {
1853 		DPAA2_PMD_ERR("dpni is NULL");
1854 		return -EINVAL;
1855 	}
1856 
1857 	retcode =  dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
1858 	if (retcode)
1859 		goto error;
1860 
1861 	/* Reset the per queue stats in dpaa2_queue structure */
1862 	for (i = 0; i < priv->nb_rx_queues; i++) {
1863 		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
1864 		if (dpaa2_q)
1865 			dpaa2_q->rx_pkts = 0;
1866 	}
1867 
1868 	for (i = 0; i < priv->nb_tx_queues; i++) {
1869 		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
1870 		if (dpaa2_q)
1871 			dpaa2_q->tx_pkts = 0;
1872 	}
1873 
1874 	return 0;
1875 
1876 error:
1877 	DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode);
1878 	return retcode;
1879 };
1880 
1881 /* return 0 means link status changed, -1 means not changed */
1882 static int
1883 dpaa2_dev_link_update(struct rte_eth_dev *dev,
1884 		      int wait_to_complete)
1885 {
1886 	int ret;
1887 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1888 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1889 	struct rte_eth_link link;
1890 	struct dpni_link_state state = {0};
1891 	uint8_t count;
1892 
1893 	if (dpni == NULL) {
1894 		DPAA2_PMD_ERR("dpni is NULL");
1895 		return 0;
1896 	}
1897 
1898 	for (count = 0; count <= MAX_REPEAT_TIME; count++) {
1899 		ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token,
1900 					  &state);
1901 		if (ret < 0) {
1902 			DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret);
1903 			return -1;
1904 		}
1905 		if (state.up == RTE_ETH_LINK_DOWN &&
1906 		    wait_to_complete)
1907 			rte_delay_ms(CHECK_INTERVAL);
1908 		else
1909 			break;
1910 	}
1911 
1912 	memset(&link, 0, sizeof(struct rte_eth_link));
1913 	link.link_status = state.up;
1914 	link.link_speed = state.rate;
1915 
1916 	if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
1917 		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
1918 	else
1919 		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1920 
1921 	ret = rte_eth_linkstatus_set(dev, &link);
1922 	if (ret == -1)
1923 		DPAA2_PMD_DEBUG("No change in status");
1924 	else
1925 		DPAA2_PMD_INFO("Port %d Link is %s\n", dev->data->port_id,
1926 			       link.link_status ? "Up" : "Down");
1927 
1928 	return ret;
1929 }
1930 
1931 /**
1932  * Toggle the DPNI to enable, if not already enabled.
1933  * This is not strictly PHY up/down - it is more of logical toggling.
1934  */
1935 static int
1936 dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
1937 {
1938 	int ret = -EINVAL;
1939 	struct dpaa2_dev_priv *priv;
1940 	struct fsl_mc_io *dpni;
1941 	int en = 0;
1942 	struct dpni_link_state state = {0};
1943 
1944 	priv = dev->data->dev_private;
1945 	dpni = (struct fsl_mc_io *)dev->process_private;
1946 
1947 	if (dpni == NULL) {
1948 		DPAA2_PMD_ERR("dpni is NULL");
1949 		return ret;
1950 	}
1951 
1952 	/* Check if DPNI is currently enabled */
1953 	ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
1954 	if (ret) {
1955 		/* Unable to obtain dpni status; Not continuing */
1956 		DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
1957 		return -EINVAL;
1958 	}
1959 
1960 	/* Enable link if not already enabled */
1961 	if (!en) {
1962 		ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
1963 		if (ret) {
1964 			DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
1965 			return -EINVAL;
1966 		}
1967 	}
1968 	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1969 	if (ret < 0) {
1970 		DPAA2_PMD_DEBUG("Unable to get link state (%d)", ret);
1971 		return -1;
1972 	}
1973 
1974 	/* changing tx burst function to start enqueues */
1975 	dev->tx_pkt_burst = dpaa2_dev_tx;
1976 	dev->data->dev_link.link_status = state.up;
1977 	dev->data->dev_link.link_speed = state.rate;
1978 
1979 	if (state.up)
1980 		DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id);
1981 	else
1982 		DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id);
1983 	return ret;
1984 }
1985 
1986 /**
1987  * Toggle the DPNI to disable, if not already disabled.
1988  * This is not strictly PHY up/down - it is more of logical toggling.
1989  */
1990 static int
1991 dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
1992 {
1993 	int ret = -EINVAL;
1994 	struct dpaa2_dev_priv *priv;
1995 	struct fsl_mc_io *dpni;
1996 	int dpni_enabled = 0;
1997 	int retries = 10;
1998 
1999 	PMD_INIT_FUNC_TRACE();
2000 
2001 	priv = dev->data->dev_private;
2002 	dpni = (struct fsl_mc_io *)dev->process_private;
2003 
2004 	if (dpni == NULL) {
2005 		DPAA2_PMD_ERR("Device has not yet been configured");
2006 		return ret;
2007 	}
2008 
2009 	/*changing  tx burst function to avoid any more enqueues */
2010 	dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
2011 
2012 	/* Loop while dpni_disable() attempts to drain the egress FQs
2013 	 * and confirm them back to us.
2014 	 */
2015 	do {
2016 		ret = dpni_disable(dpni, 0, priv->token);
2017 		if (ret) {
2018 			DPAA2_PMD_ERR("dpni disable failed (%d)", ret);
2019 			return ret;
2020 		}
2021 		ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
2022 		if (ret) {
2023 			DPAA2_PMD_ERR("dpni enable check failed (%d)", ret);
2024 			return ret;
2025 		}
2026 		if (dpni_enabled)
2027 			/* Allow the MC some slack */
2028 			rte_delay_us(100 * 1000);
2029 	} while (dpni_enabled && --retries);
2030 
2031 	if (!retries) {
2032 		DPAA2_PMD_WARN("Retry count exceeded disabling dpni");
2033 		/* todo- we may have to manually cleanup queues.
2034 		 */
2035 	} else {
2036 		DPAA2_PMD_INFO("Port %d Link DOWN successful",
2037 			       dev->data->port_id);
2038 	}
2039 
2040 	dev->data->dev_link.link_status = 0;
2041 
2042 	return ret;
2043 }
2044 
2045 static int
2046 dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2047 {
2048 	int ret = -EINVAL;
2049 	struct dpaa2_dev_priv *priv;
2050 	struct fsl_mc_io *dpni;
2051 	struct dpni_link_state state = {0};
2052 
2053 	PMD_INIT_FUNC_TRACE();
2054 
2055 	priv = dev->data->dev_private;
2056 	dpni = (struct fsl_mc_io *)dev->process_private;
2057 
2058 	if (dpni == NULL || fc_conf == NULL) {
2059 		DPAA2_PMD_ERR("device not configured");
2060 		return ret;
2061 	}
2062 
2063 	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
2064 	if (ret) {
2065 		DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret);
2066 		return ret;
2067 	}
2068 
2069 	memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
2070 	if (state.options & DPNI_LINK_OPT_PAUSE) {
2071 		/* DPNI_LINK_OPT_PAUSE set
2072 		 *  if ASYM_PAUSE not set,
2073 		 *	RX Side flow control (handle received Pause frame)
2074 		 *	TX side flow control (send Pause frame)
2075 		 *  if ASYM_PAUSE set,
2076 		 *	RX Side flow control (handle received Pause frame)
2077 		 *	No TX side flow control (send Pause frame disabled)
2078 		 */
2079 		if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
2080 			fc_conf->mode = RTE_ETH_FC_FULL;
2081 		else
2082 			fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
2083 	} else {
2084 		/* DPNI_LINK_OPT_PAUSE not set
2085 		 *  if ASYM_PAUSE set,
2086 		 *	TX side flow control (send Pause frame)
2087 		 *	No RX side flow control (No action on pause frame rx)
2088 		 *  if ASYM_PAUSE not set,
2089 		 *	Flow control disabled
2090 		 */
2091 		if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
2092 			fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
2093 		else
2094 			fc_conf->mode = RTE_ETH_FC_NONE;
2095 	}
2096 
2097 	return ret;
2098 }
2099 
2100 static int
2101 dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2102 {
2103 	int ret = -EINVAL;
2104 	struct dpaa2_dev_priv *priv;
2105 	struct fsl_mc_io *dpni;
2106 	struct dpni_link_state state = {0};
2107 	struct dpni_link_cfg cfg = {0};
2108 
2109 	PMD_INIT_FUNC_TRACE();
2110 
2111 	priv = dev->data->dev_private;
2112 	dpni = (struct fsl_mc_io *)dev->process_private;
2113 
2114 	if (dpni == NULL) {
2115 		DPAA2_PMD_ERR("dpni is NULL");
2116 		return ret;
2117 	}
2118 
2119 	/* It is necessary to obtain the current state before setting fc_conf
2120 	 * as MC would return error in case rate, autoneg or duplex values are
2121 	 * different.
2122 	 */
2123 	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
2124 	if (ret) {
2125 		DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret);
2126 		return -1;
2127 	}
2128 
2129 	/* Disable link before setting configuration */
2130 	dpaa2_dev_set_link_down(dev);
2131 
2132 	/* Based on fc_conf, update cfg */
2133 	cfg.rate = state.rate;
2134 	cfg.options = state.options;
2135 
2136 	/* update cfg with fc_conf */
2137 	switch (fc_conf->mode) {
2138 	case RTE_ETH_FC_FULL:
2139 		/* Full flow control;
2140 		 * OPT_PAUSE set, ASYM_PAUSE not set
2141 		 */
2142 		cfg.options |= DPNI_LINK_OPT_PAUSE;
2143 		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
2144 		break;
2145 	case RTE_ETH_FC_TX_PAUSE:
2146 		/* Enable RX flow control
2147 		 * OPT_PAUSE not set;
2148 		 * ASYM_PAUSE set;
2149 		 */
2150 		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
2151 		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
2152 		break;
2153 	case RTE_ETH_FC_RX_PAUSE:
2154 		/* Enable TX Flow control
2155 		 * OPT_PAUSE set
2156 		 * ASYM_PAUSE set
2157 		 */
2158 		cfg.options |= DPNI_LINK_OPT_PAUSE;
2159 		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
2160 		break;
2161 	case RTE_ETH_FC_NONE:
2162 		/* Disable Flow control
2163 		 * OPT_PAUSE not set
2164 		 * ASYM_PAUSE not set
2165 		 */
2166 		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
2167 		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
2168 		break;
2169 	default:
2170 		DPAA2_PMD_ERR("Incorrect Flow control flag (%d)",
2171 			      fc_conf->mode);
2172 		return -1;
2173 	}
2174 
2175 	ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
2176 	if (ret)
2177 		DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)",
2178 			      ret);
2179 
2180 	/* Enable link */
2181 	dpaa2_dev_set_link_up(dev);
2182 
2183 	return ret;
2184 }
2185 
2186 static int
2187 dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
2188 			  struct rte_eth_rss_conf *rss_conf)
2189 {
2190 	struct rte_eth_dev_data *data = dev->data;
2191 	struct dpaa2_dev_priv *priv = data->dev_private;
2192 	struct rte_eth_conf *eth_conf = &data->dev_conf;
2193 	int ret, tc_index;
2194 
2195 	PMD_INIT_FUNC_TRACE();
2196 
2197 	if (rss_conf->rss_hf) {
2198 		for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
2199 			ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf,
2200 				tc_index);
2201 			if (ret) {
2202 				DPAA2_PMD_ERR("Unable to set flow dist on tc%d",
2203 					tc_index);
2204 				return ret;
2205 			}
2206 		}
2207 	} else {
2208 		for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
2209 			ret = dpaa2_remove_flow_dist(dev, tc_index);
2210 			if (ret) {
2211 				DPAA2_PMD_ERR(
2212 					"Unable to remove flow dist on tc%d",
2213 					tc_index);
2214 				return ret;
2215 			}
2216 		}
2217 	}
2218 	eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
2219 	return 0;
2220 }
2221 
2222 static int
2223 dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
2224 			    struct rte_eth_rss_conf *rss_conf)
2225 {
2226 	struct rte_eth_dev_data *data = dev->data;
2227 	struct rte_eth_conf *eth_conf = &data->dev_conf;
2228 
2229 	/* dpaa2 does not support rss_key, so length should be 0*/
2230 	rss_conf->rss_key_len = 0;
2231 	rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
2232 	return 0;
2233 }
2234 
2235 int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
2236 		int eth_rx_queue_id,
2237 		struct dpaa2_dpcon_dev *dpcon,
2238 		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
2239 {
2240 	struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
2241 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
2242 	struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
2243 	uint8_t flow_id = dpaa2_ethq->flow_id;
2244 	struct dpni_queue cfg;
2245 	uint8_t options, priority;
2246 	int ret;
2247 
2248 	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
2249 		dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
2250 	else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC)
2251 		dpaa2_ethq->cb = dpaa2_dev_process_atomic_event;
2252 	else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED)
2253 		dpaa2_ethq->cb = dpaa2_dev_process_ordered_event;
2254 	else
2255 		return -EINVAL;
2256 
2257 	priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / queue_conf->ev.priority) *
2258 		   (dpcon->num_priorities - 1);
2259 
2260 	memset(&cfg, 0, sizeof(struct dpni_queue));
2261 	options = DPNI_QUEUE_OPT_DEST;
2262 	cfg.destination.type = DPNI_DEST_DPCON;
2263 	cfg.destination.id = dpcon->dpcon_id;
2264 	cfg.destination.priority = priority;
2265 
2266 	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
2267 		options |= DPNI_QUEUE_OPT_HOLD_ACTIVE;
2268 		cfg.destination.hold_active = 1;
2269 	}
2270 
2271 	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED &&
2272 			!eth_priv->en_ordered) {
2273 		struct opr_cfg ocfg;
2274 
2275 		/* Restoration window size = 256 frames */
2276 		ocfg.oprrws = 3;
2277 		/* Restoration window size = 512 frames for LX2 */
2278 		if (dpaa2_svr_family == SVR_LX2160A)
2279 			ocfg.oprrws = 4;
2280 		/* Auto advance NESN window enabled */
2281 		ocfg.oa = 1;
2282 		/* Late arrival window size disabled */
2283 		ocfg.olws = 0;
2284 		/* ORL resource exhaustion advance NESN disabled */
2285 		ocfg.oeane = 0;
2286 		/* Loose ordering enabled */
2287 		ocfg.oloe = 1;
2288 		eth_priv->en_loose_ordered = 1;
2289 		/* Strict ordering enabled if explicitly set */
2290 		if (getenv("DPAA2_STRICT_ORDERING_ENABLE")) {
2291 			ocfg.oloe = 0;
2292 			eth_priv->en_loose_ordered = 0;
2293 		}
2294 
2295 		ret = dpni_set_opr(dpni, CMD_PRI_LOW, eth_priv->token,
2296 				   dpaa2_ethq->tc_index, flow_id,
2297 				   OPR_OPT_CREATE, &ocfg, 0);
2298 		if (ret) {
2299 			DPAA2_PMD_ERR("Error setting opr: ret: %d\n", ret);
2300 			return ret;
2301 		}
2302 
2303 		eth_priv->en_ordered = 1;
2304 	}
2305 
2306 	options |= DPNI_QUEUE_OPT_USER_CTX;
2307 	cfg.user_context = (size_t)(dpaa2_ethq);
2308 
2309 	ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
2310 			     dpaa2_ethq->tc_index, flow_id, options, &cfg);
2311 	if (ret) {
2312 		DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
2313 		return ret;
2314 	}
2315 
2316 	memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event));
2317 
2318 	return 0;
2319 }
2320 
2321 int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
2322 		int eth_rx_queue_id)
2323 {
2324 	struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
2325 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
2326 	struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
2327 	uint8_t flow_id = dpaa2_ethq->flow_id;
2328 	struct dpni_queue cfg;
2329 	uint8_t options;
2330 	int ret;
2331 
2332 	memset(&cfg, 0, sizeof(struct dpni_queue));
2333 	options = DPNI_QUEUE_OPT_DEST;
2334 	cfg.destination.type = DPNI_DEST_NONE;
2335 
2336 	ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
2337 			     dpaa2_ethq->tc_index, flow_id, options, &cfg);
2338 	if (ret)
2339 		DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
2340 
2341 	return ret;
2342 }
2343 
2344 static int
2345 dpaa2_dev_flow_ops_get(struct rte_eth_dev *dev,
2346 		       const struct rte_flow_ops **ops)
2347 {
2348 	if (!dev)
2349 		return -ENODEV;
2350 
2351 	*ops = &dpaa2_flow_ops;
2352 	return 0;
2353 }
2354 
2355 static void
2356 dpaa2_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2357 	struct rte_eth_rxq_info *qinfo)
2358 {
2359 	struct dpaa2_queue *rxq;
2360 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2361 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
2362 	uint16_t max_frame_length;
2363 
2364 	rxq = (struct dpaa2_queue *)dev->data->rx_queues[queue_id];
2365 
2366 	qinfo->mp = rxq->mb_pool;
2367 	qinfo->scattered_rx = dev->data->scattered_rx;
2368 	qinfo->nb_desc = rxq->nb_desc;
2369 	if (dpni_get_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
2370 				&max_frame_length) == 0)
2371 		qinfo->rx_buf_size = max_frame_length;
2372 
2373 	qinfo->conf.rx_free_thresh = 1;
2374 	qinfo->conf.rx_drop_en = 1;
2375 	qinfo->conf.rx_deferred_start = 0;
2376 	qinfo->conf.offloads = rxq->offloads;
2377 }
2378 
2379 static void
2380 dpaa2_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2381 	struct rte_eth_txq_info *qinfo)
2382 {
2383 	struct dpaa2_queue *txq;
2384 
2385 	txq = dev->data->tx_queues[queue_id];
2386 
2387 	qinfo->nb_desc = txq->nb_desc;
2388 	qinfo->conf.tx_thresh.pthresh = 0;
2389 	qinfo->conf.tx_thresh.hthresh = 0;
2390 	qinfo->conf.tx_thresh.wthresh = 0;
2391 
2392 	qinfo->conf.tx_free_thresh = 0;
2393 	qinfo->conf.tx_rs_thresh = 0;
2394 	qinfo->conf.offloads = txq->offloads;
2395 	qinfo->conf.tx_deferred_start = 0;
2396 }
2397 
2398 static int
2399 dpaa2_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops)
2400 {
2401 	*(const void **)ops = &dpaa2_tm_ops;
2402 
2403 	return 0;
2404 }
2405 
2406 void
2407 rte_pmd_dpaa2_thread_init(void)
2408 {
2409 	int ret;
2410 
2411 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
2412 		ret = dpaa2_affine_qbman_swp();
2413 		if (ret) {
2414 			DPAA2_PMD_ERR(
2415 				"Failed to allocate IO portal, tid: %d\n",
2416 				rte_gettid());
2417 			return;
2418 		}
2419 	}
2420 }
2421 
2422 static struct eth_dev_ops dpaa2_ethdev_ops = {
2423 	.dev_configure	  = dpaa2_eth_dev_configure,
2424 	.dev_start	      = dpaa2_dev_start,
2425 	.dev_stop	      = dpaa2_dev_stop,
2426 	.dev_close	      = dpaa2_dev_close,
2427 	.promiscuous_enable   = dpaa2_dev_promiscuous_enable,
2428 	.promiscuous_disable  = dpaa2_dev_promiscuous_disable,
2429 	.allmulticast_enable  = dpaa2_dev_allmulticast_enable,
2430 	.allmulticast_disable = dpaa2_dev_allmulticast_disable,
2431 	.dev_set_link_up      = dpaa2_dev_set_link_up,
2432 	.dev_set_link_down    = dpaa2_dev_set_link_down,
2433 	.link_update	   = dpaa2_dev_link_update,
2434 	.stats_get	       = dpaa2_dev_stats_get,
2435 	.xstats_get	       = dpaa2_dev_xstats_get,
2436 	.xstats_get_by_id     = dpaa2_xstats_get_by_id,
2437 	.xstats_get_names_by_id = dpaa2_xstats_get_names_by_id,
2438 	.xstats_get_names      = dpaa2_xstats_get_names,
2439 	.stats_reset	   = dpaa2_dev_stats_reset,
2440 	.xstats_reset	      = dpaa2_dev_stats_reset,
2441 	.fw_version_get	   = dpaa2_fw_version_get,
2442 	.dev_infos_get	   = dpaa2_dev_info_get,
2443 	.dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
2444 	.mtu_set           = dpaa2_dev_mtu_set,
2445 	.vlan_filter_set      = dpaa2_vlan_filter_set,
2446 	.vlan_offload_set     = dpaa2_vlan_offload_set,
2447 	.vlan_tpid_set	      = dpaa2_vlan_tpid_set,
2448 	.rx_queue_setup    = dpaa2_dev_rx_queue_setup,
2449 	.rx_queue_release  = dpaa2_dev_rx_queue_release,
2450 	.tx_queue_setup    = dpaa2_dev_tx_queue_setup,
2451 	.rx_burst_mode_get = dpaa2_dev_rx_burst_mode_get,
2452 	.tx_burst_mode_get = dpaa2_dev_tx_burst_mode_get,
2453 	.flow_ctrl_get	      = dpaa2_flow_ctrl_get,
2454 	.flow_ctrl_set	      = dpaa2_flow_ctrl_set,
2455 	.mac_addr_add         = dpaa2_dev_add_mac_addr,
2456 	.mac_addr_remove      = dpaa2_dev_remove_mac_addr,
2457 	.mac_addr_set         = dpaa2_dev_set_mac_addr,
2458 	.rss_hash_update      = dpaa2_dev_rss_hash_update,
2459 	.rss_hash_conf_get    = dpaa2_dev_rss_hash_conf_get,
2460 	.flow_ops_get         = dpaa2_dev_flow_ops_get,
2461 	.rxq_info_get	      = dpaa2_rxq_info_get,
2462 	.txq_info_get	      = dpaa2_txq_info_get,
2463 	.tm_ops_get	      = dpaa2_tm_ops_get,
2464 #if defined(RTE_LIBRTE_IEEE1588)
2465 	.timesync_enable      = dpaa2_timesync_enable,
2466 	.timesync_disable     = dpaa2_timesync_disable,
2467 	.timesync_read_time   = dpaa2_timesync_read_time,
2468 	.timesync_write_time  = dpaa2_timesync_write_time,
2469 	.timesync_adjust_time = dpaa2_timesync_adjust_time,
2470 	.timesync_read_rx_timestamp = dpaa2_timesync_read_rx_timestamp,
2471 	.timesync_read_tx_timestamp = dpaa2_timesync_read_tx_timestamp,
2472 #endif
2473 };
2474 
2475 /* Populate the mac address from physically available (u-boot/firmware) and/or
2476  * one set by higher layers like MC (restool) etc.
2477  * Returns the table of MAC entries (multiple entries)
2478  */
2479 static int
2480 populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv,
2481 		  struct rte_ether_addr *mac_entry)
2482 {
2483 	int ret;
2484 	struct rte_ether_addr phy_mac, prime_mac;
2485 
2486 	memset(&phy_mac, 0, sizeof(struct rte_ether_addr));
2487 	memset(&prime_mac, 0, sizeof(struct rte_ether_addr));
2488 
2489 	/* Get the physical device MAC address */
2490 	ret = dpni_get_port_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
2491 				     phy_mac.addr_bytes);
2492 	if (ret) {
2493 		DPAA2_PMD_ERR("DPNI get physical port MAC failed: %d", ret);
2494 		goto cleanup;
2495 	}
2496 
2497 	ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
2498 					prime_mac.addr_bytes);
2499 	if (ret) {
2500 		DPAA2_PMD_ERR("DPNI get Prime port MAC failed: %d", ret);
2501 		goto cleanup;
2502 	}
2503 
2504 	/* Now that both MAC have been obtained, do:
2505 	 *  if not_empty_mac(phy) && phy != Prime, overwrite prime with Phy
2506 	 *     and return phy
2507 	 *  If empty_mac(phy), return prime.
2508 	 *  if both are empty, create random MAC, set as prime and return
2509 	 */
2510 	if (!rte_is_zero_ether_addr(&phy_mac)) {
2511 		/* If the addresses are not same, overwrite prime */
2512 		if (!rte_is_same_ether_addr(&phy_mac, &prime_mac)) {
2513 			ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
2514 							priv->token,
2515 							phy_mac.addr_bytes);
2516 			if (ret) {
2517 				DPAA2_PMD_ERR("Unable to set MAC Address: %d",
2518 					      ret);
2519 				goto cleanup;
2520 			}
2521 			memcpy(&prime_mac, &phy_mac,
2522 				sizeof(struct rte_ether_addr));
2523 		}
2524 	} else if (rte_is_zero_ether_addr(&prime_mac)) {
2525 		/* In case phys and prime, both are zero, create random MAC */
2526 		rte_eth_random_addr(prime_mac.addr_bytes);
2527 		ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
2528 						priv->token,
2529 						prime_mac.addr_bytes);
2530 		if (ret) {
2531 			DPAA2_PMD_ERR("Unable to set MAC Address: %d", ret);
2532 			goto cleanup;
2533 		}
2534 	}
2535 
2536 	/* prime_mac the final MAC address */
2537 	memcpy(mac_entry, &prime_mac, sizeof(struct rte_ether_addr));
2538 	return 0;
2539 
2540 cleanup:
2541 	return -1;
2542 }
2543 
2544 static int
2545 check_devargs_handler(__rte_unused const char *key, const char *value,
2546 		      __rte_unused void *opaque)
2547 {
2548 	if (strcmp(value, "1"))
2549 		return -1;
2550 
2551 	return 0;
2552 }
2553 
2554 static int
2555 dpaa2_get_devargs(struct rte_devargs *devargs, const char *key)
2556 {
2557 	struct rte_kvargs *kvlist;
2558 
2559 	if (!devargs)
2560 		return 0;
2561 
2562 	kvlist = rte_kvargs_parse(devargs->args, NULL);
2563 	if (!kvlist)
2564 		return 0;
2565 
2566 	if (!rte_kvargs_count(kvlist, key)) {
2567 		rte_kvargs_free(kvlist);
2568 		return 0;
2569 	}
2570 
2571 	if (rte_kvargs_process(kvlist, key,
2572 			       check_devargs_handler, NULL) < 0) {
2573 		rte_kvargs_free(kvlist);
2574 		return 0;
2575 	}
2576 	rte_kvargs_free(kvlist);
2577 
2578 	return 1;
2579 }
2580 
2581 static int
2582 dpaa2_dev_init(struct rte_eth_dev *eth_dev)
2583 {
2584 	struct rte_device *dev = eth_dev->device;
2585 	struct rte_dpaa2_device *dpaa2_dev;
2586 	struct fsl_mc_io *dpni_dev;
2587 	struct dpni_attr attr;
2588 	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
2589 	struct dpni_buffer_layout layout;
2590 	int ret, hw_id, i;
2591 
2592 	PMD_INIT_FUNC_TRACE();
2593 
2594 	dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
2595 	if (!dpni_dev) {
2596 		DPAA2_PMD_ERR("Memory allocation failed for dpni device");
2597 		return -1;
2598 	}
2599 	dpni_dev->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
2600 	eth_dev->process_private = (void *)dpni_dev;
2601 
2602 	/* For secondary processes, the primary has done all the work */
2603 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2604 		/* In case of secondary, only burst and ops API need to be
2605 		 * plugged.
2606 		 */
2607 		eth_dev->dev_ops = &dpaa2_ethdev_ops;
2608 		eth_dev->rx_queue_count = dpaa2_dev_rx_queue_count;
2609 		if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE))
2610 			eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
2611 		else if (dpaa2_get_devargs(dev->devargs,
2612 					DRIVER_NO_PREFETCH_MODE))
2613 			eth_dev->rx_pkt_burst = dpaa2_dev_rx;
2614 		else
2615 			eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
2616 		eth_dev->tx_pkt_burst = dpaa2_dev_tx;
2617 		return 0;
2618 	}
2619 
2620 	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
2621 
2622 	hw_id = dpaa2_dev->object_id;
2623 	ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
2624 	if (ret) {
2625 		DPAA2_PMD_ERR(
2626 			     "Failure in opening dpni@%d with err code %d",
2627 			     hw_id, ret);
2628 		rte_free(dpni_dev);
2629 		return -1;
2630 	}
2631 
2632 	if (eth_dev->data->dev_conf.lpbk_mode)
2633 		dpaa2_dev_recycle_deconfig(eth_dev);
2634 
2635 	/* Clean the device first */
2636 	ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
2637 	if (ret) {
2638 		DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d",
2639 			      hw_id, ret);
2640 		goto init_err;
2641 	}
2642 
2643 	ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
2644 	if (ret) {
2645 		DPAA2_PMD_ERR(
2646 			     "Failure in get dpni@%d attribute, err code %d",
2647 			     hw_id, ret);
2648 		goto init_err;
2649 	}
2650 
2651 	priv->num_rx_tc = attr.num_rx_tcs;
2652 	priv->num_tx_tc = attr.num_tx_tcs;
2653 	priv->qos_entries = attr.qos_entries;
2654 	priv->fs_entries = attr.fs_entries;
2655 	priv->dist_queues = attr.num_queues;
2656 	priv->num_channels = attr.num_channels;
2657 	priv->channel_inuse = 0;
2658 	rte_spinlock_init(&priv->lpbk_qp_lock);
2659 
2660 	/* only if the custom CG is enabled */
2661 	if (attr.options & DPNI_OPT_CUSTOM_CG)
2662 		priv->max_cgs = attr.num_cgs;
2663 	else
2664 		priv->max_cgs = 0;
2665 
2666 	for (i = 0; i < priv->max_cgs; i++)
2667 		priv->cgid_in_use[i] = 0;
2668 
2669 	for (i = 0; i < attr.num_rx_tcs; i++)
2670 		priv->nb_rx_queues += attr.num_queues;
2671 
2672 	priv->nb_tx_queues = attr.num_tx_tcs * attr.num_channels;
2673 
2674 	DPAA2_PMD_DEBUG("RX-TC= %d, rx_queues= %d, tx_queues=%d, max_cgs=%d",
2675 			priv->num_rx_tc, priv->nb_rx_queues,
2676 			priv->nb_tx_queues, priv->max_cgs);
2677 
2678 	priv->hw = dpni_dev;
2679 	priv->hw_id = hw_id;
2680 	priv->options = attr.options;
2681 	priv->max_mac_filters = attr.mac_filter_entries;
2682 	priv->max_vlan_filters = attr.vlan_filter_entries;
2683 	priv->flags = 0;
2684 #if defined(RTE_LIBRTE_IEEE1588)
2685 	printf("DPDK IEEE1588 is enabled\n");
2686 	priv->flags |= DPAA2_TX_CONF_ENABLE;
2687 #endif
2688 	/* Used with ``fslmc:dpni.1,drv_tx_conf=1`` */
2689 	if (dpaa2_get_devargs(dev->devargs, DRIVER_TX_CONF)) {
2690 		priv->flags |= DPAA2_TX_CONF_ENABLE;
2691 		DPAA2_PMD_INFO("TX_CONF Enabled");
2692 	}
2693 
2694 	if (dpaa2_get_devargs(dev->devargs, DRIVER_ERROR_QUEUE)) {
2695 		dpaa2_enable_err_queue = 1;
2696 		DPAA2_PMD_INFO("Enable error queue");
2697 	}
2698 
2699 	/* Allocate memory for hardware structure for queues */
2700 	ret = dpaa2_alloc_rx_tx_queues(eth_dev);
2701 	if (ret) {
2702 		DPAA2_PMD_ERR("Queue allocation Failed");
2703 		goto init_err;
2704 	}
2705 
2706 	/* Allocate memory for storing MAC addresses.
2707 	 * Table of mac_filter_entries size is allocated so that RTE ether lib
2708 	 * can add MAC entries when rte_eth_dev_mac_addr_add is called.
2709 	 */
2710 	eth_dev->data->mac_addrs = rte_zmalloc("dpni",
2711 		RTE_ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
2712 	if (eth_dev->data->mac_addrs == NULL) {
2713 		DPAA2_PMD_ERR(
2714 		   "Failed to allocate %d bytes needed to store MAC addresses",
2715 		   RTE_ETHER_ADDR_LEN * attr.mac_filter_entries);
2716 		ret = -ENOMEM;
2717 		goto init_err;
2718 	}
2719 
2720 	ret = populate_mac_addr(dpni_dev, priv, &eth_dev->data->mac_addrs[0]);
2721 	if (ret) {
2722 		DPAA2_PMD_ERR("Unable to fetch MAC Address for device");
2723 		rte_free(eth_dev->data->mac_addrs);
2724 		eth_dev->data->mac_addrs = NULL;
2725 		goto init_err;
2726 	}
2727 
2728 	/* ... tx buffer layout ... */
2729 	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
2730 	if (priv->flags & DPAA2_TX_CONF_ENABLE) {
2731 		layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
2732 				 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2733 		layout.pass_timestamp = true;
2734 	} else {
2735 		layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
2736 	}
2737 	layout.pass_frame_status = 1;
2738 	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
2739 				     DPNI_QUEUE_TX, &layout);
2740 	if (ret) {
2741 		DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret);
2742 		goto init_err;
2743 	}
2744 
2745 	/* ... tx-conf and error buffer layout ... */
2746 	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
2747 	if (priv->flags & DPAA2_TX_CONF_ENABLE) {
2748 		layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2749 		layout.pass_timestamp = true;
2750 	}
2751 	layout.options |= DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
2752 	layout.pass_frame_status = 1;
2753 	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
2754 				     DPNI_QUEUE_TX_CONFIRM, &layout);
2755 	if (ret) {
2756 		DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout",
2757 			     ret);
2758 		goto init_err;
2759 	}
2760 
2761 	eth_dev->dev_ops = &dpaa2_ethdev_ops;
2762 
2763 	if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) {
2764 		eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
2765 		DPAA2_PMD_INFO("Loopback mode");
2766 	} else if (dpaa2_get_devargs(dev->devargs, DRIVER_NO_PREFETCH_MODE)) {
2767 		eth_dev->rx_pkt_burst = dpaa2_dev_rx;
2768 		DPAA2_PMD_INFO("No Prefetch mode");
2769 	} else {
2770 		eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
2771 	}
2772 	eth_dev->tx_pkt_burst = dpaa2_dev_tx;
2773 
2774 	/* Init fields w.r.t. classification */
2775 	memset(&priv->extract.qos_key_extract, 0,
2776 		sizeof(struct dpaa2_key_extract));
2777 	priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64);
2778 	if (!priv->extract.qos_extract_param) {
2779 		DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow "
2780 			    " classification ", ret);
2781 		goto init_err;
2782 	}
2783 	priv->extract.qos_key_extract.key_info.ipv4_src_offset =
2784 		IP_ADDRESS_OFFSET_INVALID;
2785 	priv->extract.qos_key_extract.key_info.ipv4_dst_offset =
2786 		IP_ADDRESS_OFFSET_INVALID;
2787 	priv->extract.qos_key_extract.key_info.ipv6_src_offset =
2788 		IP_ADDRESS_OFFSET_INVALID;
2789 	priv->extract.qos_key_extract.key_info.ipv6_dst_offset =
2790 		IP_ADDRESS_OFFSET_INVALID;
2791 
2792 	for (i = 0; i < MAX_TCS; i++) {
2793 		memset(&priv->extract.tc_key_extract[i], 0,
2794 			sizeof(struct dpaa2_key_extract));
2795 		priv->extract.tc_extract_param[i] =
2796 			(size_t)rte_malloc(NULL, 256, 64);
2797 		if (!priv->extract.tc_extract_param[i]) {
2798 			DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow classification",
2799 				     ret);
2800 			goto init_err;
2801 		}
2802 		priv->extract.tc_key_extract[i].key_info.ipv4_src_offset =
2803 			IP_ADDRESS_OFFSET_INVALID;
2804 		priv->extract.tc_key_extract[i].key_info.ipv4_dst_offset =
2805 			IP_ADDRESS_OFFSET_INVALID;
2806 		priv->extract.tc_key_extract[i].key_info.ipv6_src_offset =
2807 			IP_ADDRESS_OFFSET_INVALID;
2808 		priv->extract.tc_key_extract[i].key_info.ipv6_dst_offset =
2809 			IP_ADDRESS_OFFSET_INVALID;
2810 	}
2811 
2812 	ret = dpni_set_max_frame_length(dpni_dev, CMD_PRI_LOW, priv->token,
2813 					RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN
2814 					+ VLAN_TAG_SIZE);
2815 	if (ret) {
2816 		DPAA2_PMD_ERR("Unable to set mtu. check config");
2817 		goto init_err;
2818 	}
2819 
2820 	/*TODO To enable soft parser support DPAA2 driver needs to integrate
2821 	 * with external entity to receive byte code for software sequence
2822 	 * and same will be offload to the H/W using MC interface.
2823 	 * Currently it is assumed that DPAA2 driver has byte code by some
2824 	 * mean and same if offloaded to H/W.
2825 	 */
2826 	if (getenv("DPAA2_ENABLE_SOFT_PARSER")) {
2827 		WRIOP_SS_INITIALIZER(priv);
2828 		ret = dpaa2_eth_load_wriop_soft_parser(priv, DPNI_SS_INGRESS);
2829 		if (ret < 0) {
2830 			DPAA2_PMD_ERR(" Error(%d) in loading softparser\n",
2831 				      ret);
2832 			return ret;
2833 		}
2834 
2835 		ret = dpaa2_eth_enable_wriop_soft_parser(priv,
2836 							 DPNI_SS_INGRESS);
2837 		if (ret < 0) {
2838 			DPAA2_PMD_ERR(" Error(%d) in enabling softparser\n",
2839 				      ret);
2840 			return ret;
2841 		}
2842 	}
2843 	RTE_LOG(INFO, PMD, "%s: netdev created, connected to %s\n",
2844 		eth_dev->data->name, dpaa2_dev->ep_name);
2845 
2846 	return 0;
2847 init_err:
2848 	dpaa2_dev_close(eth_dev);
2849 
2850 	return ret;
2851 }
2852 
2853 int dpaa2_dev_is_dpaa2(struct rte_eth_dev *dev)
2854 {
2855 	return dev->device->driver == &rte_dpaa2_pmd.driver;
2856 }
2857 
2858 static int
2859 rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
2860 		struct rte_dpaa2_device *dpaa2_dev)
2861 {
2862 	struct rte_eth_dev *eth_dev;
2863 	struct dpaa2_dev_priv *dev_priv;
2864 	int diag;
2865 
2866 	if ((DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) >
2867 		RTE_PKTMBUF_HEADROOM) {
2868 		DPAA2_PMD_ERR(
2869 		"RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA2 Annotation req(%d)",
2870 		RTE_PKTMBUF_HEADROOM,
2871 		DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE);
2872 
2873 		return -1;
2874 	}
2875 
2876 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2877 		eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
2878 		if (!eth_dev)
2879 			return -ENODEV;
2880 		dev_priv = rte_zmalloc("ethdev private structure",
2881 				       sizeof(struct dpaa2_dev_priv),
2882 				       RTE_CACHE_LINE_SIZE);
2883 		if (dev_priv == NULL) {
2884 			DPAA2_PMD_CRIT(
2885 				"Unable to allocate memory for private data");
2886 			rte_eth_dev_release_port(eth_dev);
2887 			return -ENOMEM;
2888 		}
2889 		eth_dev->data->dev_private = (void *)dev_priv;
2890 		/* Store a pointer to eth_dev in dev_private */
2891 		dev_priv->eth_dev = eth_dev;
2892 	} else {
2893 		eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
2894 		if (!eth_dev) {
2895 			DPAA2_PMD_DEBUG("returning enodev");
2896 			return -ENODEV;
2897 		}
2898 	}
2899 
2900 	eth_dev->device = &dpaa2_dev->device;
2901 
2902 	dpaa2_dev->eth_dev = eth_dev;
2903 	eth_dev->data->rx_mbuf_alloc_failed = 0;
2904 
2905 	if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC)
2906 		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
2907 
2908 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
2909 
2910 	/* Invoke PMD device initialization function */
2911 	diag = dpaa2_dev_init(eth_dev);
2912 	if (diag == 0) {
2913 		if (!dpaa2_tx_sg_pool) {
2914 			dpaa2_tx_sg_pool =
2915 				rte_pktmbuf_pool_create("dpaa2_mbuf_tx_sg_pool",
2916 				DPAA2_POOL_SIZE,
2917 				DPAA2_POOL_CACHE_SIZE, 0,
2918 				DPAA2_MAX_SGS * sizeof(struct qbman_sge),
2919 				rte_socket_id());
2920 			if (dpaa2_tx_sg_pool == NULL) {
2921 				DPAA2_PMD_ERR("SG pool creation failed\n");
2922 				return -ENOMEM;
2923 			}
2924 		}
2925 		rte_eth_dev_probing_finish(eth_dev);
2926 		dpaa2_valid_dev++;
2927 		return 0;
2928 	}
2929 
2930 	rte_eth_dev_release_port(eth_dev);
2931 	return diag;
2932 }
2933 
2934 static int
2935 rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
2936 {
2937 	struct rte_eth_dev *eth_dev;
2938 	int ret;
2939 
2940 	eth_dev = dpaa2_dev->eth_dev;
2941 	dpaa2_dev_close(eth_dev);
2942 	dpaa2_valid_dev--;
2943 	if (!dpaa2_valid_dev)
2944 		rte_mempool_free(dpaa2_tx_sg_pool);
2945 	ret = rte_eth_dev_release_port(eth_dev);
2946 
2947 	return ret;
2948 }
2949 
2950 static struct rte_dpaa2_driver rte_dpaa2_pmd = {
2951 	.drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA,
2952 	.drv_type = DPAA2_ETH,
2953 	.probe = rte_dpaa2_probe,
2954 	.remove = rte_dpaa2_remove,
2955 };
2956 
2957 RTE_PMD_REGISTER_DPAA2(NET_DPAA2_PMD_DRIVER_NAME, rte_dpaa2_pmd);
2958 RTE_PMD_REGISTER_PARAM_STRING(NET_DPAA2_PMD_DRIVER_NAME,
2959 		DRIVER_LOOPBACK_MODE "=<int> "
2960 		DRIVER_NO_PREFETCH_MODE "=<int>"
2961 		DRIVER_TX_CONF "=<int>"
2962 		DRIVER_ERROR_QUEUE "=<int>");
2963 RTE_LOG_REGISTER_DEFAULT(dpaa2_logtype_pmd, NOTICE);
2964