xref: /dpdk/drivers/net/dpaa2/dpaa2_ethdev.c (revision 10b71caecbe1cddcbb65c050ca775fba575e88db)
1 /* * SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2020 NXP
5  *
6  */
7 
8 #include <time.h>
9 #include <net/if.h>
10 
11 #include <rte_mbuf.h>
12 #include <rte_ethdev_driver.h>
13 #include <rte_malloc.h>
14 #include <rte_memcpy.h>
15 #include <rte_string_fns.h>
16 #include <rte_cycles.h>
17 #include <rte_kvargs.h>
18 #include <rte_dev.h>
19 #include <rte_fslmc.h>
20 #include <rte_flow_driver.h>
21 
22 #include "dpaa2_pmd_logs.h"
23 #include <fslmc_vfio.h>
24 #include <dpaa2_hw_pvt.h>
25 #include <dpaa2_hw_mempool.h>
26 #include <dpaa2_hw_dpio.h>
27 #include <mc/fsl_dpmng.h>
28 #include "dpaa2_ethdev.h"
29 #include "dpaa2_sparser.h"
30 #include <fsl_qbman_debug.h>
31 
32 #define DRIVER_LOOPBACK_MODE "drv_loopback"
33 #define DRIVER_NO_PREFETCH_MODE "drv_no_prefetch"
34 
35 /* Supported Rx offloads */
36 static uint64_t dev_rx_offloads_sup =
37 		DEV_RX_OFFLOAD_CHECKSUM |
38 		DEV_RX_OFFLOAD_SCTP_CKSUM |
39 		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
40 		DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
41 		DEV_RX_OFFLOAD_VLAN_STRIP |
42 		DEV_RX_OFFLOAD_VLAN_FILTER |
43 		DEV_RX_OFFLOAD_JUMBO_FRAME |
44 		DEV_RX_OFFLOAD_TIMESTAMP;
45 
46 /* Rx offloads which cannot be disabled */
47 static uint64_t dev_rx_offloads_nodis =
48 		DEV_RX_OFFLOAD_RSS_HASH |
49 		DEV_RX_OFFLOAD_SCATTER;
50 
51 /* Supported Tx offloads */
52 static uint64_t dev_tx_offloads_sup =
53 		DEV_TX_OFFLOAD_VLAN_INSERT |
54 		DEV_TX_OFFLOAD_IPV4_CKSUM |
55 		DEV_TX_OFFLOAD_UDP_CKSUM |
56 		DEV_TX_OFFLOAD_TCP_CKSUM |
57 		DEV_TX_OFFLOAD_SCTP_CKSUM |
58 		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
59 		DEV_TX_OFFLOAD_MT_LOCKFREE |
60 		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
61 
62 /* Tx offloads which cannot be disabled */
63 static uint64_t dev_tx_offloads_nodis =
64 		DEV_TX_OFFLOAD_MULTI_SEGS;
65 
66 /* enable timestamp in mbuf */
67 bool dpaa2_enable_ts[RTE_MAX_ETHPORTS];
68 
69 struct rte_dpaa2_xstats_name_off {
70 	char name[RTE_ETH_XSTATS_NAME_SIZE];
71 	uint8_t page_id; /* dpni statistics page id */
72 	uint8_t stats_id; /* stats id in the given page */
73 };
74 
75 static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
76 	{"ingress_multicast_frames", 0, 2},
77 	{"ingress_multicast_bytes", 0, 3},
78 	{"ingress_broadcast_frames", 0, 4},
79 	{"ingress_broadcast_bytes", 0, 5},
80 	{"egress_multicast_frames", 1, 2},
81 	{"egress_multicast_bytes", 1, 3},
82 	{"egress_broadcast_frames", 1, 4},
83 	{"egress_broadcast_bytes", 1, 5},
84 	{"ingress_filtered_frames", 2, 0},
85 	{"ingress_discarded_frames", 2, 1},
86 	{"ingress_nobuffer_discards", 2, 2},
87 	{"egress_discarded_frames", 2, 3},
88 	{"egress_confirmed_frames", 2, 4},
89 	{"cgr_reject_frames", 4, 0},
90 	{"cgr_reject_bytes", 4, 1},
91 };
92 
93 static const enum rte_filter_op dpaa2_supported_filter_ops[] = {
94 	RTE_ETH_FILTER_ADD,
95 	RTE_ETH_FILTER_DELETE,
96 	RTE_ETH_FILTER_UPDATE,
97 	RTE_ETH_FILTER_FLUSH,
98 	RTE_ETH_FILTER_GET
99 };
100 
101 static struct rte_dpaa2_driver rte_dpaa2_pmd;
102 static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);
103 static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
104 				 int wait_to_complete);
105 static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
106 static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
107 static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
108 
109 static int
110 dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
111 {
112 	int ret;
113 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
114 	struct fsl_mc_io *dpni = dev->process_private;
115 
116 	PMD_INIT_FUNC_TRACE();
117 
118 	if (dpni == NULL) {
119 		DPAA2_PMD_ERR("dpni is NULL");
120 		return -1;
121 	}
122 
123 	if (on)
124 		ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW, priv->token,
125 				       vlan_id, 0, 0, 0);
126 	else
127 		ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
128 					  priv->token, vlan_id);
129 
130 	if (ret < 0)
131 		DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid =%d",
132 			      ret, vlan_id, priv->hw_id);
133 
134 	return ret;
135 }
136 
137 static int
138 dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
139 {
140 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
141 	struct fsl_mc_io *dpni = dev->process_private;
142 	int ret = 0;
143 
144 	PMD_INIT_FUNC_TRACE();
145 
146 	if (mask & ETH_VLAN_FILTER_MASK) {
147 		/* VLAN Filter not avaialble */
148 		if (!priv->max_vlan_filters) {
149 			DPAA2_PMD_INFO("VLAN filter not available");
150 			return -ENOTSUP;
151 		}
152 
153 		if (dev->data->dev_conf.rxmode.offloads &
154 			DEV_RX_OFFLOAD_VLAN_FILTER)
155 			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
156 						      priv->token, true);
157 		else
158 			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
159 						      priv->token, false);
160 		if (ret < 0)
161 			DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret);
162 	}
163 
164 	return ret;
165 }
166 
167 static int
168 dpaa2_vlan_tpid_set(struct rte_eth_dev *dev,
169 		      enum rte_vlan_type vlan_type __rte_unused,
170 		      uint16_t tpid)
171 {
172 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
173 	struct fsl_mc_io *dpni = dev->process_private;
174 	int ret = -ENOTSUP;
175 
176 	PMD_INIT_FUNC_TRACE();
177 
178 	/* nothing to be done for standard vlan tpids */
179 	if (tpid == 0x8100 || tpid == 0x88A8)
180 		return 0;
181 
182 	ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
183 				   priv->token, tpid);
184 	if (ret < 0)
185 		DPAA2_PMD_INFO("Unable to set vlan tpid = %d", ret);
186 	/* if already configured tpids, remove them first */
187 	if (ret == -EBUSY) {
188 		struct dpni_custom_tpid_cfg tpid_list = {0};
189 
190 		ret = dpni_get_custom_tpid(dpni, CMD_PRI_LOW,
191 				   priv->token, &tpid_list);
192 		if (ret < 0)
193 			goto fail;
194 		ret = dpni_remove_custom_tpid(dpni, CMD_PRI_LOW,
195 				   priv->token, tpid_list.tpid1);
196 		if (ret < 0)
197 			goto fail;
198 		ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
199 					   priv->token, tpid);
200 	}
201 fail:
202 	return ret;
203 }
204 
205 static int
206 dpaa2_fw_version_get(struct rte_eth_dev *dev,
207 		     char *fw_version,
208 		     size_t fw_size)
209 {
210 	int ret;
211 	struct fsl_mc_io *dpni = dev->process_private;
212 	struct mc_soc_version mc_plat_info = {0};
213 	struct mc_version mc_ver_info = {0};
214 
215 	PMD_INIT_FUNC_TRACE();
216 
217 	if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
218 		DPAA2_PMD_WARN("\tmc_get_soc_version failed");
219 
220 	if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
221 		DPAA2_PMD_WARN("\tmc_get_version failed");
222 
223 	ret = snprintf(fw_version, fw_size,
224 		       "%x-%d.%d.%d",
225 		       mc_plat_info.svr,
226 		       mc_ver_info.major,
227 		       mc_ver_info.minor,
228 		       mc_ver_info.revision);
229 
230 	ret += 1; /* add the size of '\0' */
231 	if (fw_size < (uint32_t)ret)
232 		return ret;
233 	else
234 		return 0;
235 }
236 
237 static int
238 dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
239 {
240 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
241 
242 	PMD_INIT_FUNC_TRACE();
243 
244 	dev_info->if_index = priv->hw_id;
245 
246 	dev_info->max_mac_addrs = priv->max_mac_filters;
247 	dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
248 	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
249 	dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
250 	dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
251 	dev_info->rx_offload_capa = dev_rx_offloads_sup |
252 					dev_rx_offloads_nodis;
253 	dev_info->tx_offload_capa = dev_tx_offloads_sup |
254 					dev_tx_offloads_nodis;
255 	dev_info->speed_capa = ETH_LINK_SPEED_1G |
256 			ETH_LINK_SPEED_2_5G |
257 			ETH_LINK_SPEED_10G;
258 
259 	dev_info->max_hash_mac_addrs = 0;
260 	dev_info->max_vfs = 0;
261 	dev_info->max_vmdq_pools = ETH_16_POOLS;
262 	dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
263 
264 	dev_info->default_rxportconf.burst_size = dpaa2_dqrr_size;
265 	/* same is rx size for best perf */
266 	dev_info->default_txportconf.burst_size = dpaa2_dqrr_size;
267 
268 	dev_info->default_rxportconf.nb_queues = 1;
269 	dev_info->default_txportconf.nb_queues = 1;
270 	dev_info->default_txportconf.ring_size = CONG_ENTER_TX_THRESHOLD;
271 	dev_info->default_rxportconf.ring_size = DPAA2_RX_DEFAULT_NBDESC;
272 
273 	if (dpaa2_svr_family == SVR_LX2160A) {
274 		dev_info->speed_capa |= ETH_LINK_SPEED_25G |
275 				ETH_LINK_SPEED_40G |
276 				ETH_LINK_SPEED_50G |
277 				ETH_LINK_SPEED_100G;
278 	}
279 
280 	return 0;
281 }
282 
283 static int
284 dpaa2_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
285 			__rte_unused uint16_t queue_id,
286 			struct rte_eth_burst_mode *mode)
287 {
288 	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
289 	int ret = -EINVAL;
290 	unsigned int i;
291 	const struct burst_info {
292 		uint64_t flags;
293 		const char *output;
294 	} rx_offload_map[] = {
295 			{DEV_RX_OFFLOAD_CHECKSUM, " Checksum,"},
296 			{DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
297 			{DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
298 			{DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
299 			{DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
300 			{DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
301 			{DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
302 			{DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
303 			{DEV_RX_OFFLOAD_RSS_HASH, " RSS,"},
304 			{DEV_RX_OFFLOAD_SCATTER, " Scattered,"}
305 	};
306 
307 	/* Update Rx offload info */
308 	for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
309 		if (eth_conf->rxmode.offloads & rx_offload_map[i].flags) {
310 			snprintf(mode->info, sizeof(mode->info), "%s",
311 				rx_offload_map[i].output);
312 			ret = 0;
313 			break;
314 		}
315 	}
316 	return ret;
317 }
318 
319 static int
320 dpaa2_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
321 			__rte_unused uint16_t queue_id,
322 			struct rte_eth_burst_mode *mode)
323 {
324 	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
325 	int ret = -EINVAL;
326 	unsigned int i;
327 	const struct burst_info {
328 		uint64_t flags;
329 		const char *output;
330 	} tx_offload_map[] = {
331 			{DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
332 			{DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
333 			{DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
334 			{DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
335 			{DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
336 			{DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
337 			{DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
338 			{DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
339 			{DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
340 	};
341 
342 	/* Update Tx offload info */
343 	for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
344 		if (eth_conf->txmode.offloads & tx_offload_map[i].flags) {
345 			snprintf(mode->info, sizeof(mode->info), "%s",
346 				tx_offload_map[i].output);
347 			ret = 0;
348 			break;
349 		}
350 	}
351 	return ret;
352 }
353 
354 static int
355 dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
356 {
357 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
358 	uint16_t dist_idx;
359 	uint32_t vq_id;
360 	uint8_t num_rxqueue_per_tc;
361 	struct dpaa2_queue *mc_q, *mcq;
362 	uint32_t tot_queues;
363 	int i;
364 	struct dpaa2_queue *dpaa2_q;
365 
366 	PMD_INIT_FUNC_TRACE();
367 
368 	num_rxqueue_per_tc = (priv->nb_rx_queues / priv->num_rx_tc);
369 	if (priv->tx_conf_en)
370 		tot_queues = priv->nb_rx_queues + 2 * priv->nb_tx_queues;
371 	else
372 		tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
373 	mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
374 			  RTE_CACHE_LINE_SIZE);
375 	if (!mc_q) {
376 		DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues");
377 		return -1;
378 	}
379 
380 	for (i = 0; i < priv->nb_rx_queues; i++) {
381 		mc_q->eth_data = dev->data;
382 		priv->rx_vq[i] = mc_q++;
383 		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
384 		dpaa2_q->q_storage = rte_malloc("dq_storage",
385 					sizeof(struct queue_storage_info_t),
386 					RTE_CACHE_LINE_SIZE);
387 		if (!dpaa2_q->q_storage)
388 			goto fail;
389 
390 		memset(dpaa2_q->q_storage, 0,
391 		       sizeof(struct queue_storage_info_t));
392 		if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
393 			goto fail;
394 	}
395 
396 	for (i = 0; i < priv->nb_tx_queues; i++) {
397 		mc_q->eth_data = dev->data;
398 		mc_q->flow_id = 0xffff;
399 		priv->tx_vq[i] = mc_q++;
400 		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
401 		dpaa2_q->cscn = rte_malloc(NULL,
402 					   sizeof(struct qbman_result), 16);
403 		if (!dpaa2_q->cscn)
404 			goto fail_tx;
405 	}
406 
407 	if (priv->tx_conf_en) {
408 		/*Setup tx confirmation queues*/
409 		for (i = 0; i < priv->nb_tx_queues; i++) {
410 			mc_q->eth_data = dev->data;
411 			mc_q->tc_index = i;
412 			mc_q->flow_id = 0;
413 			priv->tx_conf_vq[i] = mc_q++;
414 			dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
415 			dpaa2_q->q_storage =
416 				rte_malloc("dq_storage",
417 					sizeof(struct queue_storage_info_t),
418 					RTE_CACHE_LINE_SIZE);
419 			if (!dpaa2_q->q_storage)
420 				goto fail_tx_conf;
421 
422 			memset(dpaa2_q->q_storage, 0,
423 			       sizeof(struct queue_storage_info_t));
424 			if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
425 				goto fail_tx_conf;
426 		}
427 	}
428 
429 	vq_id = 0;
430 	for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
431 		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
432 		mcq->tc_index = dist_idx / num_rxqueue_per_tc;
433 		mcq->flow_id = dist_idx % num_rxqueue_per_tc;
434 		vq_id++;
435 	}
436 
437 	return 0;
438 fail_tx_conf:
439 	i -= 1;
440 	while (i >= 0) {
441 		dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
442 		rte_free(dpaa2_q->q_storage);
443 		priv->tx_conf_vq[i--] = NULL;
444 	}
445 	i = priv->nb_tx_queues;
446 fail_tx:
447 	i -= 1;
448 	while (i >= 0) {
449 		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
450 		rte_free(dpaa2_q->cscn);
451 		priv->tx_vq[i--] = NULL;
452 	}
453 	i = priv->nb_rx_queues;
454 fail:
455 	i -= 1;
456 	mc_q = priv->rx_vq[0];
457 	while (i >= 0) {
458 		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
459 		dpaa2_free_dq_storage(dpaa2_q->q_storage);
460 		rte_free(dpaa2_q->q_storage);
461 		priv->rx_vq[i--] = NULL;
462 	}
463 	rte_free(mc_q);
464 	return -1;
465 }
466 
467 static void
468 dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
469 {
470 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
471 	struct dpaa2_queue *dpaa2_q;
472 	int i;
473 
474 	PMD_INIT_FUNC_TRACE();
475 
476 	/* Queue allocation base */
477 	if (priv->rx_vq[0]) {
478 		/* cleaning up queue storage */
479 		for (i = 0; i < priv->nb_rx_queues; i++) {
480 			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
481 			if (dpaa2_q->q_storage)
482 				rte_free(dpaa2_q->q_storage);
483 		}
484 		/* cleanup tx queue cscn */
485 		for (i = 0; i < priv->nb_tx_queues; i++) {
486 			dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
487 			rte_free(dpaa2_q->cscn);
488 		}
489 		if (priv->tx_conf_en) {
490 			/* cleanup tx conf queue storage */
491 			for (i = 0; i < priv->nb_tx_queues; i++) {
492 				dpaa2_q = (struct dpaa2_queue *)
493 						priv->tx_conf_vq[i];
494 				rte_free(dpaa2_q->q_storage);
495 			}
496 		}
497 		/*free memory for all queues (RX+TX) */
498 		rte_free(priv->rx_vq[0]);
499 		priv->rx_vq[0] = NULL;
500 	}
501 }
502 
503 static int
504 dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
505 {
506 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
507 	struct fsl_mc_io *dpni = dev->process_private;
508 	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
509 	uint64_t rx_offloads = eth_conf->rxmode.offloads;
510 	uint64_t tx_offloads = eth_conf->txmode.offloads;
511 	int rx_l3_csum_offload = false;
512 	int rx_l4_csum_offload = false;
513 	int tx_l3_csum_offload = false;
514 	int tx_l4_csum_offload = false;
515 	int ret, tc_index;
516 
517 	PMD_INIT_FUNC_TRACE();
518 
519 	/* Rx offloads which are enabled by default */
520 	if (dev_rx_offloads_nodis & ~rx_offloads) {
521 		DPAA2_PMD_INFO(
522 		"Some of rx offloads enabled by default - requested 0x%" PRIx64
523 		" fixed are 0x%" PRIx64,
524 		rx_offloads, dev_rx_offloads_nodis);
525 	}
526 
527 	/* Tx offloads which are enabled by default */
528 	if (dev_tx_offloads_nodis & ~tx_offloads) {
529 		DPAA2_PMD_INFO(
530 		"Some of tx offloads enabled by default - requested 0x%" PRIx64
531 		" fixed are 0x%" PRIx64,
532 		tx_offloads, dev_tx_offloads_nodis);
533 	}
534 
535 	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
536 		if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
537 			ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
538 				priv->token, eth_conf->rxmode.max_rx_pkt_len
539 				- RTE_ETHER_CRC_LEN);
540 			if (ret) {
541 				DPAA2_PMD_ERR(
542 					"Unable to set mtu. check config");
543 				return ret;
544 			}
545 			dev->data->mtu =
546 				dev->data->dev_conf.rxmode.max_rx_pkt_len -
547 				RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN -
548 				VLAN_TAG_SIZE;
549 		} else {
550 			return -1;
551 		}
552 	}
553 
554 	if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
555 		for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
556 			ret = dpaa2_setup_flow_dist(dev,
557 					eth_conf->rx_adv_conf.rss_conf.rss_hf,
558 					tc_index);
559 			if (ret) {
560 				DPAA2_PMD_ERR(
561 					"Unable to set flow distribution on tc%d."
562 					"Check queue config", tc_index);
563 				return ret;
564 			}
565 		}
566 	}
567 
568 	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
569 		rx_l3_csum_offload = true;
570 
571 	if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
572 		(rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) ||
573 		(rx_offloads & DEV_RX_OFFLOAD_SCTP_CKSUM))
574 		rx_l4_csum_offload = true;
575 
576 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
577 			       DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload);
578 	if (ret) {
579 		DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret);
580 		return ret;
581 	}
582 
583 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
584 			       DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload);
585 	if (ret) {
586 		DPAA2_PMD_ERR("Error to get RX l4 csum:Error = %d", ret);
587 		return ret;
588 	}
589 
590 #if !defined(RTE_LIBRTE_IEEE1588)
591 	if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
592 #endif
593 		dpaa2_enable_ts[dev->data->port_id] = true;
594 
595 	if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
596 		tx_l3_csum_offload = true;
597 
598 	if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
599 		(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
600 		(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
601 		tx_l4_csum_offload = true;
602 
603 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
604 			       DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload);
605 	if (ret) {
606 		DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret);
607 		return ret;
608 	}
609 
610 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
611 			       DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload);
612 	if (ret) {
613 		DPAA2_PMD_ERR("Error to get TX l4 csum:Error = %d", ret);
614 		return ret;
615 	}
616 
617 	/* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in
618 	 * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC]
619 	 * to 0 for LS2 in the hardware thus disabling data/annotation
620 	 * stashing. For LX2 this is fixed in hardware and thus hash result and
621 	 * parse results can be received in FD using this option.
622 	 */
623 	if (dpaa2_svr_family == SVR_LX2160A) {
624 		ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
625 				       DPNI_FLCTYPE_HASH, true);
626 		if (ret) {
627 			DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret);
628 			return ret;
629 		}
630 	}
631 
632 	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
633 		dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
634 
635 	return 0;
636 }
637 
638 /* Function to setup RX flow information. It contains traffic class ID,
639  * flow ID, destination configuration etc.
640  */
641 static int
642 dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
643 			 uint16_t rx_queue_id,
644 			 uint16_t nb_rx_desc,
645 			 unsigned int socket_id __rte_unused,
646 			 const struct rte_eth_rxconf *rx_conf,
647 			 struct rte_mempool *mb_pool)
648 {
649 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
650 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
651 	struct dpaa2_queue *dpaa2_q;
652 	struct dpni_queue cfg;
653 	uint8_t options = 0;
654 	uint8_t flow_id;
655 	uint32_t bpid;
656 	int i, ret;
657 
658 	PMD_INIT_FUNC_TRACE();
659 
660 	DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
661 			dev, rx_queue_id, mb_pool, rx_conf);
662 
663 	/* Rx deferred start is not supported */
664 	if (rx_conf->rx_deferred_start) {
665 		DPAA2_PMD_ERR("%p:Rx deferred start not supported",
666 				(void *)dev);
667 		return -EINVAL;
668 	}
669 
670 	if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
671 		bpid = mempool_to_bpid(mb_pool);
672 		ret = dpaa2_attach_bp_list(priv,
673 					   rte_dpaa2_bpid_info[bpid].bp_list);
674 		if (ret)
675 			return ret;
676 	}
677 	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
678 	dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
679 	dpaa2_q->bp_array = rte_dpaa2_bpid_info;
680 	dpaa2_q->nb_desc = UINT16_MAX;
681 	dpaa2_q->offloads = rx_conf->offloads;
682 
683 	/*Get the flow id from given VQ id*/
684 	flow_id = dpaa2_q->flow_id;
685 	memset(&cfg, 0, sizeof(struct dpni_queue));
686 
687 	options = options | DPNI_QUEUE_OPT_USER_CTX;
688 	cfg.user_context = (size_t)(dpaa2_q);
689 
690 	/* check if a private cgr available. */
691 	for (i = 0; i < priv->max_cgs; i++) {
692 		if (!priv->cgid_in_use[i]) {
693 			priv->cgid_in_use[i] = 1;
694 			break;
695 		}
696 	}
697 
698 	if (i < priv->max_cgs) {
699 		options |= DPNI_QUEUE_OPT_SET_CGID;
700 		cfg.cgid = i;
701 		dpaa2_q->cgid = cfg.cgid;
702 	} else {
703 		dpaa2_q->cgid = 0xff;
704 	}
705 
706 	/*if ls2088 or rev2 device, enable the stashing */
707 
708 	if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) {
709 		options |= DPNI_QUEUE_OPT_FLC;
710 		cfg.flc.stash_control = true;
711 		cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
712 		/* 00 00 00 - last 6 bit represent annotation, context stashing,
713 		 * data stashing setting 01 01 00 (0x14)
714 		 * (in following order ->DS AS CS)
715 		 * to enable 1 line data, 1 line annotation.
716 		 * For LX2, this setting should be 01 00 00 (0x10)
717 		 */
718 		if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A)
719 			cfg.flc.value |= 0x10;
720 		else
721 			cfg.flc.value |= 0x14;
722 	}
723 	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
724 			     dpaa2_q->tc_index, flow_id, options, &cfg);
725 	if (ret) {
726 		DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret);
727 		return -1;
728 	}
729 
730 	if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
731 		struct dpni_taildrop taildrop;
732 
733 		taildrop.enable = 1;
734 		dpaa2_q->nb_desc = nb_rx_desc;
735 		/* Private CGR will use tail drop length as nb_rx_desc.
736 		 * for rest cases we can use standard byte based tail drop.
737 		 * There is no HW restriction, but number of CGRs are limited,
738 		 * hence this restriction is placed.
739 		 */
740 		if (dpaa2_q->cgid != 0xff) {
741 			/*enabling per rx queue congestion control */
742 			taildrop.threshold = nb_rx_desc;
743 			taildrop.units = DPNI_CONGESTION_UNIT_FRAMES;
744 			taildrop.oal = 0;
745 			DPAA2_PMD_DEBUG("Enabling CG Tail Drop on queue = %d",
746 					rx_queue_id);
747 			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
748 						DPNI_CP_CONGESTION_GROUP,
749 						DPNI_QUEUE_RX,
750 						dpaa2_q->tc_index,
751 						dpaa2_q->cgid, &taildrop);
752 		} else {
753 			/*enabling per rx queue congestion control */
754 			taildrop.threshold = CONG_THRESHOLD_RX_BYTES_Q;
755 			taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
756 			taildrop.oal = CONG_RX_OAL;
757 			DPAA2_PMD_DEBUG("Enabling Byte based Drop on queue= %d",
758 					rx_queue_id);
759 			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
760 						DPNI_CP_QUEUE, DPNI_QUEUE_RX,
761 						dpaa2_q->tc_index, flow_id,
762 						&taildrop);
763 		}
764 		if (ret) {
765 			DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
766 				      ret);
767 			return -1;
768 		}
769 	} else { /* Disable tail Drop */
770 		struct dpni_taildrop taildrop = {0};
771 		DPAA2_PMD_INFO("Tail drop is disabled on queue");
772 
773 		taildrop.enable = 0;
774 		if (dpaa2_q->cgid != 0xff) {
775 			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
776 					DPNI_CP_CONGESTION_GROUP, DPNI_QUEUE_RX,
777 					dpaa2_q->tc_index,
778 					dpaa2_q->cgid, &taildrop);
779 		} else {
780 			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
781 					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
782 					dpaa2_q->tc_index, flow_id, &taildrop);
783 		}
784 		if (ret) {
785 			DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
786 				      ret);
787 			return -1;
788 		}
789 	}
790 
791 	dev->data->rx_queues[rx_queue_id] = dpaa2_q;
792 	return 0;
793 }
794 
795 static int
796 dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
797 			 uint16_t tx_queue_id,
798 			 uint16_t nb_tx_desc,
799 			 unsigned int socket_id __rte_unused,
800 			 const struct rte_eth_txconf *tx_conf)
801 {
802 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
803 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
804 		priv->tx_vq[tx_queue_id];
805 	struct dpaa2_queue *dpaa2_tx_conf_q = (struct dpaa2_queue *)
806 		priv->tx_conf_vq[tx_queue_id];
807 	struct fsl_mc_io *dpni = dev->process_private;
808 	struct dpni_queue tx_conf_cfg;
809 	struct dpni_queue tx_flow_cfg;
810 	uint8_t options = 0, flow_id;
811 	struct dpni_queue_id qid;
812 	uint32_t tc_id;
813 	int ret;
814 
815 	PMD_INIT_FUNC_TRACE();
816 
817 	/* Tx deferred start is not supported */
818 	if (tx_conf->tx_deferred_start) {
819 		DPAA2_PMD_ERR("%p:Tx deferred start not supported",
820 				(void *)dev);
821 		return -EINVAL;
822 	}
823 
824 	dpaa2_q->nb_desc = UINT16_MAX;
825 	dpaa2_q->offloads = tx_conf->offloads;
826 
827 	/* Return if queue already configured */
828 	if (dpaa2_q->flow_id != 0xffff) {
829 		dev->data->tx_queues[tx_queue_id] = dpaa2_q;
830 		return 0;
831 	}
832 
833 	memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
834 	memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
835 
836 	tc_id = tx_queue_id;
837 	flow_id = 0;
838 
839 	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
840 			tc_id, flow_id, options, &tx_flow_cfg);
841 	if (ret) {
842 		DPAA2_PMD_ERR("Error in setting the tx flow: "
843 			"tc_id=%d, flow=%d err=%d",
844 			tc_id, flow_id, ret);
845 			return -1;
846 	}
847 
848 	dpaa2_q->flow_id = flow_id;
849 
850 	if (tx_queue_id == 0) {
851 		/*Set tx-conf and error configuration*/
852 		if (priv->tx_conf_en)
853 			ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
854 							    priv->token,
855 							    DPNI_CONF_AFFINE);
856 		else
857 			ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
858 							    priv->token,
859 							    DPNI_CONF_DISABLE);
860 		if (ret) {
861 			DPAA2_PMD_ERR("Error in set tx conf mode settings: "
862 				      "err=%d", ret);
863 			return -1;
864 		}
865 	}
866 	dpaa2_q->tc_index = tc_id;
867 
868 	ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
869 			     DPNI_QUEUE_TX, dpaa2_q->tc_index,
870 			     dpaa2_q->flow_id, &tx_flow_cfg, &qid);
871 	if (ret) {
872 		DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
873 		return -1;
874 	}
875 	dpaa2_q->fqid = qid.fqid;
876 
877 	if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
878 		struct dpni_congestion_notification_cfg cong_notif_cfg = {0};
879 
880 		dpaa2_q->nb_desc = nb_tx_desc;
881 
882 		cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
883 		cong_notif_cfg.threshold_entry = nb_tx_desc;
884 		/* Notify that the queue is not congested when the data in
885 		 * the queue is below this thershold.
886 		 */
887 		cong_notif_cfg.threshold_exit = nb_tx_desc - 24;
888 		cong_notif_cfg.message_ctx = 0;
889 		cong_notif_cfg.message_iova =
890 				(size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
891 		cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
892 		cong_notif_cfg.notification_mode =
893 					 DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
894 					 DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
895 					 DPNI_CONG_OPT_COHERENT_WRITE;
896 		cong_notif_cfg.cg_point = DPNI_CP_QUEUE;
897 
898 		ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
899 						       priv->token,
900 						       DPNI_QUEUE_TX,
901 						       tc_id,
902 						       &cong_notif_cfg);
903 		if (ret) {
904 			DPAA2_PMD_ERR(
905 			   "Error in setting tx congestion notification: "
906 			   "err=%d", ret);
907 			return -ret;
908 		}
909 	}
910 	dpaa2_q->cb_eqresp_free = dpaa2_dev_free_eqresp_buf;
911 	dev->data->tx_queues[tx_queue_id] = dpaa2_q;
912 
913 	if (priv->tx_conf_en) {
914 		dpaa2_q->tx_conf_queue = dpaa2_tx_conf_q;
915 		options = options | DPNI_QUEUE_OPT_USER_CTX;
916 		tx_conf_cfg.user_context = (size_t)(dpaa2_q);
917 		ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
918 			     DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index,
919 			     dpaa2_tx_conf_q->flow_id, options, &tx_conf_cfg);
920 		if (ret) {
921 			DPAA2_PMD_ERR("Error in setting the tx conf flow: "
922 			      "tc_index=%d, flow=%d err=%d",
923 			      dpaa2_tx_conf_q->tc_index,
924 			      dpaa2_tx_conf_q->flow_id, ret);
925 			return -1;
926 		}
927 
928 		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
929 			     DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index,
930 			     dpaa2_tx_conf_q->flow_id, &tx_conf_cfg, &qid);
931 		if (ret) {
932 			DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
933 			return -1;
934 		}
935 		dpaa2_tx_conf_q->fqid = qid.fqid;
936 	}
937 	return 0;
938 }
939 
940 static void
941 dpaa2_dev_rx_queue_release(void *q __rte_unused)
942 {
943 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)q;
944 	struct dpaa2_dev_priv *priv = dpaa2_q->eth_data->dev_private;
945 	struct fsl_mc_io *dpni =
946 		(struct fsl_mc_io *)priv->eth_dev->process_private;
947 	uint8_t options = 0;
948 	int ret;
949 	struct dpni_queue cfg;
950 
951 	memset(&cfg, 0, sizeof(struct dpni_queue));
952 	PMD_INIT_FUNC_TRACE();
953 	if (dpaa2_q->cgid != 0xff) {
954 		options = DPNI_QUEUE_OPT_CLEAR_CGID;
955 		cfg.cgid = dpaa2_q->cgid;
956 
957 		ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
958 				     DPNI_QUEUE_RX,
959 				     dpaa2_q->tc_index, dpaa2_q->flow_id,
960 				     options, &cfg);
961 		if (ret)
962 			DPAA2_PMD_ERR("Unable to clear CGR from q=%u err=%d",
963 					dpaa2_q->fqid, ret);
964 		priv->cgid_in_use[dpaa2_q->cgid] = 0;
965 		dpaa2_q->cgid = 0xff;
966 	}
967 }
968 
969 static void
970 dpaa2_dev_tx_queue_release(void *q __rte_unused)
971 {
972 	PMD_INIT_FUNC_TRACE();
973 }
974 
975 static uint32_t
976 dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
977 {
978 	int32_t ret;
979 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
980 	struct dpaa2_queue *dpaa2_q;
981 	struct qbman_swp *swp;
982 	struct qbman_fq_query_np_rslt state;
983 	uint32_t frame_cnt = 0;
984 
985 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
986 		ret = dpaa2_affine_qbman_swp();
987 		if (ret) {
988 			DPAA2_PMD_ERR(
989 				"Failed to allocate IO portal, tid: %d\n",
990 				rte_gettid());
991 			return -EINVAL;
992 		}
993 	}
994 	swp = DPAA2_PER_LCORE_PORTAL;
995 
996 	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
997 
998 	if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
999 		frame_cnt = qbman_fq_state_frame_count(&state);
1000 		DPAA2_PMD_DP_DEBUG("RX frame count for q(%d) is %u",
1001 				rx_queue_id, frame_cnt);
1002 	}
1003 	return frame_cnt;
1004 }
1005 
1006 static const uint32_t *
1007 dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
1008 {
1009 	static const uint32_t ptypes[] = {
1010 		/*todo -= add more types */
1011 		RTE_PTYPE_L2_ETHER,
1012 		RTE_PTYPE_L3_IPV4,
1013 		RTE_PTYPE_L3_IPV4_EXT,
1014 		RTE_PTYPE_L3_IPV6,
1015 		RTE_PTYPE_L3_IPV6_EXT,
1016 		RTE_PTYPE_L4_TCP,
1017 		RTE_PTYPE_L4_UDP,
1018 		RTE_PTYPE_L4_SCTP,
1019 		RTE_PTYPE_L4_ICMP,
1020 		RTE_PTYPE_UNKNOWN
1021 	};
1022 
1023 	if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx ||
1024 		dev->rx_pkt_burst == dpaa2_dev_rx ||
1025 		dev->rx_pkt_burst == dpaa2_dev_loopback_rx)
1026 		return ptypes;
1027 	return NULL;
1028 }
1029 
1030 /**
1031  * Dpaa2 link Interrupt handler
1032  *
1033  * @param param
1034  *  The address of parameter (struct rte_eth_dev *) regsitered before.
1035  *
1036  * @return
1037  *  void
1038  */
1039 static void
1040 dpaa2_interrupt_handler(void *param)
1041 {
1042 	struct rte_eth_dev *dev = param;
1043 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1044 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1045 	int ret;
1046 	int irq_index = DPNI_IRQ_INDEX;
1047 	unsigned int status = 0, clear = 0;
1048 
1049 	PMD_INIT_FUNC_TRACE();
1050 
1051 	if (dpni == NULL) {
1052 		DPAA2_PMD_ERR("dpni is NULL");
1053 		return;
1054 	}
1055 
1056 	ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token,
1057 				  irq_index, &status);
1058 	if (unlikely(ret)) {
1059 		DPAA2_PMD_ERR("Can't get irq status (err %d)", ret);
1060 		clear = 0xffffffff;
1061 		goto out;
1062 	}
1063 
1064 	if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
1065 		clear = DPNI_IRQ_EVENT_LINK_CHANGED;
1066 		dpaa2_dev_link_update(dev, 0);
1067 		/* calling all the apps registered for link status event */
1068 		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
1069 					      NULL);
1070 	}
1071 out:
1072 	ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
1073 				    irq_index, clear);
1074 	if (unlikely(ret))
1075 		DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret);
1076 }
1077 
1078 static int
1079 dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable)
1080 {
1081 	int err = 0;
1082 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1083 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1084 	int irq_index = DPNI_IRQ_INDEX;
1085 	unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED;
1086 
1087 	PMD_INIT_FUNC_TRACE();
1088 
1089 	err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token,
1090 				irq_index, mask);
1091 	if (err < 0) {
1092 		DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err,
1093 			      strerror(-err));
1094 		return err;
1095 	}
1096 
1097 	err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token,
1098 				  irq_index, enable);
1099 	if (err < 0)
1100 		DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err,
1101 			      strerror(-err));
1102 
1103 	return err;
1104 }
1105 
1106 static int
1107 dpaa2_dev_start(struct rte_eth_dev *dev)
1108 {
1109 	struct rte_device *rdev = dev->device;
1110 	struct rte_dpaa2_device *dpaa2_dev;
1111 	struct rte_eth_dev_data *data = dev->data;
1112 	struct dpaa2_dev_priv *priv = data->dev_private;
1113 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1114 	struct dpni_queue cfg;
1115 	struct dpni_error_cfg	err_cfg;
1116 	uint16_t qdid;
1117 	struct dpni_queue_id qid;
1118 	struct dpaa2_queue *dpaa2_q;
1119 	int ret, i;
1120 	struct rte_intr_handle *intr_handle;
1121 
1122 	dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
1123 	intr_handle = &dpaa2_dev->intr_handle;
1124 
1125 	PMD_INIT_FUNC_TRACE();
1126 
1127 	ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
1128 	if (ret) {
1129 		DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d",
1130 			      priv->hw_id, ret);
1131 		return ret;
1132 	}
1133 
1134 	/* Power up the phy. Needed to make the link go UP */
1135 	dpaa2_dev_set_link_up(dev);
1136 
1137 	ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
1138 			    DPNI_QUEUE_TX, &qdid);
1139 	if (ret) {
1140 		DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret);
1141 		return ret;
1142 	}
1143 	priv->qdid = qdid;
1144 
1145 	for (i = 0; i < data->nb_rx_queues; i++) {
1146 		dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
1147 		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
1148 				     DPNI_QUEUE_RX, dpaa2_q->tc_index,
1149 				       dpaa2_q->flow_id, &cfg, &qid);
1150 		if (ret) {
1151 			DPAA2_PMD_ERR("Error in getting flow information: "
1152 				      "err=%d", ret);
1153 			return ret;
1154 		}
1155 		dpaa2_q->fqid = qid.fqid;
1156 	}
1157 
1158 	/*checksum errors, send them to normal path and set it in annotation */
1159 	err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
1160 	err_cfg.errors |= DPNI_ERROR_PHE;
1161 
1162 	err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
1163 	err_cfg.set_frame_annotation = true;
1164 
1165 	ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
1166 				       priv->token, &err_cfg);
1167 	if (ret) {
1168 		DPAA2_PMD_ERR("Error to dpni_set_errors_behavior: code = %d",
1169 			      ret);
1170 		return ret;
1171 	}
1172 
1173 	/* if the interrupts were configured on this devices*/
1174 	if (intr_handle && (intr_handle->fd) &&
1175 	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
1176 		/* Registering LSC interrupt handler */
1177 		rte_intr_callback_register(intr_handle,
1178 					   dpaa2_interrupt_handler,
1179 					   (void *)dev);
1180 
1181 		/* enable vfio intr/eventfd mapping
1182 		 * Interrupt index 0 is required, so we can not use
1183 		 * rte_intr_enable.
1184 		 */
1185 		rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX);
1186 
1187 		/* enable dpni_irqs */
1188 		dpaa2_eth_setup_irqs(dev, 1);
1189 	}
1190 
1191 	/* Change the tx burst function if ordered queues are used */
1192 	if (priv->en_ordered)
1193 		dev->tx_pkt_burst = dpaa2_dev_tx_ordered;
1194 
1195 	return 0;
1196 }
1197 
1198 /**
1199  *  This routine disables all traffic on the adapter by issuing a
1200  *  global reset on the MAC.
1201  */
1202 static void
1203 dpaa2_dev_stop(struct rte_eth_dev *dev)
1204 {
1205 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1206 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1207 	int ret;
1208 	struct rte_eth_link link;
1209 	struct rte_intr_handle *intr_handle = dev->intr_handle;
1210 
1211 	PMD_INIT_FUNC_TRACE();
1212 
1213 	/* reset interrupt callback  */
1214 	if (intr_handle && (intr_handle->fd) &&
1215 	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
1216 		/*disable dpni irqs */
1217 		dpaa2_eth_setup_irqs(dev, 0);
1218 
1219 		/* disable vfio intr before callback unregister */
1220 		rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX);
1221 
1222 		/* Unregistering LSC interrupt handler */
1223 		rte_intr_callback_unregister(intr_handle,
1224 					     dpaa2_interrupt_handler,
1225 					     (void *)dev);
1226 	}
1227 
1228 	dpaa2_dev_set_link_down(dev);
1229 
1230 	ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
1231 	if (ret) {
1232 		DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev",
1233 			      ret, priv->hw_id);
1234 		return;
1235 	}
1236 
1237 	/* clear the recorded link status */
1238 	memset(&link, 0, sizeof(link));
1239 	rte_eth_linkstatus_set(dev, &link);
1240 }
1241 
1242 static void
1243 dpaa2_dev_close(struct rte_eth_dev *dev)
1244 {
1245 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1246 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1247 	int ret;
1248 	struct rte_eth_link link;
1249 
1250 	PMD_INIT_FUNC_TRACE();
1251 
1252 	dpaa2_flow_clean(dev);
1253 
1254 	/* Clean the device first */
1255 	ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
1256 	if (ret) {
1257 		DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret);
1258 		return;
1259 	}
1260 
1261 	memset(&link, 0, sizeof(link));
1262 	rte_eth_linkstatus_set(dev, &link);
1263 }
1264 
1265 static int
1266 dpaa2_dev_promiscuous_enable(
1267 		struct rte_eth_dev *dev)
1268 {
1269 	int ret;
1270 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1271 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1272 
1273 	PMD_INIT_FUNC_TRACE();
1274 
1275 	if (dpni == NULL) {
1276 		DPAA2_PMD_ERR("dpni is NULL");
1277 		return -ENODEV;
1278 	}
1279 
1280 	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
1281 	if (ret < 0)
1282 		DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret);
1283 
1284 	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
1285 	if (ret < 0)
1286 		DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret);
1287 
1288 	return ret;
1289 }
1290 
1291 static int
1292 dpaa2_dev_promiscuous_disable(
1293 		struct rte_eth_dev *dev)
1294 {
1295 	int ret;
1296 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1297 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1298 
1299 	PMD_INIT_FUNC_TRACE();
1300 
1301 	if (dpni == NULL) {
1302 		DPAA2_PMD_ERR("dpni is NULL");
1303 		return -ENODEV;
1304 	}
1305 
1306 	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
1307 	if (ret < 0)
1308 		DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret);
1309 
1310 	if (dev->data->all_multicast == 0) {
1311 		ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
1312 						 priv->token, false);
1313 		if (ret < 0)
1314 			DPAA2_PMD_ERR("Unable to disable M promisc mode %d",
1315 				      ret);
1316 	}
1317 
1318 	return ret;
1319 }
1320 
1321 static int
1322 dpaa2_dev_allmulticast_enable(
1323 		struct rte_eth_dev *dev)
1324 {
1325 	int ret;
1326 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1327 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1328 
1329 	PMD_INIT_FUNC_TRACE();
1330 
1331 	if (dpni == NULL) {
1332 		DPAA2_PMD_ERR("dpni is NULL");
1333 		return -ENODEV;
1334 	}
1335 
1336 	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
1337 	if (ret < 0)
1338 		DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret);
1339 
1340 	return ret;
1341 }
1342 
1343 static int
1344 dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
1345 {
1346 	int ret;
1347 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1348 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1349 
1350 	PMD_INIT_FUNC_TRACE();
1351 
1352 	if (dpni == NULL) {
1353 		DPAA2_PMD_ERR("dpni is NULL");
1354 		return -ENODEV;
1355 	}
1356 
1357 	/* must remain on for all promiscuous */
1358 	if (dev->data->promiscuous == 1)
1359 		return 0;
1360 
1361 	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
1362 	if (ret < 0)
1363 		DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret);
1364 
1365 	return ret;
1366 }
1367 
1368 static int
1369 dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1370 {
1371 	int ret;
1372 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1373 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1374 	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
1375 				+ VLAN_TAG_SIZE;
1376 
1377 	PMD_INIT_FUNC_TRACE();
1378 
1379 	if (dpni == NULL) {
1380 		DPAA2_PMD_ERR("dpni is NULL");
1381 		return -EINVAL;
1382 	}
1383 
1384 	/* check that mtu is within the allowed range */
1385 	if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA2_MAX_RX_PKT_LEN)
1386 		return -EINVAL;
1387 
1388 	if (frame_size > RTE_ETHER_MAX_LEN)
1389 		dev->data->dev_conf.rxmode.offloads |=
1390 						DEV_RX_OFFLOAD_JUMBO_FRAME;
1391 	else
1392 		dev->data->dev_conf.rxmode.offloads &=
1393 						~DEV_RX_OFFLOAD_JUMBO_FRAME;
1394 
1395 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1396 
1397 	/* Set the Max Rx frame length as 'mtu' +
1398 	 * Maximum Ethernet header length
1399 	 */
1400 	ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
1401 					frame_size - RTE_ETHER_CRC_LEN);
1402 	if (ret) {
1403 		DPAA2_PMD_ERR("Setting the max frame length failed");
1404 		return -1;
1405 	}
1406 	DPAA2_PMD_INFO("MTU configured for the device: %d", mtu);
1407 	return 0;
1408 }
1409 
1410 static int
1411 dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
1412 		       struct rte_ether_addr *addr,
1413 		       __rte_unused uint32_t index,
1414 		       __rte_unused uint32_t pool)
1415 {
1416 	int ret;
1417 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1418 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1419 
1420 	PMD_INIT_FUNC_TRACE();
1421 
1422 	if (dpni == NULL) {
1423 		DPAA2_PMD_ERR("dpni is NULL");
1424 		return -1;
1425 	}
1426 
1427 	ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, priv->token,
1428 				addr->addr_bytes, 0, 0, 0);
1429 	if (ret)
1430 		DPAA2_PMD_ERR(
1431 			"error: Adding the MAC ADDR failed: err = %d", ret);
1432 	return 0;
1433 }
1434 
1435 static void
1436 dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
1437 			  uint32_t index)
1438 {
1439 	int ret;
1440 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1441 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1442 	struct rte_eth_dev_data *data = dev->data;
1443 	struct rte_ether_addr *macaddr;
1444 
1445 	PMD_INIT_FUNC_TRACE();
1446 
1447 	macaddr = &data->mac_addrs[index];
1448 
1449 	if (dpni == NULL) {
1450 		DPAA2_PMD_ERR("dpni is NULL");
1451 		return;
1452 	}
1453 
1454 	ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
1455 				   priv->token, macaddr->addr_bytes);
1456 	if (ret)
1457 		DPAA2_PMD_ERR(
1458 			"error: Removing the MAC ADDR failed: err = %d", ret);
1459 }
1460 
1461 static int
1462 dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
1463 		       struct rte_ether_addr *addr)
1464 {
1465 	int ret;
1466 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1467 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1468 
1469 	PMD_INIT_FUNC_TRACE();
1470 
1471 	if (dpni == NULL) {
1472 		DPAA2_PMD_ERR("dpni is NULL");
1473 		return -EINVAL;
1474 	}
1475 
1476 	ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
1477 					priv->token, addr->addr_bytes);
1478 
1479 	if (ret)
1480 		DPAA2_PMD_ERR(
1481 			"error: Setting the MAC ADDR failed %d", ret);
1482 
1483 	return ret;
1484 }
1485 
1486 static
1487 int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
1488 			 struct rte_eth_stats *stats)
1489 {
1490 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1491 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1492 	int32_t  retcode;
1493 	uint8_t page0 = 0, page1 = 1, page2 = 2;
1494 	union dpni_statistics value;
1495 	int i;
1496 	struct dpaa2_queue *dpaa2_rxq, *dpaa2_txq;
1497 
1498 	memset(&value, 0, sizeof(union dpni_statistics));
1499 
1500 	PMD_INIT_FUNC_TRACE();
1501 
1502 	if (!dpni) {
1503 		DPAA2_PMD_ERR("dpni is NULL");
1504 		return -EINVAL;
1505 	}
1506 
1507 	if (!stats) {
1508 		DPAA2_PMD_ERR("stats is NULL");
1509 		return -EINVAL;
1510 	}
1511 
1512 	/*Get Counters from page_0*/
1513 	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1514 				      page0, 0, &value);
1515 	if (retcode)
1516 		goto err;
1517 
1518 	stats->ipackets = value.page_0.ingress_all_frames;
1519 	stats->ibytes = value.page_0.ingress_all_bytes;
1520 
1521 	/*Get Counters from page_1*/
1522 	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1523 				      page1, 0, &value);
1524 	if (retcode)
1525 		goto err;
1526 
1527 	stats->opackets = value.page_1.egress_all_frames;
1528 	stats->obytes = value.page_1.egress_all_bytes;
1529 
1530 	/*Get Counters from page_2*/
1531 	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1532 				      page2, 0, &value);
1533 	if (retcode)
1534 		goto err;
1535 
1536 	/* Ingress drop frame count due to configured rules */
1537 	stats->ierrors = value.page_2.ingress_filtered_frames;
1538 	/* Ingress drop frame count due to error */
1539 	stats->ierrors += value.page_2.ingress_discarded_frames;
1540 
1541 	stats->oerrors = value.page_2.egress_discarded_frames;
1542 	stats->imissed = value.page_2.ingress_nobuffer_discards;
1543 
1544 	/* Fill in per queue stats */
1545 	for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
1546 		(i < priv->nb_rx_queues || i < priv->nb_tx_queues); ++i) {
1547 		dpaa2_rxq = (struct dpaa2_queue *)priv->rx_vq[i];
1548 		dpaa2_txq = (struct dpaa2_queue *)priv->tx_vq[i];
1549 		if (dpaa2_rxq)
1550 			stats->q_ipackets[i] = dpaa2_rxq->rx_pkts;
1551 		if (dpaa2_txq)
1552 			stats->q_opackets[i] = dpaa2_txq->tx_pkts;
1553 
1554 		/* Byte counting is not implemented */
1555 		stats->q_ibytes[i]   = 0;
1556 		stats->q_obytes[i]   = 0;
1557 	}
1558 
1559 	return 0;
1560 
1561 err:
1562 	DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode);
1563 	return retcode;
1564 };
1565 
1566 static int
1567 dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1568 		     unsigned int n)
1569 {
1570 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1571 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1572 	int32_t  retcode;
1573 	union dpni_statistics value[5] = {};
1574 	unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);
1575 
1576 	if (n < num)
1577 		return num;
1578 
1579 	if (xstats == NULL)
1580 		return 0;
1581 
1582 	/* Get Counters from page_0*/
1583 	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1584 				      0, 0, &value[0]);
1585 	if (retcode)
1586 		goto err;
1587 
1588 	/* Get Counters from page_1*/
1589 	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1590 				      1, 0, &value[1]);
1591 	if (retcode)
1592 		goto err;
1593 
1594 	/* Get Counters from page_2*/
1595 	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1596 				      2, 0, &value[2]);
1597 	if (retcode)
1598 		goto err;
1599 
1600 	for (i = 0; i < priv->max_cgs; i++) {
1601 		if (!priv->cgid_in_use[i]) {
1602 			/* Get Counters from page_4*/
1603 			retcode = dpni_get_statistics(dpni, CMD_PRI_LOW,
1604 						      priv->token,
1605 						      4, 0, &value[4]);
1606 			if (retcode)
1607 				goto err;
1608 			break;
1609 		}
1610 	}
1611 
1612 	for (i = 0; i < num; i++) {
1613 		xstats[i].id = i;
1614 		xstats[i].value = value[dpaa2_xstats_strings[i].page_id].
1615 			raw.counter[dpaa2_xstats_strings[i].stats_id];
1616 	}
1617 	return i;
1618 err:
1619 	DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode);
1620 	return retcode;
1621 }
1622 
1623 static int
1624 dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1625 		       struct rte_eth_xstat_name *xstats_names,
1626 		       unsigned int limit)
1627 {
1628 	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1629 
1630 	if (limit < stat_cnt)
1631 		return stat_cnt;
1632 
1633 	if (xstats_names != NULL)
1634 		for (i = 0; i < stat_cnt; i++)
1635 			strlcpy(xstats_names[i].name,
1636 				dpaa2_xstats_strings[i].name,
1637 				sizeof(xstats_names[i].name));
1638 
1639 	return stat_cnt;
1640 }
1641 
1642 static int
1643 dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1644 		       uint64_t *values, unsigned int n)
1645 {
1646 	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1647 	uint64_t values_copy[stat_cnt];
1648 
1649 	if (!ids) {
1650 		struct dpaa2_dev_priv *priv = dev->data->dev_private;
1651 		struct fsl_mc_io *dpni =
1652 			(struct fsl_mc_io *)dev->process_private;
1653 		int32_t  retcode;
1654 		union dpni_statistics value[5] = {};
1655 
1656 		if (n < stat_cnt)
1657 			return stat_cnt;
1658 
1659 		if (!values)
1660 			return 0;
1661 
1662 		/* Get Counters from page_0*/
1663 		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1664 					      0, 0, &value[0]);
1665 		if (retcode)
1666 			return 0;
1667 
1668 		/* Get Counters from page_1*/
1669 		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1670 					      1, 0, &value[1]);
1671 		if (retcode)
1672 			return 0;
1673 
1674 		/* Get Counters from page_2*/
1675 		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1676 					      2, 0, &value[2]);
1677 		if (retcode)
1678 			return 0;
1679 
1680 		/* Get Counters from page_4*/
1681 		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1682 					      4, 0, &value[4]);
1683 		if (retcode)
1684 			return 0;
1685 
1686 		for (i = 0; i < stat_cnt; i++) {
1687 			values[i] = value[dpaa2_xstats_strings[i].page_id].
1688 				raw.counter[dpaa2_xstats_strings[i].stats_id];
1689 		}
1690 		return stat_cnt;
1691 	}
1692 
1693 	dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);
1694 
1695 	for (i = 0; i < n; i++) {
1696 		if (ids[i] >= stat_cnt) {
1697 			DPAA2_PMD_ERR("xstats id value isn't valid");
1698 			return -1;
1699 		}
1700 		values[i] = values_copy[ids[i]];
1701 	}
1702 	return n;
1703 }
1704 
1705 static int
1706 dpaa2_xstats_get_names_by_id(
1707 	struct rte_eth_dev *dev,
1708 	struct rte_eth_xstat_name *xstats_names,
1709 	const uint64_t *ids,
1710 	unsigned int limit)
1711 {
1712 	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1713 	struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
1714 
1715 	if (!ids)
1716 		return dpaa2_xstats_get_names(dev, xstats_names, limit);
1717 
1718 	dpaa2_xstats_get_names(dev, xstats_names_copy, limit);
1719 
1720 	for (i = 0; i < limit; i++) {
1721 		if (ids[i] >= stat_cnt) {
1722 			DPAA2_PMD_ERR("xstats id value isn't valid");
1723 			return -1;
1724 		}
1725 		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
1726 	}
1727 	return limit;
1728 }
1729 
1730 static int
1731 dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
1732 {
1733 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1734 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1735 	int retcode;
1736 	int i;
1737 	struct dpaa2_queue *dpaa2_q;
1738 
1739 	PMD_INIT_FUNC_TRACE();
1740 
1741 	if (dpni == NULL) {
1742 		DPAA2_PMD_ERR("dpni is NULL");
1743 		return -EINVAL;
1744 	}
1745 
1746 	retcode =  dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
1747 	if (retcode)
1748 		goto error;
1749 
1750 	/* Reset the per queue stats in dpaa2_queue structure */
1751 	for (i = 0; i < priv->nb_rx_queues; i++) {
1752 		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
1753 		if (dpaa2_q)
1754 			dpaa2_q->rx_pkts = 0;
1755 	}
1756 
1757 	for (i = 0; i < priv->nb_tx_queues; i++) {
1758 		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
1759 		if (dpaa2_q)
1760 			dpaa2_q->tx_pkts = 0;
1761 	}
1762 
1763 	return 0;
1764 
1765 error:
1766 	DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode);
1767 	return retcode;
1768 };
1769 
1770 /* return 0 means link status changed, -1 means not changed */
1771 static int
1772 dpaa2_dev_link_update(struct rte_eth_dev *dev,
1773 			int wait_to_complete __rte_unused)
1774 {
1775 	int ret;
1776 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1777 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1778 	struct rte_eth_link link;
1779 	struct dpni_link_state state = {0};
1780 
1781 	if (dpni == NULL) {
1782 		DPAA2_PMD_ERR("dpni is NULL");
1783 		return 0;
1784 	}
1785 
1786 	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1787 	if (ret < 0) {
1788 		DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret);
1789 		return -1;
1790 	}
1791 
1792 	memset(&link, 0, sizeof(struct rte_eth_link));
1793 	link.link_status = state.up;
1794 	link.link_speed = state.rate;
1795 
1796 	if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
1797 		link.link_duplex = ETH_LINK_HALF_DUPLEX;
1798 	else
1799 		link.link_duplex = ETH_LINK_FULL_DUPLEX;
1800 
1801 	ret = rte_eth_linkstatus_set(dev, &link);
1802 	if (ret == -1)
1803 		DPAA2_PMD_DEBUG("No change in status");
1804 	else
1805 		DPAA2_PMD_INFO("Port %d Link is %s\n", dev->data->port_id,
1806 			       link.link_status ? "Up" : "Down");
1807 
1808 	return ret;
1809 }
1810 
1811 /**
1812  * Toggle the DPNI to enable, if not already enabled.
1813  * This is not strictly PHY up/down - it is more of logical toggling.
1814  */
1815 static int
1816 dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
1817 {
1818 	int ret = -EINVAL;
1819 	struct dpaa2_dev_priv *priv;
1820 	struct fsl_mc_io *dpni;
1821 	int en = 0;
1822 	struct dpni_link_state state = {0};
1823 
1824 	priv = dev->data->dev_private;
1825 	dpni = (struct fsl_mc_io *)dev->process_private;
1826 
1827 	if (dpni == NULL) {
1828 		DPAA2_PMD_ERR("dpni is NULL");
1829 		return ret;
1830 	}
1831 
1832 	/* Check if DPNI is currently enabled */
1833 	ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
1834 	if (ret) {
1835 		/* Unable to obtain dpni status; Not continuing */
1836 		DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
1837 		return -EINVAL;
1838 	}
1839 
1840 	/* Enable link if not already enabled */
1841 	if (!en) {
1842 		ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
1843 		if (ret) {
1844 			DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
1845 			return -EINVAL;
1846 		}
1847 	}
1848 	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1849 	if (ret < 0) {
1850 		DPAA2_PMD_DEBUG("Unable to get link state (%d)", ret);
1851 		return -1;
1852 	}
1853 
1854 	/* changing tx burst function to start enqueues */
1855 	dev->tx_pkt_burst = dpaa2_dev_tx;
1856 	dev->data->dev_link.link_status = state.up;
1857 	dev->data->dev_link.link_speed = state.rate;
1858 
1859 	if (state.up)
1860 		DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id);
1861 	else
1862 		DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id);
1863 	return ret;
1864 }
1865 
1866 /**
1867  * Toggle the DPNI to disable, if not already disabled.
1868  * This is not strictly PHY up/down - it is more of logical toggling.
1869  */
1870 static int
1871 dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
1872 {
1873 	int ret = -EINVAL;
1874 	struct dpaa2_dev_priv *priv;
1875 	struct fsl_mc_io *dpni;
1876 	int dpni_enabled = 0;
1877 	int retries = 10;
1878 
1879 	PMD_INIT_FUNC_TRACE();
1880 
1881 	priv = dev->data->dev_private;
1882 	dpni = (struct fsl_mc_io *)dev->process_private;
1883 
1884 	if (dpni == NULL) {
1885 		DPAA2_PMD_ERR("Device has not yet been configured");
1886 		return ret;
1887 	}
1888 
1889 	/*changing  tx burst function to avoid any more enqueues */
1890 	dev->tx_pkt_burst = dummy_dev_tx;
1891 
1892 	/* Loop while dpni_disable() attempts to drain the egress FQs
1893 	 * and confirm them back to us.
1894 	 */
1895 	do {
1896 		ret = dpni_disable(dpni, 0, priv->token);
1897 		if (ret) {
1898 			DPAA2_PMD_ERR("dpni disable failed (%d)", ret);
1899 			return ret;
1900 		}
1901 		ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
1902 		if (ret) {
1903 			DPAA2_PMD_ERR("dpni enable check failed (%d)", ret);
1904 			return ret;
1905 		}
1906 		if (dpni_enabled)
1907 			/* Allow the MC some slack */
1908 			rte_delay_us(100 * 1000);
1909 	} while (dpni_enabled && --retries);
1910 
1911 	if (!retries) {
1912 		DPAA2_PMD_WARN("Retry count exceeded disabling dpni");
1913 		/* todo- we may have to manually cleanup queues.
1914 		 */
1915 	} else {
1916 		DPAA2_PMD_INFO("Port %d Link DOWN successful",
1917 			       dev->data->port_id);
1918 	}
1919 
1920 	dev->data->dev_link.link_status = 0;
1921 
1922 	return ret;
1923 }
1924 
1925 static int
1926 dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1927 {
1928 	int ret = -EINVAL;
1929 	struct dpaa2_dev_priv *priv;
1930 	struct fsl_mc_io *dpni;
1931 	struct dpni_link_state state = {0};
1932 
1933 	PMD_INIT_FUNC_TRACE();
1934 
1935 	priv = dev->data->dev_private;
1936 	dpni = (struct fsl_mc_io *)dev->process_private;
1937 
1938 	if (dpni == NULL || fc_conf == NULL) {
1939 		DPAA2_PMD_ERR("device not configured");
1940 		return ret;
1941 	}
1942 
1943 	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1944 	if (ret) {
1945 		DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret);
1946 		return ret;
1947 	}
1948 
1949 	memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
1950 	if (state.options & DPNI_LINK_OPT_PAUSE) {
1951 		/* DPNI_LINK_OPT_PAUSE set
1952 		 *  if ASYM_PAUSE not set,
1953 		 *	RX Side flow control (handle received Pause frame)
1954 		 *	TX side flow control (send Pause frame)
1955 		 *  if ASYM_PAUSE set,
1956 		 *	RX Side flow control (handle received Pause frame)
1957 		 *	No TX side flow control (send Pause frame disabled)
1958 		 */
1959 		if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
1960 			fc_conf->mode = RTE_FC_FULL;
1961 		else
1962 			fc_conf->mode = RTE_FC_RX_PAUSE;
1963 	} else {
1964 		/* DPNI_LINK_OPT_PAUSE not set
1965 		 *  if ASYM_PAUSE set,
1966 		 *	TX side flow control (send Pause frame)
1967 		 *	No RX side flow control (No action on pause frame rx)
1968 		 *  if ASYM_PAUSE not set,
1969 		 *	Flow control disabled
1970 		 */
1971 		if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
1972 			fc_conf->mode = RTE_FC_TX_PAUSE;
1973 		else
1974 			fc_conf->mode = RTE_FC_NONE;
1975 	}
1976 
1977 	return ret;
1978 }
1979 
1980 static int
1981 dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1982 {
1983 	int ret = -EINVAL;
1984 	struct dpaa2_dev_priv *priv;
1985 	struct fsl_mc_io *dpni;
1986 	struct dpni_link_state state = {0};
1987 	struct dpni_link_cfg cfg = {0};
1988 
1989 	PMD_INIT_FUNC_TRACE();
1990 
1991 	priv = dev->data->dev_private;
1992 	dpni = (struct fsl_mc_io *)dev->process_private;
1993 
1994 	if (dpni == NULL) {
1995 		DPAA2_PMD_ERR("dpni is NULL");
1996 		return ret;
1997 	}
1998 
1999 	/* It is necessary to obtain the current state before setting fc_conf
2000 	 * as MC would return error in case rate, autoneg or duplex values are
2001 	 * different.
2002 	 */
2003 	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
2004 	if (ret) {
2005 		DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret);
2006 		return -1;
2007 	}
2008 
2009 	/* Disable link before setting configuration */
2010 	dpaa2_dev_set_link_down(dev);
2011 
2012 	/* Based on fc_conf, update cfg */
2013 	cfg.rate = state.rate;
2014 	cfg.options = state.options;
2015 
2016 	/* update cfg with fc_conf */
2017 	switch (fc_conf->mode) {
2018 	case RTE_FC_FULL:
2019 		/* Full flow control;
2020 		 * OPT_PAUSE set, ASYM_PAUSE not set
2021 		 */
2022 		cfg.options |= DPNI_LINK_OPT_PAUSE;
2023 		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
2024 		break;
2025 	case RTE_FC_TX_PAUSE:
2026 		/* Enable RX flow control
2027 		 * OPT_PAUSE not set;
2028 		 * ASYM_PAUSE set;
2029 		 */
2030 		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
2031 		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
2032 		break;
2033 	case RTE_FC_RX_PAUSE:
2034 		/* Enable TX Flow control
2035 		 * OPT_PAUSE set
2036 		 * ASYM_PAUSE set
2037 		 */
2038 		cfg.options |= DPNI_LINK_OPT_PAUSE;
2039 		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
2040 		break;
2041 	case RTE_FC_NONE:
2042 		/* Disable Flow control
2043 		 * OPT_PAUSE not set
2044 		 * ASYM_PAUSE not set
2045 		 */
2046 		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
2047 		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
2048 		break;
2049 	default:
2050 		DPAA2_PMD_ERR("Incorrect Flow control flag (%d)",
2051 			      fc_conf->mode);
2052 		return -1;
2053 	}
2054 
2055 	ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
2056 	if (ret)
2057 		DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)",
2058 			      ret);
2059 
2060 	/* Enable link */
2061 	dpaa2_dev_set_link_up(dev);
2062 
2063 	return ret;
2064 }
2065 
2066 static int
2067 dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
2068 			  struct rte_eth_rss_conf *rss_conf)
2069 {
2070 	struct rte_eth_dev_data *data = dev->data;
2071 	struct dpaa2_dev_priv *priv = data->dev_private;
2072 	struct rte_eth_conf *eth_conf = &data->dev_conf;
2073 	int ret, tc_index;
2074 
2075 	PMD_INIT_FUNC_TRACE();
2076 
2077 	if (rss_conf->rss_hf) {
2078 		for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
2079 			ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf,
2080 				tc_index);
2081 			if (ret) {
2082 				DPAA2_PMD_ERR("Unable to set flow dist on tc%d",
2083 					tc_index);
2084 				return ret;
2085 			}
2086 		}
2087 	} else {
2088 		for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
2089 			ret = dpaa2_remove_flow_dist(dev, tc_index);
2090 			if (ret) {
2091 				DPAA2_PMD_ERR(
2092 					"Unable to remove flow dist on tc%d",
2093 					tc_index);
2094 				return ret;
2095 			}
2096 		}
2097 	}
2098 	eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
2099 	return 0;
2100 }
2101 
2102 static int
2103 dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
2104 			    struct rte_eth_rss_conf *rss_conf)
2105 {
2106 	struct rte_eth_dev_data *data = dev->data;
2107 	struct rte_eth_conf *eth_conf = &data->dev_conf;
2108 
2109 	/* dpaa2 does not support rss_key, so length should be 0*/
2110 	rss_conf->rss_key_len = 0;
2111 	rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
2112 	return 0;
2113 }
2114 
2115 int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
2116 		int eth_rx_queue_id,
2117 		struct dpaa2_dpcon_dev *dpcon,
2118 		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
2119 {
2120 	struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
2121 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
2122 	struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
2123 	uint8_t flow_id = dpaa2_ethq->flow_id;
2124 	struct dpni_queue cfg;
2125 	uint8_t options, priority;
2126 	int ret;
2127 
2128 	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
2129 		dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
2130 	else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC)
2131 		dpaa2_ethq->cb = dpaa2_dev_process_atomic_event;
2132 	else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED)
2133 		dpaa2_ethq->cb = dpaa2_dev_process_ordered_event;
2134 	else
2135 		return -EINVAL;
2136 
2137 	priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / queue_conf->ev.priority) *
2138 		   (dpcon->num_priorities - 1);
2139 
2140 	memset(&cfg, 0, sizeof(struct dpni_queue));
2141 	options = DPNI_QUEUE_OPT_DEST;
2142 	cfg.destination.type = DPNI_DEST_DPCON;
2143 	cfg.destination.id = dpcon->dpcon_id;
2144 	cfg.destination.priority = priority;
2145 
2146 	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
2147 		options |= DPNI_QUEUE_OPT_HOLD_ACTIVE;
2148 		cfg.destination.hold_active = 1;
2149 	}
2150 
2151 	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED &&
2152 			!eth_priv->en_ordered) {
2153 		struct opr_cfg ocfg;
2154 
2155 		/* Restoration window size = 256 frames */
2156 		ocfg.oprrws = 3;
2157 		/* Restoration window size = 512 frames for LX2 */
2158 		if (dpaa2_svr_family == SVR_LX2160A)
2159 			ocfg.oprrws = 4;
2160 		/* Auto advance NESN window enabled */
2161 		ocfg.oa = 1;
2162 		/* Late arrival window size disabled */
2163 		ocfg.olws = 0;
2164 		/* ORL resource exhaustaion advance NESN disabled */
2165 		ocfg.oeane = 0;
2166 		/* Loose ordering enabled */
2167 		ocfg.oloe = 1;
2168 		eth_priv->en_loose_ordered = 1;
2169 		/* Strict ordering enabled if explicitly set */
2170 		if (getenv("DPAA2_STRICT_ORDERING_ENABLE")) {
2171 			ocfg.oloe = 0;
2172 			eth_priv->en_loose_ordered = 0;
2173 		}
2174 
2175 		ret = dpni_set_opr(dpni, CMD_PRI_LOW, eth_priv->token,
2176 				   dpaa2_ethq->tc_index, flow_id,
2177 				   OPR_OPT_CREATE, &ocfg);
2178 		if (ret) {
2179 			DPAA2_PMD_ERR("Error setting opr: ret: %d\n", ret);
2180 			return ret;
2181 		}
2182 
2183 		eth_priv->en_ordered = 1;
2184 	}
2185 
2186 	options |= DPNI_QUEUE_OPT_USER_CTX;
2187 	cfg.user_context = (size_t)(dpaa2_ethq);
2188 
2189 	ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
2190 			     dpaa2_ethq->tc_index, flow_id, options, &cfg);
2191 	if (ret) {
2192 		DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
2193 		return ret;
2194 	}
2195 
2196 	memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event));
2197 
2198 	return 0;
2199 }
2200 
2201 int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
2202 		int eth_rx_queue_id)
2203 {
2204 	struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
2205 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
2206 	struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
2207 	uint8_t flow_id = dpaa2_ethq->flow_id;
2208 	struct dpni_queue cfg;
2209 	uint8_t options;
2210 	int ret;
2211 
2212 	memset(&cfg, 0, sizeof(struct dpni_queue));
2213 	options = DPNI_QUEUE_OPT_DEST;
2214 	cfg.destination.type = DPNI_DEST_NONE;
2215 
2216 	ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
2217 			     dpaa2_ethq->tc_index, flow_id, options, &cfg);
2218 	if (ret)
2219 		DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
2220 
2221 	return ret;
2222 }
2223 
2224 static inline int
2225 dpaa2_dev_verify_filter_ops(enum rte_filter_op filter_op)
2226 {
2227 	unsigned int i;
2228 
2229 	for (i = 0; i < RTE_DIM(dpaa2_supported_filter_ops); i++) {
2230 		if (dpaa2_supported_filter_ops[i] == filter_op)
2231 			return 0;
2232 	}
2233 	return -ENOTSUP;
2234 }
2235 
2236 static int
2237 dpaa2_dev_flow_ctrl(struct rte_eth_dev *dev,
2238 		    enum rte_filter_type filter_type,
2239 				 enum rte_filter_op filter_op,
2240 				 void *arg)
2241 {
2242 	int ret = 0;
2243 
2244 	if (!dev)
2245 		return -ENODEV;
2246 
2247 	switch (filter_type) {
2248 	case RTE_ETH_FILTER_GENERIC:
2249 		if (dpaa2_dev_verify_filter_ops(filter_op) < 0) {
2250 			ret = -ENOTSUP;
2251 			break;
2252 		}
2253 		*(const void **)arg = &dpaa2_flow_ops;
2254 		dpaa2_filter_type |= filter_type;
2255 		break;
2256 	default:
2257 		RTE_LOG(ERR, PMD, "Filter type (%d) not supported",
2258 			filter_type);
2259 		ret = -ENOTSUP;
2260 		break;
2261 	}
2262 	return ret;
2263 }
2264 
2265 static void
2266 dpaa2_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2267 	struct rte_eth_rxq_info *qinfo)
2268 {
2269 	struct dpaa2_queue *rxq;
2270 
2271 	rxq = (struct dpaa2_queue *)dev->data->rx_queues[queue_id];
2272 
2273 	qinfo->mp = rxq->mb_pool;
2274 	qinfo->scattered_rx = dev->data->scattered_rx;
2275 	qinfo->nb_desc = rxq->nb_desc;
2276 
2277 	qinfo->conf.rx_free_thresh = 1;
2278 	qinfo->conf.rx_drop_en = 1;
2279 	qinfo->conf.rx_deferred_start = 0;
2280 	qinfo->conf.offloads = rxq->offloads;
2281 }
2282 
2283 static void
2284 dpaa2_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2285 	struct rte_eth_txq_info *qinfo)
2286 {
2287 	struct dpaa2_queue *txq;
2288 
2289 	txq = dev->data->tx_queues[queue_id];
2290 
2291 	qinfo->nb_desc = txq->nb_desc;
2292 	qinfo->conf.tx_thresh.pthresh = 0;
2293 	qinfo->conf.tx_thresh.hthresh = 0;
2294 	qinfo->conf.tx_thresh.wthresh = 0;
2295 
2296 	qinfo->conf.tx_free_thresh = 0;
2297 	qinfo->conf.tx_rs_thresh = 0;
2298 	qinfo->conf.offloads = txq->offloads;
2299 	qinfo->conf.tx_deferred_start = 0;
2300 }
2301 
2302 static struct eth_dev_ops dpaa2_ethdev_ops = {
2303 	.dev_configure	  = dpaa2_eth_dev_configure,
2304 	.dev_start	      = dpaa2_dev_start,
2305 	.dev_stop	      = dpaa2_dev_stop,
2306 	.dev_close	      = dpaa2_dev_close,
2307 	.promiscuous_enable   = dpaa2_dev_promiscuous_enable,
2308 	.promiscuous_disable  = dpaa2_dev_promiscuous_disable,
2309 	.allmulticast_enable  = dpaa2_dev_allmulticast_enable,
2310 	.allmulticast_disable = dpaa2_dev_allmulticast_disable,
2311 	.dev_set_link_up      = dpaa2_dev_set_link_up,
2312 	.dev_set_link_down    = dpaa2_dev_set_link_down,
2313 	.link_update	   = dpaa2_dev_link_update,
2314 	.stats_get	       = dpaa2_dev_stats_get,
2315 	.xstats_get	       = dpaa2_dev_xstats_get,
2316 	.xstats_get_by_id     = dpaa2_xstats_get_by_id,
2317 	.xstats_get_names_by_id = dpaa2_xstats_get_names_by_id,
2318 	.xstats_get_names      = dpaa2_xstats_get_names,
2319 	.stats_reset	   = dpaa2_dev_stats_reset,
2320 	.xstats_reset	      = dpaa2_dev_stats_reset,
2321 	.fw_version_get	   = dpaa2_fw_version_get,
2322 	.dev_infos_get	   = dpaa2_dev_info_get,
2323 	.dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
2324 	.mtu_set           = dpaa2_dev_mtu_set,
2325 	.vlan_filter_set      = dpaa2_vlan_filter_set,
2326 	.vlan_offload_set     = dpaa2_vlan_offload_set,
2327 	.vlan_tpid_set	      = dpaa2_vlan_tpid_set,
2328 	.rx_queue_setup    = dpaa2_dev_rx_queue_setup,
2329 	.rx_queue_release  = dpaa2_dev_rx_queue_release,
2330 	.tx_queue_setup    = dpaa2_dev_tx_queue_setup,
2331 	.tx_queue_release  = dpaa2_dev_tx_queue_release,
2332 	.rx_burst_mode_get = dpaa2_dev_rx_burst_mode_get,
2333 	.tx_burst_mode_get = dpaa2_dev_tx_burst_mode_get,
2334 	.rx_queue_count       = dpaa2_dev_rx_queue_count,
2335 	.flow_ctrl_get	      = dpaa2_flow_ctrl_get,
2336 	.flow_ctrl_set	      = dpaa2_flow_ctrl_set,
2337 	.mac_addr_add         = dpaa2_dev_add_mac_addr,
2338 	.mac_addr_remove      = dpaa2_dev_remove_mac_addr,
2339 	.mac_addr_set         = dpaa2_dev_set_mac_addr,
2340 	.rss_hash_update      = dpaa2_dev_rss_hash_update,
2341 	.rss_hash_conf_get    = dpaa2_dev_rss_hash_conf_get,
2342 	.filter_ctrl          = dpaa2_dev_flow_ctrl,
2343 	.rxq_info_get	      = dpaa2_rxq_info_get,
2344 	.txq_info_get	      = dpaa2_txq_info_get,
2345 #if defined(RTE_LIBRTE_IEEE1588)
2346 	.timesync_enable      = dpaa2_timesync_enable,
2347 	.timesync_disable     = dpaa2_timesync_disable,
2348 	.timesync_read_time   = dpaa2_timesync_read_time,
2349 	.timesync_write_time  = dpaa2_timesync_write_time,
2350 	.timesync_adjust_time = dpaa2_timesync_adjust_time,
2351 	.timesync_read_rx_timestamp = dpaa2_timesync_read_rx_timestamp,
2352 	.timesync_read_tx_timestamp = dpaa2_timesync_read_tx_timestamp,
2353 #endif
2354 };
2355 
2356 /* Populate the mac address from physically available (u-boot/firmware) and/or
2357  * one set by higher layers like MC (restool) etc.
2358  * Returns the table of MAC entries (multiple entries)
2359  */
2360 static int
2361 populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv,
2362 		  struct rte_ether_addr *mac_entry)
2363 {
2364 	int ret;
2365 	struct rte_ether_addr phy_mac, prime_mac;
2366 
2367 	memset(&phy_mac, 0, sizeof(struct rte_ether_addr));
2368 	memset(&prime_mac, 0, sizeof(struct rte_ether_addr));
2369 
2370 	/* Get the physical device MAC address */
2371 	ret = dpni_get_port_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
2372 				     phy_mac.addr_bytes);
2373 	if (ret) {
2374 		DPAA2_PMD_ERR("DPNI get physical port MAC failed: %d", ret);
2375 		goto cleanup;
2376 	}
2377 
2378 	ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
2379 					prime_mac.addr_bytes);
2380 	if (ret) {
2381 		DPAA2_PMD_ERR("DPNI get Prime port MAC failed: %d", ret);
2382 		goto cleanup;
2383 	}
2384 
2385 	/* Now that both MAC have been obtained, do:
2386 	 *  if not_empty_mac(phy) && phy != Prime, overwrite prime with Phy
2387 	 *     and return phy
2388 	 *  If empty_mac(phy), return prime.
2389 	 *  if both are empty, create random MAC, set as prime and return
2390 	 */
2391 	if (!rte_is_zero_ether_addr(&phy_mac)) {
2392 		/* If the addresses are not same, overwrite prime */
2393 		if (!rte_is_same_ether_addr(&phy_mac, &prime_mac)) {
2394 			ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
2395 							priv->token,
2396 							phy_mac.addr_bytes);
2397 			if (ret) {
2398 				DPAA2_PMD_ERR("Unable to set MAC Address: %d",
2399 					      ret);
2400 				goto cleanup;
2401 			}
2402 			memcpy(&prime_mac, &phy_mac,
2403 				sizeof(struct rte_ether_addr));
2404 		}
2405 	} else if (rte_is_zero_ether_addr(&prime_mac)) {
2406 		/* In case phys and prime, both are zero, create random MAC */
2407 		rte_eth_random_addr(prime_mac.addr_bytes);
2408 		ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
2409 						priv->token,
2410 						prime_mac.addr_bytes);
2411 		if (ret) {
2412 			DPAA2_PMD_ERR("Unable to set MAC Address: %d", ret);
2413 			goto cleanup;
2414 		}
2415 	}
2416 
2417 	/* prime_mac the final MAC address */
2418 	memcpy(mac_entry, &prime_mac, sizeof(struct rte_ether_addr));
2419 	return 0;
2420 
2421 cleanup:
2422 	return -1;
2423 }
2424 
2425 static int
2426 check_devargs_handler(__rte_unused const char *key, const char *value,
2427 		      __rte_unused void *opaque)
2428 {
2429 	if (strcmp(value, "1"))
2430 		return -1;
2431 
2432 	return 0;
2433 }
2434 
2435 static int
2436 dpaa2_get_devargs(struct rte_devargs *devargs, const char *key)
2437 {
2438 	struct rte_kvargs *kvlist;
2439 
2440 	if (!devargs)
2441 		return 0;
2442 
2443 	kvlist = rte_kvargs_parse(devargs->args, NULL);
2444 	if (!kvlist)
2445 		return 0;
2446 
2447 	if (!rte_kvargs_count(kvlist, key)) {
2448 		rte_kvargs_free(kvlist);
2449 		return 0;
2450 	}
2451 
2452 	if (rte_kvargs_process(kvlist, key,
2453 			       check_devargs_handler, NULL) < 0) {
2454 		rte_kvargs_free(kvlist);
2455 		return 0;
2456 	}
2457 	rte_kvargs_free(kvlist);
2458 
2459 	return 1;
2460 }
2461 
2462 static int
2463 dpaa2_dev_init(struct rte_eth_dev *eth_dev)
2464 {
2465 	struct rte_device *dev = eth_dev->device;
2466 	struct rte_dpaa2_device *dpaa2_dev;
2467 	struct fsl_mc_io *dpni_dev;
2468 	struct dpni_attr attr;
2469 	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
2470 	struct dpni_buffer_layout layout;
2471 	int ret, hw_id, i;
2472 
2473 	PMD_INIT_FUNC_TRACE();
2474 
2475 	dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
2476 	if (!dpni_dev) {
2477 		DPAA2_PMD_ERR("Memory allocation failed for dpni device");
2478 		return -1;
2479 	}
2480 	dpni_dev->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
2481 	eth_dev->process_private = (void *)dpni_dev;
2482 
2483 	/* For secondary processes, the primary has done all the work */
2484 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2485 		/* In case of secondary, only burst and ops API need to be
2486 		 * plugged.
2487 		 */
2488 		eth_dev->dev_ops = &dpaa2_ethdev_ops;
2489 		if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE))
2490 			eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
2491 		else if (dpaa2_get_devargs(dev->devargs,
2492 					DRIVER_NO_PREFETCH_MODE))
2493 			eth_dev->rx_pkt_burst = dpaa2_dev_rx;
2494 		else
2495 			eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
2496 		eth_dev->tx_pkt_burst = dpaa2_dev_tx;
2497 		return 0;
2498 	}
2499 
2500 	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
2501 
2502 	hw_id = dpaa2_dev->object_id;
2503 	ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
2504 	if (ret) {
2505 		DPAA2_PMD_ERR(
2506 			     "Failure in opening dpni@%d with err code %d",
2507 			     hw_id, ret);
2508 		rte_free(dpni_dev);
2509 		return -1;
2510 	}
2511 
2512 	/* Clean the device first */
2513 	ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
2514 	if (ret) {
2515 		DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d",
2516 			      hw_id, ret);
2517 		goto init_err;
2518 	}
2519 
2520 	ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
2521 	if (ret) {
2522 		DPAA2_PMD_ERR(
2523 			     "Failure in get dpni@%d attribute, err code %d",
2524 			     hw_id, ret);
2525 		goto init_err;
2526 	}
2527 
2528 	priv->num_rx_tc = attr.num_rx_tcs;
2529 	priv->qos_entries = attr.qos_entries;
2530 	priv->fs_entries = attr.fs_entries;
2531 	priv->dist_queues = attr.num_queues;
2532 
2533 	/* only if the custom CG is enabled */
2534 	if (attr.options & DPNI_OPT_CUSTOM_CG)
2535 		priv->max_cgs = attr.num_cgs;
2536 	else
2537 		priv->max_cgs = 0;
2538 
2539 	for (i = 0; i < priv->max_cgs; i++)
2540 		priv->cgid_in_use[i] = 0;
2541 
2542 	for (i = 0; i < attr.num_rx_tcs; i++)
2543 		priv->nb_rx_queues += attr.num_queues;
2544 
2545 	/* Using number of TX queues as number of TX TCs */
2546 	priv->nb_tx_queues = attr.num_tx_tcs;
2547 
2548 	DPAA2_PMD_DEBUG("RX-TC= %d, rx_queues= %d, tx_queues=%d, max_cgs=%d",
2549 			priv->num_rx_tc, priv->nb_rx_queues,
2550 			priv->nb_tx_queues, priv->max_cgs);
2551 
2552 	priv->hw = dpni_dev;
2553 	priv->hw_id = hw_id;
2554 	priv->options = attr.options;
2555 	priv->max_mac_filters = attr.mac_filter_entries;
2556 	priv->max_vlan_filters = attr.vlan_filter_entries;
2557 	priv->flags = 0;
2558 #if defined(RTE_LIBRTE_IEEE1588)
2559 	priv->tx_conf_en = 1;
2560 #else
2561 	priv->tx_conf_en = 0;
2562 #endif
2563 
2564 	/* Allocate memory for hardware structure for queues */
2565 	ret = dpaa2_alloc_rx_tx_queues(eth_dev);
2566 	if (ret) {
2567 		DPAA2_PMD_ERR("Queue allocation Failed");
2568 		goto init_err;
2569 	}
2570 
2571 	/* Allocate memory for storing MAC addresses.
2572 	 * Table of mac_filter_entries size is allocated so that RTE ether lib
2573 	 * can add MAC entries when rte_eth_dev_mac_addr_add is called.
2574 	 */
2575 	eth_dev->data->mac_addrs = rte_zmalloc("dpni",
2576 		RTE_ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
2577 	if (eth_dev->data->mac_addrs == NULL) {
2578 		DPAA2_PMD_ERR(
2579 		   "Failed to allocate %d bytes needed to store MAC addresses",
2580 		   RTE_ETHER_ADDR_LEN * attr.mac_filter_entries);
2581 		ret = -ENOMEM;
2582 		goto init_err;
2583 	}
2584 
2585 	ret = populate_mac_addr(dpni_dev, priv, &eth_dev->data->mac_addrs[0]);
2586 	if (ret) {
2587 		DPAA2_PMD_ERR("Unable to fetch MAC Address for device");
2588 		rte_free(eth_dev->data->mac_addrs);
2589 		eth_dev->data->mac_addrs = NULL;
2590 		goto init_err;
2591 	}
2592 
2593 	/* ... tx buffer layout ... */
2594 	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
2595 	if (priv->tx_conf_en) {
2596 		layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
2597 				 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2598 		layout.pass_timestamp = true;
2599 	} else {
2600 		layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
2601 	}
2602 	layout.pass_frame_status = 1;
2603 	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
2604 				     DPNI_QUEUE_TX, &layout);
2605 	if (ret) {
2606 		DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret);
2607 		goto init_err;
2608 	}
2609 
2610 	/* ... tx-conf and error buffer layout ... */
2611 	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
2612 	if (priv->tx_conf_en) {
2613 		layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
2614 				 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2615 		layout.pass_timestamp = true;
2616 	} else {
2617 		layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
2618 	}
2619 	layout.pass_frame_status = 1;
2620 	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
2621 				     DPNI_QUEUE_TX_CONFIRM, &layout);
2622 	if (ret) {
2623 		DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout",
2624 			     ret);
2625 		goto init_err;
2626 	}
2627 
2628 	eth_dev->dev_ops = &dpaa2_ethdev_ops;
2629 
2630 	if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) {
2631 		eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
2632 		DPAA2_PMD_INFO("Loopback mode");
2633 	} else if (dpaa2_get_devargs(dev->devargs, DRIVER_NO_PREFETCH_MODE)) {
2634 		eth_dev->rx_pkt_burst = dpaa2_dev_rx;
2635 		DPAA2_PMD_INFO("No Prefetch mode");
2636 	} else {
2637 		eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
2638 	}
2639 	eth_dev->tx_pkt_burst = dpaa2_dev_tx;
2640 
2641 	/*Init fields w.r.t. classficaition*/
2642 	memset(&priv->extract.qos_key_extract, 0,
2643 		sizeof(struct dpaa2_key_extract));
2644 	priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64);
2645 	if (!priv->extract.qos_extract_param) {
2646 		DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow "
2647 			    " classificaiton ", ret);
2648 		goto init_err;
2649 	}
2650 	priv->extract.qos_key_extract.key_info.ipv4_src_offset =
2651 		IP_ADDRESS_OFFSET_INVALID;
2652 	priv->extract.qos_key_extract.key_info.ipv4_dst_offset =
2653 		IP_ADDRESS_OFFSET_INVALID;
2654 	priv->extract.qos_key_extract.key_info.ipv6_src_offset =
2655 		IP_ADDRESS_OFFSET_INVALID;
2656 	priv->extract.qos_key_extract.key_info.ipv6_dst_offset =
2657 		IP_ADDRESS_OFFSET_INVALID;
2658 
2659 	for (i = 0; i < MAX_TCS; i++) {
2660 		memset(&priv->extract.tc_key_extract[i], 0,
2661 			sizeof(struct dpaa2_key_extract));
2662 		priv->extract.tc_extract_param[i] =
2663 			(size_t)rte_malloc(NULL, 256, 64);
2664 		if (!priv->extract.tc_extract_param[i]) {
2665 			DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow classificaiton",
2666 				     ret);
2667 			goto init_err;
2668 		}
2669 		priv->extract.tc_key_extract[i].key_info.ipv4_src_offset =
2670 			IP_ADDRESS_OFFSET_INVALID;
2671 		priv->extract.tc_key_extract[i].key_info.ipv4_dst_offset =
2672 			IP_ADDRESS_OFFSET_INVALID;
2673 		priv->extract.tc_key_extract[i].key_info.ipv6_src_offset =
2674 			IP_ADDRESS_OFFSET_INVALID;
2675 		priv->extract.tc_key_extract[i].key_info.ipv6_dst_offset =
2676 			IP_ADDRESS_OFFSET_INVALID;
2677 	}
2678 
2679 	ret = dpni_set_max_frame_length(dpni_dev, CMD_PRI_LOW, priv->token,
2680 					RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN
2681 					+ VLAN_TAG_SIZE);
2682 	if (ret) {
2683 		DPAA2_PMD_ERR("Unable to set mtu. check config");
2684 		goto init_err;
2685 	}
2686 
2687 	/*TODO To enable soft parser support DPAA2 driver needs to integrate
2688 	 * with external entity to receive byte code for software sequence
2689 	 * and same will be offload to the H/W using MC interface.
2690 	 * Currently it is assumed that DPAA2 driver has byte code by some
2691 	 * mean and same if offloaded to H/W.
2692 	 */
2693 	if (getenv("DPAA2_ENABLE_SOFT_PARSER")) {
2694 		WRIOP_SS_INITIALIZER(priv);
2695 		ret = dpaa2_eth_load_wriop_soft_parser(priv, DPNI_SS_INGRESS);
2696 		if (ret < 0) {
2697 			DPAA2_PMD_ERR(" Error(%d) in loading softparser\n",
2698 				      ret);
2699 			return ret;
2700 		}
2701 
2702 		ret = dpaa2_eth_enable_wriop_soft_parser(priv,
2703 							 DPNI_SS_INGRESS);
2704 		if (ret < 0) {
2705 			DPAA2_PMD_ERR(" Error(%d) in enabling softparser\n",
2706 				      ret);
2707 			return ret;
2708 		}
2709 	}
2710 	RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
2711 	return 0;
2712 init_err:
2713 	dpaa2_dev_uninit(eth_dev);
2714 	return ret;
2715 }
2716 
2717 static int
2718 dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
2719 {
2720 	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
2721 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_dev->process_private;
2722 	int i, ret;
2723 
2724 	PMD_INIT_FUNC_TRACE();
2725 
2726 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2727 		return 0;
2728 
2729 	if (!dpni) {
2730 		DPAA2_PMD_WARN("Already closed or not started");
2731 		return -1;
2732 	}
2733 
2734 	dpaa2_dev_close(eth_dev);
2735 
2736 	dpaa2_free_rx_tx_queues(eth_dev);
2737 
2738 	/* Close the device at underlying layer*/
2739 	ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
2740 	if (ret) {
2741 		DPAA2_PMD_ERR(
2742 			     "Failure closing dpni device with err code %d",
2743 			     ret);
2744 	}
2745 
2746 	/* Free the allocated memory for ethernet private data and dpni*/
2747 	priv->hw = NULL;
2748 	eth_dev->process_private = NULL;
2749 	rte_free(dpni);
2750 
2751 	for (i = 0; i < MAX_TCS; i++)
2752 		rte_free((void *)(size_t)priv->extract.tc_extract_param[i]);
2753 
2754 	if (priv->extract.qos_extract_param)
2755 		rte_free((void *)(size_t)priv->extract.qos_extract_param);
2756 
2757 	eth_dev->dev_ops = NULL;
2758 	eth_dev->rx_pkt_burst = NULL;
2759 	eth_dev->tx_pkt_burst = NULL;
2760 
2761 	DPAA2_PMD_INFO("%s: netdev deleted", eth_dev->data->name);
2762 	return 0;
2763 }
2764 
2765 static int
2766 rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
2767 		struct rte_dpaa2_device *dpaa2_dev)
2768 {
2769 	struct rte_eth_dev *eth_dev;
2770 	struct dpaa2_dev_priv *dev_priv;
2771 	int diag;
2772 
2773 	if ((DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) >
2774 		RTE_PKTMBUF_HEADROOM) {
2775 		DPAA2_PMD_ERR(
2776 		"RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA2 Annotation req(%d)",
2777 		RTE_PKTMBUF_HEADROOM,
2778 		DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE);
2779 
2780 		return -1;
2781 	}
2782 
2783 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2784 		eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
2785 		if (!eth_dev)
2786 			return -ENODEV;
2787 		dev_priv = rte_zmalloc("ethdev private structure",
2788 				       sizeof(struct dpaa2_dev_priv),
2789 				       RTE_CACHE_LINE_SIZE);
2790 		if (dev_priv == NULL) {
2791 			DPAA2_PMD_CRIT(
2792 				"Unable to allocate memory for private data");
2793 			rte_eth_dev_release_port(eth_dev);
2794 			return -ENOMEM;
2795 		}
2796 		eth_dev->data->dev_private = (void *)dev_priv;
2797 		/* Store a pointer to eth_dev in dev_private */
2798 		dev_priv->eth_dev = eth_dev;
2799 		dev_priv->tx_conf_en = 0;
2800 	} else {
2801 		eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
2802 		if (!eth_dev) {
2803 			DPAA2_PMD_DEBUG("returning enodev");
2804 			return -ENODEV;
2805 		}
2806 	}
2807 
2808 	eth_dev->device = &dpaa2_dev->device;
2809 
2810 	dpaa2_dev->eth_dev = eth_dev;
2811 	eth_dev->data->rx_mbuf_alloc_failed = 0;
2812 
2813 	if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC)
2814 		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
2815 
2816 	/* Invoke PMD device initialization function */
2817 	diag = dpaa2_dev_init(eth_dev);
2818 	if (diag == 0) {
2819 		rte_eth_dev_probing_finish(eth_dev);
2820 		return 0;
2821 	}
2822 
2823 	rte_eth_dev_release_port(eth_dev);
2824 	return diag;
2825 }
2826 
2827 static int
2828 rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
2829 {
2830 	struct rte_eth_dev *eth_dev;
2831 
2832 	eth_dev = dpaa2_dev->eth_dev;
2833 	dpaa2_dev_uninit(eth_dev);
2834 
2835 	rte_eth_dev_release_port(eth_dev);
2836 
2837 	return 0;
2838 }
2839 
2840 static struct rte_dpaa2_driver rte_dpaa2_pmd = {
2841 	.drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA,
2842 	.drv_type = DPAA2_ETH,
2843 	.probe = rte_dpaa2_probe,
2844 	.remove = rte_dpaa2_remove,
2845 };
2846 
2847 RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);
2848 RTE_PMD_REGISTER_PARAM_STRING(net_dpaa2,
2849 		DRIVER_LOOPBACK_MODE "=<int> "
2850 		DRIVER_NO_PREFETCH_MODE "=<int>");
2851 RTE_LOG_REGISTER(dpaa2_logtype_pmd, pmd.net.dpaa2, NOTICE);
2852