xref: /dpdk/drivers/net/dpaa2/dpaa2_ethdev.c (revision bc700b6767278e49c4ea9c08bb43c0fd9ca3e70d)
1 /* * SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2020 NXP
5  *
6  */
7 
8 #include <time.h>
9 #include <net/if.h>
10 
11 #include <rte_mbuf.h>
12 #include <rte_ethdev_driver.h>
13 #include <rte_malloc.h>
14 #include <rte_memcpy.h>
15 #include <rte_string_fns.h>
16 #include <rte_cycles.h>
17 #include <rte_kvargs.h>
18 #include <rte_dev.h>
19 #include <rte_fslmc.h>
20 #include <rte_flow_driver.h>
21 
22 #include "dpaa2_pmd_logs.h"
23 #include <fslmc_vfio.h>
24 #include <dpaa2_hw_pvt.h>
25 #include <dpaa2_hw_mempool.h>
26 #include <dpaa2_hw_dpio.h>
27 #include <mc/fsl_dpmng.h>
28 #include "dpaa2_ethdev.h"
29 #include "dpaa2_sparser.h"
30 #include <fsl_qbman_debug.h>
31 
32 #define DRIVER_LOOPBACK_MODE "drv_loopback"
33 #define DRIVER_NO_PREFETCH_MODE "drv_no_prefetch"
34 
35 /* Supported Rx offloads */
36 static uint64_t dev_rx_offloads_sup =
37 		DEV_RX_OFFLOAD_CHECKSUM |
38 		DEV_RX_OFFLOAD_SCTP_CKSUM |
39 		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
40 		DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
41 		DEV_RX_OFFLOAD_VLAN_STRIP |
42 		DEV_RX_OFFLOAD_VLAN_FILTER |
43 		DEV_RX_OFFLOAD_JUMBO_FRAME |
44 		DEV_RX_OFFLOAD_TIMESTAMP;
45 
46 /* Rx offloads which cannot be disabled */
47 static uint64_t dev_rx_offloads_nodis =
48 		DEV_RX_OFFLOAD_RSS_HASH |
49 		DEV_RX_OFFLOAD_SCATTER;
50 
51 /* Supported Tx offloads */
52 static uint64_t dev_tx_offloads_sup =
53 		DEV_TX_OFFLOAD_VLAN_INSERT |
54 		DEV_TX_OFFLOAD_IPV4_CKSUM |
55 		DEV_TX_OFFLOAD_UDP_CKSUM |
56 		DEV_TX_OFFLOAD_TCP_CKSUM |
57 		DEV_TX_OFFLOAD_SCTP_CKSUM |
58 		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
59 		DEV_TX_OFFLOAD_MT_LOCKFREE |
60 		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
61 
62 /* Tx offloads which cannot be disabled */
63 static uint64_t dev_tx_offloads_nodis =
64 		DEV_TX_OFFLOAD_MULTI_SEGS;
65 
66 /* enable timestamp in mbuf */
67 bool dpaa2_enable_ts[RTE_MAX_ETHPORTS];
68 
69 struct rte_dpaa2_xstats_name_off {
70 	char name[RTE_ETH_XSTATS_NAME_SIZE];
71 	uint8_t page_id; /* dpni statistics page id */
72 	uint8_t stats_id; /* stats id in the given page */
73 };
74 
75 static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
76 	{"ingress_multicast_frames", 0, 2},
77 	{"ingress_multicast_bytes", 0, 3},
78 	{"ingress_broadcast_frames", 0, 4},
79 	{"ingress_broadcast_bytes", 0, 5},
80 	{"egress_multicast_frames", 1, 2},
81 	{"egress_multicast_bytes", 1, 3},
82 	{"egress_broadcast_frames", 1, 4},
83 	{"egress_broadcast_bytes", 1, 5},
84 	{"ingress_filtered_frames", 2, 0},
85 	{"ingress_discarded_frames", 2, 1},
86 	{"ingress_nobuffer_discards", 2, 2},
87 	{"egress_discarded_frames", 2, 3},
88 	{"egress_confirmed_frames", 2, 4},
89 	{"cgr_reject_frames", 4, 0},
90 	{"cgr_reject_bytes", 4, 1},
91 };
92 
93 static const enum rte_filter_op dpaa2_supported_filter_ops[] = {
94 	RTE_ETH_FILTER_ADD,
95 	RTE_ETH_FILTER_DELETE,
96 	RTE_ETH_FILTER_UPDATE,
97 	RTE_ETH_FILTER_FLUSH,
98 	RTE_ETH_FILTER_GET
99 };
100 
101 static struct rte_dpaa2_driver rte_dpaa2_pmd;
102 static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);
103 static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
104 				 int wait_to_complete);
105 static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
106 static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
107 static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
108 
109 static int
110 dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
111 {
112 	int ret;
113 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
114 	struct fsl_mc_io *dpni = dev->process_private;
115 
116 	PMD_INIT_FUNC_TRACE();
117 
118 	if (dpni == NULL) {
119 		DPAA2_PMD_ERR("dpni is NULL");
120 		return -1;
121 	}
122 
123 	if (on)
124 		ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW, priv->token,
125 				       vlan_id, 0, 0, 0);
126 	else
127 		ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
128 					  priv->token, vlan_id);
129 
130 	if (ret < 0)
131 		DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid =%d",
132 			      ret, vlan_id, priv->hw_id);
133 
134 	return ret;
135 }
136 
137 static int
138 dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
139 {
140 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
141 	struct fsl_mc_io *dpni = dev->process_private;
142 	int ret = 0;
143 
144 	PMD_INIT_FUNC_TRACE();
145 
146 	if (mask & ETH_VLAN_FILTER_MASK) {
147 		/* VLAN Filter not avaialble */
148 		if (!priv->max_vlan_filters) {
149 			DPAA2_PMD_INFO("VLAN filter not available");
150 			return -ENOTSUP;
151 		}
152 
153 		if (dev->data->dev_conf.rxmode.offloads &
154 			DEV_RX_OFFLOAD_VLAN_FILTER)
155 			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
156 						      priv->token, true);
157 		else
158 			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
159 						      priv->token, false);
160 		if (ret < 0)
161 			DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret);
162 	}
163 
164 	return ret;
165 }
166 
167 static int
168 dpaa2_vlan_tpid_set(struct rte_eth_dev *dev,
169 		      enum rte_vlan_type vlan_type __rte_unused,
170 		      uint16_t tpid)
171 {
172 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
173 	struct fsl_mc_io *dpni = dev->process_private;
174 	int ret = -ENOTSUP;
175 
176 	PMD_INIT_FUNC_TRACE();
177 
178 	/* nothing to be done for standard vlan tpids */
179 	if (tpid == 0x8100 || tpid == 0x88A8)
180 		return 0;
181 
182 	ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
183 				   priv->token, tpid);
184 	if (ret < 0)
185 		DPAA2_PMD_INFO("Unable to set vlan tpid = %d", ret);
186 	/* if already configured tpids, remove them first */
187 	if (ret == -EBUSY) {
188 		struct dpni_custom_tpid_cfg tpid_list = {0};
189 
190 		ret = dpni_get_custom_tpid(dpni, CMD_PRI_LOW,
191 				   priv->token, &tpid_list);
192 		if (ret < 0)
193 			goto fail;
194 		ret = dpni_remove_custom_tpid(dpni, CMD_PRI_LOW,
195 				   priv->token, tpid_list.tpid1);
196 		if (ret < 0)
197 			goto fail;
198 		ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
199 					   priv->token, tpid);
200 	}
201 fail:
202 	return ret;
203 }
204 
205 static int
206 dpaa2_fw_version_get(struct rte_eth_dev *dev,
207 		     char *fw_version,
208 		     size_t fw_size)
209 {
210 	int ret;
211 	struct fsl_mc_io *dpni = dev->process_private;
212 	struct mc_soc_version mc_plat_info = {0};
213 	struct mc_version mc_ver_info = {0};
214 
215 	PMD_INIT_FUNC_TRACE();
216 
217 	if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
218 		DPAA2_PMD_WARN("\tmc_get_soc_version failed");
219 
220 	if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
221 		DPAA2_PMD_WARN("\tmc_get_version failed");
222 
223 	ret = snprintf(fw_version, fw_size,
224 		       "%x-%d.%d.%d",
225 		       mc_plat_info.svr,
226 		       mc_ver_info.major,
227 		       mc_ver_info.minor,
228 		       mc_ver_info.revision);
229 
230 	ret += 1; /* add the size of '\0' */
231 	if (fw_size < (uint32_t)ret)
232 		return ret;
233 	else
234 		return 0;
235 }
236 
237 static int
238 dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
239 {
240 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
241 
242 	PMD_INIT_FUNC_TRACE();
243 
244 	dev_info->if_index = priv->hw_id;
245 
246 	dev_info->max_mac_addrs = priv->max_mac_filters;
247 	dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
248 	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
249 	dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
250 	dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
251 	dev_info->rx_offload_capa = dev_rx_offloads_sup |
252 					dev_rx_offloads_nodis;
253 	dev_info->tx_offload_capa = dev_tx_offloads_sup |
254 					dev_tx_offloads_nodis;
255 	dev_info->speed_capa = ETH_LINK_SPEED_1G |
256 			ETH_LINK_SPEED_2_5G |
257 			ETH_LINK_SPEED_10G;
258 
259 	dev_info->max_hash_mac_addrs = 0;
260 	dev_info->max_vfs = 0;
261 	dev_info->max_vmdq_pools = ETH_16_POOLS;
262 	dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
263 
264 	dev_info->default_rxportconf.burst_size = dpaa2_dqrr_size;
265 	/* same is rx size for best perf */
266 	dev_info->default_txportconf.burst_size = dpaa2_dqrr_size;
267 
268 	dev_info->default_rxportconf.nb_queues = 1;
269 	dev_info->default_txportconf.nb_queues = 1;
270 	dev_info->default_txportconf.ring_size = CONG_ENTER_TX_THRESHOLD;
271 	dev_info->default_rxportconf.ring_size = DPAA2_RX_DEFAULT_NBDESC;
272 
273 	if (dpaa2_svr_family == SVR_LX2160A) {
274 		dev_info->speed_capa |= ETH_LINK_SPEED_25G |
275 				ETH_LINK_SPEED_40G |
276 				ETH_LINK_SPEED_50G |
277 				ETH_LINK_SPEED_100G;
278 	}
279 
280 	return 0;
281 }
282 
283 static int
284 dpaa2_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
285 			__rte_unused uint16_t queue_id,
286 			struct rte_eth_burst_mode *mode)
287 {
288 	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
289 	int ret = -EINVAL;
290 	unsigned int i;
291 	const struct burst_info {
292 		uint64_t flags;
293 		const char *output;
294 	} rx_offload_map[] = {
295 			{DEV_RX_OFFLOAD_CHECKSUM, " Checksum,"},
296 			{DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
297 			{DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
298 			{DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
299 			{DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
300 			{DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
301 			{DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
302 			{DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
303 			{DEV_RX_OFFLOAD_RSS_HASH, " RSS,"},
304 			{DEV_RX_OFFLOAD_SCATTER, " Scattered,"}
305 	};
306 
307 	/* Update Rx offload info */
308 	for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
309 		if (eth_conf->rxmode.offloads & rx_offload_map[i].flags) {
310 			snprintf(mode->info, sizeof(mode->info), "%s",
311 				rx_offload_map[i].output);
312 			ret = 0;
313 			break;
314 		}
315 	}
316 	return ret;
317 }
318 
319 static int
320 dpaa2_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
321 			__rte_unused uint16_t queue_id,
322 			struct rte_eth_burst_mode *mode)
323 {
324 	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
325 	int ret = -EINVAL;
326 	unsigned int i;
327 	const struct burst_info {
328 		uint64_t flags;
329 		const char *output;
330 	} tx_offload_map[] = {
331 			{DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
332 			{DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
333 			{DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
334 			{DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
335 			{DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
336 			{DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
337 			{DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
338 			{DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
339 			{DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
340 	};
341 
342 	/* Update Tx offload info */
343 	for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
344 		if (eth_conf->txmode.offloads & tx_offload_map[i].flags) {
345 			snprintf(mode->info, sizeof(mode->info), "%s",
346 				tx_offload_map[i].output);
347 			ret = 0;
348 			break;
349 		}
350 	}
351 	return ret;
352 }
353 
354 static int
355 dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
356 {
357 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
358 	uint16_t dist_idx;
359 	uint32_t vq_id;
360 	uint8_t num_rxqueue_per_tc;
361 	struct dpaa2_queue *mc_q, *mcq;
362 	uint32_t tot_queues;
363 	int i;
364 	struct dpaa2_queue *dpaa2_q;
365 
366 	PMD_INIT_FUNC_TRACE();
367 
368 	num_rxqueue_per_tc = (priv->nb_rx_queues / priv->num_rx_tc);
369 	if (priv->tx_conf_en)
370 		tot_queues = priv->nb_rx_queues + 2 * priv->nb_tx_queues;
371 	else
372 		tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
373 	mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
374 			  RTE_CACHE_LINE_SIZE);
375 	if (!mc_q) {
376 		DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues");
377 		return -1;
378 	}
379 
380 	for (i = 0; i < priv->nb_rx_queues; i++) {
381 		mc_q->eth_data = dev->data;
382 		priv->rx_vq[i] = mc_q++;
383 		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
384 		dpaa2_q->q_storage = rte_malloc("dq_storage",
385 					sizeof(struct queue_storage_info_t),
386 					RTE_CACHE_LINE_SIZE);
387 		if (!dpaa2_q->q_storage)
388 			goto fail;
389 
390 		memset(dpaa2_q->q_storage, 0,
391 		       sizeof(struct queue_storage_info_t));
392 		if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
393 			goto fail;
394 	}
395 
396 	for (i = 0; i < priv->nb_tx_queues; i++) {
397 		mc_q->eth_data = dev->data;
398 		mc_q->flow_id = 0xffff;
399 		priv->tx_vq[i] = mc_q++;
400 		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
401 		dpaa2_q->cscn = rte_malloc(NULL,
402 					   sizeof(struct qbman_result), 16);
403 		if (!dpaa2_q->cscn)
404 			goto fail_tx;
405 	}
406 
407 	if (priv->tx_conf_en) {
408 		/*Setup tx confirmation queues*/
409 		for (i = 0; i < priv->nb_tx_queues; i++) {
410 			mc_q->eth_data = dev->data;
411 			mc_q->tc_index = i;
412 			mc_q->flow_id = 0;
413 			priv->tx_conf_vq[i] = mc_q++;
414 			dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
415 			dpaa2_q->q_storage =
416 				rte_malloc("dq_storage",
417 					sizeof(struct queue_storage_info_t),
418 					RTE_CACHE_LINE_SIZE);
419 			if (!dpaa2_q->q_storage)
420 				goto fail_tx_conf;
421 
422 			memset(dpaa2_q->q_storage, 0,
423 			       sizeof(struct queue_storage_info_t));
424 			if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
425 				goto fail_tx_conf;
426 		}
427 	}
428 
429 	vq_id = 0;
430 	for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
431 		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
432 		mcq->tc_index = dist_idx / num_rxqueue_per_tc;
433 		mcq->flow_id = dist_idx % num_rxqueue_per_tc;
434 		vq_id++;
435 	}
436 
437 	return 0;
438 fail_tx_conf:
439 	i -= 1;
440 	while (i >= 0) {
441 		dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
442 		rte_free(dpaa2_q->q_storage);
443 		priv->tx_conf_vq[i--] = NULL;
444 	}
445 	i = priv->nb_tx_queues;
446 fail_tx:
447 	i -= 1;
448 	while (i >= 0) {
449 		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
450 		rte_free(dpaa2_q->cscn);
451 		priv->tx_vq[i--] = NULL;
452 	}
453 	i = priv->nb_rx_queues;
454 fail:
455 	i -= 1;
456 	mc_q = priv->rx_vq[0];
457 	while (i >= 0) {
458 		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
459 		dpaa2_free_dq_storage(dpaa2_q->q_storage);
460 		rte_free(dpaa2_q->q_storage);
461 		priv->rx_vq[i--] = NULL;
462 	}
463 	rte_free(mc_q);
464 	return -1;
465 }
466 
467 static void
468 dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
469 {
470 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
471 	struct dpaa2_queue *dpaa2_q;
472 	int i;
473 
474 	PMD_INIT_FUNC_TRACE();
475 
476 	/* Queue allocation base */
477 	if (priv->rx_vq[0]) {
478 		/* cleaning up queue storage */
479 		for (i = 0; i < priv->nb_rx_queues; i++) {
480 			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
481 			if (dpaa2_q->q_storage)
482 				rte_free(dpaa2_q->q_storage);
483 		}
484 		/* cleanup tx queue cscn */
485 		for (i = 0; i < priv->nb_tx_queues; i++) {
486 			dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
487 			rte_free(dpaa2_q->cscn);
488 		}
489 		if (priv->tx_conf_en) {
490 			/* cleanup tx conf queue storage */
491 			for (i = 0; i < priv->nb_tx_queues; i++) {
492 				dpaa2_q = (struct dpaa2_queue *)
493 						priv->tx_conf_vq[i];
494 				rte_free(dpaa2_q->q_storage);
495 			}
496 		}
497 		/*free memory for all queues (RX+TX) */
498 		rte_free(priv->rx_vq[0]);
499 		priv->rx_vq[0] = NULL;
500 	}
501 }
502 
503 static int
504 dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
505 {
506 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
507 	struct fsl_mc_io *dpni = dev->process_private;
508 	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
509 	uint64_t rx_offloads = eth_conf->rxmode.offloads;
510 	uint64_t tx_offloads = eth_conf->txmode.offloads;
511 	int rx_l3_csum_offload = false;
512 	int rx_l4_csum_offload = false;
513 	int tx_l3_csum_offload = false;
514 	int tx_l4_csum_offload = false;
515 	int ret, tc_index;
516 
517 	PMD_INIT_FUNC_TRACE();
518 
519 	/* Rx offloads which are enabled by default */
520 	if (dev_rx_offloads_nodis & ~rx_offloads) {
521 		DPAA2_PMD_INFO(
522 		"Some of rx offloads enabled by default - requested 0x%" PRIx64
523 		" fixed are 0x%" PRIx64,
524 		rx_offloads, dev_rx_offloads_nodis);
525 	}
526 
527 	/* Tx offloads which are enabled by default */
528 	if (dev_tx_offloads_nodis & ~tx_offloads) {
529 		DPAA2_PMD_INFO(
530 		"Some of tx offloads enabled by default - requested 0x%" PRIx64
531 		" fixed are 0x%" PRIx64,
532 		tx_offloads, dev_tx_offloads_nodis);
533 	}
534 
535 	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
536 		if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
537 			ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
538 				priv->token, eth_conf->rxmode.max_rx_pkt_len
539 				- RTE_ETHER_CRC_LEN);
540 			if (ret) {
541 				DPAA2_PMD_ERR(
542 					"Unable to set mtu. check config");
543 				return ret;
544 			}
545 			dev->data->mtu =
546 				dev->data->dev_conf.rxmode.max_rx_pkt_len -
547 				RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN -
548 				VLAN_TAG_SIZE;
549 		} else {
550 			return -1;
551 		}
552 	}
553 
554 	if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
555 		for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
556 			ret = dpaa2_setup_flow_dist(dev,
557 					eth_conf->rx_adv_conf.rss_conf.rss_hf,
558 					tc_index);
559 			if (ret) {
560 				DPAA2_PMD_ERR(
561 					"Unable to set flow distribution on tc%d."
562 					"Check queue config", tc_index);
563 				return ret;
564 			}
565 		}
566 	}
567 
568 	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
569 		rx_l3_csum_offload = true;
570 
571 	if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
572 		(rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) ||
573 		(rx_offloads & DEV_RX_OFFLOAD_SCTP_CKSUM))
574 		rx_l4_csum_offload = true;
575 
576 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
577 			       DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload);
578 	if (ret) {
579 		DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret);
580 		return ret;
581 	}
582 
583 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
584 			       DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload);
585 	if (ret) {
586 		DPAA2_PMD_ERR("Error to get RX l4 csum:Error = %d", ret);
587 		return ret;
588 	}
589 
590 #if !defined(RTE_LIBRTE_IEEE1588)
591 	if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
592 #endif
593 		dpaa2_enable_ts[dev->data->port_id] = true;
594 
595 	if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
596 		tx_l3_csum_offload = true;
597 
598 	if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
599 		(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
600 		(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
601 		tx_l4_csum_offload = true;
602 
603 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
604 			       DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload);
605 	if (ret) {
606 		DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret);
607 		return ret;
608 	}
609 
610 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
611 			       DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload);
612 	if (ret) {
613 		DPAA2_PMD_ERR("Error to get TX l4 csum:Error = %d", ret);
614 		return ret;
615 	}
616 
617 	/* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in
618 	 * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC]
619 	 * to 0 for LS2 in the hardware thus disabling data/annotation
620 	 * stashing. For LX2 this is fixed in hardware and thus hash result and
621 	 * parse results can be received in FD using this option.
622 	 */
623 	if (dpaa2_svr_family == SVR_LX2160A) {
624 		ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
625 				       DPNI_FLCTYPE_HASH, true);
626 		if (ret) {
627 			DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret);
628 			return ret;
629 		}
630 	}
631 
632 	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
633 		dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
634 
635 	return 0;
636 }
637 
638 /* Function to setup RX flow information. It contains traffic class ID,
639  * flow ID, destination configuration etc.
640  */
641 static int
642 dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
643 			 uint16_t rx_queue_id,
644 			 uint16_t nb_rx_desc,
645 			 unsigned int socket_id __rte_unused,
646 			 const struct rte_eth_rxconf *rx_conf,
647 			 struct rte_mempool *mb_pool)
648 {
649 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
650 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
651 	struct dpaa2_queue *dpaa2_q;
652 	struct dpni_queue cfg;
653 	uint8_t options = 0;
654 	uint8_t flow_id;
655 	uint32_t bpid;
656 	int i, ret;
657 
658 	PMD_INIT_FUNC_TRACE();
659 
660 	DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
661 			dev, rx_queue_id, mb_pool, rx_conf);
662 
663 	/* Rx deferred start is not supported */
664 	if (rx_conf->rx_deferred_start) {
665 		DPAA2_PMD_ERR("%p:Rx deferred start not supported",
666 				(void *)dev);
667 		return -EINVAL;
668 	}
669 
670 	if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
671 		bpid = mempool_to_bpid(mb_pool);
672 		ret = dpaa2_attach_bp_list(priv,
673 					   rte_dpaa2_bpid_info[bpid].bp_list);
674 		if (ret)
675 			return ret;
676 	}
677 	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
678 	dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
679 	dpaa2_q->bp_array = rte_dpaa2_bpid_info;
680 	dpaa2_q->nb_desc = UINT16_MAX;
681 	dpaa2_q->offloads = rx_conf->offloads;
682 
683 	/*Get the flow id from given VQ id*/
684 	flow_id = dpaa2_q->flow_id;
685 	memset(&cfg, 0, sizeof(struct dpni_queue));
686 
687 	options = options | DPNI_QUEUE_OPT_USER_CTX;
688 	cfg.user_context = (size_t)(dpaa2_q);
689 
690 	/* check if a private cgr available. */
691 	for (i = 0; i < priv->max_cgs; i++) {
692 		if (!priv->cgid_in_use[i]) {
693 			priv->cgid_in_use[i] = 1;
694 			break;
695 		}
696 	}
697 
698 	if (i < priv->max_cgs) {
699 		options |= DPNI_QUEUE_OPT_SET_CGID;
700 		cfg.cgid = i;
701 		dpaa2_q->cgid = cfg.cgid;
702 	} else {
703 		dpaa2_q->cgid = 0xff;
704 	}
705 
706 	/*if ls2088 or rev2 device, enable the stashing */
707 
708 	if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) {
709 		options |= DPNI_QUEUE_OPT_FLC;
710 		cfg.flc.stash_control = true;
711 		cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
712 		/* 00 00 00 - last 6 bit represent annotation, context stashing,
713 		 * data stashing setting 01 01 00 (0x14)
714 		 * (in following order ->DS AS CS)
715 		 * to enable 1 line data, 1 line annotation.
716 		 * For LX2, this setting should be 01 00 00 (0x10)
717 		 */
718 		if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A)
719 			cfg.flc.value |= 0x10;
720 		else
721 			cfg.flc.value |= 0x14;
722 	}
723 	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
724 			     dpaa2_q->tc_index, flow_id, options, &cfg);
725 	if (ret) {
726 		DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret);
727 		return -1;
728 	}
729 
730 	if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
731 		struct dpni_taildrop taildrop;
732 
733 		taildrop.enable = 1;
734 		dpaa2_q->nb_desc = nb_rx_desc;
735 		/* Private CGR will use tail drop length as nb_rx_desc.
736 		 * for rest cases we can use standard byte based tail drop.
737 		 * There is no HW restriction, but number of CGRs are limited,
738 		 * hence this restriction is placed.
739 		 */
740 		if (dpaa2_q->cgid != 0xff) {
741 			/*enabling per rx queue congestion control */
742 			taildrop.threshold = nb_rx_desc;
743 			taildrop.units = DPNI_CONGESTION_UNIT_FRAMES;
744 			taildrop.oal = 0;
745 			DPAA2_PMD_DEBUG("Enabling CG Tail Drop on queue = %d",
746 					rx_queue_id);
747 			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
748 						DPNI_CP_CONGESTION_GROUP,
749 						DPNI_QUEUE_RX,
750 						dpaa2_q->tc_index,
751 						dpaa2_q->cgid, &taildrop);
752 		} else {
753 			/*enabling per rx queue congestion control */
754 			taildrop.threshold = CONG_THRESHOLD_RX_BYTES_Q;
755 			taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
756 			taildrop.oal = CONG_RX_OAL;
757 			DPAA2_PMD_DEBUG("Enabling Byte based Drop on queue= %d",
758 					rx_queue_id);
759 			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
760 						DPNI_CP_QUEUE, DPNI_QUEUE_RX,
761 						dpaa2_q->tc_index, flow_id,
762 						&taildrop);
763 		}
764 		if (ret) {
765 			DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
766 				      ret);
767 			return -1;
768 		}
769 	} else { /* Disable tail Drop */
770 		struct dpni_taildrop taildrop = {0};
771 		DPAA2_PMD_INFO("Tail drop is disabled on queue");
772 
773 		taildrop.enable = 0;
774 		if (dpaa2_q->cgid != 0xff) {
775 			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
776 					DPNI_CP_CONGESTION_GROUP, DPNI_QUEUE_RX,
777 					dpaa2_q->tc_index,
778 					dpaa2_q->cgid, &taildrop);
779 		} else {
780 			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
781 					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
782 					dpaa2_q->tc_index, flow_id, &taildrop);
783 		}
784 		if (ret) {
785 			DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
786 				      ret);
787 			return -1;
788 		}
789 	}
790 
791 	dev->data->rx_queues[rx_queue_id] = dpaa2_q;
792 	return 0;
793 }
794 
795 static int
796 dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
797 			 uint16_t tx_queue_id,
798 			 uint16_t nb_tx_desc,
799 			 unsigned int socket_id __rte_unused,
800 			 const struct rte_eth_txconf *tx_conf)
801 {
802 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
803 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
804 		priv->tx_vq[tx_queue_id];
805 	struct dpaa2_queue *dpaa2_tx_conf_q = (struct dpaa2_queue *)
806 		priv->tx_conf_vq[tx_queue_id];
807 	struct fsl_mc_io *dpni = dev->process_private;
808 	struct dpni_queue tx_conf_cfg;
809 	struct dpni_queue tx_flow_cfg;
810 	uint8_t options = 0, flow_id;
811 	struct dpni_queue_id qid;
812 	uint32_t tc_id;
813 	int ret;
814 
815 	PMD_INIT_FUNC_TRACE();
816 
817 	/* Tx deferred start is not supported */
818 	if (tx_conf->tx_deferred_start) {
819 		DPAA2_PMD_ERR("%p:Tx deferred start not supported",
820 				(void *)dev);
821 		return -EINVAL;
822 	}
823 
824 	dpaa2_q->nb_desc = UINT16_MAX;
825 	dpaa2_q->offloads = tx_conf->offloads;
826 
827 	/* Return if queue already configured */
828 	if (dpaa2_q->flow_id != 0xffff) {
829 		dev->data->tx_queues[tx_queue_id] = dpaa2_q;
830 		return 0;
831 	}
832 
833 	memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
834 	memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
835 
836 	tc_id = tx_queue_id;
837 	flow_id = 0;
838 
839 	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
840 			tc_id, flow_id, options, &tx_flow_cfg);
841 	if (ret) {
842 		DPAA2_PMD_ERR("Error in setting the tx flow: "
843 			"tc_id=%d, flow=%d err=%d",
844 			tc_id, flow_id, ret);
845 			return -1;
846 	}
847 
848 	dpaa2_q->flow_id = flow_id;
849 
850 	if (tx_queue_id == 0) {
851 		/*Set tx-conf and error configuration*/
852 		if (priv->tx_conf_en)
853 			ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
854 							    priv->token,
855 							    DPNI_CONF_AFFINE);
856 		else
857 			ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
858 							    priv->token,
859 							    DPNI_CONF_DISABLE);
860 		if (ret) {
861 			DPAA2_PMD_ERR("Error in set tx conf mode settings: "
862 				      "err=%d", ret);
863 			return -1;
864 		}
865 	}
866 	dpaa2_q->tc_index = tc_id;
867 
868 	ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
869 			     DPNI_QUEUE_TX, dpaa2_q->tc_index,
870 			     dpaa2_q->flow_id, &tx_flow_cfg, &qid);
871 	if (ret) {
872 		DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
873 		return -1;
874 	}
875 	dpaa2_q->fqid = qid.fqid;
876 
877 	if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
878 		struct dpni_congestion_notification_cfg cong_notif_cfg = {0};
879 
880 		dpaa2_q->nb_desc = nb_tx_desc;
881 
882 		cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
883 		cong_notif_cfg.threshold_entry = nb_tx_desc;
884 		/* Notify that the queue is not congested when the data in
885 		 * the queue is below this thershold.
886 		 */
887 		cong_notif_cfg.threshold_exit = nb_tx_desc - 24;
888 		cong_notif_cfg.message_ctx = 0;
889 		cong_notif_cfg.message_iova =
890 				(size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
891 		cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
892 		cong_notif_cfg.notification_mode =
893 					 DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
894 					 DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
895 					 DPNI_CONG_OPT_COHERENT_WRITE;
896 		cong_notif_cfg.cg_point = DPNI_CP_QUEUE;
897 
898 		ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
899 						       priv->token,
900 						       DPNI_QUEUE_TX,
901 						       tc_id,
902 						       &cong_notif_cfg);
903 		if (ret) {
904 			DPAA2_PMD_ERR(
905 			   "Error in setting tx congestion notification: "
906 			   "err=%d", ret);
907 			return -ret;
908 		}
909 	}
910 	dpaa2_q->cb_eqresp_free = dpaa2_dev_free_eqresp_buf;
911 	dev->data->tx_queues[tx_queue_id] = dpaa2_q;
912 
913 	if (priv->tx_conf_en) {
914 		dpaa2_q->tx_conf_queue = dpaa2_tx_conf_q;
915 		options = options | DPNI_QUEUE_OPT_USER_CTX;
916 		tx_conf_cfg.user_context = (size_t)(dpaa2_q);
917 		ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
918 			     DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index,
919 			     dpaa2_tx_conf_q->flow_id, options, &tx_conf_cfg);
920 		if (ret) {
921 			DPAA2_PMD_ERR("Error in setting the tx conf flow: "
922 			      "tc_index=%d, flow=%d err=%d",
923 			      dpaa2_tx_conf_q->tc_index,
924 			      dpaa2_tx_conf_q->flow_id, ret);
925 			return -1;
926 		}
927 
928 		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
929 			     DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index,
930 			     dpaa2_tx_conf_q->flow_id, &tx_conf_cfg, &qid);
931 		if (ret) {
932 			DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
933 			return -1;
934 		}
935 		dpaa2_tx_conf_q->fqid = qid.fqid;
936 	}
937 	return 0;
938 }
939 
940 static void
941 dpaa2_dev_rx_queue_release(void *q __rte_unused)
942 {
943 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)q;
944 	struct dpaa2_dev_priv *priv = dpaa2_q->eth_data->dev_private;
945 	struct fsl_mc_io *dpni =
946 		(struct fsl_mc_io *)priv->eth_dev->process_private;
947 	uint8_t options = 0;
948 	int ret;
949 	struct dpni_queue cfg;
950 
951 	memset(&cfg, 0, sizeof(struct dpni_queue));
952 	PMD_INIT_FUNC_TRACE();
953 	if (dpaa2_q->cgid != 0xff) {
954 		options = DPNI_QUEUE_OPT_CLEAR_CGID;
955 		cfg.cgid = dpaa2_q->cgid;
956 
957 		ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
958 				     DPNI_QUEUE_RX,
959 				     dpaa2_q->tc_index, dpaa2_q->flow_id,
960 				     options, &cfg);
961 		if (ret)
962 			DPAA2_PMD_ERR("Unable to clear CGR from q=%u err=%d",
963 					dpaa2_q->fqid, ret);
964 		priv->cgid_in_use[dpaa2_q->cgid] = 0;
965 		dpaa2_q->cgid = 0xff;
966 	}
967 }
968 
969 static void
970 dpaa2_dev_tx_queue_release(void *q __rte_unused)
971 {
972 	PMD_INIT_FUNC_TRACE();
973 }
974 
975 static uint32_t
976 dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
977 {
978 	int32_t ret;
979 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
980 	struct dpaa2_queue *dpaa2_q;
981 	struct qbman_swp *swp;
982 	struct qbman_fq_query_np_rslt state;
983 	uint32_t frame_cnt = 0;
984 
985 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
986 		ret = dpaa2_affine_qbman_swp();
987 		if (ret) {
988 			DPAA2_PMD_ERR(
989 				"Failed to allocate IO portal, tid: %d\n",
990 				rte_gettid());
991 			return -EINVAL;
992 		}
993 	}
994 	swp = DPAA2_PER_LCORE_PORTAL;
995 
996 	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
997 
998 	if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
999 		frame_cnt = qbman_fq_state_frame_count(&state);
1000 		DPAA2_PMD_DP_DEBUG("RX frame count for q(%d) is %u",
1001 				rx_queue_id, frame_cnt);
1002 	}
1003 	return frame_cnt;
1004 }
1005 
1006 static const uint32_t *
1007 dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
1008 {
1009 	static const uint32_t ptypes[] = {
1010 		/*todo -= add more types */
1011 		RTE_PTYPE_L2_ETHER,
1012 		RTE_PTYPE_L3_IPV4,
1013 		RTE_PTYPE_L3_IPV4_EXT,
1014 		RTE_PTYPE_L3_IPV6,
1015 		RTE_PTYPE_L3_IPV6_EXT,
1016 		RTE_PTYPE_L4_TCP,
1017 		RTE_PTYPE_L4_UDP,
1018 		RTE_PTYPE_L4_SCTP,
1019 		RTE_PTYPE_L4_ICMP,
1020 		RTE_PTYPE_UNKNOWN
1021 	};
1022 
1023 	if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx ||
1024 		dev->rx_pkt_burst == dpaa2_dev_rx ||
1025 		dev->rx_pkt_burst == dpaa2_dev_loopback_rx)
1026 		return ptypes;
1027 	return NULL;
1028 }
1029 
1030 /**
1031  * Dpaa2 link Interrupt handler
1032  *
1033  * @param param
1034  *  The address of parameter (struct rte_eth_dev *) regsitered before.
1035  *
1036  * @return
1037  *  void
1038  */
1039 static void
1040 dpaa2_interrupt_handler(void *param)
1041 {
1042 	struct rte_eth_dev *dev = param;
1043 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1044 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1045 	int ret;
1046 	int irq_index = DPNI_IRQ_INDEX;
1047 	unsigned int status = 0, clear = 0;
1048 
1049 	PMD_INIT_FUNC_TRACE();
1050 
1051 	if (dpni == NULL) {
1052 		DPAA2_PMD_ERR("dpni is NULL");
1053 		return;
1054 	}
1055 
1056 	ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token,
1057 				  irq_index, &status);
1058 	if (unlikely(ret)) {
1059 		DPAA2_PMD_ERR("Can't get irq status (err %d)", ret);
1060 		clear = 0xffffffff;
1061 		goto out;
1062 	}
1063 
1064 	if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
1065 		clear = DPNI_IRQ_EVENT_LINK_CHANGED;
1066 		dpaa2_dev_link_update(dev, 0);
1067 		/* calling all the apps registered for link status event */
1068 		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1069 	}
1070 out:
1071 	ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
1072 				    irq_index, clear);
1073 	if (unlikely(ret))
1074 		DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret);
1075 }
1076 
1077 static int
1078 dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable)
1079 {
1080 	int err = 0;
1081 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1082 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1083 	int irq_index = DPNI_IRQ_INDEX;
1084 	unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED;
1085 
1086 	PMD_INIT_FUNC_TRACE();
1087 
1088 	err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token,
1089 				irq_index, mask);
1090 	if (err < 0) {
1091 		DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err,
1092 			      strerror(-err));
1093 		return err;
1094 	}
1095 
1096 	err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token,
1097 				  irq_index, enable);
1098 	if (err < 0)
1099 		DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err,
1100 			      strerror(-err));
1101 
1102 	return err;
1103 }
1104 
1105 static int
1106 dpaa2_dev_start(struct rte_eth_dev *dev)
1107 {
1108 	struct rte_device *rdev = dev->device;
1109 	struct rte_dpaa2_device *dpaa2_dev;
1110 	struct rte_eth_dev_data *data = dev->data;
1111 	struct dpaa2_dev_priv *priv = data->dev_private;
1112 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1113 	struct dpni_queue cfg;
1114 	struct dpni_error_cfg	err_cfg;
1115 	uint16_t qdid;
1116 	struct dpni_queue_id qid;
1117 	struct dpaa2_queue *dpaa2_q;
1118 	int ret, i;
1119 	struct rte_intr_handle *intr_handle;
1120 
1121 	dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
1122 	intr_handle = &dpaa2_dev->intr_handle;
1123 
1124 	PMD_INIT_FUNC_TRACE();
1125 
1126 	ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
1127 	if (ret) {
1128 		DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d",
1129 			      priv->hw_id, ret);
1130 		return ret;
1131 	}
1132 
1133 	/* Power up the phy. Needed to make the link go UP */
1134 	dpaa2_dev_set_link_up(dev);
1135 
1136 	ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
1137 			    DPNI_QUEUE_TX, &qdid);
1138 	if (ret) {
1139 		DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret);
1140 		return ret;
1141 	}
1142 	priv->qdid = qdid;
1143 
1144 	for (i = 0; i < data->nb_rx_queues; i++) {
1145 		dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
1146 		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
1147 				     DPNI_QUEUE_RX, dpaa2_q->tc_index,
1148 				       dpaa2_q->flow_id, &cfg, &qid);
1149 		if (ret) {
1150 			DPAA2_PMD_ERR("Error in getting flow information: "
1151 				      "err=%d", ret);
1152 			return ret;
1153 		}
1154 		dpaa2_q->fqid = qid.fqid;
1155 	}
1156 
1157 	/*checksum errors, send them to normal path and set it in annotation */
1158 	err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
1159 	err_cfg.errors |= DPNI_ERROR_PHE;
1160 
1161 	err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
1162 	err_cfg.set_frame_annotation = true;
1163 
1164 	ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
1165 				       priv->token, &err_cfg);
1166 	if (ret) {
1167 		DPAA2_PMD_ERR("Error to dpni_set_errors_behavior: code = %d",
1168 			      ret);
1169 		return ret;
1170 	}
1171 
1172 	/* if the interrupts were configured on this devices*/
1173 	if (intr_handle && (intr_handle->fd) &&
1174 	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
1175 		/* Registering LSC interrupt handler */
1176 		rte_intr_callback_register(intr_handle,
1177 					   dpaa2_interrupt_handler,
1178 					   (void *)dev);
1179 
1180 		/* enable vfio intr/eventfd mapping
1181 		 * Interrupt index 0 is required, so we can not use
1182 		 * rte_intr_enable.
1183 		 */
1184 		rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX);
1185 
1186 		/* enable dpni_irqs */
1187 		dpaa2_eth_setup_irqs(dev, 1);
1188 	}
1189 
1190 	/* Change the tx burst function if ordered queues are used */
1191 	if (priv->en_ordered)
1192 		dev->tx_pkt_burst = dpaa2_dev_tx_ordered;
1193 
1194 	return 0;
1195 }
1196 
1197 /**
1198  *  This routine disables all traffic on the adapter by issuing a
1199  *  global reset on the MAC.
1200  */
1201 static void
1202 dpaa2_dev_stop(struct rte_eth_dev *dev)
1203 {
1204 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1205 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1206 	int ret;
1207 	struct rte_eth_link link;
1208 	struct rte_intr_handle *intr_handle = dev->intr_handle;
1209 
1210 	PMD_INIT_FUNC_TRACE();
1211 
1212 	/* reset interrupt callback  */
1213 	if (intr_handle && (intr_handle->fd) &&
1214 	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
1215 		/*disable dpni irqs */
1216 		dpaa2_eth_setup_irqs(dev, 0);
1217 
1218 		/* disable vfio intr before callback unregister */
1219 		rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX);
1220 
1221 		/* Unregistering LSC interrupt handler */
1222 		rte_intr_callback_unregister(intr_handle,
1223 					     dpaa2_interrupt_handler,
1224 					     (void *)dev);
1225 	}
1226 
1227 	dpaa2_dev_set_link_down(dev);
1228 
1229 	ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
1230 	if (ret) {
1231 		DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev",
1232 			      ret, priv->hw_id);
1233 		return;
1234 	}
1235 
1236 	/* clear the recorded link status */
1237 	memset(&link, 0, sizeof(link));
1238 	rte_eth_linkstatus_set(dev, &link);
1239 }
1240 
1241 static void
1242 dpaa2_dev_close(struct rte_eth_dev *dev)
1243 {
1244 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1245 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1246 	int ret;
1247 	struct rte_eth_link link;
1248 
1249 	PMD_INIT_FUNC_TRACE();
1250 
1251 	dpaa2_flow_clean(dev);
1252 
1253 	/* Clean the device first */
1254 	ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
1255 	if (ret) {
1256 		DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret);
1257 		return;
1258 	}
1259 
1260 	memset(&link, 0, sizeof(link));
1261 	rte_eth_linkstatus_set(dev, &link);
1262 }
1263 
1264 static int
1265 dpaa2_dev_promiscuous_enable(
1266 		struct rte_eth_dev *dev)
1267 {
1268 	int ret;
1269 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1270 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1271 
1272 	PMD_INIT_FUNC_TRACE();
1273 
1274 	if (dpni == NULL) {
1275 		DPAA2_PMD_ERR("dpni is NULL");
1276 		return -ENODEV;
1277 	}
1278 
1279 	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
1280 	if (ret < 0)
1281 		DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret);
1282 
1283 	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
1284 	if (ret < 0)
1285 		DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret);
1286 
1287 	return ret;
1288 }
1289 
1290 static int
1291 dpaa2_dev_promiscuous_disable(
1292 		struct rte_eth_dev *dev)
1293 {
1294 	int ret;
1295 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1296 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1297 
1298 	PMD_INIT_FUNC_TRACE();
1299 
1300 	if (dpni == NULL) {
1301 		DPAA2_PMD_ERR("dpni is NULL");
1302 		return -ENODEV;
1303 	}
1304 
1305 	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
1306 	if (ret < 0)
1307 		DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret);
1308 
1309 	if (dev->data->all_multicast == 0) {
1310 		ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
1311 						 priv->token, false);
1312 		if (ret < 0)
1313 			DPAA2_PMD_ERR("Unable to disable M promisc mode %d",
1314 				      ret);
1315 	}
1316 
1317 	return ret;
1318 }
1319 
1320 static int
1321 dpaa2_dev_allmulticast_enable(
1322 		struct rte_eth_dev *dev)
1323 {
1324 	int ret;
1325 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1326 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1327 
1328 	PMD_INIT_FUNC_TRACE();
1329 
1330 	if (dpni == NULL) {
1331 		DPAA2_PMD_ERR("dpni is NULL");
1332 		return -ENODEV;
1333 	}
1334 
1335 	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
1336 	if (ret < 0)
1337 		DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret);
1338 
1339 	return ret;
1340 }
1341 
1342 static int
1343 dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
1344 {
1345 	int ret;
1346 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1347 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1348 
1349 	PMD_INIT_FUNC_TRACE();
1350 
1351 	if (dpni == NULL) {
1352 		DPAA2_PMD_ERR("dpni is NULL");
1353 		return -ENODEV;
1354 	}
1355 
1356 	/* must remain on for all promiscuous */
1357 	if (dev->data->promiscuous == 1)
1358 		return 0;
1359 
1360 	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
1361 	if (ret < 0)
1362 		DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret);
1363 
1364 	return ret;
1365 }
1366 
1367 static int
1368 dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1369 {
1370 	int ret;
1371 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1372 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1373 	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
1374 				+ VLAN_TAG_SIZE;
1375 
1376 	PMD_INIT_FUNC_TRACE();
1377 
1378 	if (dpni == NULL) {
1379 		DPAA2_PMD_ERR("dpni is NULL");
1380 		return -EINVAL;
1381 	}
1382 
1383 	/* check that mtu is within the allowed range */
1384 	if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA2_MAX_RX_PKT_LEN)
1385 		return -EINVAL;
1386 
1387 	if (frame_size > RTE_ETHER_MAX_LEN)
1388 		dev->data->dev_conf.rxmode.offloads |=
1389 						DEV_RX_OFFLOAD_JUMBO_FRAME;
1390 	else
1391 		dev->data->dev_conf.rxmode.offloads &=
1392 						~DEV_RX_OFFLOAD_JUMBO_FRAME;
1393 
1394 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1395 
1396 	/* Set the Max Rx frame length as 'mtu' +
1397 	 * Maximum Ethernet header length
1398 	 */
1399 	ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
1400 					frame_size - RTE_ETHER_CRC_LEN);
1401 	if (ret) {
1402 		DPAA2_PMD_ERR("Setting the max frame length failed");
1403 		return -1;
1404 	}
1405 	DPAA2_PMD_INFO("MTU configured for the device: %d", mtu);
1406 	return 0;
1407 }
1408 
1409 static int
1410 dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
1411 		       struct rte_ether_addr *addr,
1412 		       __rte_unused uint32_t index,
1413 		       __rte_unused uint32_t pool)
1414 {
1415 	int ret;
1416 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1417 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1418 
1419 	PMD_INIT_FUNC_TRACE();
1420 
1421 	if (dpni == NULL) {
1422 		DPAA2_PMD_ERR("dpni is NULL");
1423 		return -1;
1424 	}
1425 
1426 	ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, priv->token,
1427 				addr->addr_bytes, 0, 0, 0);
1428 	if (ret)
1429 		DPAA2_PMD_ERR(
1430 			"error: Adding the MAC ADDR failed: err = %d", ret);
1431 	return 0;
1432 }
1433 
1434 static void
1435 dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
1436 			  uint32_t index)
1437 {
1438 	int ret;
1439 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1440 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1441 	struct rte_eth_dev_data *data = dev->data;
1442 	struct rte_ether_addr *macaddr;
1443 
1444 	PMD_INIT_FUNC_TRACE();
1445 
1446 	macaddr = &data->mac_addrs[index];
1447 
1448 	if (dpni == NULL) {
1449 		DPAA2_PMD_ERR("dpni is NULL");
1450 		return;
1451 	}
1452 
1453 	ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
1454 				   priv->token, macaddr->addr_bytes);
1455 	if (ret)
1456 		DPAA2_PMD_ERR(
1457 			"error: Removing the MAC ADDR failed: err = %d", ret);
1458 }
1459 
1460 static int
1461 dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
1462 		       struct rte_ether_addr *addr)
1463 {
1464 	int ret;
1465 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1466 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1467 
1468 	PMD_INIT_FUNC_TRACE();
1469 
1470 	if (dpni == NULL) {
1471 		DPAA2_PMD_ERR("dpni is NULL");
1472 		return -EINVAL;
1473 	}
1474 
1475 	ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
1476 					priv->token, addr->addr_bytes);
1477 
1478 	if (ret)
1479 		DPAA2_PMD_ERR(
1480 			"error: Setting the MAC ADDR failed %d", ret);
1481 
1482 	return ret;
1483 }
1484 
1485 static
1486 int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
1487 			 struct rte_eth_stats *stats)
1488 {
1489 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1490 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1491 	int32_t  retcode;
1492 	uint8_t page0 = 0, page1 = 1, page2 = 2;
1493 	union dpni_statistics value;
1494 	int i;
1495 	struct dpaa2_queue *dpaa2_rxq, *dpaa2_txq;
1496 
1497 	memset(&value, 0, sizeof(union dpni_statistics));
1498 
1499 	PMD_INIT_FUNC_TRACE();
1500 
1501 	if (!dpni) {
1502 		DPAA2_PMD_ERR("dpni is NULL");
1503 		return -EINVAL;
1504 	}
1505 
1506 	if (!stats) {
1507 		DPAA2_PMD_ERR("stats is NULL");
1508 		return -EINVAL;
1509 	}
1510 
1511 	/*Get Counters from page_0*/
1512 	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1513 				      page0, 0, &value);
1514 	if (retcode)
1515 		goto err;
1516 
1517 	stats->ipackets = value.page_0.ingress_all_frames;
1518 	stats->ibytes = value.page_0.ingress_all_bytes;
1519 
1520 	/*Get Counters from page_1*/
1521 	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1522 				      page1, 0, &value);
1523 	if (retcode)
1524 		goto err;
1525 
1526 	stats->opackets = value.page_1.egress_all_frames;
1527 	stats->obytes = value.page_1.egress_all_bytes;
1528 
1529 	/*Get Counters from page_2*/
1530 	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1531 				      page2, 0, &value);
1532 	if (retcode)
1533 		goto err;
1534 
1535 	/* Ingress drop frame count due to configured rules */
1536 	stats->ierrors = value.page_2.ingress_filtered_frames;
1537 	/* Ingress drop frame count due to error */
1538 	stats->ierrors += value.page_2.ingress_discarded_frames;
1539 
1540 	stats->oerrors = value.page_2.egress_discarded_frames;
1541 	stats->imissed = value.page_2.ingress_nobuffer_discards;
1542 
1543 	/* Fill in per queue stats */
1544 	for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
1545 		(i < priv->nb_rx_queues || i < priv->nb_tx_queues); ++i) {
1546 		dpaa2_rxq = (struct dpaa2_queue *)priv->rx_vq[i];
1547 		dpaa2_txq = (struct dpaa2_queue *)priv->tx_vq[i];
1548 		if (dpaa2_rxq)
1549 			stats->q_ipackets[i] = dpaa2_rxq->rx_pkts;
1550 		if (dpaa2_txq)
1551 			stats->q_opackets[i] = dpaa2_txq->tx_pkts;
1552 
1553 		/* Byte counting is not implemented */
1554 		stats->q_ibytes[i]   = 0;
1555 		stats->q_obytes[i]   = 0;
1556 	}
1557 
1558 	return 0;
1559 
1560 err:
1561 	DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode);
1562 	return retcode;
1563 };
1564 
1565 static int
1566 dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1567 		     unsigned int n)
1568 {
1569 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1570 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1571 	int32_t  retcode;
1572 	union dpni_statistics value[5] = {};
1573 	unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);
1574 
1575 	if (n < num)
1576 		return num;
1577 
1578 	if (xstats == NULL)
1579 		return 0;
1580 
1581 	/* Get Counters from page_0*/
1582 	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1583 				      0, 0, &value[0]);
1584 	if (retcode)
1585 		goto err;
1586 
1587 	/* Get Counters from page_1*/
1588 	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1589 				      1, 0, &value[1]);
1590 	if (retcode)
1591 		goto err;
1592 
1593 	/* Get Counters from page_2*/
1594 	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1595 				      2, 0, &value[2]);
1596 	if (retcode)
1597 		goto err;
1598 
1599 	for (i = 0; i < priv->max_cgs; i++) {
1600 		if (!priv->cgid_in_use[i]) {
1601 			/* Get Counters from page_4*/
1602 			retcode = dpni_get_statistics(dpni, CMD_PRI_LOW,
1603 						      priv->token,
1604 						      4, 0, &value[4]);
1605 			if (retcode)
1606 				goto err;
1607 			break;
1608 		}
1609 	}
1610 
1611 	for (i = 0; i < num; i++) {
1612 		xstats[i].id = i;
1613 		xstats[i].value = value[dpaa2_xstats_strings[i].page_id].
1614 			raw.counter[dpaa2_xstats_strings[i].stats_id];
1615 	}
1616 	return i;
1617 err:
1618 	DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode);
1619 	return retcode;
1620 }
1621 
1622 static int
1623 dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1624 		       struct rte_eth_xstat_name *xstats_names,
1625 		       unsigned int limit)
1626 {
1627 	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1628 
1629 	if (limit < stat_cnt)
1630 		return stat_cnt;
1631 
1632 	if (xstats_names != NULL)
1633 		for (i = 0; i < stat_cnt; i++)
1634 			strlcpy(xstats_names[i].name,
1635 				dpaa2_xstats_strings[i].name,
1636 				sizeof(xstats_names[i].name));
1637 
1638 	return stat_cnt;
1639 }
1640 
1641 static int
1642 dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1643 		       uint64_t *values, unsigned int n)
1644 {
1645 	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1646 	uint64_t values_copy[stat_cnt];
1647 
1648 	if (!ids) {
1649 		struct dpaa2_dev_priv *priv = dev->data->dev_private;
1650 		struct fsl_mc_io *dpni =
1651 			(struct fsl_mc_io *)dev->process_private;
1652 		int32_t  retcode;
1653 		union dpni_statistics value[5] = {};
1654 
1655 		if (n < stat_cnt)
1656 			return stat_cnt;
1657 
1658 		if (!values)
1659 			return 0;
1660 
1661 		/* Get Counters from page_0*/
1662 		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1663 					      0, 0, &value[0]);
1664 		if (retcode)
1665 			return 0;
1666 
1667 		/* Get Counters from page_1*/
1668 		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1669 					      1, 0, &value[1]);
1670 		if (retcode)
1671 			return 0;
1672 
1673 		/* Get Counters from page_2*/
1674 		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1675 					      2, 0, &value[2]);
1676 		if (retcode)
1677 			return 0;
1678 
1679 		/* Get Counters from page_4*/
1680 		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1681 					      4, 0, &value[4]);
1682 		if (retcode)
1683 			return 0;
1684 
1685 		for (i = 0; i < stat_cnt; i++) {
1686 			values[i] = value[dpaa2_xstats_strings[i].page_id].
1687 				raw.counter[dpaa2_xstats_strings[i].stats_id];
1688 		}
1689 		return stat_cnt;
1690 	}
1691 
1692 	dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);
1693 
1694 	for (i = 0; i < n; i++) {
1695 		if (ids[i] >= stat_cnt) {
1696 			DPAA2_PMD_ERR("xstats id value isn't valid");
1697 			return -1;
1698 		}
1699 		values[i] = values_copy[ids[i]];
1700 	}
1701 	return n;
1702 }
1703 
1704 static int
1705 dpaa2_xstats_get_names_by_id(
1706 	struct rte_eth_dev *dev,
1707 	struct rte_eth_xstat_name *xstats_names,
1708 	const uint64_t *ids,
1709 	unsigned int limit)
1710 {
1711 	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1712 	struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
1713 
1714 	if (!ids)
1715 		return dpaa2_xstats_get_names(dev, xstats_names, limit);
1716 
1717 	dpaa2_xstats_get_names(dev, xstats_names_copy, limit);
1718 
1719 	for (i = 0; i < limit; i++) {
1720 		if (ids[i] >= stat_cnt) {
1721 			DPAA2_PMD_ERR("xstats id value isn't valid");
1722 			return -1;
1723 		}
1724 		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
1725 	}
1726 	return limit;
1727 }
1728 
1729 static int
1730 dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
1731 {
1732 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1733 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1734 	int retcode;
1735 	int i;
1736 	struct dpaa2_queue *dpaa2_q;
1737 
1738 	PMD_INIT_FUNC_TRACE();
1739 
1740 	if (dpni == NULL) {
1741 		DPAA2_PMD_ERR("dpni is NULL");
1742 		return -EINVAL;
1743 	}
1744 
1745 	retcode =  dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
1746 	if (retcode)
1747 		goto error;
1748 
1749 	/* Reset the per queue stats in dpaa2_queue structure */
1750 	for (i = 0; i < priv->nb_rx_queues; i++) {
1751 		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
1752 		if (dpaa2_q)
1753 			dpaa2_q->rx_pkts = 0;
1754 	}
1755 
1756 	for (i = 0; i < priv->nb_tx_queues; i++) {
1757 		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
1758 		if (dpaa2_q)
1759 			dpaa2_q->tx_pkts = 0;
1760 	}
1761 
1762 	return 0;
1763 
1764 error:
1765 	DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode);
1766 	return retcode;
1767 };
1768 
1769 /* return 0 means link status changed, -1 means not changed */
1770 static int
1771 dpaa2_dev_link_update(struct rte_eth_dev *dev,
1772 			int wait_to_complete __rte_unused)
1773 {
1774 	int ret;
1775 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1776 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1777 	struct rte_eth_link link;
1778 	struct dpni_link_state state = {0};
1779 
1780 	if (dpni == NULL) {
1781 		DPAA2_PMD_ERR("dpni is NULL");
1782 		return 0;
1783 	}
1784 
1785 	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1786 	if (ret < 0) {
1787 		DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret);
1788 		return -1;
1789 	}
1790 
1791 	memset(&link, 0, sizeof(struct rte_eth_link));
1792 	link.link_status = state.up;
1793 	link.link_speed = state.rate;
1794 
1795 	if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
1796 		link.link_duplex = ETH_LINK_HALF_DUPLEX;
1797 	else
1798 		link.link_duplex = ETH_LINK_FULL_DUPLEX;
1799 
1800 	ret = rte_eth_linkstatus_set(dev, &link);
1801 	if (ret == -1)
1802 		DPAA2_PMD_DEBUG("No change in status");
1803 	else
1804 		DPAA2_PMD_INFO("Port %d Link is %s\n", dev->data->port_id,
1805 			       link.link_status ? "Up" : "Down");
1806 
1807 	return ret;
1808 }
1809 
1810 /**
1811  * Toggle the DPNI to enable, if not already enabled.
1812  * This is not strictly PHY up/down - it is more of logical toggling.
1813  */
1814 static int
1815 dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
1816 {
1817 	int ret = -EINVAL;
1818 	struct dpaa2_dev_priv *priv;
1819 	struct fsl_mc_io *dpni;
1820 	int en = 0;
1821 	struct dpni_link_state state = {0};
1822 
1823 	priv = dev->data->dev_private;
1824 	dpni = (struct fsl_mc_io *)dev->process_private;
1825 
1826 	if (dpni == NULL) {
1827 		DPAA2_PMD_ERR("dpni is NULL");
1828 		return ret;
1829 	}
1830 
1831 	/* Check if DPNI is currently enabled */
1832 	ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
1833 	if (ret) {
1834 		/* Unable to obtain dpni status; Not continuing */
1835 		DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
1836 		return -EINVAL;
1837 	}
1838 
1839 	/* Enable link if not already enabled */
1840 	if (!en) {
1841 		ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
1842 		if (ret) {
1843 			DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
1844 			return -EINVAL;
1845 		}
1846 	}
1847 	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1848 	if (ret < 0) {
1849 		DPAA2_PMD_DEBUG("Unable to get link state (%d)", ret);
1850 		return -1;
1851 	}
1852 
1853 	/* changing tx burst function to start enqueues */
1854 	dev->tx_pkt_burst = dpaa2_dev_tx;
1855 	dev->data->dev_link.link_status = state.up;
1856 	dev->data->dev_link.link_speed = state.rate;
1857 
1858 	if (state.up)
1859 		DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id);
1860 	else
1861 		DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id);
1862 	return ret;
1863 }
1864 
1865 /**
1866  * Toggle the DPNI to disable, if not already disabled.
1867  * This is not strictly PHY up/down - it is more of logical toggling.
1868  */
1869 static int
1870 dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
1871 {
1872 	int ret = -EINVAL;
1873 	struct dpaa2_dev_priv *priv;
1874 	struct fsl_mc_io *dpni;
1875 	int dpni_enabled = 0;
1876 	int retries = 10;
1877 
1878 	PMD_INIT_FUNC_TRACE();
1879 
1880 	priv = dev->data->dev_private;
1881 	dpni = (struct fsl_mc_io *)dev->process_private;
1882 
1883 	if (dpni == NULL) {
1884 		DPAA2_PMD_ERR("Device has not yet been configured");
1885 		return ret;
1886 	}
1887 
1888 	/*changing  tx burst function to avoid any more enqueues */
1889 	dev->tx_pkt_burst = dummy_dev_tx;
1890 
1891 	/* Loop while dpni_disable() attempts to drain the egress FQs
1892 	 * and confirm them back to us.
1893 	 */
1894 	do {
1895 		ret = dpni_disable(dpni, 0, priv->token);
1896 		if (ret) {
1897 			DPAA2_PMD_ERR("dpni disable failed (%d)", ret);
1898 			return ret;
1899 		}
1900 		ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
1901 		if (ret) {
1902 			DPAA2_PMD_ERR("dpni enable check failed (%d)", ret);
1903 			return ret;
1904 		}
1905 		if (dpni_enabled)
1906 			/* Allow the MC some slack */
1907 			rte_delay_us(100 * 1000);
1908 	} while (dpni_enabled && --retries);
1909 
1910 	if (!retries) {
1911 		DPAA2_PMD_WARN("Retry count exceeded disabling dpni");
1912 		/* todo- we may have to manually cleanup queues.
1913 		 */
1914 	} else {
1915 		DPAA2_PMD_INFO("Port %d Link DOWN successful",
1916 			       dev->data->port_id);
1917 	}
1918 
1919 	dev->data->dev_link.link_status = 0;
1920 
1921 	return ret;
1922 }
1923 
1924 static int
1925 dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1926 {
1927 	int ret = -EINVAL;
1928 	struct dpaa2_dev_priv *priv;
1929 	struct fsl_mc_io *dpni;
1930 	struct dpni_link_state state = {0};
1931 
1932 	PMD_INIT_FUNC_TRACE();
1933 
1934 	priv = dev->data->dev_private;
1935 	dpni = (struct fsl_mc_io *)dev->process_private;
1936 
1937 	if (dpni == NULL || fc_conf == NULL) {
1938 		DPAA2_PMD_ERR("device not configured");
1939 		return ret;
1940 	}
1941 
1942 	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1943 	if (ret) {
1944 		DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret);
1945 		return ret;
1946 	}
1947 
1948 	memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
1949 	if (state.options & DPNI_LINK_OPT_PAUSE) {
1950 		/* DPNI_LINK_OPT_PAUSE set
1951 		 *  if ASYM_PAUSE not set,
1952 		 *	RX Side flow control (handle received Pause frame)
1953 		 *	TX side flow control (send Pause frame)
1954 		 *  if ASYM_PAUSE set,
1955 		 *	RX Side flow control (handle received Pause frame)
1956 		 *	No TX side flow control (send Pause frame disabled)
1957 		 */
1958 		if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
1959 			fc_conf->mode = RTE_FC_FULL;
1960 		else
1961 			fc_conf->mode = RTE_FC_RX_PAUSE;
1962 	} else {
1963 		/* DPNI_LINK_OPT_PAUSE not set
1964 		 *  if ASYM_PAUSE set,
1965 		 *	TX side flow control (send Pause frame)
1966 		 *	No RX side flow control (No action on pause frame rx)
1967 		 *  if ASYM_PAUSE not set,
1968 		 *	Flow control disabled
1969 		 */
1970 		if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
1971 			fc_conf->mode = RTE_FC_TX_PAUSE;
1972 		else
1973 			fc_conf->mode = RTE_FC_NONE;
1974 	}
1975 
1976 	return ret;
1977 }
1978 
1979 static int
1980 dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1981 {
1982 	int ret = -EINVAL;
1983 	struct dpaa2_dev_priv *priv;
1984 	struct fsl_mc_io *dpni;
1985 	struct dpni_link_state state = {0};
1986 	struct dpni_link_cfg cfg = {0};
1987 
1988 	PMD_INIT_FUNC_TRACE();
1989 
1990 	priv = dev->data->dev_private;
1991 	dpni = (struct fsl_mc_io *)dev->process_private;
1992 
1993 	if (dpni == NULL) {
1994 		DPAA2_PMD_ERR("dpni is NULL");
1995 		return ret;
1996 	}
1997 
1998 	/* It is necessary to obtain the current state before setting fc_conf
1999 	 * as MC would return error in case rate, autoneg or duplex values are
2000 	 * different.
2001 	 */
2002 	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
2003 	if (ret) {
2004 		DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret);
2005 		return -1;
2006 	}
2007 
2008 	/* Disable link before setting configuration */
2009 	dpaa2_dev_set_link_down(dev);
2010 
2011 	/* Based on fc_conf, update cfg */
2012 	cfg.rate = state.rate;
2013 	cfg.options = state.options;
2014 
2015 	/* update cfg with fc_conf */
2016 	switch (fc_conf->mode) {
2017 	case RTE_FC_FULL:
2018 		/* Full flow control;
2019 		 * OPT_PAUSE set, ASYM_PAUSE not set
2020 		 */
2021 		cfg.options |= DPNI_LINK_OPT_PAUSE;
2022 		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
2023 		break;
2024 	case RTE_FC_TX_PAUSE:
2025 		/* Enable RX flow control
2026 		 * OPT_PAUSE not set;
2027 		 * ASYM_PAUSE set;
2028 		 */
2029 		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
2030 		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
2031 		break;
2032 	case RTE_FC_RX_PAUSE:
2033 		/* Enable TX Flow control
2034 		 * OPT_PAUSE set
2035 		 * ASYM_PAUSE set
2036 		 */
2037 		cfg.options |= DPNI_LINK_OPT_PAUSE;
2038 		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
2039 		break;
2040 	case RTE_FC_NONE:
2041 		/* Disable Flow control
2042 		 * OPT_PAUSE not set
2043 		 * ASYM_PAUSE not set
2044 		 */
2045 		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
2046 		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
2047 		break;
2048 	default:
2049 		DPAA2_PMD_ERR("Incorrect Flow control flag (%d)",
2050 			      fc_conf->mode);
2051 		return -1;
2052 	}
2053 
2054 	ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
2055 	if (ret)
2056 		DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)",
2057 			      ret);
2058 
2059 	/* Enable link */
2060 	dpaa2_dev_set_link_up(dev);
2061 
2062 	return ret;
2063 }
2064 
2065 static int
2066 dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
2067 			  struct rte_eth_rss_conf *rss_conf)
2068 {
2069 	struct rte_eth_dev_data *data = dev->data;
2070 	struct dpaa2_dev_priv *priv = data->dev_private;
2071 	struct rte_eth_conf *eth_conf = &data->dev_conf;
2072 	int ret, tc_index;
2073 
2074 	PMD_INIT_FUNC_TRACE();
2075 
2076 	if (rss_conf->rss_hf) {
2077 		for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
2078 			ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf,
2079 				tc_index);
2080 			if (ret) {
2081 				DPAA2_PMD_ERR("Unable to set flow dist on tc%d",
2082 					tc_index);
2083 				return ret;
2084 			}
2085 		}
2086 	} else {
2087 		for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
2088 			ret = dpaa2_remove_flow_dist(dev, tc_index);
2089 			if (ret) {
2090 				DPAA2_PMD_ERR(
2091 					"Unable to remove flow dist on tc%d",
2092 					tc_index);
2093 				return ret;
2094 			}
2095 		}
2096 	}
2097 	eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
2098 	return 0;
2099 }
2100 
2101 static int
2102 dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
2103 			    struct rte_eth_rss_conf *rss_conf)
2104 {
2105 	struct rte_eth_dev_data *data = dev->data;
2106 	struct rte_eth_conf *eth_conf = &data->dev_conf;
2107 
2108 	/* dpaa2 does not support rss_key, so length should be 0*/
2109 	rss_conf->rss_key_len = 0;
2110 	rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
2111 	return 0;
2112 }
2113 
2114 int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
2115 		int eth_rx_queue_id,
2116 		struct dpaa2_dpcon_dev *dpcon,
2117 		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
2118 {
2119 	struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
2120 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
2121 	struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
2122 	uint8_t flow_id = dpaa2_ethq->flow_id;
2123 	struct dpni_queue cfg;
2124 	uint8_t options, priority;
2125 	int ret;
2126 
2127 	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
2128 		dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
2129 	else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC)
2130 		dpaa2_ethq->cb = dpaa2_dev_process_atomic_event;
2131 	else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED)
2132 		dpaa2_ethq->cb = dpaa2_dev_process_ordered_event;
2133 	else
2134 		return -EINVAL;
2135 
2136 	priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / queue_conf->ev.priority) *
2137 		   (dpcon->num_priorities - 1);
2138 
2139 	memset(&cfg, 0, sizeof(struct dpni_queue));
2140 	options = DPNI_QUEUE_OPT_DEST;
2141 	cfg.destination.type = DPNI_DEST_DPCON;
2142 	cfg.destination.id = dpcon->dpcon_id;
2143 	cfg.destination.priority = priority;
2144 
2145 	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
2146 		options |= DPNI_QUEUE_OPT_HOLD_ACTIVE;
2147 		cfg.destination.hold_active = 1;
2148 	}
2149 
2150 	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED &&
2151 			!eth_priv->en_ordered) {
2152 		struct opr_cfg ocfg;
2153 
2154 		/* Restoration window size = 256 frames */
2155 		ocfg.oprrws = 3;
2156 		/* Restoration window size = 512 frames for LX2 */
2157 		if (dpaa2_svr_family == SVR_LX2160A)
2158 			ocfg.oprrws = 4;
2159 		/* Auto advance NESN window enabled */
2160 		ocfg.oa = 1;
2161 		/* Late arrival window size disabled */
2162 		ocfg.olws = 0;
2163 		/* ORL resource exhaustaion advance NESN disabled */
2164 		ocfg.oeane = 0;
2165 		/* Loose ordering enabled */
2166 		ocfg.oloe = 1;
2167 		eth_priv->en_loose_ordered = 1;
2168 		/* Strict ordering enabled if explicitly set */
2169 		if (getenv("DPAA2_STRICT_ORDERING_ENABLE")) {
2170 			ocfg.oloe = 0;
2171 			eth_priv->en_loose_ordered = 0;
2172 		}
2173 
2174 		ret = dpni_set_opr(dpni, CMD_PRI_LOW, eth_priv->token,
2175 				   dpaa2_ethq->tc_index, flow_id,
2176 				   OPR_OPT_CREATE, &ocfg);
2177 		if (ret) {
2178 			DPAA2_PMD_ERR("Error setting opr: ret: %d\n", ret);
2179 			return ret;
2180 		}
2181 
2182 		eth_priv->en_ordered = 1;
2183 	}
2184 
2185 	options |= DPNI_QUEUE_OPT_USER_CTX;
2186 	cfg.user_context = (size_t)(dpaa2_ethq);
2187 
2188 	ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
2189 			     dpaa2_ethq->tc_index, flow_id, options, &cfg);
2190 	if (ret) {
2191 		DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
2192 		return ret;
2193 	}
2194 
2195 	memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event));
2196 
2197 	return 0;
2198 }
2199 
2200 int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
2201 		int eth_rx_queue_id)
2202 {
2203 	struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
2204 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
2205 	struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
2206 	uint8_t flow_id = dpaa2_ethq->flow_id;
2207 	struct dpni_queue cfg;
2208 	uint8_t options;
2209 	int ret;
2210 
2211 	memset(&cfg, 0, sizeof(struct dpni_queue));
2212 	options = DPNI_QUEUE_OPT_DEST;
2213 	cfg.destination.type = DPNI_DEST_NONE;
2214 
2215 	ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
2216 			     dpaa2_ethq->tc_index, flow_id, options, &cfg);
2217 	if (ret)
2218 		DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
2219 
2220 	return ret;
2221 }
2222 
2223 static inline int
2224 dpaa2_dev_verify_filter_ops(enum rte_filter_op filter_op)
2225 {
2226 	unsigned int i;
2227 
2228 	for (i = 0; i < RTE_DIM(dpaa2_supported_filter_ops); i++) {
2229 		if (dpaa2_supported_filter_ops[i] == filter_op)
2230 			return 0;
2231 	}
2232 	return -ENOTSUP;
2233 }
2234 
2235 static int
2236 dpaa2_dev_flow_ctrl(struct rte_eth_dev *dev,
2237 		    enum rte_filter_type filter_type,
2238 				 enum rte_filter_op filter_op,
2239 				 void *arg)
2240 {
2241 	int ret = 0;
2242 
2243 	if (!dev)
2244 		return -ENODEV;
2245 
2246 	switch (filter_type) {
2247 	case RTE_ETH_FILTER_GENERIC:
2248 		if (dpaa2_dev_verify_filter_ops(filter_op) < 0) {
2249 			ret = -ENOTSUP;
2250 			break;
2251 		}
2252 		*(const void **)arg = &dpaa2_flow_ops;
2253 		dpaa2_filter_type |= filter_type;
2254 		break;
2255 	default:
2256 		RTE_LOG(ERR, PMD, "Filter type (%d) not supported",
2257 			filter_type);
2258 		ret = -ENOTSUP;
2259 		break;
2260 	}
2261 	return ret;
2262 }
2263 
2264 static void
2265 dpaa2_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2266 	struct rte_eth_rxq_info *qinfo)
2267 {
2268 	struct dpaa2_queue *rxq;
2269 
2270 	rxq = (struct dpaa2_queue *)dev->data->rx_queues[queue_id];
2271 
2272 	qinfo->mp = rxq->mb_pool;
2273 	qinfo->scattered_rx = dev->data->scattered_rx;
2274 	qinfo->nb_desc = rxq->nb_desc;
2275 
2276 	qinfo->conf.rx_free_thresh = 1;
2277 	qinfo->conf.rx_drop_en = 1;
2278 	qinfo->conf.rx_deferred_start = 0;
2279 	qinfo->conf.offloads = rxq->offloads;
2280 }
2281 
2282 static void
2283 dpaa2_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2284 	struct rte_eth_txq_info *qinfo)
2285 {
2286 	struct dpaa2_queue *txq;
2287 
2288 	txq = dev->data->tx_queues[queue_id];
2289 
2290 	qinfo->nb_desc = txq->nb_desc;
2291 	qinfo->conf.tx_thresh.pthresh = 0;
2292 	qinfo->conf.tx_thresh.hthresh = 0;
2293 	qinfo->conf.tx_thresh.wthresh = 0;
2294 
2295 	qinfo->conf.tx_free_thresh = 0;
2296 	qinfo->conf.tx_rs_thresh = 0;
2297 	qinfo->conf.offloads = txq->offloads;
2298 	qinfo->conf.tx_deferred_start = 0;
2299 }
2300 
2301 static struct eth_dev_ops dpaa2_ethdev_ops = {
2302 	.dev_configure	  = dpaa2_eth_dev_configure,
2303 	.dev_start	      = dpaa2_dev_start,
2304 	.dev_stop	      = dpaa2_dev_stop,
2305 	.dev_close	      = dpaa2_dev_close,
2306 	.promiscuous_enable   = dpaa2_dev_promiscuous_enable,
2307 	.promiscuous_disable  = dpaa2_dev_promiscuous_disable,
2308 	.allmulticast_enable  = dpaa2_dev_allmulticast_enable,
2309 	.allmulticast_disable = dpaa2_dev_allmulticast_disable,
2310 	.dev_set_link_up      = dpaa2_dev_set_link_up,
2311 	.dev_set_link_down    = dpaa2_dev_set_link_down,
2312 	.link_update	   = dpaa2_dev_link_update,
2313 	.stats_get	       = dpaa2_dev_stats_get,
2314 	.xstats_get	       = dpaa2_dev_xstats_get,
2315 	.xstats_get_by_id     = dpaa2_xstats_get_by_id,
2316 	.xstats_get_names_by_id = dpaa2_xstats_get_names_by_id,
2317 	.xstats_get_names      = dpaa2_xstats_get_names,
2318 	.stats_reset	   = dpaa2_dev_stats_reset,
2319 	.xstats_reset	      = dpaa2_dev_stats_reset,
2320 	.fw_version_get	   = dpaa2_fw_version_get,
2321 	.dev_infos_get	   = dpaa2_dev_info_get,
2322 	.dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
2323 	.mtu_set           = dpaa2_dev_mtu_set,
2324 	.vlan_filter_set      = dpaa2_vlan_filter_set,
2325 	.vlan_offload_set     = dpaa2_vlan_offload_set,
2326 	.vlan_tpid_set	      = dpaa2_vlan_tpid_set,
2327 	.rx_queue_setup    = dpaa2_dev_rx_queue_setup,
2328 	.rx_queue_release  = dpaa2_dev_rx_queue_release,
2329 	.tx_queue_setup    = dpaa2_dev_tx_queue_setup,
2330 	.tx_queue_release  = dpaa2_dev_tx_queue_release,
2331 	.rx_burst_mode_get = dpaa2_dev_rx_burst_mode_get,
2332 	.tx_burst_mode_get = dpaa2_dev_tx_burst_mode_get,
2333 	.flow_ctrl_get	      = dpaa2_flow_ctrl_get,
2334 	.flow_ctrl_set	      = dpaa2_flow_ctrl_set,
2335 	.mac_addr_add         = dpaa2_dev_add_mac_addr,
2336 	.mac_addr_remove      = dpaa2_dev_remove_mac_addr,
2337 	.mac_addr_set         = dpaa2_dev_set_mac_addr,
2338 	.rss_hash_update      = dpaa2_dev_rss_hash_update,
2339 	.rss_hash_conf_get    = dpaa2_dev_rss_hash_conf_get,
2340 	.filter_ctrl          = dpaa2_dev_flow_ctrl,
2341 	.rxq_info_get	      = dpaa2_rxq_info_get,
2342 	.txq_info_get	      = dpaa2_txq_info_get,
2343 #if defined(RTE_LIBRTE_IEEE1588)
2344 	.timesync_enable      = dpaa2_timesync_enable,
2345 	.timesync_disable     = dpaa2_timesync_disable,
2346 	.timesync_read_time   = dpaa2_timesync_read_time,
2347 	.timesync_write_time  = dpaa2_timesync_write_time,
2348 	.timesync_adjust_time = dpaa2_timesync_adjust_time,
2349 	.timesync_read_rx_timestamp = dpaa2_timesync_read_rx_timestamp,
2350 	.timesync_read_tx_timestamp = dpaa2_timesync_read_tx_timestamp,
2351 #endif
2352 };
2353 
2354 /* Populate the mac address from physically available (u-boot/firmware) and/or
2355  * one set by higher layers like MC (restool) etc.
2356  * Returns the table of MAC entries (multiple entries)
2357  */
2358 static int
2359 populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv,
2360 		  struct rte_ether_addr *mac_entry)
2361 {
2362 	int ret;
2363 	struct rte_ether_addr phy_mac, prime_mac;
2364 
2365 	memset(&phy_mac, 0, sizeof(struct rte_ether_addr));
2366 	memset(&prime_mac, 0, sizeof(struct rte_ether_addr));
2367 
2368 	/* Get the physical device MAC address */
2369 	ret = dpni_get_port_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
2370 				     phy_mac.addr_bytes);
2371 	if (ret) {
2372 		DPAA2_PMD_ERR("DPNI get physical port MAC failed: %d", ret);
2373 		goto cleanup;
2374 	}
2375 
2376 	ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
2377 					prime_mac.addr_bytes);
2378 	if (ret) {
2379 		DPAA2_PMD_ERR("DPNI get Prime port MAC failed: %d", ret);
2380 		goto cleanup;
2381 	}
2382 
2383 	/* Now that both MAC have been obtained, do:
2384 	 *  if not_empty_mac(phy) && phy != Prime, overwrite prime with Phy
2385 	 *     and return phy
2386 	 *  If empty_mac(phy), return prime.
2387 	 *  if both are empty, create random MAC, set as prime and return
2388 	 */
2389 	if (!rte_is_zero_ether_addr(&phy_mac)) {
2390 		/* If the addresses are not same, overwrite prime */
2391 		if (!rte_is_same_ether_addr(&phy_mac, &prime_mac)) {
2392 			ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
2393 							priv->token,
2394 							phy_mac.addr_bytes);
2395 			if (ret) {
2396 				DPAA2_PMD_ERR("Unable to set MAC Address: %d",
2397 					      ret);
2398 				goto cleanup;
2399 			}
2400 			memcpy(&prime_mac, &phy_mac,
2401 				sizeof(struct rte_ether_addr));
2402 		}
2403 	} else if (rte_is_zero_ether_addr(&prime_mac)) {
2404 		/* In case phys and prime, both are zero, create random MAC */
2405 		rte_eth_random_addr(prime_mac.addr_bytes);
2406 		ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
2407 						priv->token,
2408 						prime_mac.addr_bytes);
2409 		if (ret) {
2410 			DPAA2_PMD_ERR("Unable to set MAC Address: %d", ret);
2411 			goto cleanup;
2412 		}
2413 	}
2414 
2415 	/* prime_mac the final MAC address */
2416 	memcpy(mac_entry, &prime_mac, sizeof(struct rte_ether_addr));
2417 	return 0;
2418 
2419 cleanup:
2420 	return -1;
2421 }
2422 
2423 static int
2424 check_devargs_handler(__rte_unused const char *key, const char *value,
2425 		      __rte_unused void *opaque)
2426 {
2427 	if (strcmp(value, "1"))
2428 		return -1;
2429 
2430 	return 0;
2431 }
2432 
2433 static int
2434 dpaa2_get_devargs(struct rte_devargs *devargs, const char *key)
2435 {
2436 	struct rte_kvargs *kvlist;
2437 
2438 	if (!devargs)
2439 		return 0;
2440 
2441 	kvlist = rte_kvargs_parse(devargs->args, NULL);
2442 	if (!kvlist)
2443 		return 0;
2444 
2445 	if (!rte_kvargs_count(kvlist, key)) {
2446 		rte_kvargs_free(kvlist);
2447 		return 0;
2448 	}
2449 
2450 	if (rte_kvargs_process(kvlist, key,
2451 			       check_devargs_handler, NULL) < 0) {
2452 		rte_kvargs_free(kvlist);
2453 		return 0;
2454 	}
2455 	rte_kvargs_free(kvlist);
2456 
2457 	return 1;
2458 }
2459 
2460 static int
2461 dpaa2_dev_init(struct rte_eth_dev *eth_dev)
2462 {
2463 	struct rte_device *dev = eth_dev->device;
2464 	struct rte_dpaa2_device *dpaa2_dev;
2465 	struct fsl_mc_io *dpni_dev;
2466 	struct dpni_attr attr;
2467 	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
2468 	struct dpni_buffer_layout layout;
2469 	int ret, hw_id, i;
2470 
2471 	PMD_INIT_FUNC_TRACE();
2472 
2473 	dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
2474 	if (!dpni_dev) {
2475 		DPAA2_PMD_ERR("Memory allocation failed for dpni device");
2476 		return -1;
2477 	}
2478 	dpni_dev->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
2479 	eth_dev->process_private = (void *)dpni_dev;
2480 
2481 	/* For secondary processes, the primary has done all the work */
2482 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2483 		/* In case of secondary, only burst and ops API need to be
2484 		 * plugged.
2485 		 */
2486 		eth_dev->dev_ops = &dpaa2_ethdev_ops;
2487 		eth_dev->rx_queue_count = dpaa2_dev_rx_queue_count;
2488 		if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE))
2489 			eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
2490 		else if (dpaa2_get_devargs(dev->devargs,
2491 					DRIVER_NO_PREFETCH_MODE))
2492 			eth_dev->rx_pkt_burst = dpaa2_dev_rx;
2493 		else
2494 			eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
2495 		eth_dev->tx_pkt_burst = dpaa2_dev_tx;
2496 		return 0;
2497 	}
2498 
2499 	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
2500 
2501 	hw_id = dpaa2_dev->object_id;
2502 	ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
2503 	if (ret) {
2504 		DPAA2_PMD_ERR(
2505 			     "Failure in opening dpni@%d with err code %d",
2506 			     hw_id, ret);
2507 		rte_free(dpni_dev);
2508 		return -1;
2509 	}
2510 
2511 	/* Clean the device first */
2512 	ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
2513 	if (ret) {
2514 		DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d",
2515 			      hw_id, ret);
2516 		goto init_err;
2517 	}
2518 
2519 	ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
2520 	if (ret) {
2521 		DPAA2_PMD_ERR(
2522 			     "Failure in get dpni@%d attribute, err code %d",
2523 			     hw_id, ret);
2524 		goto init_err;
2525 	}
2526 
2527 	priv->num_rx_tc = attr.num_rx_tcs;
2528 	priv->qos_entries = attr.qos_entries;
2529 	priv->fs_entries = attr.fs_entries;
2530 	priv->dist_queues = attr.num_queues;
2531 
2532 	/* only if the custom CG is enabled */
2533 	if (attr.options & DPNI_OPT_CUSTOM_CG)
2534 		priv->max_cgs = attr.num_cgs;
2535 	else
2536 		priv->max_cgs = 0;
2537 
2538 	for (i = 0; i < priv->max_cgs; i++)
2539 		priv->cgid_in_use[i] = 0;
2540 
2541 	for (i = 0; i < attr.num_rx_tcs; i++)
2542 		priv->nb_rx_queues += attr.num_queues;
2543 
2544 	/* Using number of TX queues as number of TX TCs */
2545 	priv->nb_tx_queues = attr.num_tx_tcs;
2546 
2547 	DPAA2_PMD_DEBUG("RX-TC= %d, rx_queues= %d, tx_queues=%d, max_cgs=%d",
2548 			priv->num_rx_tc, priv->nb_rx_queues,
2549 			priv->nb_tx_queues, priv->max_cgs);
2550 
2551 	priv->hw = dpni_dev;
2552 	priv->hw_id = hw_id;
2553 	priv->options = attr.options;
2554 	priv->max_mac_filters = attr.mac_filter_entries;
2555 	priv->max_vlan_filters = attr.vlan_filter_entries;
2556 	priv->flags = 0;
2557 #if defined(RTE_LIBRTE_IEEE1588)
2558 	priv->tx_conf_en = 1;
2559 #else
2560 	priv->tx_conf_en = 0;
2561 #endif
2562 
2563 	/* Allocate memory for hardware structure for queues */
2564 	ret = dpaa2_alloc_rx_tx_queues(eth_dev);
2565 	if (ret) {
2566 		DPAA2_PMD_ERR("Queue allocation Failed");
2567 		goto init_err;
2568 	}
2569 
2570 	/* Allocate memory for storing MAC addresses.
2571 	 * Table of mac_filter_entries size is allocated so that RTE ether lib
2572 	 * can add MAC entries when rte_eth_dev_mac_addr_add is called.
2573 	 */
2574 	eth_dev->data->mac_addrs = rte_zmalloc("dpni",
2575 		RTE_ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
2576 	if (eth_dev->data->mac_addrs == NULL) {
2577 		DPAA2_PMD_ERR(
2578 		   "Failed to allocate %d bytes needed to store MAC addresses",
2579 		   RTE_ETHER_ADDR_LEN * attr.mac_filter_entries);
2580 		ret = -ENOMEM;
2581 		goto init_err;
2582 	}
2583 
2584 	ret = populate_mac_addr(dpni_dev, priv, &eth_dev->data->mac_addrs[0]);
2585 	if (ret) {
2586 		DPAA2_PMD_ERR("Unable to fetch MAC Address for device");
2587 		rte_free(eth_dev->data->mac_addrs);
2588 		eth_dev->data->mac_addrs = NULL;
2589 		goto init_err;
2590 	}
2591 
2592 	/* ... tx buffer layout ... */
2593 	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
2594 	if (priv->tx_conf_en) {
2595 		layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
2596 				 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2597 		layout.pass_timestamp = true;
2598 	} else {
2599 		layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
2600 	}
2601 	layout.pass_frame_status = 1;
2602 	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
2603 				     DPNI_QUEUE_TX, &layout);
2604 	if (ret) {
2605 		DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret);
2606 		goto init_err;
2607 	}
2608 
2609 	/* ... tx-conf and error buffer layout ... */
2610 	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
2611 	if (priv->tx_conf_en) {
2612 		layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
2613 				 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2614 		layout.pass_timestamp = true;
2615 	} else {
2616 		layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
2617 	}
2618 	layout.pass_frame_status = 1;
2619 	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
2620 				     DPNI_QUEUE_TX_CONFIRM, &layout);
2621 	if (ret) {
2622 		DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout",
2623 			     ret);
2624 		goto init_err;
2625 	}
2626 
2627 	eth_dev->dev_ops = &dpaa2_ethdev_ops;
2628 
2629 	if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) {
2630 		eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
2631 		DPAA2_PMD_INFO("Loopback mode");
2632 	} else if (dpaa2_get_devargs(dev->devargs, DRIVER_NO_PREFETCH_MODE)) {
2633 		eth_dev->rx_pkt_burst = dpaa2_dev_rx;
2634 		DPAA2_PMD_INFO("No Prefetch mode");
2635 	} else {
2636 		eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
2637 	}
2638 	eth_dev->tx_pkt_burst = dpaa2_dev_tx;
2639 
2640 	/*Init fields w.r.t. classficaition*/
2641 	memset(&priv->extract.qos_key_extract, 0,
2642 		sizeof(struct dpaa2_key_extract));
2643 	priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64);
2644 	if (!priv->extract.qos_extract_param) {
2645 		DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow "
2646 			    " classificaiton ", ret);
2647 		goto init_err;
2648 	}
2649 	priv->extract.qos_key_extract.key_info.ipv4_src_offset =
2650 		IP_ADDRESS_OFFSET_INVALID;
2651 	priv->extract.qos_key_extract.key_info.ipv4_dst_offset =
2652 		IP_ADDRESS_OFFSET_INVALID;
2653 	priv->extract.qos_key_extract.key_info.ipv6_src_offset =
2654 		IP_ADDRESS_OFFSET_INVALID;
2655 	priv->extract.qos_key_extract.key_info.ipv6_dst_offset =
2656 		IP_ADDRESS_OFFSET_INVALID;
2657 
2658 	for (i = 0; i < MAX_TCS; i++) {
2659 		memset(&priv->extract.tc_key_extract[i], 0,
2660 			sizeof(struct dpaa2_key_extract));
2661 		priv->extract.tc_extract_param[i] =
2662 			(size_t)rte_malloc(NULL, 256, 64);
2663 		if (!priv->extract.tc_extract_param[i]) {
2664 			DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow classificaiton",
2665 				     ret);
2666 			goto init_err;
2667 		}
2668 		priv->extract.tc_key_extract[i].key_info.ipv4_src_offset =
2669 			IP_ADDRESS_OFFSET_INVALID;
2670 		priv->extract.tc_key_extract[i].key_info.ipv4_dst_offset =
2671 			IP_ADDRESS_OFFSET_INVALID;
2672 		priv->extract.tc_key_extract[i].key_info.ipv6_src_offset =
2673 			IP_ADDRESS_OFFSET_INVALID;
2674 		priv->extract.tc_key_extract[i].key_info.ipv6_dst_offset =
2675 			IP_ADDRESS_OFFSET_INVALID;
2676 	}
2677 
2678 	ret = dpni_set_max_frame_length(dpni_dev, CMD_PRI_LOW, priv->token,
2679 					RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN
2680 					+ VLAN_TAG_SIZE);
2681 	if (ret) {
2682 		DPAA2_PMD_ERR("Unable to set mtu. check config");
2683 		goto init_err;
2684 	}
2685 
2686 	/*TODO To enable soft parser support DPAA2 driver needs to integrate
2687 	 * with external entity to receive byte code for software sequence
2688 	 * and same will be offload to the H/W using MC interface.
2689 	 * Currently it is assumed that DPAA2 driver has byte code by some
2690 	 * mean and same if offloaded to H/W.
2691 	 */
2692 	if (getenv("DPAA2_ENABLE_SOFT_PARSER")) {
2693 		WRIOP_SS_INITIALIZER(priv);
2694 		ret = dpaa2_eth_load_wriop_soft_parser(priv, DPNI_SS_INGRESS);
2695 		if (ret < 0) {
2696 			DPAA2_PMD_ERR(" Error(%d) in loading softparser\n",
2697 				      ret);
2698 			return ret;
2699 		}
2700 
2701 		ret = dpaa2_eth_enable_wriop_soft_parser(priv,
2702 							 DPNI_SS_INGRESS);
2703 		if (ret < 0) {
2704 			DPAA2_PMD_ERR(" Error(%d) in enabling softparser\n",
2705 				      ret);
2706 			return ret;
2707 		}
2708 	}
2709 	RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
2710 	return 0;
2711 init_err:
2712 	dpaa2_dev_uninit(eth_dev);
2713 	return ret;
2714 }
2715 
2716 static int
2717 dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
2718 {
2719 	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
2720 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_dev->process_private;
2721 	int i, ret;
2722 
2723 	PMD_INIT_FUNC_TRACE();
2724 
2725 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2726 		return 0;
2727 
2728 	if (!dpni) {
2729 		DPAA2_PMD_WARN("Already closed or not started");
2730 		return -1;
2731 	}
2732 
2733 	dpaa2_dev_close(eth_dev);
2734 
2735 	dpaa2_free_rx_tx_queues(eth_dev);
2736 
2737 	/* Close the device at underlying layer*/
2738 	ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
2739 	if (ret) {
2740 		DPAA2_PMD_ERR(
2741 			     "Failure closing dpni device with err code %d",
2742 			     ret);
2743 	}
2744 
2745 	/* Free the allocated memory for ethernet private data and dpni*/
2746 	priv->hw = NULL;
2747 	eth_dev->process_private = NULL;
2748 	rte_free(dpni);
2749 
2750 	for (i = 0; i < MAX_TCS; i++)
2751 		rte_free((void *)(size_t)priv->extract.tc_extract_param[i]);
2752 
2753 	if (priv->extract.qos_extract_param)
2754 		rte_free((void *)(size_t)priv->extract.qos_extract_param);
2755 
2756 	eth_dev->dev_ops = NULL;
2757 	eth_dev->rx_pkt_burst = NULL;
2758 	eth_dev->tx_pkt_burst = NULL;
2759 
2760 	DPAA2_PMD_INFO("%s: netdev deleted", eth_dev->data->name);
2761 	return 0;
2762 }
2763 
2764 static int
2765 rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
2766 		struct rte_dpaa2_device *dpaa2_dev)
2767 {
2768 	struct rte_eth_dev *eth_dev;
2769 	struct dpaa2_dev_priv *dev_priv;
2770 	int diag;
2771 
2772 	if ((DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) >
2773 		RTE_PKTMBUF_HEADROOM) {
2774 		DPAA2_PMD_ERR(
2775 		"RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA2 Annotation req(%d)",
2776 		RTE_PKTMBUF_HEADROOM,
2777 		DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE);
2778 
2779 		return -1;
2780 	}
2781 
2782 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2783 		eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
2784 		if (!eth_dev)
2785 			return -ENODEV;
2786 		dev_priv = rte_zmalloc("ethdev private structure",
2787 				       sizeof(struct dpaa2_dev_priv),
2788 				       RTE_CACHE_LINE_SIZE);
2789 		if (dev_priv == NULL) {
2790 			DPAA2_PMD_CRIT(
2791 				"Unable to allocate memory for private data");
2792 			rte_eth_dev_release_port(eth_dev);
2793 			return -ENOMEM;
2794 		}
2795 		eth_dev->data->dev_private = (void *)dev_priv;
2796 		/* Store a pointer to eth_dev in dev_private */
2797 		dev_priv->eth_dev = eth_dev;
2798 		dev_priv->tx_conf_en = 0;
2799 	} else {
2800 		eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
2801 		if (!eth_dev) {
2802 			DPAA2_PMD_DEBUG("returning enodev");
2803 			return -ENODEV;
2804 		}
2805 	}
2806 
2807 	eth_dev->device = &dpaa2_dev->device;
2808 
2809 	dpaa2_dev->eth_dev = eth_dev;
2810 	eth_dev->data->rx_mbuf_alloc_failed = 0;
2811 
2812 	if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC)
2813 		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
2814 
2815 	/* Invoke PMD device initialization function */
2816 	diag = dpaa2_dev_init(eth_dev);
2817 	if (diag == 0) {
2818 		rte_eth_dev_probing_finish(eth_dev);
2819 		return 0;
2820 	}
2821 
2822 	rte_eth_dev_release_port(eth_dev);
2823 	return diag;
2824 }
2825 
2826 static int
2827 rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
2828 {
2829 	struct rte_eth_dev *eth_dev;
2830 
2831 	eth_dev = dpaa2_dev->eth_dev;
2832 	dpaa2_dev_uninit(eth_dev);
2833 
2834 	rte_eth_dev_release_port(eth_dev);
2835 
2836 	return 0;
2837 }
2838 
2839 static struct rte_dpaa2_driver rte_dpaa2_pmd = {
2840 	.drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA,
2841 	.drv_type = DPAA2_ETH,
2842 	.probe = rte_dpaa2_probe,
2843 	.remove = rte_dpaa2_remove,
2844 };
2845 
2846 RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);
2847 RTE_PMD_REGISTER_PARAM_STRING(net_dpaa2,
2848 		DRIVER_LOOPBACK_MODE "=<int> "
2849 		DRIVER_NO_PREFETCH_MODE "=<int>");
2850 RTE_LOG_REGISTER(dpaa2_logtype_pmd, pmd.net.dpaa2, NOTICE);
2851