xref: /dpdk/drivers/net/dpaa/dpaa_ethdev.c (revision 3e0ceb9f17fff027fc6c8f18de35e11719ffa61e)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
5  *   Copyright 2017 NXP.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of  Freescale Semiconductor, Inc nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 /* System headers */
34 #include <stdio.h>
35 #include <inttypes.h>
36 #include <unistd.h>
37 #include <limits.h>
38 #include <sched.h>
39 #include <signal.h>
40 #include <pthread.h>
41 #include <sys/types.h>
42 #include <sys/syscall.h>
43 
44 #include <rte_byteorder.h>
45 #include <rte_common.h>
46 #include <rte_interrupts.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_pci.h>
50 #include <rte_atomic.h>
51 #include <rte_branch_prediction.h>
52 #include <rte_memory.h>
53 #include <rte_tailq.h>
54 #include <rte_eal.h>
55 #include <rte_alarm.h>
56 #include <rte_ether.h>
57 #include <rte_ethdev.h>
58 #include <rte_malloc.h>
59 #include <rte_ring.h>
60 
61 #include <rte_dpaa_bus.h>
62 #include <rte_dpaa_logs.h>
63 #include <dpaa_mempool.h>
64 
65 #include <dpaa_ethdev.h>
66 #include <dpaa_rxtx.h>
67 
68 #include <fsl_usd.h>
69 #include <fsl_qman.h>
70 #include <fsl_bman.h>
71 #include <fsl_fman.h>
72 
73 /* Keep track of whether QMAN and BMAN have been globally initialized */
74 static int is_global_init;
75 
76 struct rte_dpaa_xstats_name_off {
77 	char name[RTE_ETH_XSTATS_NAME_SIZE];
78 	uint32_t offset;
79 };
80 
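/* Table mapping each xstat name to the byte offset of its counter within
 * struct dpaa_if_stats; the offset is divided by 8 to index the 64-bit
 * counter snapshot read back from the FMan.
 */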
81 static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
82 	{"rx_align_err",
83 		offsetof(struct dpaa_if_stats, raln)},
84 	{"rx_valid_pause",
85 		offsetof(struct dpaa_if_stats, rxpf)},
86 	{"rx_fcs_err",
87 		offsetof(struct dpaa_if_stats, rfcs)},
88 	{"rx_vlan_frame",
89 		offsetof(struct dpaa_if_stats, rvlan)},
90 	{"rx_frame_err",
91 		offsetof(struct dpaa_if_stats, rerr)},
92 	{"rx_drop_err",
93 		offsetof(struct dpaa_if_stats, rdrp)},
94 	{"rx_undersized",
95 		offsetof(struct dpaa_if_stats, rund)},
96 	{"rx_oversize_err",
97 		offsetof(struct dpaa_if_stats, rovr)},
98 	{"rx_fragment_pkt",
99 		offsetof(struct dpaa_if_stats, rfrg)},
100 	{"tx_valid_pause",
101 		offsetof(struct dpaa_if_stats, txpf)},
102 	{"tx_fcs_err",
103 		offsetof(struct dpaa_if_stats, terr)},
104 	{"tx_vlan_frame",
105 		offsetof(struct dpaa_if_stats, tvlan)},
106 	{"tx_undersized",
107 		offsetof(struct dpaa_if_stats, tund)},
108 };
109 
110 static int
111 dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
112 {
113 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
114 
115 	PMD_INIT_FUNC_TRACE();
116 
117 	if (mtu < ETHER_MIN_MTU)
118 		return -EINVAL;
119 	if (mtu > ETHER_MAX_LEN)
120 		dev->data->dev_conf.rxmode.jumbo_frame = 1;
121 	else
122 		dev->data->dev_conf.rxmode.jumbo_frame = 0;
123 
124 	dev->data->dev_conf.rxmode.max_rx_pkt_len = mtu;
125 
126 	fman_if_set_maxfrm(dpaa_intf->fif, mtu);
127 
128 	return 0;
129 }
130 
131 static int
132 dpaa_eth_dev_configure(struct rte_eth_dev *dev)
133 {
134 	PMD_INIT_FUNC_TRACE();
135 
136 	if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
137 		if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
138 		    DPAA_MAX_RX_PKT_LEN)
139 			return dpaa_mtu_set(dev,
140 				dev->data->dev_conf.rxmode.max_rx_pkt_len);
141 		else
142 			return -1;
143 	}
144 	return 0;
145 }
146 
147 static const uint32_t *
148 dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
149 {
150 	static const uint32_t ptypes[] = {
151 		/* todo - add more types */
152 		RTE_PTYPE_L2_ETHER,
153 		RTE_PTYPE_L3_IPV4,
154 		RTE_PTYPE_L3_IPV4_EXT,
155 		RTE_PTYPE_L3_IPV6,
156 		RTE_PTYPE_L3_IPV6_EXT,
157 		RTE_PTYPE_L4_TCP,
158 		RTE_PTYPE_L4_UDP,
159 		RTE_PTYPE_L4_SCTP
160 	};
161 
162 	PMD_INIT_FUNC_TRACE();
163 
164 	if (dev->rx_pkt_burst == dpaa_eth_queue_rx)
165 		return ptypes;
166 	return NULL;
167 }
168 
169 static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
170 {
171 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
172 
173 	PMD_INIT_FUNC_TRACE();
174 
175 	/* Change tx callback to the real one */
176 	dev->tx_pkt_burst = dpaa_eth_queue_tx;
177 	fman_if_enable_rx(dpaa_intf->fif);
178 
179 	return 0;
180 }
181 
182 static void dpaa_eth_dev_stop(struct rte_eth_dev *dev)
183 {
184 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
185 
186 	PMD_INIT_FUNC_TRACE();
187 
188 	fman_if_disable_rx(dpaa_intf->fif);
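	/* While the port is stopped, point Tx burst at a stub that drops
	 * every packet.
	 */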
189 	dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
190 }
191 
192 static void dpaa_eth_dev_close(struct rte_eth_dev *dev)
193 {
194 	PMD_INIT_FUNC_TRACE();
195 
196 	dpaa_eth_dev_stop(dev);
197 }
198 
199 static int
200 dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused,
201 		     char *fw_version,
202 		     size_t fw_size)
203 {
204 	int ret;
205 	FILE *svr_file = NULL;
206 	unsigned int svr_ver = 0;
207 
208 	PMD_INIT_FUNC_TRACE();
209 
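	/* The reported "firmware version" is the SoC SVR value read from
	 * DPAA_SOC_ID_FILE combined with the FMan IP block revision.
	 */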
210 	svr_file = fopen(DPAA_SOC_ID_FILE, "r");
211 	if (!svr_file) {
212 		DPAA_PMD_ERR("Unable to open SoC device");
213 		return -ENOTSUP; /* Not supported on this infra */
214 	}
215 
216 	ret = fscanf(svr_file, "svr:%x", &svr_ver);
	fclose(svr_file); /* the SoC id file is no longer needed */
217 	if (ret <= 0) {
218 		DPAA_PMD_ERR("Unable to read SoC device");
219 		return -ENOTSUP; /* Not supported on this infra */
220 	}
221 
222 	ret = snprintf(fw_version, fw_size,
223 		       "svr:%x-fman-v%x",
224 		       svr_ver,
225 		       fman_ip_rev);
226 
227 	ret += 1; /* add the size of '\0' */
228 	if (fw_size < (uint32_t)ret)
229 		return ret;
230 	else
231 		return 0;
232 }
233 
234 static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
235 			      struct rte_eth_dev_info *dev_info)
236 {
237 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
238 
239 	PMD_INIT_FUNC_TRACE();
240 
241 	dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
242 	dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
243 	dev_info->min_rx_bufsize = DPAA_MIN_RX_BUF_SIZE;
244 	dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
245 	dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
246 	dev_info->max_hash_mac_addrs = 0;
247 	dev_info->max_vfs = 0;
248 	dev_info->max_vmdq_pools = ETH_16_POOLS;
249 	dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
250 	dev_info->speed_capa = (ETH_LINK_SPEED_1G |
251 				ETH_LINK_SPEED_10G);
252 	dev_info->rx_offload_capa =
253 		(DEV_RX_OFFLOAD_IPV4_CKSUM |
254 		DEV_RX_OFFLOAD_UDP_CKSUM   |
255 		DEV_RX_OFFLOAD_TCP_CKSUM);
256 	dev_info->tx_offload_capa =
257 		(DEV_TX_OFFLOAD_IPV4_CKSUM  |
258 		DEV_TX_OFFLOAD_UDP_CKSUM   |
259 		DEV_TX_OFFLOAD_TCP_CKSUM);
260 }
261 
262 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
263 				int wait_to_complete __rte_unused)
264 {
265 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
266 	struct rte_eth_link *link = &dev->data->dev_link;
267 
268 	PMD_INIT_FUNC_TRACE();
269 
270 	if (dpaa_intf->fif->mac_type == fman_mac_1g)
271 		link->link_speed = 1000;
272 	else if (dpaa_intf->fif->mac_type == fman_mac_10g)
273 		link->link_speed = 10000;
274 	else
275 		DPAA_PMD_ERR("invalid link_speed: %s, %d",
276 			     dpaa_intf->name, dpaa_intf->fif->mac_type);
277 
278 	link->link_status = dpaa_intf->valid;
279 	link->link_duplex = ETH_LINK_FULL_DUPLEX;
280 	link->link_autoneg = ETH_LINK_AUTONEG;
281 	return 0;
282 }
283 
284 static int dpaa_eth_stats_get(struct rte_eth_dev *dev,
285 			       struct rte_eth_stats *stats)
286 {
287 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
288 
289 	PMD_INIT_FUNC_TRACE();
290 
291 	fman_if_stats_get(dpaa_intf->fif, stats);
292 	return 0;
293 }
294 
295 static void dpaa_eth_stats_reset(struct rte_eth_dev *dev)
296 {
297 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
298 
299 	PMD_INIT_FUNC_TRACE();
300 
301 	fman_if_stats_reset(dpaa_intf->fif);
302 }
303 
304 static int
305 dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
306 		    unsigned int n)
307 {
308 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
309 	unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
310 	uint64_t values[sizeof(struct dpaa_if_stats) / 8];
311 
312 	if (xstats == NULL)
313 		return 0;
314 
315 	if (n < num)
316 		return num;
317 
318 	fman_if_stats_get_all(dpaa_intf->fif, values,
319 			      sizeof(struct dpaa_if_stats) / 8);
320 
321 	for (i = 0; i < num; i++) {
322 		xstats[i].id = i;
323 		xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
324 	}
325 	return i;
326 }
327 
328 static int
329 dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
330 		      struct rte_eth_xstat_name *xstats_names,
331 		      __rte_unused unsigned int limit)
332 {
333 	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
334 
335 	if (xstats_names != NULL)
336 		for (i = 0; i < stat_cnt; i++)
337 			snprintf(xstats_names[i].name,
338 				 sizeof(xstats_names[i].name),
339 				 "%s",
340 				 dpaa_xstats_strings[i].name);
341 
342 	return stat_cnt;
343 }
344 
345 static int
346 dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
347 		      uint64_t *values, unsigned int n)
348 {
349 	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
350 	uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8];
351 
352 	if (!ids) {
353 		struct dpaa_if *dpaa_intf = dev->data->dev_private;
354 
355 		if (n < stat_cnt)
356 			return stat_cnt;
357 
358 		if (!values)
359 			return 0;
360 
361 		fman_if_stats_get_all(dpaa_intf->fif, values_copy,
362 				      sizeof(struct dpaa_if_stats) / 8);
363 
364 		for (i = 0; i < stat_cnt; i++)
365 			values[i] =
366 				values_copy[dpaa_xstats_strings[i].offset / 8];
367 
368 		return stat_cnt;
369 	}
370 
371 	dpaa_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);
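	/* ids were supplied: snapshot all counters first, then copy out
	 * the requested ones by index.
	 */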
372 
373 	for (i = 0; i < n; i++) {
374 		if (ids[i] >= stat_cnt) {
375 			DPAA_PMD_ERR("id value isn't valid");
376 			return -1;
377 		}
378 		values[i] = values_copy[ids[i]];
379 	}
380 	return n;
381 }
382 
383 static int
384 dpaa_xstats_get_names_by_id(
385 	struct rte_eth_dev *dev,
386 	struct rte_eth_xstat_name *xstats_names,
387 	const uint64_t *ids,
388 	unsigned int limit)
389 {
390 	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
391 	struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
392 
393 	if (!ids)
394 		return dpaa_xstats_get_names(dev, xstats_names, limit);
395 
396 	dpaa_xstats_get_names(dev, xstats_names_copy, limit);
397 
398 	for (i = 0; i < limit; i++) {
399 		if (ids[i] >= stat_cnt) {
400 			DPAA_PMD_ERR("id value isn't valid");
401 			return -1;
402 		}
403 		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
404 	}
405 	return limit;
406 }
407 
408 static void dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
409 {
410 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
411 
412 	PMD_INIT_FUNC_TRACE();
413 
414 	fman_if_promiscuous_enable(dpaa_intf->fif);
415 }
416 
417 static void dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
418 {
419 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
420 
421 	PMD_INIT_FUNC_TRACE();
422 
423 	fman_if_promiscuous_disable(dpaa_intf->fif);
424 }
425 
426 static void dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
427 {
428 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
429 
430 	PMD_INIT_FUNC_TRACE();
431 
432 	fman_if_set_mcast_filter_table(dpaa_intf->fif);
433 }
434 
435 static void dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
436 {
437 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
438 
439 	PMD_INIT_FUNC_TRACE();
440 
441 	fman_if_reset_mcast_filter_table(dpaa_intf->fif);
442 }
443 
444 static
445 int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
446 			    uint16_t nb_desc __rte_unused,
447 			    unsigned int socket_id __rte_unused,
448 			    const struct rte_eth_rxconf *rx_conf __rte_unused,
449 			    struct rte_mempool *mp)
450 {
451 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
452 
453 	PMD_INIT_FUNC_TRACE();
454 
455 	DPAA_PMD_INFO("Rx queue setup for queue index: %d", queue_idx);
456 
457 	if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
458 		struct fman_if_ic_params icp;
459 		uint32_t fd_offset;
460 		uint32_t bp_size;
461 
462 		if (!mp->pool_data) {
463 			DPAA_PMD_ERR("Not an offloaded buffer pool!");
464 			return -1;
465 		}
466 		dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
467 
468 		memset(&icp, 0, sizeof(icp));
469 		/* set ICEOF to the default value, which is 0 */
470 		icp.iciof = DEFAULT_ICIOF;
471 		icp.iceof = DEFAULT_RX_ICEOF;
472 		icp.icsz = DEFAULT_ICSZ;
473 		fman_if_set_ic_params(dpaa_intf->fif, &icp);
474 
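		/* Start frame data past the mbuf headroom plus the DPAA
		 * hardware reserve area of each buffer.
		 */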
475 		fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
476 		fman_if_set_fdoff(dpaa_intf->fif, fd_offset);
477 
478 		/* Buffer pool size should be equal to Dataroom Size */
479 		bp_size = rte_pktmbuf_data_room_size(mp);
480 		fman_if_set_bp(dpaa_intf->fif, mp->size,
481 			       dpaa_intf->bp_info->bpid, bp_size);
482 		dpaa_intf->valid = 1;
483 		DPAA_PMD_INFO("if =%s - fd_offset = %d offset = %d",
484 			    dpaa_intf->name, fd_offset,
485 			fman_if_get_fdoff(dpaa_intf->fif));
486 	}
487 	dev->data->rx_queues[queue_idx] = &dpaa_intf->rx_queues[queue_idx];
488 
489 	return 0;
490 }
491 
492 static
493 void dpaa_eth_rx_queue_release(void *rxq __rte_unused)
494 {
495 	PMD_INIT_FUNC_TRACE();
496 }
497 
498 static
499 int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
500 			    uint16_t nb_desc __rte_unused,
501 		unsigned int socket_id __rte_unused,
502 		const struct rte_eth_txconf *tx_conf __rte_unused)
503 {
504 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
505 
506 	PMD_INIT_FUNC_TRACE();
507 
508 	DPAA_PMD_INFO("Tx queue setup for queue index: %d", queue_idx);
509 	dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx];
510 	return 0;
511 }
512 
513 static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
514 {
515 	PMD_INIT_FUNC_TRACE();
516 }
517 
518 static int dpaa_link_down(struct rte_eth_dev *dev)
519 {
520 	PMD_INIT_FUNC_TRACE();
521 
522 	dpaa_eth_dev_stop(dev);
523 	return 0;
524 }
525 
526 static int dpaa_link_up(struct rte_eth_dev *dev)
527 {
528 	PMD_INIT_FUNC_TRACE();
529 
530 	dpaa_eth_dev_start(dev);
531 	return 0;
532 }
533 
534 static int
535 dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
536 		   struct rte_eth_fc_conf *fc_conf)
537 {
538 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
539 	struct rte_eth_fc_conf *net_fc;
540 
541 	PMD_INIT_FUNC_TRACE();
542 
543 	if (!(dpaa_intf->fc_conf)) {
544 		dpaa_intf->fc_conf = rte_zmalloc(NULL,
545 			sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
546 		if (!dpaa_intf->fc_conf) {
547 			DPAA_PMD_ERR("unable to save flow control info");
548 			return -ENOMEM;
549 		}
550 	}
551 	net_fc = dpaa_intf->fc_conf;
552 
553 	if (fc_conf->high_water < fc_conf->low_water) {
554 		DPAA_PMD_ERR("Incorrect Flow Control Configuration");
555 		return -EINVAL;
556 	}
557 
558 	if (fc_conf->mode == RTE_FC_NONE) {
559 		return 0;
560 	} else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
561 		 fc_conf->mode == RTE_FC_FULL) {
562 		fman_if_set_fc_threshold(dpaa_intf->fif, fc_conf->high_water,
563 					 fc_conf->low_water,
564 				dpaa_intf->bp_info->bpid);
565 		if (fc_conf->pause_time)
566 			fman_if_set_fc_quanta(dpaa_intf->fif,
567 					      fc_conf->pause_time);
568 	}
569 
570 	/* Save the information in dpaa device */
571 	net_fc->pause_time = fc_conf->pause_time;
572 	net_fc->high_water = fc_conf->high_water;
573 	net_fc->low_water = fc_conf->low_water;
574 	net_fc->send_xon = fc_conf->send_xon;
575 	net_fc->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd;
576 	net_fc->mode = fc_conf->mode;
577 	net_fc->autoneg = fc_conf->autoneg;
578 
579 	return 0;
580 }
581 
582 static int
583 dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
584 		   struct rte_eth_fc_conf *fc_conf)
585 {
586 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
587 	struct rte_eth_fc_conf *net_fc = dpaa_intf->fc_conf;
588 	int ret;
589 
590 	PMD_INIT_FUNC_TRACE();
591 
592 	if (net_fc) {
593 		fc_conf->pause_time = net_fc->pause_time;
594 		fc_conf->high_water = net_fc->high_water;
595 		fc_conf->low_water = net_fc->low_water;
596 		fc_conf->send_xon = net_fc->send_xon;
597 		fc_conf->mac_ctrl_frame_fwd = net_fc->mac_ctrl_frame_fwd;
598 		fc_conf->mode = net_fc->mode;
599 		fc_conf->autoneg = net_fc->autoneg;
600 		return 0;
601 	}
602 	ret = fman_if_get_fc_threshold(dpaa_intf->fif);
603 	if (ret) {
604 		fc_conf->mode = RTE_FC_TX_PAUSE;
605 		fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif);
606 	} else {
607 		fc_conf->mode = RTE_FC_NONE;
608 	}
609 
610 	return 0;
611 }
612 
613 static int
614 dpaa_dev_add_mac_addr(struct rte_eth_dev *dev,
615 			     struct ether_addr *addr,
616 			     uint32_t index,
617 			     __rte_unused uint32_t pool)
618 {
619 	int ret;
620 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
621 
622 	PMD_INIT_FUNC_TRACE();
623 
624 	ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, index);
625 
626 	if (ret)
627 		RTE_LOG(ERR, PMD, "error: Adding the MAC ADDR failed:"
628 			" err = %d", ret);
629 	return 0;
630 	return ret;
631 
632 static void
633 dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev,
634 			  uint32_t index)
635 {
636 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
637 
638 	PMD_INIT_FUNC_TRACE();
639 
640 	fman_if_clear_mac_addr(dpaa_intf->fif, index);
641 }
642 
643 static void
644 dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
645 		       struct ether_addr *addr)
646 {
647 	int ret;
648 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
649 
650 	PMD_INIT_FUNC_TRACE();
651 
652 	ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, 0);
653 	if (ret)
654 		RTE_LOG(ERR, PMD, "error: Setting the MAC ADDR failed %d", ret);
655 }
656 
657 static struct eth_dev_ops dpaa_devops = {
658 	.dev_configure		  = dpaa_eth_dev_configure,
659 	.dev_start		  = dpaa_eth_dev_start,
660 	.dev_stop		  = dpaa_eth_dev_stop,
661 	.dev_close		  = dpaa_eth_dev_close,
662 	.dev_infos_get		  = dpaa_eth_dev_info,
663 	.dev_supported_ptypes_get = dpaa_supported_ptypes_get,
664 
665 	.rx_queue_setup		  = dpaa_eth_rx_queue_setup,
666 	.tx_queue_setup		  = dpaa_eth_tx_queue_setup,
667 	.rx_queue_release	  = dpaa_eth_rx_queue_release,
668 	.tx_queue_release	  = dpaa_eth_tx_queue_release,
669 
670 	.flow_ctrl_get		  = dpaa_flow_ctrl_get,
671 	.flow_ctrl_set		  = dpaa_flow_ctrl_set,
672 
673 	.link_update		  = dpaa_eth_link_update,
674 	.stats_get		  = dpaa_eth_stats_get,
675 	.xstats_get		  = dpaa_dev_xstats_get,
676 	.xstats_get_by_id	  = dpaa_xstats_get_by_id,
677 	.xstats_get_names_by_id	  = dpaa_xstats_get_names_by_id,
678 	.xstats_get_names	  = dpaa_xstats_get_names,
679 	.xstats_reset		  = dpaa_eth_stats_reset,
680 	.stats_reset		  = dpaa_eth_stats_reset,
681 	.promiscuous_enable	  = dpaa_eth_promiscuous_enable,
682 	.promiscuous_disable	  = dpaa_eth_promiscuous_disable,
683 	.allmulticast_enable	  = dpaa_eth_multicast_enable,
684 	.allmulticast_disable	  = dpaa_eth_multicast_disable,
685 	.mtu_set		  = dpaa_mtu_set,
686 	.dev_set_link_down	  = dpaa_link_down,
687 	.dev_set_link_up	  = dpaa_link_up,
688 	.mac_addr_add		  = dpaa_dev_add_mac_addr,
689 	.mac_addr_remove	  = dpaa_dev_remove_mac_addr,
690 	.mac_addr_set		  = dpaa_dev_set_mac_addr,
691 
692 	.fw_version_get		  = dpaa_fw_version_get,
693 };
694 
695 static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf)
696 {
697 	struct rte_eth_fc_conf *fc_conf;
698 	int ret;
699 
700 	PMD_INIT_FUNC_TRACE();
701 
702 	if (!(dpaa_intf->fc_conf)) {
703 		dpaa_intf->fc_conf = rte_zmalloc(NULL,
704 			sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
705 		if (!dpaa_intf->fc_conf) {
706 			DPAA_PMD_ERR("unable to save flow control info");
707 			return -ENOMEM;
708 		}
709 	}
710 	fc_conf = dpaa_intf->fc_conf;
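	/* Seed the cached configuration from whatever pause settings are
	 * currently programmed in the FMan.
	 */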
711 	ret = fman_if_get_fc_threshold(dpaa_intf->fif);
712 	if (ret) {
713 		fc_conf->mode = RTE_FC_TX_PAUSE;
714 		fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif);
715 	} else {
716 		fc_conf->mode = RTE_FC_NONE;
717 	}
718 
719 	return 0;
720 }
721 
722 /* Initialise an Rx FQ */
723 static int dpaa_rx_queue_init(struct qman_fq *fq,
724 			      uint32_t fqid)
725 {
726 	struct qm_mcc_initfq opts;
727 	int ret;
728 
729 	PMD_INIT_FUNC_TRACE();
730 
731 	ret = qman_reserve_fqid(fqid);
732 	if (ret) {
733 		DPAA_PMD_ERR("reserve rx fqid %d failed with ret: %d",
734 			     fqid, ret);
735 		return -EINVAL;
736 	}
737 
738 	DPAA_PMD_DEBUG("creating rx fq %p, fqid %d", fq, fqid);
739 	ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
740 	if (ret) {
741 		DPAA_PMD_ERR("create rx fqid %d failed with ret: %d",
742 			fqid, ret);
743 		return ret;
744 	}
745 
746 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
747 		       QM_INITFQ_WE_CONTEXTA;
748 
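	/* Deliver on the Rx priority work queue and stash the annotation,
	 * data and context cachelines into the consuming core's cache.
	 */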
749 	opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
750 	opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
751 			   QM_FQCTRL_PREFERINCACHE;
752 	opts.fqd.context_a.stashing.exclusive = 0;
753 	opts.fqd.context_a.stashing.annotation_cl = DPAA_IF_RX_ANNOTATION_STASH;
754 	opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
755 	opts.fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;
756 
757 	/* Enable tail drop */
758 	opts.we_mask = opts.we_mask | QM_INITFQ_WE_TDTHRESH;
759 	opts.fqd.fq_ctrl = opts.fqd.fq_ctrl | QM_FQCTRL_TDE;
760 	qm_fqd_taildrop_set(&opts.fqd.td, CONG_THRESHOLD_RX_Q, 1);
761 
762 	ret = qman_init_fq(fq, 0, &opts);
763 	if (ret)
764 		DPAA_PMD_ERR("init rx fqid %d failed with ret: %d", fqid, ret);
765 	return ret;
766 }
767 
768 /* Initialise a Tx FQ */
769 static int dpaa_tx_queue_init(struct qman_fq *fq,
770 			      struct fman_if *fman_intf)
771 {
772 	struct qm_mcc_initfq opts;
773 	int ret;
774 
775 	PMD_INIT_FUNC_TRACE();
776 
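	/* Tx FQs use a dynamically allocated FQID and enqueue towards the
	 * FMan direct-connect portal channel of this interface.
	 */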
777 	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
778 			     QMAN_FQ_FLAG_TO_DCPORTAL, fq);
779 	if (ret) {
780 		DPAA_PMD_ERR("create tx fq failed with ret: %d", ret);
781 		return ret;
782 	}
783 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
784 		       QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;
785 	opts.fqd.dest.channel = fman_intf->tx_channel_id;
786 	opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY;
787 	opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
788 	opts.fqd.context_b = 0;
789 	/* no tx-confirmation */
790 	opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
791 	opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
792 	DPAA_PMD_DEBUG("init tx fq %p, fqid %d", fq, fq->fqid);
793 	ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
794 	if (ret)
795 		DPAA_PMD_ERR("init tx fqid %d failed %d", fq->fqid, ret);
796 	return ret;
797 }
798 
799 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
800 /* Initialise a DEBUG FQ ([rt]x_error, rx_default). */
801 static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
802 {
803 	struct qm_mcc_initfq opts;
804 	int ret;
805 
806 	PMD_INIT_FUNC_TRACE();
807 
808 	ret = qman_reserve_fqid(fqid);
809 	if (ret) {
810 		DPAA_PMD_ERR("Reserve debug fqid %d failed with ret: %d",
811 			fqid, ret);
812 		return -EINVAL;
813 	}
814 	/* "map" this Rx FQ to one of the interfaces Tx FQID */
815 	DPAA_PMD_DEBUG("Creating debug fq %p, fqid %d", fq, fqid);
816 	ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
817 	if (ret) {
818 		DPAA_PMD_ERR("create debug fqid %d failed with ret: %d",
819 			fqid, ret);
820 		return ret;
821 	}
822 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
823 	opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY;
824 	ret = qman_init_fq(fq, 0, &opts);
825 	if (ret)
826 		DPAA_PMD_ERR("init debug fqid %d failed with ret: %d",
827 			    fqid, ret);
828 	return ret;
829 }
830 #endif
831 
832 /* Initialise a network interface */
833 static int
834 dpaa_dev_init(struct rte_eth_dev *eth_dev)
835 {
836 	int num_cores, num_rx_fqs, fqid;
837 	int loop, ret = 0;
838 	int dev_id;
839 	struct rte_dpaa_device *dpaa_device;
840 	struct dpaa_if *dpaa_intf;
841 	struct fm_eth_port_cfg *cfg;
842 	struct fman_if *fman_intf;
843 	struct fman_if_bpool *bp, *tmp_bp;
844 
845 	PMD_INIT_FUNC_TRACE();
846 
847 	/* For secondary processes, the primary has done all the work */
848 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
849 		return 0;
850 
851 	dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
852 	dev_id = dpaa_device->id.dev_id;
853 	dpaa_intf = eth_dev->data->dev_private;
854 	cfg = &dpaa_netcfg->port_cfg[dev_id];
855 	fman_intf = cfg->fman_if;
856 
857 	dpaa_intf->name = dpaa_device->name;
858 
859 	/* save fman_if & cfg in the interface structure */
860 	dpaa_intf->fif = fman_intf;
861 	dpaa_intf->ifid = dev_id;
862 	dpaa_intf->cfg = cfg;
863 
864 	/* Initialize Rx FQ's */
865 	if (getenv("DPAA_NUM_RX_QUEUES"))
866 		num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
867 	else
868 		num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
869 
870 	/* Each device cannot have more than DPAA_PCD_FQID_MULTIPLIER RX
871 	 * queues.
872 	 */
873 	if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_PCD_FQID_MULTIPLIER) {
874 		DPAA_PMD_ERR("Invalid number of RX queues\n");
875 		return -EINVAL;
876 	}
877 
878 	dpaa_intf->rx_queues = rte_zmalloc(NULL,
879 		sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
	if (!dpaa_intf->rx_queues)
		return -ENOMEM;
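	/* Rx FQIDs are derived statically from the interface id, giving
	 * each port its own contiguous block of PCD frame queues.
	 */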
880 	for (loop = 0; loop < num_rx_fqs; loop++) {
881 		fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid *
882 			DPAA_PCD_FQID_MULTIPLIER + loop;
883 		ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop], fqid);
884 		if (ret)
885 			return ret;
886 		dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
887 	}
888 	dpaa_intf->nb_rx_queues = num_rx_fqs;
889 
890 	/* Initialise Tx FQs. Have as many Tx FQs as the number of cores */
891 	num_cores = rte_lcore_count();
892 	dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
893 		num_cores, MAX_CACHELINE);
894 	if (!dpaa_intf->tx_queues)
895 		return -ENOMEM;
896 
897 	for (loop = 0; loop < num_cores; loop++) {
898 		ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
899 					 fman_intf);
900 		if (ret)
901 			return ret;
902 		dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
903 	}
904 	dpaa_intf->nb_tx_queues = num_cores;
905 
906 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
907 	dpaa_debug_queue_init(&dpaa_intf->debug_queues[
908 		DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
909 	dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
910 	dpaa_debug_queue_init(&dpaa_intf->debug_queues[
911 		DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
912 	dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf;
913 #endif
914 
915 	DPAA_PMD_DEBUG("All frame queues created");
916 
917 	/* Get the initial configuration for flow control */
918 	dpaa_fc_set_default(dpaa_intf);
919 
920 	/* reset bpool list, initialize bpool dynamically */
921 	list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
922 		list_del(&bp->node);
923 		free(bp);
924 	}
925 
926 	/* Populate ethdev structure */
927 	eth_dev->dev_ops = &dpaa_devops;
928 	eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
929 	eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
930 
931 	/* Allocate memory for storing MAC addresses */
932 	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
933 		ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
934 	if (eth_dev->data->mac_addrs == NULL) {
935 		DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
936 						"store MAC addresses",
937 				ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
938 		rte_free(dpaa_intf->rx_queues);
939 		rte_free(dpaa_intf->tx_queues);
940 		dpaa_intf->rx_queues = NULL;
941 		dpaa_intf->tx_queues = NULL;
942 		dpaa_intf->nb_rx_queues = 0;
943 		dpaa_intf->nb_tx_queues = 0;
944 		return -ENOMEM;
945 	}
946 
947 	/* copy the primary mac address */
948 	ether_addr_copy(&fman_intf->mac_addr, &eth_dev->data->mac_addrs[0]);
949 
950 	RTE_LOG(INFO, PMD, "net: dpaa: %s: %02x:%02x:%02x:%02x:%02x:%02x\n",
951 		dpaa_device->name,
952 		fman_intf->mac_addr.addr_bytes[0],
953 		fman_intf->mac_addr.addr_bytes[1],
954 		fman_intf->mac_addr.addr_bytes[2],
955 		fman_intf->mac_addr.addr_bytes[3],
956 		fman_intf->mac_addr.addr_bytes[4],
957 		fman_intf->mac_addr.addr_bytes[5]);
958 
959 	/* Disable RX mode */
960 	fman_if_discard_rx_errors(fman_intf);
961 	fman_if_disable_rx(fman_intf);
962 	/* Disable promiscuous mode */
963 	fman_if_promiscuous_disable(fman_intf);
964 	/* Disable multicast */
965 	fman_if_reset_mcast_filter_table(fman_intf);
966 	/* Reset interface statistics */
967 	fman_if_stats_reset(fman_intf);
968 
969 	return 0;
970 }
971 
972 static int
973 dpaa_dev_uninit(struct rte_eth_dev *dev)
974 {
975 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
976 
977 	PMD_INIT_FUNC_TRACE();
978 
979 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
980 		return -EPERM;
981 
982 	if (!dpaa_intf) {
983 		DPAA_PMD_WARN("Already closed or not started");
984 		return -1;
985 	}
986 
987 	dpaa_eth_dev_close(dev);
988 
989 	/* release configuration memory */
990 	if (dpaa_intf->fc_conf)
991 		rte_free(dpaa_intf->fc_conf);
992 
993 	rte_free(dpaa_intf->rx_queues);
994 	dpaa_intf->rx_queues = NULL;
995 
996 	rte_free(dpaa_intf->tx_queues);
997 	dpaa_intf->tx_queues = NULL;
998 
999 	/* free memory for storing MAC addresses */
1000 	rte_free(dev->data->mac_addrs);
1001 	dev->data->mac_addrs = NULL;
1002 
1003 	dev->dev_ops = NULL;
1004 	dev->rx_pkt_burst = NULL;
1005 	dev->tx_pkt_burst = NULL;
1006 
1007 	return 0;
1008 }
1009 
1010 static int
1011 rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
1012 	       struct rte_dpaa_device *dpaa_dev)
1013 {
1014 	int diag;
1015 	int ret;
1016 	struct rte_eth_dev *eth_dev;
1017 
1018 	PMD_INIT_FUNC_TRACE();
1019 
1020 	/* In case of secondary process, the device is already configured
1021 	 * and no further action is required, except portal initialization
1022 	 * and verifying secondary attachment to port name.
1023 	 */
1024 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1025 		eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
1026 		if (!eth_dev)
1027 			return -ENOMEM;
1028 		return 0;
1029 	}
1030 
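	/* QMAN and BMAN are initialised once per process; the software
	 * portal below is then bound to the calling thread.
	 */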
1031 	if (!is_global_init) {
1032 		/* One time load of Qman/Bman drivers */
1033 		ret = qman_global_init();
1034 		if (ret) {
1035 			DPAA_PMD_ERR("QMAN initialization failed: %d",
1036 				     ret);
1037 			return ret;
1038 		}
1039 		ret = bman_global_init();
1040 		if (ret) {
1041 			DPAA_PMD_ERR("BMAN initialization failed: %d",
1042 				     ret);
1043 			return ret;
1044 		}
1045 
1046 		is_global_init = 1;
1047 	}
1048 
1049 	ret = rte_dpaa_portal_init((void *)1);
1050 	if (ret) {
1051 		DPAA_PMD_ERR("Unable to initialize portal");
1052 		return ret;
1053 	}
1054 
1055 	eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
1056 	if (eth_dev == NULL)
1057 		return -ENOMEM;
1058 
1059 	eth_dev->data->dev_private = rte_zmalloc(
1060 					"ethdev private structure",
1061 					sizeof(struct dpaa_if),
1062 					RTE_CACHE_LINE_SIZE);
1063 	if (!eth_dev->data->dev_private) {
1064 		DPAA_PMD_ERR("Cannot allocate memzone for port data");
1065 		rte_eth_dev_release_port(eth_dev);
1066 		return -ENOMEM;
1067 	}
1068 
1069 	eth_dev->device = &dpaa_dev->device;
1070 	eth_dev->device->driver = &dpaa_drv->driver;
1071 	dpaa_dev->eth_dev = eth_dev;
1072 
1073 	/* Invoke PMD device initialization function */
1074 	diag = dpaa_dev_init(eth_dev);
1075 	if (diag == 0)
1076 		return 0;
1077 
1078 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1079 		rte_free(eth_dev->data->dev_private);
1080 
1081 	rte_eth_dev_release_port(eth_dev);
1082 	return diag;
1083 }
1084 
1085 static int
1086 rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
1087 {
1088 	struct rte_eth_dev *eth_dev;
1089 
1090 	PMD_INIT_FUNC_TRACE();
1091 
1092 	eth_dev = dpaa_dev->eth_dev;
1093 	dpaa_dev_uninit(eth_dev);
1094 
1095 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1096 		rte_free(eth_dev->data->dev_private);
1097 
1098 	rte_eth_dev_release_port(eth_dev);
1099 
1100 	return 0;
1101 }
1102 
1103 static struct rte_dpaa_driver rte_dpaa_pmd = {
1104 	.drv_type = FSL_DPAA_ETH,
1105 	.probe = rte_dpaa_probe,
1106 	.remove = rte_dpaa_remove,
1107 };
1108 
1109 RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);
1110