xref: /dpdk/drivers/net/dpaa/dpaa_ethdev.c (revision d81734caccade4dc17d24d2ffd8b71244d35a69f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2017 NXP
5  *
6  */
7 /* System headers */
8 #include <stdio.h>
9 #include <inttypes.h>
10 #include <unistd.h>
11 #include <limits.h>
12 #include <sched.h>
13 #include <signal.h>
14 #include <pthread.h>
15 #include <sys/types.h>
16 #include <sys/syscall.h>
17 
18 #include <rte_byteorder.h>
19 #include <rte_common.h>
20 #include <rte_interrupts.h>
21 #include <rte_log.h>
22 #include <rte_debug.h>
23 #include <rte_pci.h>
24 #include <rte_atomic.h>
25 #include <rte_branch_prediction.h>
26 #include <rte_memory.h>
27 #include <rte_tailq.h>
28 #include <rte_eal.h>
29 #include <rte_alarm.h>
30 #include <rte_ether.h>
31 #include <rte_ethdev.h>
32 #include <rte_malloc.h>
33 #include <rte_ring.h>
34 
35 #include <rte_dpaa_bus.h>
36 #include <rte_dpaa_logs.h>
37 #include <dpaa_mempool.h>
38 
39 #include <dpaa_ethdev.h>
40 #include <dpaa_rxtx.h>
41 
42 #include <fsl_usd.h>
43 #include <fsl_qman.h>
44 #include <fsl_bman.h>
45 #include <fsl_fman.h>
46 
47 /* Keep track of whether QMAN and BMAN have been globally initialized */
48 static int is_global_init;
49 
50 struct rte_dpaa_xstats_name_off {
51 	char name[RTE_ETH_XSTATS_NAME_SIZE];
52 	uint32_t offset;
53 };
54 
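/* Map each extended-statistics name to the byte offset of its 64-bit counter
 * in struct dpaa_if_stats; the xstats handlers below divide these offsets by
 * 8 to index the word array filled in by fman_if_stats_get_all().
 */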
55 static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
56 	{"rx_align_err",
57 		offsetof(struct dpaa_if_stats, raln)},
58 	{"rx_valid_pause",
59 		offsetof(struct dpaa_if_stats, rxpf)},
60 	{"rx_fcs_err",
61 		offsetof(struct dpaa_if_stats, rfcs)},
62 	{"rx_vlan_frame",
63 		offsetof(struct dpaa_if_stats, rvlan)},
64 	{"rx_frame_err",
65 		offsetof(struct dpaa_if_stats, rerr)},
66 	{"rx_drop_err",
67 		offsetof(struct dpaa_if_stats, rdrp)},
68 	{"rx_undersized",
69 		offsetof(struct dpaa_if_stats, rund)},
70 	{"rx_oversize_err",
71 		offsetof(struct dpaa_if_stats, rovr)},
72 	{"rx_fragment_pkt",
73 		offsetof(struct dpaa_if_stats, rfrg)},
74 	{"tx_valid_pause",
75 		offsetof(struct dpaa_if_stats, txpf)},
76 	{"tx_fcs_err",
77 		offsetof(struct dpaa_if_stats, terr)},
78 	{"tx_vlan_frame",
79 		offsetof(struct dpaa_if_stats, tvlan)},
80 	{"tx_undersized",
81 		offsetof(struct dpaa_if_stats, tund)},
82 };
83 
84 static int
85 dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
86 {
87 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
88 
89 	PMD_INIT_FUNC_TRACE();
90 
91 	if (mtu < ETHER_MIN_MTU)
92 		return -EINVAL;
93 	if (mtu > ETHER_MAX_LEN)
94 		dev->data->dev_conf.rxmode.jumbo_frame = 1;
95 	else
96 		dev->data->dev_conf.rxmode.jumbo_frame = 0;
97 
98 	dev->data->dev_conf.rxmode.max_rx_pkt_len = mtu;
99 
100 	fman_if_set_maxfrm(dpaa_intf->fif, mtu);
101 
102 	return 0;
103 }
104 
105 static int
106 dpaa_eth_dev_configure(struct rte_eth_dev *dev)
107 {
108 	PMD_INIT_FUNC_TRACE();
109 
110 	if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
111 		if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
112 		    DPAA_MAX_RX_PKT_LEN)
113 			return dpaa_mtu_set(dev,
114 				dev->data->dev_conf.rxmode.max_rx_pkt_len);
115 		else
116 			return -1;
117 	}
118 	return 0;
119 }
120 
121 static const uint32_t *
122 dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
123 {
124 	static const uint32_t ptypes[] = {
125 		/* todo - add more types */
126 		RTE_PTYPE_L2_ETHER,
127 		RTE_PTYPE_L3_IPV4,
128 		RTE_PTYPE_L3_IPV4_EXT,
129 		RTE_PTYPE_L3_IPV6,
130 		RTE_PTYPE_L3_IPV6_EXT,
131 		RTE_PTYPE_L4_TCP,
132 		RTE_PTYPE_L4_UDP,
133 		RTE_PTYPE_L4_SCTP
134 	};
135 
136 	PMD_INIT_FUNC_TRACE();
137 
138 	if (dev->rx_pkt_burst == dpaa_eth_queue_rx)
139 		return ptypes;
140 	return NULL;
141 }
142 
143 static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
144 {
145 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
146 
147 	PMD_INIT_FUNC_TRACE();
148 
149 	/* Change tx callback to the real one */
150 	dev->tx_pkt_burst = dpaa_eth_queue_tx;
151 	fman_if_enable_rx(dpaa_intf->fif);
152 
153 	return 0;
154 }
155 
156 static void dpaa_eth_dev_stop(struct rte_eth_dev *dev)
157 {
158 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
159 
160 	PMD_INIT_FUNC_TRACE();
161 
162 	fman_if_disable_rx(dpaa_intf->fif);
163 	dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
164 }
165 
166 static void dpaa_eth_dev_close(struct rte_eth_dev *dev)
167 {
168 	PMD_INIT_FUNC_TRACE();
169 
170 	dpaa_eth_dev_stop(dev);
171 }
172 
173 static int
174 dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused,
175 		     char *fw_version,
176 		     size_t fw_size)
177 {
178 	int ret;
179 	FILE *svr_file = NULL;
180 	unsigned int svr_ver = 0;
181 
182 	PMD_INIT_FUNC_TRACE();
183 
184 	svr_file = fopen(DPAA_SOC_ID_FILE, "r");
185 	if (!svr_file) {
186 		DPAA_PMD_ERR("Unable to open SoC device");
187 		return -ENOTSUP; /* Not supported on this infra */
188 	}
189 
190 	ret = fscanf(svr_file, "svr:%x", &svr_ver);
	fclose(svr_file);
191 	if (ret <= 0) {
192 		DPAA_PMD_ERR("Unable to read SoC device");
193 		return -ENOTSUP; /* Not supported on this infra */
194 	}
195 
196 	ret = snprintf(fw_version, fw_size,
197 		       "svr:%x-fman-v%x",
198 		       svr_ver,
199 		       fman_ip_rev);
200 
201 	ret += 1; /* add the size of '\0' */
202 	if (fw_size < (uint32_t)ret)
203 		return ret;
204 	else
205 		return 0;
206 }
207 
208 static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
209 			      struct rte_eth_dev_info *dev_info)
210 {
211 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
212 
213 	PMD_INIT_FUNC_TRACE();
214 
215 	dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
216 	dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
217 	dev_info->min_rx_bufsize = DPAA_MIN_RX_BUF_SIZE;
218 	dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
219 	dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
220 	dev_info->max_hash_mac_addrs = 0;
221 	dev_info->max_vfs = 0;
222 	dev_info->max_vmdq_pools = ETH_16_POOLS;
223 	dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
224 	dev_info->speed_capa = (ETH_LINK_SPEED_1G |
225 				ETH_LINK_SPEED_10G);
226 	dev_info->rx_offload_capa =
227 		(DEV_RX_OFFLOAD_IPV4_CKSUM |
228 		DEV_RX_OFFLOAD_UDP_CKSUM   |
229 		DEV_RX_OFFLOAD_TCP_CKSUM);
230 	dev_info->tx_offload_capa =
231 		(DEV_TX_OFFLOAD_IPV4_CKSUM  |
232 		DEV_TX_OFFLOAD_UDP_CKSUM   |
233 		DEV_TX_OFFLOAD_TCP_CKSUM);
234 }
235 
236 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
237 				int wait_to_complete __rte_unused)
238 {
239 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
240 	struct rte_eth_link *link = &dev->data->dev_link;
241 
242 	PMD_INIT_FUNC_TRACE();
243 
244 	if (dpaa_intf->fif->mac_type == fman_mac_1g)
245 		link->link_speed = 1000;
246 	else if (dpaa_intf->fif->mac_type == fman_mac_10g)
247 		link->link_speed = 10000;
248 	else
249 		DPAA_PMD_ERR("invalid link_speed: %s, %d",
250 			     dpaa_intf->name, dpaa_intf->fif->mac_type);
251 
252 	link->link_status = dpaa_intf->valid;
253 	link->link_duplex = ETH_LINK_FULL_DUPLEX;
254 	link->link_autoneg = ETH_LINK_AUTONEG;
255 	return 0;
256 }
257 
258 static int dpaa_eth_stats_get(struct rte_eth_dev *dev,
259 			       struct rte_eth_stats *stats)
260 {
261 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
262 
263 	PMD_INIT_FUNC_TRACE();
264 
265 	fman_if_stats_get(dpaa_intf->fif, stats);
266 	return 0;
267 }
268 
269 static void dpaa_eth_stats_reset(struct rte_eth_dev *dev)
270 {
271 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
272 
273 	PMD_INIT_FUNC_TRACE();
274 
275 	fman_if_stats_reset(dpaa_intf->fif);
276 }
277 
278 static int
279 dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
280 		    unsigned int n)
281 {
282 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
283 	unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
284 	uint64_t values[sizeof(struct dpaa_if_stats) / 8];
285 
286 	if (xstats == NULL)
287 		return 0;
288 
289 	if (n < num)
290 		return num;
291 
292 	fman_if_stats_get_all(dpaa_intf->fif, values,
293 			      sizeof(struct dpaa_if_stats) / 8);
294 
295 	for (i = 0; i < num; i++) {
296 		xstats[i].id = i;
297 		xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
298 	}
299 	return i;
300 }
301 
302 static int
303 dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
304 		      struct rte_eth_xstat_name *xstats_names,
305 		      __rte_unused unsigned int limit)
306 {
307 	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
308 
309 	if (xstats_names != NULL)
310 		for (i = 0; i < stat_cnt; i++)
311 			snprintf(xstats_names[i].name,
312 				 sizeof(xstats_names[i].name),
313 				 "%s",
314 				 dpaa_xstats_strings[i].name);
315 
316 	return stat_cnt;
317 }
318 
319 static int
320 dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
321 		      uint64_t *values, unsigned int n)
322 {
323 	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
324 	uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8];
325 
326 	if (!ids) {
327 		struct dpaa_if *dpaa_intf = dev->data->dev_private;
328 
329 		if (n < stat_cnt)
330 			return stat_cnt;
331 
332 		if (!values)
333 			return 0;
334 
335 		fman_if_stats_get_all(dpaa_intf->fif, values_copy,
336 				      sizeof(struct dpaa_if_stats) / 8);
337 
338 		for (i = 0; i < stat_cnt; i++)
339 			values[i] =
340 				values_copy[dpaa_xstats_strings[i].offset / 8];
341 
342 		return stat_cnt;
343 	}
344 
345 	dpaa_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);
346 
347 	for (i = 0; i < n; i++) {
348 		if (ids[i] >= stat_cnt) {
349 			DPAA_PMD_ERR("id value isn't valid");
350 			return -1;
351 		}
352 		values[i] = values_copy[ids[i]];
353 	}
354 	return n;
355 }
356 
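/* When 'ids' is NULL the ethdev layer expects every extended statistic to be
 * returned; otherwise only the counters selected by 'ids' are copied out.
 */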
357 static int
358 dpaa_xstats_get_names_by_id(
359 	struct rte_eth_dev *dev,
360 	struct rte_eth_xstat_name *xstats_names,
361 	const uint64_t *ids,
362 	unsigned int limit)
363 {
364 	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
365 	struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
366 
367 	if (!ids)
368 		return dpaa_xstats_get_names(dev, xstats_names, limit);
369 
370 	dpaa_xstats_get_names(dev, xstats_names_copy, limit);
371 
372 	for (i = 0; i < limit; i++) {
373 		if (ids[i] >= stat_cnt) {
374 			DPAA_PMD_ERR("id value isn't valid");
375 			return -1;
376 		}
377 		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
378 	}
379 	return limit;
380 }
381 
382 static void dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
383 {
384 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
385 
386 	PMD_INIT_FUNC_TRACE();
387 
388 	fman_if_promiscuous_enable(dpaa_intf->fif);
389 }
390 
391 static void dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
392 {
393 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
394 
395 	PMD_INIT_FUNC_TRACE();
396 
397 	fman_if_promiscuous_disable(dpaa_intf->fif);
398 }
399 
400 static void dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
401 {
402 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
403 
404 	PMD_INIT_FUNC_TRACE();
405 
406 	fman_if_set_mcast_filter_table(dpaa_intf->fif);
407 }
408 
409 static void dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
410 {
411 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
412 
413 	PMD_INIT_FUNC_TRACE();
414 
415 	fman_if_reset_mcast_filter_table(dpaa_intf->fif);
416 }
417 
418 static
419 int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
420 			    uint16_t nb_desc __rte_unused,
421 			    unsigned int socket_id __rte_unused,
422 			    const struct rte_eth_rxconf *rx_conf __rte_unused,
423 			    struct rte_mempool *mp)
424 {
425 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
426 
427 	PMD_INIT_FUNC_TRACE();
428 
429 	DPAA_PMD_INFO("Rx queue setup for queue index: %d", queue_idx);
430 
431 	if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
432 		struct fman_if_ic_params icp;
433 		uint32_t fd_offset;
434 		uint32_t bp_size;
435 
436 		if (!mp->pool_data) {
437 			DPAA_PMD_ERR("Not an offloaded buffer pool!");
438 			return -1;
439 		}
440 		dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
441 
442 		memset(&icp, 0, sizeof(icp));
443 		/* Set ICEOF to the default value, which is 0 */
444 		icp.iciof = DEFAULT_ICIOF;
445 		icp.iceof = DEFAULT_RX_ICEOF;
446 		icp.icsz = DEFAULT_ICSZ;
447 		fman_if_set_ic_params(dpaa_intf->fif, &icp);
448 
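		/* Frame data starts after the mbuf headroom plus the
		 * hardware-reserved area at the front of each buffer.
		 */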
449 		fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
450 		fman_if_set_fdoff(dpaa_intf->fif, fd_offset);
451 
452 		/* Buffer pool size should be equal to the dataroom size */
453 		bp_size = rte_pktmbuf_data_room_size(mp);
454 		fman_if_set_bp(dpaa_intf->fif, mp->size,
455 			       dpaa_intf->bp_info->bpid, bp_size);
456 		dpaa_intf->valid = 1;
457 		DPAA_PMD_INFO("if =%s - fd_offset = %d offset = %d",
458 			    dpaa_intf->name, fd_offset,
459 			fman_if_get_fdoff(dpaa_intf->fif));
460 	}
461 	dev->data->rx_queues[queue_idx] = &dpaa_intf->rx_queues[queue_idx];
462 
463 	return 0;
464 }
465 
466 static
467 void dpaa_eth_rx_queue_release(void *rxq __rte_unused)
468 {
469 	PMD_INIT_FUNC_TRACE();
470 }
471 
472 static
473 int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
474 			    uint16_t nb_desc __rte_unused,
475 		unsigned int socket_id __rte_unused,
476 		const struct rte_eth_txconf *tx_conf __rte_unused)
477 {
478 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
479 
480 	PMD_INIT_FUNC_TRACE();
481 
482 	DPAA_PMD_INFO("Tx queue setup for queue index: %d", queue_idx);
483 	dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx];
484 	return 0;
485 }
486 
487 static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
488 {
489 	PMD_INIT_FUNC_TRACE();
490 }
491 
492 static int dpaa_link_down(struct rte_eth_dev *dev)
493 {
494 	PMD_INIT_FUNC_TRACE();
495 
496 	dpaa_eth_dev_stop(dev);
497 	return 0;
498 }
499 
500 static int dpaa_link_up(struct rte_eth_dev *dev)
501 {
502 	PMD_INIT_FUNC_TRACE();
503 
504 	dpaa_eth_dev_start(dev);
505 	return 0;
506 }
507 
508 static int
509 dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
510 		   struct rte_eth_fc_conf *fc_conf)
511 {
512 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
513 	struct rte_eth_fc_conf *net_fc;
514 
515 	PMD_INIT_FUNC_TRACE();
516 
517 	if (!(dpaa_intf->fc_conf)) {
518 		dpaa_intf->fc_conf = rte_zmalloc(NULL,
519 			sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
520 		if (!dpaa_intf->fc_conf) {
521 			DPAA_PMD_ERR("unable to save flow control info");
522 			return -ENOMEM;
523 		}
524 	}
525 	net_fc = dpaa_intf->fc_conf;
526 
527 	if (fc_conf->high_water < fc_conf->low_water) {
528 		DPAA_PMD_ERR("Incorrect Flow Control Configuration");
529 		return -EINVAL;
530 	}
531 
532 	if (fc_conf->mode == RTE_FC_NONE) {
533 		return 0;
534 	} else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
535 		 fc_conf->mode == RTE_FC_FULL) {
536 		fman_if_set_fc_threshold(dpaa_intf->fif, fc_conf->high_water,
537 					 fc_conf->low_water,
538 				dpaa_intf->bp_info->bpid);
539 		if (fc_conf->pause_time)
540 			fman_if_set_fc_quanta(dpaa_intf->fif,
541 					      fc_conf->pause_time);
542 	}
543 
544 	/* Save the information in dpaa device */
545 	net_fc->pause_time = fc_conf->pause_time;
546 	net_fc->high_water = fc_conf->high_water;
547 	net_fc->low_water = fc_conf->low_water;
548 	net_fc->send_xon = fc_conf->send_xon;
549 	net_fc->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd;
550 	net_fc->mode = fc_conf->mode;
551 	net_fc->autoneg = fc_conf->autoneg;
552 
553 	return 0;
554 }
555 
556 static int
557 dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
558 		   struct rte_eth_fc_conf *fc_conf)
559 {
560 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
561 	struct rte_eth_fc_conf *net_fc = dpaa_intf->fc_conf;
562 	int ret;
563 
564 	PMD_INIT_FUNC_TRACE();
565 
566 	if (net_fc) {
567 		fc_conf->pause_time = net_fc->pause_time;
568 		fc_conf->high_water = net_fc->high_water;
569 		fc_conf->low_water = net_fc->low_water;
570 		fc_conf->send_xon = net_fc->send_xon;
571 		fc_conf->mac_ctrl_frame_fwd = net_fc->mac_ctrl_frame_fwd;
572 		fc_conf->mode = net_fc->mode;
573 		fc_conf->autoneg = net_fc->autoneg;
574 		return 0;
575 	}
576 	ret = fman_if_get_fc_threshold(dpaa_intf->fif);
577 	if (ret) {
578 		fc_conf->mode = RTE_FC_TX_PAUSE;
579 		fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif);
580 	} else {
581 		fc_conf->mode = RTE_FC_NONE;
582 	}
583 
584 	return 0;
585 }
586 
587 static int
588 dpaa_dev_add_mac_addr(struct rte_eth_dev *dev,
589 			     struct ether_addr *addr,
590 			     uint32_t index,
591 			     __rte_unused uint32_t pool)
592 {
593 	int ret;
594 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
595 
596 	PMD_INIT_FUNC_TRACE();
597 
598 	ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, index);
599 
600 	if (ret)
601 		RTE_LOG(ERR, PMD, "error: Adding the MAC ADDR failed:"
602 			" err = %d", ret);
603 	return 0;
604 }
605 
606 static void
607 dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev,
608 			  uint32_t index)
609 {
610 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
611 
612 	PMD_INIT_FUNC_TRACE();
613 
614 	fman_if_clear_mac_addr(dpaa_intf->fif, index);
615 }
616 
617 static void
618 dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
619 		       struct ether_addr *addr)
620 {
621 	int ret;
622 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
623 
624 	PMD_INIT_FUNC_TRACE();
625 
626 	ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, 0);
627 	if (ret)
628 		RTE_LOG(ERR, PMD, "error: Setting the MAC ADDR failed %d", ret);
629 }
630 
631 static struct eth_dev_ops dpaa_devops = {
632 	.dev_configure		  = dpaa_eth_dev_configure,
633 	.dev_start		  = dpaa_eth_dev_start,
634 	.dev_stop		  = dpaa_eth_dev_stop,
635 	.dev_close		  = dpaa_eth_dev_close,
636 	.dev_infos_get		  = dpaa_eth_dev_info,
637 	.dev_supported_ptypes_get = dpaa_supported_ptypes_get,
638 
639 	.rx_queue_setup		  = dpaa_eth_rx_queue_setup,
640 	.tx_queue_setup		  = dpaa_eth_tx_queue_setup,
641 	.rx_queue_release	  = dpaa_eth_rx_queue_release,
642 	.tx_queue_release	  = dpaa_eth_tx_queue_release,
643 
644 	.flow_ctrl_get		  = dpaa_flow_ctrl_get,
645 	.flow_ctrl_set		  = dpaa_flow_ctrl_set,
646 
647 	.link_update		  = dpaa_eth_link_update,
648 	.stats_get		  = dpaa_eth_stats_get,
649 	.xstats_get		  = dpaa_dev_xstats_get,
650 	.xstats_get_by_id	  = dpaa_xstats_get_by_id,
651 	.xstats_get_names_by_id	  = dpaa_xstats_get_names_by_id,
652 	.xstats_get_names	  = dpaa_xstats_get_names,
653 	.xstats_reset		  = dpaa_eth_stats_reset,
654 	.stats_reset		  = dpaa_eth_stats_reset,
655 	.promiscuous_enable	  = dpaa_eth_promiscuous_enable,
656 	.promiscuous_disable	  = dpaa_eth_promiscuous_disable,
657 	.allmulticast_enable	  = dpaa_eth_multicast_enable,
658 	.allmulticast_disable	  = dpaa_eth_multicast_disable,
659 	.mtu_set		  = dpaa_mtu_set,
660 	.dev_set_link_down	  = dpaa_link_down,
661 	.dev_set_link_up	  = dpaa_link_up,
662 	.mac_addr_add		  = dpaa_dev_add_mac_addr,
663 	.mac_addr_remove	  = dpaa_dev_remove_mac_addr,
664 	.mac_addr_set		  = dpaa_dev_set_mac_addr,
665 
666 	.fw_version_get		  = dpaa_fw_version_get,
667 };
668 
669 static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf)
670 {
671 	struct rte_eth_fc_conf *fc_conf;
672 	int ret;
673 
674 	PMD_INIT_FUNC_TRACE();
675 
676 	if (!(dpaa_intf->fc_conf)) {
677 		dpaa_intf->fc_conf = rte_zmalloc(NULL,
678 			sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
679 		if (!dpaa_intf->fc_conf) {
680 			DPAA_PMD_ERR("unable to save flow control info");
681 			return -ENOMEM;
682 		}
683 	}
684 	fc_conf = dpaa_intf->fc_conf;
685 	ret = fman_if_get_fc_threshold(dpaa_intf->fif);
686 	if (ret) {
687 		fc_conf->mode = RTE_FC_TX_PAUSE;
688 		fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif);
689 	} else {
690 		fc_conf->mode = RTE_FC_NONE;
691 	}
692 
693 	return 0;
694 }
695 
696 /* Initialise an Rx FQ */
697 static int dpaa_rx_queue_init(struct qman_fq *fq,
698 			      uint32_t fqid)
699 {
700 	struct qm_mcc_initfq opts;
701 	int ret;
702 
703 	PMD_INIT_FUNC_TRACE();
704 
705 	ret = qman_reserve_fqid(fqid);
706 	if (ret) {
707 		DPAA_PMD_ERR("reserve rx fqid %d failed with ret: %d",
708 			     fqid, ret);
709 		return -EINVAL;
710 	}
711 
712 	DPAA_PMD_DEBUG("creating rx fq %p, fqid %d", fq, fqid);
713 	ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
714 	if (ret) {
715 		DPAA_PMD_ERR("create rx fqid %d failed with ret: %d",
716 			fqid, ret);
717 		return ret;
718 	}
719 
720 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
721 		       QM_INITFQ_WE_CONTEXTA;
722 
723 	opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
724 	opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
725 			   QM_FQCTRL_PREFERINCACHE;
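	/* Stash a few cache lines of annotation, data and FQ context into the
	 * processing core's cache to reduce DDR accesses on the Rx path.
	 */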
726 	opts.fqd.context_a.stashing.exclusive = 0;
727 	opts.fqd.context_a.stashing.annotation_cl = DPAA_IF_RX_ANNOTATION_STASH;
728 	opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
729 	opts.fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;
730 
731 	/* Enable tail drop */
732 	opts.we_mask = opts.we_mask | QM_INITFQ_WE_TDTHRESH;
733 	opts.fqd.fq_ctrl = opts.fqd.fq_ctrl | QM_FQCTRL_TDE;
734 	qm_fqd_taildrop_set(&opts.fqd.td, CONG_THRESHOLD_RX_Q, 1);
735 
736 	ret = qman_init_fq(fq, 0, &opts);
737 	if (ret)
738 		DPAA_PMD_ERR("init rx fqid %d failed with ret: %d", fqid, ret);
739 	return ret;
740 }
741 
742 /* Initialise a Tx FQ */
743 static int dpaa_tx_queue_init(struct qman_fq *fq,
744 			      struct fman_if *fman_intf)
745 {
746 	struct qm_mcc_initfq opts;
747 	int ret;
748 
749 	PMD_INIT_FUNC_TRACE();
750 
751 	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
752 			     QMAN_FQ_FLAG_TO_DCPORTAL, fq);
753 	if (ret) {
754 		DPAA_PMD_ERR("create tx fq failed with ret: %d", ret);
755 		return ret;
756 	}
757 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
758 		       QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;
759 	opts.fqd.dest.channel = fman_intf->tx_channel_id;
760 	opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY;
761 	opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
762 	opts.fqd.context_b = 0;
763 	/* no tx-confirmation */
764 	opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
765 	opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
766 	DPAA_PMD_DEBUG("init tx fq %p, fqid %d", fq, fq->fqid);
767 	ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
768 	if (ret)
769 		DPAA_PMD_ERR("init tx fqid %d failed %d", fq->fqid, ret);
770 	return ret;
771 }
772 
773 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
774 /* Initialise a DEBUG FQ ([rt]x_error, rx_default). */
775 static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
776 {
777 	struct qm_mcc_initfq opts;
778 	int ret;
779 
780 	PMD_INIT_FUNC_TRACE();
781 
782 	ret = qman_reserve_fqid(fqid);
783 	if (ret) {
784 		DPAA_PMD_ERR("Reserve debug fqid %d failed with ret: %d",
785 			fqid, ret);
786 		return -EINVAL;
787 	}
788 	/* "map" this Rx FQ to one of the interface's Tx FQIDs */
789 	DPAA_PMD_DEBUG("Creating debug fq %p, fqid %d", fq, fqid);
790 	ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
791 	if (ret) {
792 		DPAA_PMD_ERR("create debug fqid %d failed with ret: %d",
793 			fqid, ret);
794 		return ret;
795 	}
796 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
797 	opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY;
798 	ret = qman_init_fq(fq, 0, &opts);
799 	if (ret)
800 		DPAA_PMD_ERR("init debug fqid %d failed with ret: %d",
801 			    fqid, ret);
802 	return ret;
803 }
804 #endif
805 
806 /* Initialise a network interface */
807 static int
808 dpaa_dev_init(struct rte_eth_dev *eth_dev)
809 {
810 	int num_cores, num_rx_fqs, fqid;
811 	int loop, ret = 0;
812 	int dev_id;
813 	struct rte_dpaa_device *dpaa_device;
814 	struct dpaa_if *dpaa_intf;
815 	struct fm_eth_port_cfg *cfg;
816 	struct fman_if *fman_intf;
817 	struct fman_if_bpool *bp, *tmp_bp;
818 
819 	PMD_INIT_FUNC_TRACE();
820 
821 	/* For secondary processes, the primary has done all the work */
822 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
823 		return 0;
824 
825 	dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
826 	dev_id = dpaa_device->id.dev_id;
827 	dpaa_intf = eth_dev->data->dev_private;
828 	cfg = &dpaa_netcfg->port_cfg[dev_id];
829 	fman_intf = cfg->fman_if;
830 
831 	dpaa_intf->name = dpaa_device->name;
832 
833 	/* save fman_if & cfg in the interface structure */
834 	dpaa_intf->fif = fman_intf;
835 	dpaa_intf->ifid = dev_id;
836 	dpaa_intf->cfg = cfg;
837 
838 	/* Initialize Rx FQ's */
839 	if (getenv("DPAA_NUM_RX_QUEUES"))
840 		num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
841 	else
842 		num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
843 
844 	/* Each device cannot have more than DPAA_PCD_FQID_MULTIPLIER RX
845 	 * queues.
846 	 */
847 	if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_PCD_FQID_MULTIPLIER) {
848 		DPAA_PMD_ERR("Invalid number of RX queues\n");
849 		return -EINVAL;
850 	}
851 
852 	dpaa_intf->rx_queues = rte_zmalloc(NULL,
853 		sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
	if (!dpaa_intf->rx_queues)
		return -ENOMEM;
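	/* Each port owns a contiguous block of PCD FQIDs starting at
	 * DPAA_PCD_FQID_START + ifid * DPAA_PCD_FQID_MULTIPLIER, one FQ per
	 * Rx queue.
	 */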
854 	for (loop = 0; loop < num_rx_fqs; loop++) {
855 		fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid *
856 			DPAA_PCD_FQID_MULTIPLIER + loop;
857 		ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop], fqid);
858 		if (ret)
859 			return ret;
860 		dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
861 	}
862 	dpaa_intf->nb_rx_queues = num_rx_fqs;
863 
864 	/* Initialise Tx FQs. Have as many Tx FQ's as number of cores */
865 	num_cores = rte_lcore_count();
866 	dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
867 		num_cores, MAX_CACHELINE);
868 	if (!dpaa_intf->tx_queues)
869 		return -ENOMEM;
870 
871 	for (loop = 0; loop < num_cores; loop++) {
872 		ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
873 					 fman_intf);
874 		if (ret)
875 			return ret;
876 		dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
877 	}
878 	dpaa_intf->nb_tx_queues = num_cores;
879 
880 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
881 	dpaa_debug_queue_init(&dpaa_intf->debug_queues[
882 		DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
883 	dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
884 	dpaa_debug_queue_init(&dpaa_intf->debug_queues[
885 		DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
886 	dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf;
887 #endif
888 
889 	DPAA_PMD_DEBUG("All frame queues created");
890 
891 	/* Get the initial configuration for flow control */
892 	dpaa_fc_set_default(dpaa_intf);
893 
894 	/* reset bpool list, initialize bpool dynamically */
895 	list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
896 		list_del(&bp->node);
897 		free(bp);
898 	}
899 
900 	/* Populate ethdev structure */
901 	eth_dev->dev_ops = &dpaa_devops;
902 	eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
903 	eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
904 
905 	/* Allocate memory for storing MAC addresses */
906 	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
907 		ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
908 	if (eth_dev->data->mac_addrs == NULL) {
909 		DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
910 						"store MAC addresses",
911 				ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
912 		rte_free(dpaa_intf->rx_queues);
913 		rte_free(dpaa_intf->tx_queues);
914 		dpaa_intf->rx_queues = NULL;
915 		dpaa_intf->tx_queues = NULL;
916 		dpaa_intf->nb_rx_queues = 0;
917 		dpaa_intf->nb_tx_queues = 0;
918 		return -ENOMEM;
919 	}
920 
921 	/* copy the primary mac address */
922 	ether_addr_copy(&fman_intf->mac_addr, &eth_dev->data->mac_addrs[0]);
923 
924 	RTE_LOG(INFO, PMD, "net: dpaa: %s: %02x:%02x:%02x:%02x:%02x:%02x\n",
925 		dpaa_device->name,
926 		fman_intf->mac_addr.addr_bytes[0],
927 		fman_intf->mac_addr.addr_bytes[1],
928 		fman_intf->mac_addr.addr_bytes[2],
929 		fman_intf->mac_addr.addr_bytes[3],
930 		fman_intf->mac_addr.addr_bytes[4],
931 		fman_intf->mac_addr.addr_bytes[5]);
932 
933 	/* Disable RX mode */
934 	fman_if_discard_rx_errors(fman_intf);
935 	fman_if_disable_rx(fman_intf);
936 	/* Disable promiscuous mode */
937 	fman_if_promiscuous_disable(fman_intf);
938 	/* Disable multicast */
939 	fman_if_reset_mcast_filter_table(fman_intf);
940 	/* Reset interface statistics */
941 	fman_if_stats_reset(fman_intf);
942 
943 	return 0;
944 }
945 
946 static int
947 dpaa_dev_uninit(struct rte_eth_dev *dev)
948 {
949 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
950 
951 	PMD_INIT_FUNC_TRACE();
952 
953 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
954 		return -EPERM;
955 
956 	if (!dpaa_intf) {
957 		DPAA_PMD_WARN("Already closed or not started");
958 		return -1;
959 	}
960 
961 	dpaa_eth_dev_close(dev);
962 
963 	/* release configuration memory */
964 	if (dpaa_intf->fc_conf)
965 		rte_free(dpaa_intf->fc_conf);
966 
967 	rte_free(dpaa_intf->rx_queues);
968 	dpaa_intf->rx_queues = NULL;
969 
970 	rte_free(dpaa_intf->tx_queues);
971 	dpaa_intf->tx_queues = NULL;
972 
973 	/* free memory for storing MAC addresses */
974 	rte_free(dev->data->mac_addrs);
975 	dev->data->mac_addrs = NULL;
976 
977 	dev->dev_ops = NULL;
978 	dev->rx_pkt_burst = NULL;
979 	dev->tx_pkt_burst = NULL;
980 
981 	return 0;
982 }
983 
984 static int
985 rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
986 	       struct rte_dpaa_device *dpaa_dev)
987 {
988 	int diag;
989 	int ret;
990 	struct rte_eth_dev *eth_dev;
991 
992 	PMD_INIT_FUNC_TRACE();
993 
994 	/* In case of secondary process, the device is already configured
995 	 * and no further action is required, except portal initialization
996 	 * and verifying secondary attachment to port name.
997 	 */
998 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
999 		eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
1000 		if (!eth_dev)
1001 			return -ENOMEM;
1002 		return 0;
1003 	}
1004 
1005 	if (!is_global_init) {
1006 		/* One time load of Qman/Bman drivers */
1007 		ret = qman_global_init();
1008 		if (ret) {
1009 			DPAA_PMD_ERR("QMAN initialization failed: %d",
1010 				     ret);
1011 			return ret;
1012 		}
1013 		ret = bman_global_init();
1014 		if (ret) {
1015 			DPAA_PMD_ERR("BMAN initialization failed: %d",
1016 				     ret);
1017 			return ret;
1018 		}
1019 
1020 		is_global_init = 1;
1021 	}
1022 
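	/* Bind a QMAN/BMAN software portal to the calling thread; portal
	 * access is required before any frame-queue or buffer-pool operation.
	 */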
1023 	ret = rte_dpaa_portal_init((void *)1);
1024 	if (ret) {
1025 		DPAA_PMD_ERR("Unable to initialize portal");
1026 		return ret;
1027 	}
1028 
1029 	eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
1030 	if (eth_dev == NULL)
1031 		return -ENOMEM;
1032 
1033 	eth_dev->data->dev_private = rte_zmalloc(
1034 					"ethdev private structure",
1035 					sizeof(struct dpaa_if),
1036 					RTE_CACHE_LINE_SIZE);
1037 	if (!eth_dev->data->dev_private) {
1038 		DPAA_PMD_ERR("Cannot allocate memzone for port data");
1039 		rte_eth_dev_release_port(eth_dev);
1040 		return -ENOMEM;
1041 	}
1042 
1043 	eth_dev->device = &dpaa_dev->device;
1044 	eth_dev->device->driver = &dpaa_drv->driver;
1045 	dpaa_dev->eth_dev = eth_dev;
1046 
1047 	/* Invoke PMD device initialization function */
1048 	diag = dpaa_dev_init(eth_dev);
1049 	if (diag == 0)
1050 		return 0;
1051 
1052 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1053 		rte_free(eth_dev->data->dev_private);
1054 
1055 	rte_eth_dev_release_port(eth_dev);
1056 	return diag;
1057 }
1058 
1059 static int
1060 rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
1061 {
1062 	struct rte_eth_dev *eth_dev;
1063 
1064 	PMD_INIT_FUNC_TRACE();
1065 
1066 	eth_dev = dpaa_dev->eth_dev;
1067 	dpaa_dev_uninit(eth_dev);
1068 
1069 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1070 		rte_free(eth_dev->data->dev_private);
1071 
1072 	rte_eth_dev_release_port(eth_dev);
1073 
1074 	return 0;
1075 }
1076 
1077 static struct rte_dpaa_driver rte_dpaa_pmd = {
1078 	.drv_type = FSL_DPAA_ETH,
1079 	.probe = rte_dpaa_probe,
1080 	.remove = rte_dpaa_remove,
1081 };
1082 
1083 RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);
1084