/* dpdk/examples/link_status_interrupt/main.c (revision 69d22b8e668d4cbc6d3a5e2873551de5d44c3a45) */
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <netinet/in.h>
#include <setjmp.h>
#include <stdarg.h>
#include <ctype.h>
#include <errno.h>
#include <getopt.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>

#include "main.h"

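/*
 * Link Status Interrupt (LSI) sample application: forwards packets between
 * pairs of enabled Ethernet ports (L2 forwarding) and registers a callback
 * that reports link status change interrupts on every enabled port.
 */
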
#define RTE_LOGTYPE_LSI RTE_LOGTYPE_USER1

#define LSI_MAX_PORTS 32

#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
#define NB_MBUF   8192

/*
 * RX and TX Prefetch, Host, and Write-back threshold values should be
 * carefully set for optimal performance. Consult the network
 * controller's datasheet and supporting DPDK documentation for guidance
 * on how these parameters should be set.
 */
#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */

/*
 * These default values are optimized for use with the Intel(R) 82599 10 GbE
 * Controller and the DPDK ixgbe PMD. Consider using other values for other
 * network controllers and/or network drivers.
 */
#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
#define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */

#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN 200000ULL /* around 100us at 2 GHz */

#define SOCKET0 0

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;

/* ethernet addresses of ports */
static struct ether_addr lsi_ports_eth_addr[LSI_MAX_PORTS];

/* mask of enabled ports */
static uint32_t lsi_enabled_port_mask = 0;

static unsigned int lsi_rx_queue_per_lcore = 1;

/* destination port for L2 forwarding */
static unsigned lsi_dst_ports[LSI_MAX_PORTS] = {0};

struct mbuf_table {
	unsigned len;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};

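/*
 * Per-lcore configuration: the list of RX ports polled by the lcore, the
 * TX queue index it owns on every port, and one TX mbuf buffer per port.
 */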
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
struct lcore_queue_conf {
	unsigned n_rx_queue;
	unsigned rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
	unsigned tx_queue_id;
	struct mbuf_table tx_mbufs[LSI_MAX_PORTS];
} __rte_cache_aligned;
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];

static const struct rte_eth_conf port_conf = {
	.rxmode = {
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled */
	},
	.txmode = {
	},
	.intr_conf = {
		.lsc = 1, /**< link status change (LSC) interrupt enabled */
	},
};

static const struct rte_eth_rxconf rx_conf = {
	.rx_thresh = {
		.pthresh = RX_PTHRESH,
		.hthresh = RX_HTHRESH,
		.wthresh = RX_WTHRESH,
	},
};

static const struct rte_eth_txconf tx_conf = {
	.tx_thresh = {
		.pthresh = TX_PTHRESH,
		.hthresh = TX_HTHRESH,
		.wthresh = TX_WTHRESH,
	},
	.tx_free_thresh = 0, /* Use PMD default values */
	.tx_rs_thresh = 0, /* Use PMD default values */
};

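/* Memory pool from which all RX and TX mbufs are allocated */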
struct rte_mempool *lsi_pktmbuf_pool = NULL;

/* Per-port statistics struct */
struct lsi_port_statistics {
	uint64_t tx;
	uint64_t rx;
	uint64_t dropped;
} __rte_cache_aligned;
struct lsi_port_statistics port_statistics[LSI_MAX_PORTS];

/* A tsc-based timer responsible for triggering statistics printout */
#define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 GHz */
#define MAX_TIMER_PERIOD 86400 /* 1 day max */
static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000; /* default period is 10 seconds */

/* Print out statistics on packets dropped */
static void
print_stats(void)
{
	struct rte_eth_link link;
	uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
	unsigned portid;

	total_packets_dropped = 0;
	total_packets_tx = 0;
	total_packets_rx = 0;

	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	/* Clear screen and move to top left */
	printf("%s%s", clr, topLeft);

	printf("\nPort statistics ====================================");

	for (portid = 0; portid < LSI_MAX_PORTS; portid++) {
		/* skip ports that are not enabled */
		if ((lsi_enabled_port_mask & (1 << portid)) == 0)
			continue;

		memset(&link, 0, sizeof(link));
		rte_eth_link_get_nowait((uint8_t)portid, &link);
		printf("\nStatistics for port %u ------------------------------"
			   "\nLink status: %25s"
			   "\nLink speed: %26u"
			   "\nLink duplex: %25s"
			   "\nPackets sent: %24"PRIu64
			   "\nPackets received: %20"PRIu64
			   "\nPackets dropped: %21"PRIu64,
			   portid,
			   (link.link_status ? "Link up" : "Link down"),
			   (unsigned)link.link_speed,
			   (link.link_duplex == ETH_LINK_FULL_DUPLEX ?
					"full-duplex" : "half-duplex"),
			   port_statistics[portid].tx,
			   port_statistics[portid].rx,
			   port_statistics[portid].dropped);

		total_packets_dropped += port_statistics[portid].dropped;
		total_packets_tx += port_statistics[portid].tx;
		total_packets_rx += port_statistics[portid].rx;
	}
	printf("\nAggregate statistics ==============================="
		   "\nTotal packets sent: %18"PRIu64
		   "\nTotal packets received: %14"PRIu64
		   "\nTotal packets dropped: %15"PRIu64,
		   total_packets_tx,
		   total_packets_rx,
		   total_packets_dropped);
	printf("\n====================================================\n");
}

/* Send a burst of packets on the TX queue of an output port */
static int
lsi_send_burst(struct lcore_queue_conf *qconf, unsigned n, uint8_t port)
{
	struct rte_mbuf **m_table;
	unsigned ret;
	unsigned queueid;

	queueid = (uint16_t) qconf->tx_queue_id;
	m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;

	ret = rte_eth_tx_burst(port, (uint16_t) queueid, m_table, (uint16_t) n);
	port_statistics[port].tx += ret;
	if (unlikely(ret < n)) {
		port_statistics[port].dropped += (n - ret);
		do {
			rte_pktmbuf_free(m_table[ret]);
		} while (++ret < n);
	}

	return 0;
}

/* Enqueue a packet for TX on an output port; flush once a full burst is queued */
static int
lsi_send_packet(struct rte_mbuf *m, uint8_t port)
{
	unsigned lcore_id, len;
	struct lcore_queue_conf *qconf;

	lcore_id = rte_lcore_id();

	qconf = &lcore_queue_conf[lcore_id];
	len = qconf->tx_mbufs[port].len;
	qconf->tx_mbufs[port].m_table[len] = m;
	len++;

	/* enough pkts to be sent */
	if (unlikely(len == MAX_PKT_BURST)) {
		lsi_send_burst(qconf, MAX_PKT_BURST, port);
		len = 0;
	}

	qconf->tx_mbufs[port].len = len;
	return 0;
}

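/*
 * Rewrite the Ethernet header and forward the packet to the destination
 * port paired with the port it was received on.
 */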
static void
lsi_simple_forward(struct rte_mbuf *m, unsigned portid)
{
	struct ether_hdr *eth;
	void *tmp;
	unsigned dst_port = lsi_dst_ports[portid];

	eth = rte_pktmbuf_mtod(m, struct ether_hdr *);

	/* set destination MAC to 00:09:c0:00:00:xx, where xx is the destination port id */
	tmp = &eth->d_addr.addr_bytes[0];
	*((uint64_t *)tmp) = 0x000000c00900 + ((uint64_t)dst_port << 40);

	/* src addr */
	ether_addr_copy(&lsi_ports_eth_addr[dst_port], &eth->s_addr);

	lsi_send_packet(m, (uint8_t) dst_port);
}

/* main processing loop */
static void
lsi_main_loop(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct rte_mbuf *m;
	unsigned lcore_id;
	uint64_t prev_tsc = 0;
	uint64_t diff_tsc, cur_tsc, timer_tsc;
	unsigned i, j, portid, nb_rx;
	struct lcore_queue_conf *qconf;

	timer_tsc = 0;

	lcore_id = rte_lcore_id();
	qconf = &lcore_queue_conf[lcore_id];

	if (qconf->n_rx_queue == 0) {
		RTE_LOG(INFO, LSI, "lcore %u has nothing to do\n", lcore_id);
		while (1);
	}

	RTE_LOG(INFO, LSI, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->n_rx_queue; i++) {

		portid = qconf->rx_queue_list[i];
		RTE_LOG(INFO, LSI, " -- lcoreid=%u portid=%u\n", lcore_id,
			portid);
	}

	while (1) {

		cur_tsc = rte_rdtsc();

		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > BURST_TX_DRAIN)) {

			/* this could be optimized (use queueid instead of
			 * portid), but it is not called so often */
			for (portid = 0; portid < LSI_MAX_PORTS; portid++) {
				if (qconf->tx_mbufs[portid].len == 0)
					continue;
				lsi_send_burst(&lcore_queue_conf[lcore_id],
						 qconf->tx_mbufs[portid].len,
						 (uint8_t) portid);
				qconf->tx_mbufs[portid].len = 0;
			}

			/* if timer is enabled */
			if (timer_period > 0) {

				/* advance the timer */
				timer_tsc += diff_tsc;

				/* if timer has reached its timeout */
				if (unlikely(timer_tsc >= (uint64_t) timer_period)) {

					/* do this only on master core */
					if (lcore_id == rte_get_master_lcore()) {
						print_stats();
						/* reset the timer */
						timer_tsc = 0;
					}
				}
			}

			prev_tsc = cur_tsc;
		}

		/*
		 * Read packets from RX queues
		 */
		for (i = 0; i < qconf->n_rx_queue; i++) {

			portid = qconf->rx_queue_list[i];
			nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
						 pkts_burst, MAX_PKT_BURST);

			port_statistics[portid].rx += nb_rx;

			for (j = 0; j < nb_rx; j++) {
				m = pkts_burst[j];
				rte_prefetch0(rte_pktmbuf_mtod(m, void *));
				lsi_simple_forward(m, portid);
			}
		}
	}
}

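/* Per-lcore entry point, launched from MAIN on every enabled lcore */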
static int
lsi_launch_one_lcore(__attribute__((unused)) void *dummy)
{
	lsi_main_loop();
	return 0;
}

/* display usage */
static void
lsi_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n"
		"  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
		"  -q NQ: number of queues (=ports) per lcore (default is 1)\n"
		"  -T PERIOD: statistics will be refreshed each PERIOD seconds (0 to disable, 10 default, 86400 maximum)\n",
			prgname);
}

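/* Parse the hexadecimal port mask; return the mask, or -1 on error */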
static int
lsi_parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (pm == 0)
		return -1;

	return pm;
}

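/* Parse the number of RX queues per lcore; return 0 on error */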
static unsigned int
lsi_parse_nqueue(const char *q_arg)
{
	char *end = NULL;
	unsigned long n;

	/* parse decimal string */
	n = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;
	if (n == 0)
		return 0;
	if (n >= MAX_RX_QUEUE_PER_LCORE)
		return 0;

	return n;
}

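/* Parse the statistics refresh period in seconds; return -1 on error */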
static int
lsi_parse_timer_period(const char *q_arg)
{
	char *end = NULL;
	int n;

	/* parse number string */
	n = strtol(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;
	if (n >= MAX_TIMER_PERIOD)
		return -1;

	return n;
}

/* Parse the argument given in the command line of the application */
static int
lsi_parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{NULL, 0, 0, 0}
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "p:q:T:",
				  lgopts, &option_index)) != EOF) {

		switch (opt) {
		/* portmask */
		case 'p':
			lsi_enabled_port_mask = lsi_parse_portmask(optarg);
			if (lsi_enabled_port_mask == 0) {
				printf("invalid portmask\n");
				lsi_usage(prgname);
				return -1;
			}
			break;

		/* nqueue */
		case 'q':
			lsi_rx_queue_per_lcore = lsi_parse_nqueue(optarg);
			if (lsi_rx_queue_per_lcore == 0) {
				printf("invalid queue number\n");
				lsi_usage(prgname);
				return -1;
			}
			break;

		/* timer period */
		case 'T':
			timer_period = lsi_parse_timer_period(optarg) * 1000 * TIMER_MILLISECOND;
			if (timer_period < 0) {
				printf("invalid timer period\n");
				lsi_usage(prgname);
				return -1;
			}
			break;

		/* long options */
		case 0:
			lsi_usage(prgname);
			return -1;

		default:
			lsi_usage(prgname);
			return -1;
		}
	}

	if (optind >= 0)
		argv[optind-1] = prgname;

	ret = optind-1;
	optind = 0; /* reset getopt lib */
	return ret;
}

/**
 * It will be called as the callback for the specified port after an LSI
 * interrupt has been fully handled. This callback needs to be implemented
 * carefully as it is called in the interrupt host thread, which is
 * different from the application main thread.
 *
 * @param port_id
 *  Port id.
 * @param type
 *  Event type.
 * @param param
 *  Pointer to (address of) the parameters.
 *
 * @return
 *  void.
 */
static void
lsi_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
{
	struct rte_eth_link link;

	RTE_SET_USED(param);

	printf("\n\nIn registered callback...\n");
	printf("Event type: %s\n", type == RTE_ETH_EVENT_INTR_LSC ? "LSC interrupt" : "unknown event");
	rte_eth_link_get(port_id, &link);
	if (link.link_status) {
		printf("Port %d Link Up - speed %u Mbps - %s\n\n",
				port_id, (unsigned)link.link_speed,
			(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
				("full-duplex") : ("half-duplex"));
	} else
		printf("Port %d Link Down\n\n", port_id);
}

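/*
 * Application entry point: initialize the EAL and the mbuf pool, pair up the
 * enabled ports, set up their RX/TX queues, register the LSC callback on
 * every enabled port and launch the forwarding loop on each lcore.
 * A typical invocation (binary name, core mask and memory channel count are
 * illustrative):
 *   ./build/link_status_interrupt -c 0x3 -n 4 -- -p 0x3 -q 1 -T 10
 */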
int
MAIN(int argc, char **argv)
{
	struct lcore_queue_conf *qconf;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_link link;
	int ret;
	unsigned int nb_ports, nb_lcores;
	unsigned portid, portid_last = 0, queueid = 0;
	unsigned lcore_id, rx_lcore_id;
	unsigned n_tx_queue, max_tx_queues;
	unsigned nb_ports_in_mask = 0;

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "rte_eal_init failed\n");
	argc -= ret;
	argv += ret;

	/* parse application arguments (after the EAL ones) */
	ret = lsi_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid arguments\n");

	/* create the mbuf pool */
	lsi_pktmbuf_pool =
		rte_mempool_create("mbuf_pool", NB_MBUF,
				   MBUF_SIZE, 32,
				   sizeof(struct rte_pktmbuf_pool_private),
				   rte_pktmbuf_pool_init, NULL,
				   rte_pktmbuf_init, NULL,
				   SOCKET0, 0);
	if (lsi_pktmbuf_pool == NULL)
		rte_panic("Cannot init mbuf pool\n");

	/* init driver(s) */
	if (rte_pmd_init_all() < 0)
		rte_panic("Cannot init pmd\n");

	if (rte_eal_pci_probe() < 0)
		rte_panic("Cannot probe PCI\n");

	nb_ports = rte_eth_dev_count();
	if (nb_ports == 0)
		rte_panic("No Ethernet port - bye\n");

	if (nb_ports > LSI_MAX_PORTS)
		nb_ports = LSI_MAX_PORTS;

	nb_lcores = rte_lcore_count();

	/*
	 * Each logical core is assigned a dedicated TX queue on each port.
	 * Compute the maximum number of TX queues that can be used.
	 */
	max_tx_queues = nb_lcores;
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((lsi_enabled_port_mask & (1 << portid)) == 0)
			continue;

		/* save the destination port id: every two enabled ports
		 * form a forwarding pair */
		if (nb_ports_in_mask % 2) {
			lsi_dst_ports[portid] = portid_last;
			lsi_dst_ports[portid_last] = portid;
		}
		else
			portid_last = portid;

		nb_ports_in_mask++;

		rte_eth_dev_info_get((uint8_t) portid, &dev_info);
		if (max_tx_queues > dev_info.max_tx_queues)
			max_tx_queues = dev_info.max_tx_queues;
	}

	if (nb_ports_in_mask < 2 || nb_ports_in_mask % 2)
		rte_exit(EXIT_FAILURE, "Current enabled port number is %u, "
				"but it should be even and at least 2\n",
				nb_ports_in_mask);

	rx_lcore_id = 0;
	qconf = &lcore_queue_conf[rx_lcore_id];
	qconf->tx_queue_id = 0;
	n_tx_queue = 1;

	/* Initialize the port/queue configuration of each logical core */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((lsi_enabled_port_mask & (1 << portid)) == 0)
			continue;

		/* get the lcore_id for this port */
		while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
		       lcore_queue_conf[rx_lcore_id].n_rx_queue ==
		       lsi_rx_queue_per_lcore) {

			rx_lcore_id++;
			if (rx_lcore_id >= RTE_MAX_LCORE)
				rte_exit(EXIT_FAILURE, "Not enough cores\n");
			if (n_tx_queue == max_tx_queues)
				rte_exit(EXIT_FAILURE, "Not enough TX queues\n");
		}
		if (qconf != &lcore_queue_conf[rx_lcore_id]) {
			/* Assigned a new logical core in the loop above. */
			qconf = &lcore_queue_conf[rx_lcore_id];
			qconf->tx_queue_id = n_tx_queue;
			n_tx_queue++;
		}
		qconf->rx_queue_list[qconf->n_rx_queue] = portid;
		qconf->n_rx_queue++;
		printf("Lcore %u: RX port %u TX queue %u\n",
		       rx_lcore_id, portid, qconf->tx_queue_id);
	}

	/* Initialise each port */
	for (portid = 0; portid < nb_ports; portid++) {

		/* skip ports that are not enabled */
		if ((lsi_enabled_port_mask & (1 << portid)) == 0) {
			printf("Skipping disabled port %u\n", portid);
			continue;
		}
		/* init port */
		printf("Initializing port %u... ", portid);
		fflush(stdout);
		ret = rte_eth_dev_configure((uint8_t) portid, 1,
					    (uint16_t) n_tx_queue, &port_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
				  ret, portid);

		/* register the LSC interrupt callback; this must be done
		 * after rte_eth_dev_configure(). If intr_conf.lsc == 0, no
		 * LSC interrupt is available and the callback registered
		 * below will never be called.
		 */
		rte_eth_dev_callback_register((uint8_t)portid,
			RTE_ETH_EVENT_INTR_LSC, lsi_event_callback, NULL);

		rte_eth_macaddr_get((uint8_t) portid,
				    &lsi_ports_eth_addr[portid]);

		/* init one RX queue */
		fflush(stdout);
		ret = rte_eth_rx_queue_setup((uint8_t) portid, 0, nb_rxd,
					     SOCKET0, &rx_conf,
					     lsi_pktmbuf_pool);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d, port=%u\n",
				  ret, portid);

		/* init one TX queue per lcore on each port */
		for (queueid = 0; queueid < n_tx_queue; queueid++) {
			fflush(stdout);
			ret = rte_eth_tx_queue_setup((uint8_t) portid,
						     (uint16_t) queueid, nb_txd,
						     SOCKET0, &tx_conf);
			if (ret < 0)
				rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
					  "port=%u queue=%u\n",
					  ret, portid, queueid);
		}

		/* Start device */
		ret = rte_eth_dev_start((uint8_t) portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%u\n",
				  ret, portid);

		printf("done: ");

		/* get link status */
		rte_eth_link_get((uint8_t) portid, &link);
		if (link.link_status) {
			printf(" Link Up - speed %u Mbps - %s\n",
			       (unsigned) link.link_speed,
			       (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
			       ("full-duplex") : ("half-duplex"));
		} else {
			printf(" Link Down\n");
		}

		printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
				portid,
				lsi_ports_eth_addr[portid].addr_bytes[0],
				lsi_ports_eth_addr[portid].addr_bytes[1],
				lsi_ports_eth_addr[portid].addr_bytes[2],
				lsi_ports_eth_addr[portid].addr_bytes[3],
				lsi_ports_eth_addr[portid].addr_bytes[4],
				lsi_ports_eth_addr[portid].addr_bytes[5]);

		/* initialize port stats */
		memset(&port_statistics, 0, sizeof(port_statistics));
	}

	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(lsi_launch_one_lcore, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	return 0;
}