xref: /dpdk/examples/link_status_interrupt/main.c (revision 6c066bacbf0280b1117b86a5910e3006b02d45bc)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <string.h>
37 #include <stdint.h>
38 #include <inttypes.h>
39 #include <sys/types.h>
40 #include <sys/queue.h>
41 #include <netinet/in.h>
42 #include <setjmp.h>
43 #include <stdarg.h>
44 #include <ctype.h>
45 #include <errno.h>
46 #include <getopt.h>
47 
48 #include <rte_common.h>
49 #include <rte_log.h>
50 #include <rte_malloc.h>
51 #include <rte_memory.h>
52 #include <rte_memcpy.h>
53 #include <rte_eal.h>
54 #include <rte_launch.h>
55 #include <rte_atomic.h>
56 #include <rte_cycles.h>
57 #include <rte_prefetch.h>
58 #include <rte_lcore.h>
59 #include <rte_per_lcore.h>
60 #include <rte_branch_prediction.h>
61 #include <rte_interrupts.h>
62 #include <rte_random.h>
63 #include <rte_debug.h>
64 #include <rte_ether.h>
65 #include <rte_ethdev.h>
66 #include <rte_mempool.h>
67 #include <rte_mbuf.h>
68 
69 #define RTE_LOGTYPE_LSI RTE_LOGTYPE_USER1
70 
71 #define NB_MBUF   8192
72 
73 #define MAX_PKT_BURST 32
74 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
75 
76 /*
77  * Configurable number of RX/TX ring descriptors
78  */
79 #define RTE_TEST_RX_DESC_DEFAULT 128
80 #define RTE_TEST_TX_DESC_DEFAULT 512
81 static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
82 static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
83 
84 /* ethernet addresses of ports */
85 static struct ether_addr lsi_ports_eth_addr[RTE_MAX_ETHPORTS];
86 
87 /* mask of enabled ports */
88 static uint32_t lsi_enabled_port_mask = 0;
89 
90 static unsigned int lsi_rx_queue_per_lcore = 1;
91 
92 /* destination port for L2 forwarding */
93 static unsigned lsi_dst_ports[RTE_MAX_ETHPORTS] = {0};
94 
97 #define MAX_RX_QUEUE_PER_LCORE 16
98 #define MAX_TX_QUEUE_PER_PORT 16
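/* Per-lcore configuration: the set of RX ports polled by this lcore */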
99 struct lcore_queue_conf {
100 	unsigned n_rx_port;
101 	unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE];
102 	unsigned tx_queue_id;
103 } __rte_cache_aligned;
104 struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
105 
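/* Per-port TX buffer used to batch packets before they are sent by
 * rte_eth_tx_buffer()/rte_eth_tx_buffer_flush() */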
106 struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
107 
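/* Default port configuration: no RX offloads beyond CRC stripping,
 * link status change (LSC) interrupt enabled */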
108 static const struct rte_eth_conf port_conf = {
109 	.rxmode = {
110 		.split_hdr_size = 0,
111 		.header_split   = 0, /**< Header Split disabled */
112 		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
113 		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
114 		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
115 		.hw_strip_crc   = 1, /**< CRC stripped by hardware */
116 	},
117 	.txmode = {
118 		.mq_mode = ETH_MQ_TX_NONE,
119 	},
120 	.intr_conf = {
121 		.lsc = 1, /**< lsc interrupt feature enabled */
122 	},
123 };
124 
125 struct rte_mempool * lsi_pktmbuf_pool = NULL;
126 
127 /* Per-port statistics struct */
128 struct lsi_port_statistics {
129 	uint64_t tx;
130 	uint64_t rx;
131 	uint64_t dropped;
132 } __rte_cache_aligned;
133 struct lsi_port_statistics port_statistics[RTE_MAX_ETHPORTS];
134 
135 /* A tsc-based timer responsible for triggering statistics printout */
136 #define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 GHz */
137 #define MAX_TIMER_PERIOD 86400 /* 1 day max */
138 static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000; /* default period is 10 seconds */
139 
140 /* Print out statistics on packets dropped */
141 static void
142 print_stats(void)
143 {
144 	struct rte_eth_link link;
145 	uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
146 	uint16_t portid;
147 
148 	total_packets_dropped = 0;
149 	total_packets_tx = 0;
150 	total_packets_rx = 0;
151 
152 	const char clr[] = { 27, '[', '2', 'J', '\0' };
153 	const char topLeft[] = { 27, '[', '1', ';', '1', 'H','\0' };
154 
155 	/* Clear screen and move to top left */
156 	printf("%s%s", clr, topLeft);
157 
158 	printf("\nPort statistics ====================================");
159 
160 	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
161 		/* skip ports that are not enabled */
162 		if ((lsi_enabled_port_mask & (1 << portid)) == 0)
163 			continue;
164 
165 		memset(&link, 0, sizeof(link));
166 		rte_eth_link_get_nowait(portid, &link);
167 		printf("\nStatistics for port %u ------------------------------"
168 			   "\nLink status: %25s"
169 			   "\nLink speed: %26u"
170 			   "\nLink duplex: %25s"
171 			   "\nPackets sent: %24"PRIu64
172 			   "\nPackets received: %20"PRIu64
173 			   "\nPackets dropped: %21"PRIu64,
174 			   portid,
175 			   (link.link_status ? "Link up" : "Link down"),
176 			   (unsigned)link.link_speed,
177 			   (link.link_duplex == ETH_LINK_FULL_DUPLEX ? \
178 					"full-duplex" : "half-duplex"),
179 			   port_statistics[portid].tx,
180 			   port_statistics[portid].rx,
181 			   port_statistics[portid].dropped);
182 
183 		total_packets_dropped += port_statistics[portid].dropped;
184 		total_packets_tx += port_statistics[portid].tx;
185 		total_packets_rx += port_statistics[portid].rx;
186 	}
187 	printf("\nAggregate statistics ==============================="
188 		   "\nTotal packets sent: %18"PRIu64
189 		   "\nTotal packets received: %14"PRIu64
190 		   "\nTotal packets dropped: %15"PRIu64,
191 		   total_packets_tx,
192 		   total_packets_rx,
193 		   total_packets_dropped);
194 	printf("\n====================================================\n");
195 }
196 
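/* Forward a packet to the paired destination port: rewrite the destination
 * MAC to 02:00:00:00:00:<dst_port>, use that port's own MAC as the source
 * address and buffer the mbuf for transmission on queue 0 of that port. */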
197 static void
198 lsi_simple_forward(struct rte_mbuf *m, unsigned portid)
199 {
200 	struct ether_hdr *eth;
201 	void *tmp;
202 	unsigned dst_port = lsi_dst_ports[portid];
203 	int sent;
204 	struct rte_eth_dev_tx_buffer *buffer;
205 
206 	eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
207 
208 	/* 02:00:00:00:00:xx */
209 	tmp = &eth->d_addr.addr_bytes[0];
210 	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);
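	/* Note: this 8-byte store also touches the first two bytes of s_addr,
	 * which are overwritten by the ether_addr_copy() below. */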
211 
212 	/* src addr */
213 	ether_addr_copy(&lsi_ports_eth_addr[dst_port], &eth->s_addr);
214 
215 	buffer = tx_buffer[dst_port];
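	/* rte_eth_tx_buffer() only transmits once the buffer fills up; it
	 * returns the number of packets actually sent (0 if just buffered). */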
216 	sent = rte_eth_tx_buffer(dst_port, 0, buffer, m);
217 	if (sent)
218 		port_statistics[dst_port].tx += sent;
219 }
220 
221 /* main processing loop */
222 static void
223 lsi_main_loop(void)
224 {
225 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
226 	struct rte_mbuf *m;
227 	unsigned lcore_id;
228 	unsigned sent;
229 	uint64_t prev_tsc, diff_tsc, cur_tsc, timer_tsc;
230 	unsigned i, j, portid, nb_rx;
231 	struct lcore_queue_conf *qconf;
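	/* TSC ticks corresponding to BURST_TX_DRAIN_US microseconds */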
232 	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S *
233 			BURST_TX_DRAIN_US;
234 	struct rte_eth_dev_tx_buffer *buffer;
235 
236 	prev_tsc = 0;
237 	timer_tsc = 0;
238 
239 	lcore_id = rte_lcore_id();
240 	qconf = &lcore_queue_conf[lcore_id];
241 
242 	if (qconf->n_rx_port == 0) {
243 		RTE_LOG(INFO, LSI, "lcore %u has nothing to do\n", lcore_id);
244 		return;
245 	}
246 
247 	RTE_LOG(INFO, LSI, "entering main loop on lcore %u\n", lcore_id);
248 
249 	for (i = 0; i < qconf->n_rx_port; i++) {
250 
251 		portid = qconf->rx_port_list[i];
252 		RTE_LOG(INFO, LSI, " -- lcoreid=%u portid=%u\n", lcore_id,
253 			portid);
254 	}
255 
256 	while (1) {
257 
258 		cur_tsc = rte_rdtsc();
259 
260 		/*
261 		 * TX burst queue drain
262 		 */
263 		diff_tsc = cur_tsc - prev_tsc;
264 		if (unlikely(diff_tsc > drain_tsc)) {
265 
266 			for (i = 0; i < qconf->n_rx_port; i++) {
267 
268 				portid = lsi_dst_ports[qconf->rx_port_list[i]];
269 				buffer = tx_buffer[portid];
270 
271 				sent = rte_eth_tx_buffer_flush(portid, 0, buffer);
272 				if (sent)
273 					port_statistics[portid].tx += sent;
274 
275 			}
276 
277 			/* if timer is enabled */
278 			if (timer_period > 0) {
279 
280 				/* advance the timer */
281 				timer_tsc += diff_tsc;
282 
283 				/* if timer has reached its timeout */
284 				if (unlikely(timer_tsc >= (uint64_t) timer_period)) {
285 
286 					/* do this only on master core */
287 					if (lcore_id == rte_get_master_lcore()) {
288 						print_stats();
289 						/* reset the timer */
290 						timer_tsc = 0;
291 					}
292 				}
293 			}
294 
295 			prev_tsc = cur_tsc;
296 		}
297 
298 		/*
299 		 * Read packet from RX queues
300 		 */
301 		for (i = 0; i < qconf->n_rx_port; i++) {
302 
303 			portid = qconf->rx_port_list[i];
304 			nb_rx = rte_eth_rx_burst((uint16_t) portid, 0,
305 						 pkts_burst, MAX_PKT_BURST);
306 
307 			port_statistics[portid].rx += nb_rx;
308 
309 			for (j = 0; j < nb_rx; j++) {
310 				m = pkts_burst[j];
311 				rte_prefetch0(rte_pktmbuf_mtod(m, void *));
312 				lsi_simple_forward(m, portid);
313 			}
314 		}
315 	}
316 }
317 
318 static int
319 lsi_launch_one_lcore(__attribute__((unused)) void *dummy)
320 {
321 	lsi_main_loop();
322 	return 0;
323 }
324 
325 /* display usage */
326 static void
327 lsi_usage(const char *prgname)
328 {
329 	printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n"
330 		"  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
331 		"  -q NQ: number of queues (=ports) per lcore (default is 1)\n"
332 		"  -T PERIOD: statistics will be refreshed every PERIOD seconds (0 to disable, 10 default, 86400 maximum)\n",
333 			prgname);
334 }
335 
336 static int
337 lsi_parse_portmask(const char *portmask)
338 {
339 	char *end = NULL;
340 	unsigned long pm;
341 
342 	/* parse hexadecimal string */
343 	pm = strtoul(portmask, &end, 16);
344 	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
345 		return -1;
346 
347 	if (pm == 0)
348 		return -1;
349 
350 	return pm;
351 }
352 
353 static unsigned int
354 lsi_parse_nqueue(const char *q_arg)
355 {
356 	char *end = NULL;
357 	unsigned long n;
358 
359 	/* parse decimal string */
360 	n = strtoul(q_arg, &end, 10);
361 	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
362 		return 0;
363 	if (n == 0)
364 		return 0;
365 	if (n >= MAX_RX_QUEUE_PER_LCORE)
366 		return 0;
367 
368 	return n;
369 }
370 
371 static int
372 lsi_parse_timer_period(const char *q_arg)
373 {
374 	char *end = NULL;
375 	int n;
376 
377 	/* parse number string */
378 	n = strtol(q_arg, &end, 10);
379 	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
380 		return -1;
381 	if (n >= MAX_TIMER_PERIOD)
382 		return -1;
383 
384 	return n;
385 }
386 
387 /* Parse the argument given in the command line of the application */
388 static int
389 lsi_parse_args(int argc, char **argv)
390 {
391 	int opt, ret;
392 	char **argvopt;
393 	int option_index;
394 	char *prgname = argv[0];
395 	static struct option lgopts[] = {
396 		{NULL, 0, 0, 0}
397 	};
398 
399 	argvopt = argv;
400 
401 	while ((opt = getopt_long(argc, argvopt, "p:q:T:",
402 				  lgopts, &option_index)) != EOF) {
403 
404 		switch (opt) {
405 		/* portmask */
406 		case 'p':
407 			lsi_enabled_port_mask = lsi_parse_portmask(optarg);
408 			if (lsi_enabled_port_mask == 0) {
409 				printf("invalid portmask\n");
410 				lsi_usage(prgname);
411 				return -1;
412 			}
413 			break;
414 
415 		/* nqueue */
416 		case 'q':
417 			lsi_rx_queue_per_lcore = lsi_parse_nqueue(optarg);
418 			if (lsi_rx_queue_per_lcore == 0) {
419 				printf("invalid queue number\n");
420 				lsi_usage(prgname);
421 				return -1;
422 			}
423 			break;
424 
425 		/* timer period */
426 		case 'T':
427 			timer_period = lsi_parse_timer_period(optarg) * 1000 * TIMER_MILLISECOND;
428 			if (timer_period < 0) {
429 				printf("invalid timer period\n");
430 				lsi_usage(prgname);
431 				return -1;
432 			}
433 			break;
434 
435 		/* long options */
436 		case 0:
437 			lsi_usage(prgname);
438 			return -1;
439 
440 		default:
441 			lsi_usage(prgname);
442 			return -1;
443 		}
444 	}
445 
446 	if (optind >= 0)
447 		argv[optind-1] = prgname;
448 
449 	ret = optind-1;
450 	optind = 1; /* reset getopt lib */
451 	return ret;
452 }
453 
454 /**
455  * This callback is invoked for the specified port after an LSI interrupt
456  * has been fully handled. It needs to be written carefully because it runs
457  * in the interrupt host thread, which is different from the application
458  * main thread.
459  *
460  * @param port_id
461  *  Port id.
462  * @param type
463  *  Event type.
464  * @param param
465  *  Pointer to (address of) the parameters.
466  *
467  * @return
468  *  0 on success.
469  */
470 static int
471 lsi_event_callback(uint16_t port_id, enum rte_eth_event_type type, void *param,
472 		    void *ret_param)
473 {
474 	struct rte_eth_link link;
475 
476 	RTE_SET_USED(param);
477 	RTE_SET_USED(ret_param);
478 
479 	printf("\n\nIn registered callback...\n");
480 	printf("Event type: %s\n", type == RTE_ETH_EVENT_INTR_LSC ? "LSC interrupt" : "unknown event");
481 	rte_eth_link_get_nowait(port_id, &link);
482 	if (link.link_status) {
483 		printf("Port %d Link Up - speed %u Mbps - %s\n\n",
484 				port_id, (unsigned)link.link_speed,
485 			(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
486 				("full-duplex") : ("half-duplex"));
487 	} else
488 		printf("Port %d Link Down\n\n", port_id);
489 
490 	return 0;
491 }
492 
493 /* Check the link status of all enabled ports for up to 9 s, then print the final status */
494 static void
495 check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
496 {
497 #define CHECK_INTERVAL 100 /* 100ms */
498 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
499 	uint8_t count, all_ports_up, print_flag = 0;
500 	uint16_t portid;
501 	struct rte_eth_link link;
502 
503 	printf("\nChecking link status");
504 	fflush(stdout);
505 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
506 		all_ports_up = 1;
507 		for (portid = 0; portid < port_num; portid++) {
508 			if ((port_mask & (1 << portid)) == 0)
509 				continue;
510 			memset(&link, 0, sizeof(link));
511 			rte_eth_link_get_nowait(portid, &link);
512 			/* print link status if flag set */
513 			if (print_flag == 1) {
514 				if (link.link_status)
515 					printf(
516 					"Port %d Link Up. Speed %u Mbps - %s\n",
517 						portid, link.link_speed,
518 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
519 					("full-duplex") : ("half-duplex"));
520 				else
521 					printf("Port %d Link Down\n", portid);
522 				continue;
523 			}
524 			/* clear all_ports_up flag if any link down */
525 			if (link.link_status == ETH_LINK_DOWN) {
526 				all_ports_up = 0;
527 				break;
528 			}
529 		}
530 		/* after finally printing all link status, get out */
531 		if (print_flag == 1)
532 			break;
533 
534 		if (all_ports_up == 0) {
535 			printf(".");
536 			fflush(stdout);
537 			rte_delay_ms(CHECK_INTERVAL);
538 		}
539 
540 		/* set the print_flag if all ports up or timeout */
541 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
542 			print_flag = 1;
543 			printf("done\n");
544 		}
545 	}
546 }
547 
548 int
549 main(int argc, char **argv)
550 {
551 	struct lcore_queue_conf *qconf;
552 	struct rte_eth_dev_info dev_info;
553 	int ret;
554 	uint16_t nb_ports;
555 	uint16_t portid, portid_last = 0;
556 	unsigned lcore_id, rx_lcore_id;
557 	unsigned nb_ports_in_mask = 0;
558 
559 	/* init EAL */
560 	ret = rte_eal_init(argc, argv);
561 	if (ret < 0)
562 		rte_exit(EXIT_FAILURE, "rte_eal_init failed");
563 	argc -= ret;
564 	argv += ret;
565 
566 	/* parse application arguments (after the EAL ones) */
567 	ret = lsi_parse_args(argc, argv);
568 	if (ret < 0)
569 		rte_exit(EXIT_FAILURE, "Invalid arguments");
570 
571 	/* create the mbuf pool */
572 	lsi_pktmbuf_pool =
573 		rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 32, 0,
574 			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
575 	if (lsi_pktmbuf_pool == NULL)
576 		rte_panic("Cannot init mbuf pool\n");
577 
578 	nb_ports = rte_eth_dev_count();
579 	if (nb_ports == 0)
580 		rte_panic("No Ethernet port - bye\n");
581 
582 	/*
583 	 * Pair up the enabled ports: each port forwards to the adjacent enabled port in the mask.
584 	 */
585 	for (portid = 0; portid < nb_ports; portid++) {
586 		/* skip ports that are not enabled */
587 		if ((lsi_enabled_port_mask & (1 << portid)) == 0)
588 			continue;
589 
590 		/* save the destination port id */
591 		if (nb_ports_in_mask % 2) {
592 			lsi_dst_ports[portid] = portid_last;
593 			lsi_dst_ports[portid_last] = portid;
594 		}
595 		else
596 			portid_last = portid;
597 
598 		nb_ports_in_mask++;
599 
600 		rte_eth_dev_info_get(portid, &dev_info);
601 	}
602 	if (nb_ports_in_mask < 2 || nb_ports_in_mask % 2)
603 		rte_exit(EXIT_FAILURE, "Current enabled port number is %u, "
604 				"but it should be even and at least 2\n",
605 				nb_ports_in_mask);
606 
607 	rx_lcore_id = 0;
608 	qconf = &lcore_queue_conf[rx_lcore_id];
609 
610 	/* Initialize the port/queue configuration of each logical core */
611 	for (portid = 0; portid < nb_ports; portid++) {
612 		/* skip ports that are not enabled */
613 		if ((lsi_enabled_port_mask & (1 << portid)) == 0)
614 			continue;
615 
616 		/* get the lcore_id for this port */
617 		while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
618 		       lcore_queue_conf[rx_lcore_id].n_rx_port ==
619 		       lsi_rx_queue_per_lcore) {
620 
621 			rx_lcore_id++;
622 			if (rx_lcore_id >= RTE_MAX_LCORE)
623 				rte_exit(EXIT_FAILURE, "Not enough cores\n");
624 		}
625 		if (qconf != &lcore_queue_conf[rx_lcore_id])
626 			/* Assigned a new logical core in the loop above. */
627 			qconf = &lcore_queue_conf[rx_lcore_id];
628 
629 		qconf->rx_port_list[qconf->n_rx_port] = portid;
630 		qconf->n_rx_port++;
631 		printf("Lcore %u: RX port %u\n",rx_lcore_id, (unsigned) portid);
632 	}
633 
634 	/* Initialise each port */
635 	for (portid = 0; portid < nb_ports; portid++) {
636 		/* skip ports that are not enabled */
637 		if ((lsi_enabled_port_mask & (1 << portid)) == 0) {
638 			printf("Skipping disabled port %u\n", (unsigned) portid);
639 			continue;
640 		}
641 		/* init port */
642 		printf("Initializing port %u... ", (unsigned) portid);
643 		fflush(stdout);
644 		ret = rte_eth_dev_configure(portid, 1, 1, &port_conf);
645 		if (ret < 0)
646 			rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
647 				  ret, (unsigned) portid);
648 
649 		ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
650 						       &nb_txd);
651 		if (ret < 0)
652 			rte_exit(EXIT_FAILURE,
653 				 "rte_eth_dev_adjust_nb_rx_tx_desc: err=%d, port=%u\n",
654 				 ret, (unsigned) portid);
655 
656 		/* register the LSC interrupt callback; this must be done after
657 		 * rte_eth_dev_configure(). If intr_conf.lsc == 0, no LSC
658 		 * interrupt will be generated and the callback registered
659 		 * below will never be called.
660 		 */
661 		rte_eth_dev_callback_register(portid,
662 			RTE_ETH_EVENT_INTR_LSC, lsi_event_callback, NULL);
663 
664 		rte_eth_macaddr_get(portid,
665 				    &lsi_ports_eth_addr[portid]);
666 
667 		/* init one RX queue */
668 		fflush(stdout);
669 		ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
670 					     rte_eth_dev_socket_id(portid),
671 					     NULL,
672 					     lsi_pktmbuf_pool);
673 		if (ret < 0)
674 			rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d, port=%u\n",
675 				  ret, (unsigned) portid);
676 
677 		/* init one TX queue on each port */
678 		fflush(stdout);
679 		ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
680 				rte_eth_dev_socket_id(portid),
681 				NULL);
682 		if (ret < 0)
683 			rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d,port=%u\n",
684 				  ret, (unsigned) portid);
685 
686 		/* Initialize TX buffers */
687 		tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
688 				RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0,
689 				rte_eth_dev_socket_id(portid));
690 		if (tx_buffer[portid] == NULL)
691 			rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
692 					(unsigned) portid);
693 
694 		rte_eth_tx_buffer_init(tx_buffer[portid], MAX_PKT_BURST);
695 
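		/* Count packets that could not be transmitted as dropped in the
		 * per-port statistics */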
696 		ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[portid],
697 				rte_eth_tx_buffer_count_callback,
698 				&port_statistics[portid].dropped);
699 		if (ret < 0)
700 			rte_exit(EXIT_FAILURE, "Cannot set error callback for "
701 					"tx buffer on port %u\n", (unsigned) portid);
702 
703 		/* Start device */
704 		ret = rte_eth_dev_start(portid);
705 		if (ret < 0)
706 			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%u\n",
707 				  ret, (unsigned) portid);
708 		printf("done:\n");
709 
710 		rte_eth_promiscuous_enable(portid);
711 
712 		printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
713 				(unsigned) portid,
714 				lsi_ports_eth_addr[portid].addr_bytes[0],
715 				lsi_ports_eth_addr[portid].addr_bytes[1],
716 				lsi_ports_eth_addr[portid].addr_bytes[2],
717 				lsi_ports_eth_addr[portid].addr_bytes[3],
718 				lsi_ports_eth_addr[portid].addr_bytes[4],
719 				lsi_ports_eth_addr[portid].addr_bytes[5]);
720 
721 		/* initialize port stats */
722 		memset(&port_statistics, 0, sizeof(port_statistics));
723 	}
724 
725 	check_all_ports_link_status(nb_ports, lsi_enabled_port_mask);
726 
727 	/* launch per-lcore init on every lcore */
728 	rte_eal_mp_remote_launch(lsi_launch_one_lcore, NULL, CALL_MASTER);
729 	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
730 		if (rte_eal_wait_lcore(lcore_id) < 0)
731 			return -1;
732 	}
733 
734 	return 0;
735 }
736