xref: /dpdk/examples/vmdq/main.c (revision 03ab51eafda992874a48c392ca66ffb577fe2b71)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4 
5 #include <stdint.h>
6 #include <sys/queue.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <stdio.h>
10 #include <assert.h>
11 #include <errno.h>
12 #include <signal.h>
13 #include <stdarg.h>
14 #include <inttypes.h>
15 #include <getopt.h>
16 
17 #include <rte_common.h>
18 #include <rte_log.h>
19 #include <rte_memory.h>
20 #include <rte_memcpy.h>
21 #include <rte_eal.h>
22 #include <rte_launch.h>
23 #include <rte_atomic.h>
24 #include <rte_cycles.h>
25 #include <rte_prefetch.h>
26 #include <rte_lcore.h>
27 #include <rte_per_lcore.h>
28 #include <rte_branch_prediction.h>
29 #include <rte_interrupts.h>
30 #include <rte_random.h>
31 #include <rte_debug.h>
32 #include <rte_ether.h>
33 #include <rte_ethdev.h>
34 #include <rte_mempool.h>
35 #include <rte_mbuf.h>
36 
37 #define MAX_QUEUES 1024
38 /*
39  * 1024 queues are required to meet the needs of a large number of vmdq_pools:
40  * (RX/TX_queue_nb * RX/TX_ring_descriptors_nb) mbufs per port.
41  */
42 #define NUM_MBUFS_PER_PORT (MAX_QUEUES * RTE_MAX(RTE_TEST_RX_DESC_DEFAULT, \
43 						RTE_TEST_TX_DESC_DEFAULT))
44 #define MBUF_CACHE_SIZE 64
45 
46 #define MAX_PKT_BURST 32
47 
48 /*
49  * Configurable number of RX/TX ring descriptors
50  */
51 #define RTE_TEST_RX_DESC_DEFAULT 1024
52 #define RTE_TEST_TX_DESC_DEFAULT 1024
53 
54 #define INVALID_PORT_ID 0xFF
55 
56 /* mask of enabled ports */
57 static uint32_t enabled_port_mask;
58 
59 /* number of pools (8 by default if the user does not specify any) */
60 static uint32_t num_queues = 8;
61 static uint32_t num_pools = 8;
62 static uint8_t rss_enable;
63 
64 /* Default structure for VMDq. 8< */
65 
66 /* empty vmdq configuration structure. Filled in programmatically */
67 static const struct rte_eth_conf vmdq_conf_default = {
68 	.rxmode = {
69 		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
70 		.split_hdr_size = 0,
71 	},
72 
73 	.txmode = {
74 		.mq_mode = ETH_MQ_TX_NONE,
75 	},
76 	.rx_adv_conf = {
77 		/*
78 		 * should be overridden separately in code with
79 		 * appropriate values
80 		 */
81 		.vmdq_rx_conf = {
82 			.nb_queue_pools = ETH_8_POOLS,
83 			.enable_default_pool = 0,
84 			.default_pool = 0,
85 			.nb_pool_maps = 0,
86 			.pool_map = {{0, 0},},
87 		},
88 	},
89 };
90 /* >8 End of empty vmdq configuration structure. */
91 
92 static unsigned lcore_ids[RTE_MAX_LCORE];
93 static uint16_t ports[RTE_MAX_ETHPORTS];
94 static unsigned num_ports; /**< The number of ports specified in command line */
95 
96 /* array used for printing out statistics */
97 volatile unsigned long rxPackets[MAX_QUEUES] = {0};
98 
99 /* vlan_tags 8< */
100 const uint16_t vlan_tags[] = {
101 	0,  1,  2,  3,  4,  5,  6,  7,
102 	8,  9, 10, 11, 12, 13, 14, 15,
103 	16, 17, 18, 19, 20, 21, 22, 23,
104 	24, 25, 26, 27, 28, 29, 30, 31,
105 	32, 33, 34, 35, 36, 37, 38, 39,
106 	40, 41, 42, 43, 44, 45, 46, 47,
107 	48, 49, 50, 51, 52, 53, 54, 55,
108 	56, 57, 58, 59, 60, 61, 62, 63,
109 };
110 /* >8 End of vlan_tags. */
111 
112 const uint16_t num_vlans = RTE_DIM(vlan_tags);
113 static uint16_t num_pf_queues, num_vmdq_queues;
114 static uint16_t vmdq_pool_base, vmdq_queue_base;
115 
116 /* Pool mac address template. 8< */
117 
118 /* pool mac addr template; a pool mac addr looks like: 52 54 00 12 port# pool# */
119 static struct rte_ether_addr pool_addr_template = {
120 	.addr_bytes = {0x52, 0x54, 0x00, 0x12, 0x00, 0x00}
121 };
122 /* >8 End of mac addr template. */
123 
124 /* ethernet addresses of ports */
125 static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
126 
127 #define MAX_QUEUE_NUM_10G 128
128 #define MAX_QUEUE_NUM_1G 8
129 #define MAX_POOL_MAP_NUM_10G 64
130 #define MAX_POOL_MAP_NUM_1G 32
131 #define MAX_POOL_NUM_10G 64
132 #define MAX_POOL_NUM_1G 8
133 /*
134  * Builds up the correct configuration for vmdq based on the vlan tags array
135  * given above, and determines the queue number and pool map number according
136  * to the valid pool number.
137  */
138 
139  /* Building correct configuration for VMDq. 8< */
140 static inline int
141 get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_pools)
142 {
143 	struct rte_eth_vmdq_rx_conf conf;
144 	unsigned i;
145 
146 	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
147 	conf.nb_pool_maps = num_pools;
148 	conf.enable_default_pool = 0;
149 	conf.default_pool = 0; /* set explicit value, even if not used */
150 
151 	for (i = 0; i < conf.nb_pool_maps; i++) {
152 		conf.pool_map[i].vlan_id = vlan_tags[i];
153 		conf.pool_map[i].pools = (1UL << (i % num_pools));
154 	}
155 
156 	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
157 	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
158 		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
159 	if (rss_enable) {
160 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
161 		eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
162 							ETH_RSS_UDP |
163 							ETH_RSS_TCP |
164 							ETH_RSS_SCTP;
165 	}
166 	return 0;
167 }
168 
169 /*
170  * Initialises a given port using global settings and with the rx buffers
171  * coming from the mbuf_pool passed as a parameter
172  */
173 static inline int
174 port_init(uint16_t port, struct rte_mempool *mbuf_pool)
175 {
176 	struct rte_eth_dev_info dev_info;
177 	struct rte_eth_rxconf *rxconf;
178 	struct rte_eth_txconf *txconf;
179 	struct rte_eth_conf port_conf;
180 	uint16_t rxRings, txRings;
181 	uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT;
182 	uint16_t txRingSize = RTE_TEST_TX_DESC_DEFAULT;
183 	int retval;
184 	uint16_t q;
185 	uint16_t queues_per_pool;
186 	uint32_t max_nb_pools;
187 	uint64_t rss_hf_tmp;
188 
189 	/*
190 	 * The max pool number from dev_info will be used to validate the pool
191 	 * number specified in cmd line
192 	 */
193 	retval = rte_eth_dev_info_get(port, &dev_info);
194 	if (retval != 0) {
195 		printf("Error during getting device (port %u) info: %s\n",
196 				port, strerror(-retval));
197 		return retval;
198 	}
199 
200 	max_nb_pools = (uint32_t)dev_info.max_vmdq_pools;
201 	/*
202 	 * We allow processing only part of the VMDq pools, as specified by
203 	 * num_pools on the command line.
204 	 */
205 	if (num_pools > max_nb_pools) {
206 		printf("num_pools %d >max_nb_pools %d\n",
207 			num_pools, max_nb_pools);
208 		return -1;
209 	}
210 	retval = get_eth_conf(&port_conf, max_nb_pools);
211 	if (retval < 0)
212 		return retval;
213 
214 	/*
215 	 * NIC queues are divided into pf queues and vmdq queues.
216 	 */
217 	/* The assumption here is that all ports have the same configuration! */
218 	num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
219 	queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
220 	num_vmdq_queues = num_pools * queues_per_pool;
221 	num_queues = num_pf_queues + num_vmdq_queues;
222 	vmdq_queue_base = dev_info.vmdq_queue_base;
223 	vmdq_pool_base  = dev_info.vmdq_pool_base;
224 
225 	printf("pf queue num: %u, configured vmdq pool num: %u,"
226 		" each vmdq pool has %u queues\n",
227 		num_pf_queues, num_pools, queues_per_pool);
228 	printf("vmdq queue base: %d pool base %d\n",
229 		vmdq_queue_base, vmdq_pool_base);
230 	if (!rte_eth_dev_is_valid_port(port))
231 		return -1;
232 
233 	rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
234 	port_conf.rx_adv_conf.rss_conf.rss_hf &=
235 		dev_info.flow_type_rss_offloads;
236 	if (port_conf.rx_adv_conf.rss_conf.rss_hf != rss_hf_tmp) {
237 		printf("Port %u modified RSS hash function based on hardware support,"
238 			"requested:%#"PRIx64" configured:%#"PRIx64"\n",
239 			port,
240 			rss_hf_tmp,
241 			port_conf.rx_adv_conf.rss_conf.rss_hf);
242 	}
243 
244 	/*
245 	 * Though in this example we only receive packets from the first queue
246 	 * of each pool and send packets through the first rte_lcore_count() tx
247 	 * queues of the vmdq queues, all queues including pf queues are set up.
248 	 * This is because VMDq queues don't always start from zero, and the
249 	 * PMD layer doesn't support selectively initialising only part of the
250 	 * rx/tx queues.
251 	 */
252 	rxRings = (uint16_t)dev_info.max_rx_queues;
253 	txRings = (uint16_t)dev_info.max_tx_queues;
254 
255 	retval = rte_eth_dev_info_get(port, &dev_info);
256 	if (retval != 0) {
257 		printf("Error during getting device (port %u) info: %s\n",
258 				port, strerror(-retval));
259 		return retval;
260 	}
261 
262 	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
263 		port_conf.txmode.offloads |=
264 			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
265 	retval = rte_eth_dev_configure(port, rxRings, txRings, &port_conf);
266 	if (retval != 0)
267 		return retval;
268 
269 	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rxRingSize,
270 				&txRingSize);
271 	if (retval != 0)
272 		return retval;
273 	if (RTE_MAX(rxRingSize, txRingSize) > RTE_MAX(RTE_TEST_RX_DESC_DEFAULT,
274 			RTE_TEST_TX_DESC_DEFAULT)) {
275 		printf("Mbuf pool has an insufficient size for port %u.\n",
276 			port);
277 		return -1;
278 	}
279 
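	/*
	 * Use the device's default RX/TX queue configuration, enabling packet
	 * drop on RX when no descriptors are available.
	 */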
280 	rxconf = &dev_info.default_rxconf;
281 	rxconf->rx_drop_en = 1;
282 	txconf = &dev_info.default_txconf;
283 	txconf->offloads = port_conf.txmode.offloads;
284 	for (q = 0; q < rxRings; q++) {
285 		retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
286 					rte_eth_dev_socket_id(port),
287 					rxconf,
288 					mbuf_pool);
289 		if (retval < 0) {
290 			printf("initialise rx queue %d failed\n", q);
291 			return retval;
292 		}
293 	}
294 
295 	for (q = 0; q < txRings; q++) {
296 		retval = rte_eth_tx_queue_setup(port, q, txRingSize,
297 					rte_eth_dev_socket_id(port),
298 					txconf);
299 		if (retval < 0) {
300 			printf("initialise tx queue %d failed\n", q);
301 			return retval;
302 		}
303 	}
304 
305 	retval  = rte_eth_dev_start(port);
306 	if (retval < 0) {
307 		printf("port %d start failed\n", port);
308 		return retval;
309 	}
310 
311 	retval = rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
312 	if (retval < 0) {
313 		printf("port %d MAC address get failed: %s\n", port,
314 		       rte_strerror(-retval));
315 		return retval;
316 	}
317 	printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
318 			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
319 			(unsigned)port,
320 			RTE_ETHER_ADDR_BYTES(&vmdq_ports_eth_addr[port]));
321 
322 	/*
323 	 * Set mac for each pool.
324 	 * There is no default mac for the pools in i40e.
325 	 * Remove this once i40e fixes this issue.
326 	 */
327 	for (q = 0; q < num_pools; q++) {
328 		struct rte_ether_addr mac;
329 		mac = pool_addr_template;
330 		mac.addr_bytes[4] = port;
331 		mac.addr_bytes[5] = q;
332 		printf("Port %u vmdq pool %u set mac " RTE_ETHER_ADDR_PRT_FMT "\n",
333 			port, q, RTE_ETHER_ADDR_BYTES(&mac));
334 		retval = rte_eth_dev_mac_addr_add(port, &mac,
335 				q + vmdq_pool_base);
336 		if (retval) {
337 			printf("mac addr add failed at pool %d\n", q);
338 			return retval;
339 		}
340 	}
341 
342 	return 0;
343 }
344 /* >8 End of get_eth_conf. */
345 
346 /* Check the num_pools parameter and set it if OK */
347 static int
348 vmdq_parse_num_pools(const char *q_arg)
349 {
350 	char *end = NULL;
351 	int n;
352 
353 	/* parse number string */
354 	n = strtol(q_arg, &end, 10);
355 	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
356 		return -1;
357 
358 	if (n > num_vlans) {
359 		printf("num_pools %d > num_vlans %d\n", n, num_vlans);
360 		return -1;
361 	}
362 
363 	num_pools = n;
364 
365 	return 0;
366 }
367 
368 
369 static int
370 parse_portmask(const char *portmask)
371 {
372 	char *end = NULL;
373 	unsigned long pm;
374 
375 	/* parse hexadecimal string */
376 	pm = strtoul(portmask, &end, 16);
377 	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
378 		return 0;
379 
380 	return pm;
381 }
382 
383 /* Display usage */
384 static void
385 vmdq_usage(const char *prgname)
386 {
387 	printf("%s [EAL options] -- -p PORTMASK]\n"
388 	"  --nb-pools NP: number of pools\n"
389 	"  --enable-rss: enable RSS (disabled by default)\n",
390 	       prgname);
391 }
392 
393 /* Parse the arguments given on the command line of the application */
394 static int
395 vmdq_parse_args(int argc, char **argv)
396 {
397 	int opt;
398 	int option_index;
399 	unsigned i;
400 	const char *prgname = argv[0];
401 	static struct option long_option[] = {
402 		{"nb-pools", required_argument, NULL, 0},
403 		{"enable-rss", 0, NULL, 0},
404 		{NULL, 0, 0, 0}
405 	};
406 
407 	/* Parse command line */
408 	while ((opt = getopt_long(argc, argv, "p:", long_option,
409 		&option_index)) != EOF) {
410 		switch (opt) {
411 		/* portmask */
412 		case 'p':
413 			enabled_port_mask = parse_portmask(optarg);
414 			if (enabled_port_mask == 0) {
415 				printf("invalid portmask\n");
416 				vmdq_usage(prgname);
417 				return -1;
418 			}
419 			break;
420 		case 0:
421 			if (!strcmp(long_option[option_index].name,
422 			    "nb-pools")) {
423 				if (vmdq_parse_num_pools(optarg) == -1) {
424 					printf("invalid number of pools\n");
425 					vmdq_usage(prgname);
426 					return -1;
427 				}
428 			}
429 
430 			if (!strcmp(long_option[option_index].name,
431 			    "enable-rss"))
432 				rss_enable = 1;
433 			break;
434 
435 		default:
436 			vmdq_usage(prgname);
437 			return -1;
438 		}
439 	}
440 
441 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
442 		if (enabled_port_mask & (1 << i))
443 			ports[num_ports++] = (uint8_t)i;
444 	}
445 
446 	if (num_ports < 2 || num_ports % 2) {
447 		printf("Current enabled port number is %u,"
448 			"but it should be even and at least 2\n", num_ports);
449 		return -1;
450 	}
451 
452 	return 0;
453 }
454 
455 static void
456 update_mac_address(struct rte_mbuf *m, unsigned dst_port)
457 {
458 	struct rte_ether_hdr *eth;
459 	void *tmp;
460 
461 	eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
462 
463 	/* 02:00:00:00:00:xx */
464 	tmp = &eth->dst_addr.addr_bytes[0];
465 	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);
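	/*
	 * Note: this 64-bit store also overwrites the first two bytes of the
	 * source MAC address, which are rewritten immediately below.
	 */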
466 
467 	/* src addr */
468 	rte_ether_addr_copy(&vmdq_ports_eth_addr[dst_port], &eth->src_addr);
469 }
470 
471 /* When we receive a HUP signal, print out our stats */
472 static void
473 sighup_handler(int signum)
474 {
475 	unsigned int q = vmdq_queue_base;
476 	for (; q < num_queues; q++) {
477 		if ((q - vmdq_queue_base) % (num_vmdq_queues / num_pools) == 0)
478 			printf("\nPool %u: ", (q - vmdq_queue_base) /
479 			       (num_vmdq_queues / num_pools));
480 		printf("%lu ", rxPackets[q]);
481 	}
482 	printf("\nFinished handling signal %d\n", signum);
483 }
484 
485 /*
486  * Main worker loop, reading packets from one port of each enabled port
487  * pair and writing them to the paired port
488  */
489 static int
490 lcore_main(__rte_unused void *dummy)
491 {
492 	const uint16_t lcore_id = (uint16_t)rte_lcore_id();
493 	const uint16_t num_cores = (uint16_t)rte_lcore_count();
494 	uint16_t core_id = 0;
495 	uint16_t startQueue, endQueue;
496 	uint16_t q, i, p;
497 	const uint16_t remainder = (uint16_t)(num_vmdq_queues % num_cores);
498 
499 	for (i = 0; i < num_cores; i++)
500 		if (lcore_ids[i] == lcore_id) {
501 			core_id = i;
502 			break;
503 		}
504 
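	/*
	 * Split the VMDq queues as evenly as possible across the worker cores;
	 * the first 'remainder' cores each take one extra queue.
	 */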
505 	if (remainder != 0) {
506 		if (core_id < remainder) {
507 			startQueue = (uint16_t)(core_id *
508 					(num_vmdq_queues / num_cores + 1));
509 			endQueue = (uint16_t)(startQueue +
510 					(num_vmdq_queues / num_cores) + 1);
511 		} else {
512 			startQueue = (uint16_t)(core_id *
513 					(num_vmdq_queues / num_cores) +
514 					remainder);
515 			endQueue = (uint16_t)(startQueue +
516 					(num_vmdq_queues / num_cores));
517 		}
518 	} else {
519 		startQueue = (uint16_t)(core_id *
520 				(num_vmdq_queues / num_cores));
521 		endQueue = (uint16_t)(startQueue +
522 				(num_vmdq_queues / num_cores));
523 	}
524 
525 	/* vmdq queue indices don't always start from zero. */
526 	startQueue += vmdq_queue_base;
527 	endQueue   += vmdq_queue_base;
528 	printf("core %u(lcore %u) reading queues %i-%i\n", (unsigned)core_id,
529 		(unsigned)lcore_id, startQueue, endQueue - 1);
530 
531 	if (startQueue == endQueue) {
532 		printf("lcore %u has nothing to do\n", lcore_id);
533 		return 0;
534 	}
535 
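	/*
	 * Main forwarding loop: for each enabled port pair, read a burst from
	 * every VMDq queue assigned to this core, rewrite the MAC addresses
	 * and transmit the packets on the paired port.
	 */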
536 	for (;;) {
537 		struct rte_mbuf *buf[MAX_PKT_BURST];
538 		const uint16_t buf_size = RTE_DIM(buf);
539 
540 		for (p = 0; p < num_ports; p++) {
541 			const uint8_t sport = ports[p];
542 			/* 0 <-> 1, 2 <-> 3 etc */
543 			const uint8_t dport = ports[p ^ 1];
544 			if ((sport == INVALID_PORT_ID) || (dport == INVALID_PORT_ID))
545 				continue;
546 
547 			for (q = startQueue; q < endQueue; q++) {
548 				const uint16_t rxCount = rte_eth_rx_burst(sport,
549 					q, buf, buf_size);
550 
551 				if (unlikely(rxCount == 0))
552 					continue;
553 
554 				rxPackets[q] += rxCount;
555 
556 				for (i = 0; i < rxCount; i++)
557 					update_mac_address(buf[i], dport);
558 
559 				const uint16_t txCount = rte_eth_tx_burst(dport,
560 					vmdq_queue_base + core_id,
561 					buf,
562 					rxCount);
563 
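				/* Free any mbufs that could not be transmitted. */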
564 				if (txCount != rxCount) {
565 					for (i = txCount; i < rxCount; i++)
566 						rte_pktmbuf_free(buf[i]);
567 				}
568 			}
569 		}
570 	}
571 }
572 
573 /*
574  * Update the global variable num_ports and the global array ports according
575  * to the number of ports in the system, and return the number of valid ports
576  */
577 static unsigned check_ports_num(unsigned nb_ports)
578 {
579 	unsigned valid_num_ports = num_ports;
580 	unsigned portid;
581 
582 	if (num_ports > nb_ports) {
583 		printf("\nSpecified port number(%u) exceeds total system port number(%u)\n",
584 			num_ports, nb_ports);
585 		num_ports = nb_ports;
586 	}
587 
588 	for (portid = 0; portid < num_ports; portid++) {
589 		if (!rte_eth_dev_is_valid_port(ports[portid])) {
590 			printf("\nSpecified port ID(%u) is not valid\n",
591 				ports[portid]);
592 			ports[portid] = INVALID_PORT_ID;
593 			valid_num_ports--;
594 		}
595 	}
596 	return valid_num_ports;
597 }
598 
599 /* Main function, does initialisation and calls the per-lcore functions */
600 int
601 main(int argc, char *argv[])
602 {
603 	struct rte_mempool *mbuf_pool;
604 	unsigned lcore_id, core_id = 0;
605 	int ret;
606 	unsigned nb_ports, valid_num_ports;
607 	uint16_t portid;
608 
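	/* Print per-pool RX statistics whenever the process receives SIGHUP. */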
609 	signal(SIGHUP, sighup_handler);
610 
611 	/* init EAL */
612 	ret = rte_eal_init(argc, argv);
613 	if (ret < 0)
614 		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
615 	argc -= ret;
616 	argv += ret;
617 
618 	/* parse app arguments */
619 	ret = vmdq_parse_args(argc, argv);
620 	if (ret < 0)
621 		rte_exit(EXIT_FAILURE, "Invalid VMDQ argument\n");
622 
623 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
624 		if (rte_lcore_is_enabled(lcore_id))
625 			lcore_ids[core_id++] = lcore_id;
626 
627 	if (rte_lcore_count() > RTE_MAX_LCORE)
628 		rte_exit(EXIT_FAILURE, "Not enough cores\n");
629 
630 	nb_ports = rte_eth_dev_count_avail();
631 
632 	/*
633 	 * Update the global variable num_ports and the global array ports, and
634 	 * get the number of valid ports according to the system port count
635 	 */
636 	valid_num_ports = check_ports_num(nb_ports);
637 
638 	if (valid_num_ports < 2 || valid_num_ports % 2) {
639 		printf("Current valid ports number is %u\n", valid_num_ports);
640 		rte_exit(EXIT_FAILURE, "Error with valid ports number is not even or less than 2\n");
641 	}
642 
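	/*
	 * Create a single mbuf pool large enough to fill the RX/TX descriptor
	 * rings of every queue on every enabled port.
	 */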
643 	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
644 		NUM_MBUFS_PER_PORT * nb_ports, MBUF_CACHE_SIZE,
645 		0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
646 	if (mbuf_pool == NULL)
647 		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
648 
649 	/* initialize all ports */
650 	RTE_ETH_FOREACH_DEV(portid) {
651 		/* skip ports that are not enabled */
652 		if ((enabled_port_mask & (1 << portid)) == 0) {
653 			printf("\nSkipping disabled port %d\n", portid);
654 			continue;
655 		}
656 		if (port_init(portid, mbuf_pool) != 0)
657 			rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n");
658 	}
659 
660 	/* call lcore_main() on every lcore */
661 	rte_eal_mp_remote_launch(lcore_main, NULL, CALL_MAIN);
662 	RTE_LCORE_FOREACH_WORKER(lcore_id) {
663 		if (rte_eal_wait_lcore(lcore_id) < 0)
664 			return -1;
665 	}
666 
667 	/* clean up the EAL */
668 	rte_eal_cleanup();
669 
670 	return 0;
671 }
672