xref: /dpdk/examples/vmdq/main.c (revision daa02b5cddbb8e11b31d41e2bf7bb1ae64dcae2f)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdint.h>
#include <sys/queue.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdarg.h>
#include <inttypes.h>
#include <getopt.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>

#define MAX_QUEUES 1024
/*
 * 1024 queues are required to meet the needs of a large number of vmdq pools.
 * The mbuf pool must hold (RX/TX queue count * RX/TX ring descriptor count)
 * mbufs per port.
 */
#define NUM_MBUFS_PER_PORT (MAX_QUEUES * RTE_MAX(RTE_TEST_RX_DESC_DEFAULT, \
						RTE_TEST_TX_DESC_DEFAULT))
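/*
 * Note: RTE_TEST_RX/TX_DESC_DEFAULT are defined further down; this is fine
 * because the preprocessor only expands NUM_MBUFS_PER_PORT where it is used
 * (in main(), after both definitions are visible).
 */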
#define MBUF_CACHE_SIZE 64

#define MAX_PKT_BURST 32

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 1024

#define INVALID_PORT_ID 0xFF

/* mask of enabled ports */
static uint32_t enabled_port_mask;
/* number of pools (if the user does not specify any, 8 by default) */
static uint32_t num_queues = 8;
static uint32_t num_pools = 8;
static uint8_t rss_enable;

/* Default structure for VMDq. 8< */

/* empty vmdq configuration structure. Filled in programmatically */
static const struct rte_eth_conf vmdq_conf_default = {
	.rxmode = {
		.mq_mode        = RTE_ETH_MQ_RX_VMDQ_ONLY,
		.split_hdr_size = 0,
	},

	.txmode = {
		.mq_mode = RTE_ETH_MQ_TX_NONE,
	},
	.rx_adv_conf = {
		/*
		 * should be overridden separately in code with
		 * appropriate values
		 */
		.vmdq_rx_conf = {
			.nb_queue_pools = RTE_ETH_8_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
		},
	},
};
/* >8 End of empty vmdq configuration structure. */

static unsigned lcore_ids[RTE_MAX_LCORE];
static uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports; /**< The number of ports specified in command line */

/* array used for printing out statistics */
volatile unsigned long rxPackets[MAX_QUEUES] = {0};

/* vlan_tags 8< */
const uint16_t vlan_tags[] = {
	0,  1,  2,  3,  4,  5,  6,  7,
	8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31,
	32, 33, 34, 35, 36, 37, 38, 39,
	40, 41, 42, 43, 44, 45, 46, 47,
	48, 49, 50, 51, 52, 53, 54, 55,
	56, 57, 58, 59, 60, 61, 62, 63,
};
/* >8 End of vlan_tags. */
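
/*
 * VLAN tags used to build the pool map in get_eth_conf(): entry i is
 * assigned to pool (i % nb_queue_pools), and only the first nb_pool_maps
 * entries are consumed.
 */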

const uint16_t num_vlans = RTE_DIM(vlan_tags);
static uint16_t num_pf_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;

/* Pool mac address template. 8< */

/* pool mac addr template, pool mac addr is like: 52 54 00 12 port# pool# */
static struct rte_ether_addr pool_addr_template = {
	.addr_bytes = {0x52, 0x54, 0x00, 0x12, 0x00, 0x00}
};
/* >8 End of mac addr template. */

/* ethernet addresses of ports */
static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];

#define MAX_QUEUE_NUM_10G 128
#define MAX_QUEUE_NUM_1G 8
#define MAX_POOL_MAP_NUM_10G 64
#define MAX_POOL_MAP_NUM_1G 32
#define MAX_POOL_NUM_10G 64
#define MAX_POOL_NUM_1G 8
/*
 * Builds up the correct configuration for vmdq based on the vlan tags array
 * given above, and determines the queue number and pool map number according
 * to the valid pool number.
 */

/* Building correct configuration for vmdq. 8< */
static inline int
get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_pools)
{
	struct rte_eth_vmdq_rx_conf conf;
	unsigned i;

	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
	conf.nb_pool_maps = num_pools;
	conf.enable_default_pool = 0;
	conf.default_pool = 0; /* set explicit value, even if not used */

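	/* Map the first nb_pool_maps VLAN tags 1:1 onto the pools. */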
	for (i = 0; i < conf.nb_pool_maps; i++) {
		conf.pool_map[i].vlan_id = vlan_tags[i];
		conf.pool_map[i].pools = (1UL << (i % num_pools));
	}

	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
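	/*
	 * With RSS enabled, traffic is still steered to a pool by VLAN/MAC,
	 * and RSS then spreads it across the queues inside that pool.
	 */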
	if (rss_enable) {
		eth_conf->rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP |
							RTE_ETH_RSS_UDP |
							RTE_ETH_RSS_TCP |
							RTE_ETH_RSS_SCTP;
	}
	return 0;
}

/*
 * Initialises a given port using global settings and with the rx buffers
 * coming from the mbuf_pool passed as parameter
 */
static inline int
port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf *rxconf;
	struct rte_eth_txconf *txconf;
	struct rte_eth_conf port_conf;
	uint16_t rxRings, txRings;
	uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT;
	uint16_t txRingSize = RTE_TEST_TX_DESC_DEFAULT;
	int retval;
	uint16_t q;
	uint16_t queues_per_pool;
	uint32_t max_nb_pools;
	uint64_t rss_hf_tmp;

	/*
	 * The max pool number from dev_info will be used to validate the pool
	 * number specified in cmd line
	 */
	retval = rte_eth_dev_info_get(port, &dev_info);
	if (retval != 0) {
		printf("Error during getting device (port %u) info: %s\n",
				port, strerror(-retval));
		return retval;
	}

	max_nb_pools = (uint32_t)dev_info.max_vmdq_pools;
	/*
	 * Only a subset of the VMDQ pools, as specified by num_pools on the
	 * command line, may be used.
	 */
	if (num_pools > max_nb_pools) {
		printf("num_pools %u > max_nb_pools %u\n",
			num_pools, max_nb_pools);
		return -1;
	}
	retval = get_eth_conf(&port_conf, max_nb_pools);
	if (retval < 0)
		return retval;

	/*
	 * NIC queues are divided into pf queues and vmdq queues.
	 * Note: this assumes all ports have the same configuration!
	 */
	num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
	queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
	num_vmdq_queues = num_pools * queues_per_pool;
	num_queues = num_pf_queues + num_vmdq_queues;
	vmdq_queue_base = dev_info.vmdq_queue_base;
	vmdq_pool_base  = dev_info.vmdq_pool_base;
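	/*
	 * Illustrative example: a device reporting 192 RX queues, of which
	 * 128 belong to 64 VMDq pools, gives 64 PF queues and 2 queues per
	 * pool; with num_pools = 8 that is 16 VMDq queues actually polled.
	 */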

	printf("pf queue num: %u, configured vmdq pool num: %u,"
		" each vmdq pool has %u queues\n",
		num_pf_queues, num_pools, queues_per_pool);
	printf("vmdq queue base: %d pool base %d\n",
		vmdq_queue_base, vmdq_pool_base);
	if (!rte_eth_dev_is_valid_port(port))
		return -1;

	rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
	port_conf.rx_adv_conf.rss_conf.rss_hf &=
		dev_info.flow_type_rss_offloads;
	if (port_conf.rx_adv_conf.rss_conf.rss_hf != rss_hf_tmp) {
		printf("Port %u modified RSS hash function based on hardware support,"
			" requested:%#"PRIx64" configured:%#"PRIx64"\n",
			port,
			rss_hf_tmp,
			port_conf.rx_adv_conf.rss_conf.rss_hf);
	}

	/*
	 * Though in this example we only receive packets from the first queue
	 * of each pool and send packets through the first rte_lcore_count()
	 * VMDq tx queues, all queues including pf queues are set up.
	 * This is because VMDq queues don't always start from zero, and the
	 * PMD layer doesn't support selectively initialising only part of the
	 * rx/tx queues.
	 */
	rxRings = (uint16_t)dev_info.max_rx_queues;
	txRings = (uint16_t)dev_info.max_tx_queues;

	retval = rte_eth_dev_info_get(port, &dev_info);
	if (retval != 0) {
		printf("Error during getting device (port %u) info: %s\n",
				port, strerror(-retval));
		return retval;
	}

	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
	retval = rte_eth_dev_configure(port, rxRings, txRings, &port_conf);
	if (retval != 0)
		return retval;

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rxRingSize,
				&txRingSize);
	if (retval != 0)
		return retval;
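	/*
	 * The mbuf pool created in main() is sized from the default ring
	 * sizes, so give up if the driver adjusted them upwards.
	 */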
	if (RTE_MAX(rxRingSize, txRingSize) > RTE_MAX(RTE_TEST_RX_DESC_DEFAULT,
			RTE_TEST_TX_DESC_DEFAULT)) {
		printf("Mbuf pool has an insufficient size for port %u.\n",
			port);
		return -1;
	}

	rxconf = &dev_info.default_rxconf;
	rxconf->rx_drop_en = 1;
	txconf = &dev_info.default_txconf;
	txconf->offloads = port_conf.txmode.offloads;
	for (q = 0; q < rxRings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
					rte_eth_dev_socket_id(port),
					rxconf,
					mbuf_pool);
		if (retval < 0) {
			printf("initialise rx queue %d failed\n", q);
			return retval;
		}
	}

	for (q = 0; q < txRings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, txRingSize,
					rte_eth_dev_socket_id(port),
					txconf);
		if (retval < 0) {
			printf("initialise tx queue %d failed\n", q);
			return retval;
		}
	}

	retval = rte_eth_dev_start(port);
	if (retval < 0) {
		printf("port %d start failed\n", port);
		return retval;
	}

	retval = rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	if (retval < 0) {
		printf("port %d MAC address get failed: %s\n", port,
		       rte_strerror(-retval));
		return retval;
	}
	printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			(unsigned)port,
			RTE_ETHER_ADDR_BYTES(&vmdq_ports_eth_addr[port]));

	/*
	 * Set a mac for each pool.
	 * There is no default mac for the pools in i40e.
	 * Remove this after i40e fixes this issue.
	 */
	for (q = 0; q < num_pools; q++) {
		struct rte_ether_addr mac;
		mac = pool_addr_template;
		mac.addr_bytes[4] = port;
		mac.addr_bytes[5] = q;
		printf("Port %u vmdq pool %u set mac " RTE_ETHER_ADDR_PRT_FMT "\n",
			port, q, RTE_ETHER_ADDR_BYTES(&mac));
		retval = rte_eth_dev_mac_addr_add(port, &mac,
				q + vmdq_pool_base);
		if (retval) {
			printf("mac addr add failed at pool %d\n", q);
			return retval;
		}
	}

	return 0;
}
/* >8 End of get_eth_conf. */

/* Check num_pools parameter and set it if OK */
static int
vmdq_parse_num_pools(const char *q_arg)
{
	char *end = NULL;
	int n;

	/* parse number string */
	n = strtol(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (n > num_vlans) {
		printf("num_pools %d > num_vlans %d\n", n, num_vlans);
		return -1;
	}

	num_pools = n;

	return 0;
}


static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	return pm;
}

/* Display usage */
static void
vmdq_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK\n"
	"  --nb-pools NP: number of pools\n"
	"  --enable-rss: enable RSS (disabled by default)\n",
	       prgname);
}
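
/*
 * Illustrative invocation (binary name and EAL core/memory options depend on
 * your build and system), e.g.:
 *   ./dpdk-vmdq -l 0-3 -n 4 -- -p 0x3 --nb-pools 8 --enable-rss
 */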

/* Parse the arguments given in the command line of the application */
static int
vmdq_parse_args(int argc, char **argv)
{
	int opt;
	int option_index;
	unsigned i;
	const char *prgname = argv[0];
	static struct option long_option[] = {
		{"nb-pools", required_argument, NULL, 0},
		{"enable-rss", 0, NULL, 0},
		{NULL, 0, 0, 0}
	};

	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:", long_option,
		&option_index)) != EOF) {
		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				vmdq_usage(prgname);
				return -1;
			}
			break;
		case 0:
			if (!strcmp(long_option[option_index].name,
			    "nb-pools")) {
				if (vmdq_parse_num_pools(optarg) == -1) {
					printf("invalid number of pools\n");
					vmdq_usage(prgname);
					return -1;
				}
			}

			if (!strcmp(long_option[option_index].name,
			    "enable-rss"))
				rss_enable = 1;
			break;

		default:
			vmdq_usage(prgname);
			return -1;
		}
	}

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (enabled_port_mask & (1 << i))
			ports[num_ports++] = (uint8_t)i;
	}

	if (num_ports < 2 || num_ports % 2) {
		printf("Current enabled port number is %u, "
			"but it should be even and at least 2\n", num_ports);
		return -1;
	}

	return 0;
}

static void
update_mac_address(struct rte_mbuf *m, unsigned dst_port)
{
	struct rte_ether_hdr *eth;
	void *tmp;

	eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	/* 02:00:00:00:00:xx */
	tmp = &eth->dst_addr.addr_bytes[0];
	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);
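	/*
	 * Note: the 8-byte store above spills into the first two bytes of
	 * src_addr, which are overwritten immediately below anyway.
	 */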

	/* src addr */
	rte_ether_addr_copy(&vmdq_ports_eth_addr[dst_port], &eth->src_addr);
}

/* When we receive a HUP signal, print out our stats */
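/* (e.g. trigger it from another shell with: kill -HUP <pid>) */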
static void
sighup_handler(int signum)
{
	unsigned int q = vmdq_queue_base;
	for (; q < num_queues; q++) {
		if ((q - vmdq_queue_base) % (num_vmdq_queues / num_pools) == 0)
			printf("\nPool %u: ", (q - vmdq_queue_base) /
			       (num_vmdq_queues / num_pools));
		printf("%lu ", rxPackets[q]);
	}
	printf("\nFinished handling signal %d\n", signum);
}

/*
 * Main thread that does the work, reading from INPUT_PORT
 * and writing to OUTPUT_PORT
 */
static int
lcore_main(__rte_unused void *dummy)
{
	const uint16_t lcore_id = (uint16_t)rte_lcore_id();
	const uint16_t num_cores = (uint16_t)rte_lcore_count();
	uint16_t core_id = 0;
	uint16_t startQueue, endQueue;
	uint16_t q, i, p;
	const uint16_t remainder = (uint16_t)(num_vmdq_queues % num_cores);

	for (i = 0; i < num_cores; i++)
		if (lcore_ids[i] == lcore_id) {
			core_id = i;
			break;
		}

	if (remainder != 0) {
		if (core_id < remainder) {
			startQueue = (uint16_t)(core_id *
					(num_vmdq_queues / num_cores + 1));
			endQueue = (uint16_t)(startQueue +
					(num_vmdq_queues / num_cores) + 1);
		} else {
			startQueue = (uint16_t)(core_id *
					(num_vmdq_queues / num_cores) +
					remainder);
			endQueue = (uint16_t)(startQueue +
					(num_vmdq_queues / num_cores));
		}
	} else {
		startQueue = (uint16_t)(core_id *
				(num_vmdq_queues / num_cores));
		endQueue = (uint16_t)(startQueue +
				(num_vmdq_queues / num_cores));
	}
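	/*
	 * Example of the split above: 10 VMDq queues over 3 cores gives
	 * core 0 queues 0-3 and cores 1 and 2 three queues each.
	 */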

	/* vmdq queue idx doesn't always start from zero.*/
	startQueue += vmdq_queue_base;
	endQueue   += vmdq_queue_base;
	printf("core %u(lcore %u) reading queues %i-%i\n", (unsigned)core_id,
		(unsigned)lcore_id, startQueue, endQueue - 1);

	if (startQueue == endQueue) {
		printf("lcore %u has nothing to do\n", lcore_id);
		return 0;
	}

	for (;;) {
		struct rte_mbuf *buf[MAX_PKT_BURST];
		const uint16_t buf_size = RTE_DIM(buf);

		for (p = 0; p < num_ports; p++) {
			const uint8_t sport = ports[p];
			/* 0 <-> 1, 2 <-> 3 etc */
			const uint8_t dport = ports[p ^ 1];
			if ((sport == INVALID_PORT_ID) || (dport == INVALID_PORT_ID))
				continue;

			for (q = startQueue; q < endQueue; q++) {
				const uint16_t rxCount = rte_eth_rx_burst(sport,
					q, buf, buf_size);

				if (unlikely(rxCount == 0))
					continue;

				rxPackets[q] += rxCount;

				for (i = 0; i < rxCount; i++)
					update_mac_address(buf[i], dport);

				const uint16_t txCount = rte_eth_tx_burst(dport,
					vmdq_queue_base + core_id,
					buf,
					rxCount);

				if (txCount != rxCount) {
					for (i = txCount; i < rxCount; i++)
						rte_pktmbuf_free(buf[i]);
				}
			}
		}
	}
}

/*
 * Update the global variable num_ports and the ports[] array according to
 * the number of ports in the system, and return the number of valid ports.
 */
static unsigned check_ports_num(unsigned nb_ports)
{
	unsigned valid_num_ports = num_ports;
	unsigned portid;

	if (num_ports > nb_ports) {
		printf("\nSpecified port number(%u) exceeds total system port number(%u)\n",
			num_ports, nb_ports);
		num_ports = nb_ports;
	}

	for (portid = 0; portid < num_ports; portid++) {
		if (!rte_eth_dev_is_valid_port(ports[portid])) {
			printf("\nSpecified port ID(%u) is not valid\n",
				ports[portid]);
			ports[portid] = INVALID_PORT_ID;
			valid_num_ports--;
		}
	}
	return valid_num_ports;
}

/* Main function, does initialisation and calls the per-lcore functions */
int
main(int argc, char *argv[])
{
	struct rte_mempool *mbuf_pool;
	unsigned lcore_id, core_id = 0;
	int ret;
	unsigned nb_ports, valid_num_ports;
	uint16_t portid;

	signal(SIGHUP, sighup_handler);

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	argc -= ret;
	argv += ret;

	/* parse app arguments */
	ret = vmdq_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid VMDQ argument\n");

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
		if (rte_lcore_is_enabled(lcore_id))
			lcore_ids[core_id++] = lcore_id;

	if (rte_lcore_count() > RTE_MAX_LCORE)
		rte_exit(EXIT_FAILURE, "Not enough cores\n");

	nb_ports = rte_eth_dev_count_avail();

	/*
	 * Update the global variable num_ports and the global ports[] array,
	 * and compute valid_num_ports from the number of system ports.
	 */
	valid_num_ports = check_ports_num(nb_ports);

	if (valid_num_ports < 2 || valid_num_ports % 2) {
		printf("Current valid ports number is %u\n", valid_num_ports);
		rte_exit(EXIT_FAILURE, "The number of valid ports must be even and at least 2\n");
	}

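	/*
	 * Size the pool so every descriptor of every queue on every port can
	 * hold an mbuf at the same time (see NUM_MBUFS_PER_PORT above).
	 */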
	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
		NUM_MBUFS_PER_PORT * nb_ports, MBUF_CACHE_SIZE,
		0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(portid) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			continue;
		}
		if (port_init(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n");
	}

	/* call lcore_main() on every lcore */
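	/* CALL_MAIN makes the main lcore run it too, so this call blocks here. */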
	rte_eal_mp_remote_launch(lcore_main, NULL, CALL_MAIN);
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	/* clean up the EAL */
	rte_eal_cleanup();

	return 0;
}