xref: /dpdk/examples/vmdq/main.c (revision e11bdd37745229bf26b557305c07d118c3dbaad7)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4 
5 #include <stdint.h>
6 #include <sys/queue.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <stdio.h>
10 #include <assert.h>
11 #include <errno.h>
12 #include <signal.h>
13 #include <stdarg.h>
14 #include <inttypes.h>
15 #include <getopt.h>
16 
17 #include <rte_common.h>
18 #include <rte_log.h>
19 #include <rte_memory.h>
20 #include <rte_memcpy.h>
21 #include <rte_eal.h>
22 #include <rte_launch.h>
23 #include <rte_atomic.h>
24 #include <rte_cycles.h>
25 #include <rte_prefetch.h>
26 #include <rte_lcore.h>
27 #include <rte_per_lcore.h>
28 #include <rte_branch_prediction.h>
29 #include <rte_interrupts.h>
30 #include <rte_random.h>
31 #include <rte_debug.h>
32 #include <rte_ether.h>
33 #include <rte_ethdev.h>
34 #include <rte_mempool.h>
35 #include <rte_mbuf.h>
36 
37 #define MAX_QUEUES 1024
38 /*
39  * 1024 queues are required to meet the needs of a large number of vmdq_pools:
40  * (RX/TX_queue_nb * RX/TX_ring_descriptors_nb) mbufs are needed per port.
41  */
42 #define NUM_MBUFS_PER_PORT (MAX_QUEUES * RTE_MAX(RTE_TEST_RX_DESC_DEFAULT, \
43 						RTE_TEST_TX_DESC_DEFAULT))
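/*
 * Note: with MAX_QUEUES = 1024 and the default 1024 ring descriptors defined
 * below, this works out to roughly one million mbufs per port, so the mempool
 * created in main() is correspondingly large.
 */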
44 #define MBUF_CACHE_SIZE 64
45 
46 #define MAX_PKT_BURST 32
47 
48 /*
49  * Configurable number of RX/TX ring descriptors
50  */
51 #define RTE_TEST_RX_DESC_DEFAULT 1024
52 #define RTE_TEST_TX_DESC_DEFAULT 1024
53 
54 #define INVALID_PORT_ID 0xFF
55 
56 /* mask of enabled ports */
57 static uint32_t enabled_port_mask;
58 
59 /* number of pools (8 by default if the user does not specify any) */
60 static uint32_t num_queues = 8;
61 static uint32_t num_pools = 8;
62 static uint8_t rss_enable;
63 
64 /* empty vmdq configuration structure. Filled in programmatically */
65 static const struct rte_eth_conf vmdq_conf_default = {
66 	.rxmode = {
67 		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
68 		.split_hdr_size = 0,
69 	},
70 
71 	.txmode = {
72 		.mq_mode = ETH_MQ_TX_NONE,
73 	},
74 	.rx_adv_conf = {
75 		/*
76 		 * should be overridden separately in code with
77 		 * appropriate values
78 		 */
79 		.vmdq_rx_conf = {
80 			.nb_queue_pools = ETH_8_POOLS,
81 			.enable_default_pool = 0,
82 			.default_pool = 0,
83 			.nb_pool_maps = 0,
84 			.pool_map = {{0, 0},},
85 		},
86 	},
87 };
88 
89 static unsigned lcore_ids[RTE_MAX_LCORE];
90 static uint16_t ports[RTE_MAX_ETHPORTS];
91 static unsigned num_ports; /**< The number of ports specified in command line */
92 
93 /* array used for printing out statistics */
94 volatile unsigned long rxPackets[MAX_QUEUES] = {0};
95 
96 const uint16_t vlan_tags[] = {
97 	0,  1,  2,  3,  4,  5,  6,  7,
98 	8,  9, 10, 11,	12, 13, 14, 15,
99 	16, 17, 18, 19, 20, 21, 22, 23,
100 	24, 25, 26, 27, 28, 29, 30, 31,
101 	32, 33, 34, 35, 36, 37, 38, 39,
102 	40, 41, 42, 43, 44, 45, 46, 47,
103 	48, 49, 50, 51, 52, 53, 54, 55,
104 	56, 57, 58, 59, 60, 61, 62, 63,
105 };
106 const uint16_t num_vlans = RTE_DIM(vlan_tags);
107 static uint16_t num_pf_queues,  num_vmdq_queues;
108 static uint16_t vmdq_pool_base, vmdq_queue_base;
109 /* pool mac addr template, pool mac addr is like: 52 54 00 12 port# pool# */
110 static struct rte_ether_addr pool_addr_template = {
111 	.addr_bytes = {0x52, 0x54, 0x00, 0x12, 0x00, 0x00}
112 };
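/*
 * For example, pool 2 on port 0 gets MAC 52:54:00:12:00:02 once port_init()
 * fills in the last two bytes in its per-pool loop.
 */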
113 
114 /* ethernet addresses of ports */
115 static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
116 
117 #define MAX_QUEUE_NUM_10G 128
118 #define MAX_QUEUE_NUM_1G 8
119 #define MAX_POOL_MAP_NUM_10G 64
120 #define MAX_POOL_MAP_NUM_1G 32
121 #define MAX_POOL_NUM_10G 64
122 #define MAX_POOL_NUM_1G 8
123 /*
124  * Builds up the correct configuration for vmdq based on the vlan tags array
125  * given above, and determines the number of queue pools and pool maps
126  * according to the valid pool number.
127  */
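/*
 * For example, with 8 pools the loop below creates eight pool map entries:
 * vlan tag 0 -> pool 0, vlan tag 1 -> pool 1, ..., vlan tag 7 -> pool 7,
 * each entry selecting its pool with a one-bit mask (1UL << pool index).
 */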
128 static inline int
129 get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_pools)
130 {
131 	struct rte_eth_vmdq_rx_conf conf;
132 	unsigned i;
133 
134 	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
135 	conf.nb_pool_maps = num_pools;
136 	conf.enable_default_pool = 0;
137 	conf.default_pool = 0; /* set explicit value, even if not used */
138 
139 	for (i = 0; i < conf.nb_pool_maps; i++) {
140 		conf.pool_map[i].vlan_id = vlan_tags[i];
141 		conf.pool_map[i].pools = (1UL << (i % num_pools));
142 	}
143 
144 	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
145 	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
146 		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
147 	if (rss_enable) {
148 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
149 		eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
150 							ETH_RSS_UDP |
151 							ETH_RSS_TCP |
152 							ETH_RSS_SCTP;
153 	}
154 	return 0;
155 }
156 
157 /*
158  * Initialises a given port using global settings and with the rx buffers
159  * coming from the mbuf_pool passed as parameter
160  */
161 static inline int
162 port_init(uint16_t port, struct rte_mempool *mbuf_pool)
163 {
164 	struct rte_eth_dev_info dev_info;
165 	struct rte_eth_rxconf *rxconf;
166 	struct rte_eth_txconf *txconf;
167 	struct rte_eth_conf port_conf;
168 	uint16_t rxRings, txRings;
169 	uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT;
170 	uint16_t txRingSize = RTE_TEST_TX_DESC_DEFAULT;
171 	int retval;
172 	uint16_t q;
173 	uint16_t queues_per_pool;
174 	uint32_t max_nb_pools;
175 	uint64_t rss_hf_tmp;
176 
177 	/*
178 	 * The max pool number from dev_info will be used to validate the pool
179 	 * number specified in cmd line
180 	 */
181 	retval = rte_eth_dev_info_get(port, &dev_info);
182 	if (retval != 0) {
183 		printf("Error during getting device (port %u) info: %s\n",
184 				port, strerror(-retval));
185 		return retval;
186 	}
187 
188 	max_nb_pools = (uint32_t)dev_info.max_vmdq_pools;
189 	/*
190 	 * We allow processing only part of the VMDQ pools, as specified by
191 	 * num_pools on the command line.
192 	 */
193 	if (num_pools > max_nb_pools) {
194 		printf("num_pools %u > max_nb_pools %u\n",
195 			num_pools, max_nb_pools);
196 		return -1;
197 	}
198 	retval = get_eth_conf(&port_conf, max_nb_pools);
199 	if (retval < 0)
200 		return retval;
201 
202 	/*
203 	 * NIC queues are divided into pf queues and vmdq queues.
204 	 */
205 	/* There is an assumption here that all ports have the same configuration! */
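	/*
	 * For example, a NIC reporting 128 rx queues, 128 vmdq queues and
	 * 64 vmdq pools gives 0 pf queues and 2 queues per pool, so with
	 * --nb-pools 8 only 16 vmdq queues end up being polled.
	 */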
206 	num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
207 	queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
208 	num_vmdq_queues = num_pools * queues_per_pool;
209 	num_queues = num_pf_queues + num_vmdq_queues;
210 	vmdq_queue_base = dev_info.vmdq_queue_base;
211 	vmdq_pool_base  = dev_info.vmdq_pool_base;
212 
213 	printf("pf queue num: %u, configured vmdq pool num: %u,"
214 		" each vmdq pool has %u queues\n",
215 		num_pf_queues, num_pools, queues_per_pool);
216 	printf("vmdq queue base: %d pool base %d\n",
217 		vmdq_queue_base, vmdq_pool_base);
218 	if (!rte_eth_dev_is_valid_port(port))
219 		return -1;
220 
221 	rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
222 	port_conf.rx_adv_conf.rss_conf.rss_hf &=
223 		dev_info.flow_type_rss_offloads;
224 	if (port_conf.rx_adv_conf.rss_conf.rss_hf != rss_hf_tmp) {
225 		printf("Port %u modified RSS hash function based on hardware support, "
226 			"requested:%#"PRIx64" configured:%#"PRIx64"\n",
227 			port,
228 			rss_hf_tmp,
229 			port_conf.rx_adv_conf.rss_conf.rss_hf);
230 	}
231 
232 	/*
233 	 * Though in this example we only receive packets from the first queue
234 	 * of each pool and send packets through the first rte_lcore_count() tx
235 	 * queues of the vmdq queues, all queues including pf queues are set up.
236 	 * This is because VMDq queues don't always start from zero, and the
237 	 * PMD layer doesn't support selectively initialising only part of the
238 	 * rx/tx queues.
239 	 */
240 	rxRings = (uint16_t)dev_info.max_rx_queues;
241 	txRings = (uint16_t)dev_info.max_tx_queues;
242 
243 	retval = rte_eth_dev_info_get(port, &dev_info);
244 	if (retval != 0) {
245 		printf("Error during getting device (port %u) info: %s\n",
246 				port, strerror(-retval));
247 		return retval;
248 	}
249 
250 	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
251 		port_conf.txmode.offloads |=
252 			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
253 	retval = rte_eth_dev_configure(port, rxRings, txRings, &port_conf);
254 	if (retval != 0)
255 		return retval;
256 
257 	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rxRingSize,
258 				&txRingSize);
259 	if (retval != 0)
260 		return retval;
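	/*
	 * NUM_MBUFS_PER_PORT was sized from the default descriptor counts,
	 * so refuse to run if the driver adjusted the rings to something
	 * larger than that.
	 */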
261 	if (RTE_MAX(rxRingSize, txRingSize) > RTE_MAX(RTE_TEST_RX_DESC_DEFAULT,
262 			RTE_TEST_TX_DESC_DEFAULT)) {
263 		printf("Mbuf pool has an insufficient size for port %u.\n",
264 			port);
265 		return -1;
266 	}
267 
268 	rxconf = &dev_info.default_rxconf;
269 	rxconf->rx_drop_en = 1;
270 	txconf = &dev_info.default_txconf;
271 	txconf->offloads = port_conf.txmode.offloads;
272 	for (q = 0; q < rxRings; q++) {
273 		retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
274 					rte_eth_dev_socket_id(port),
275 					rxconf,
276 					mbuf_pool);
277 		if (retval < 0) {
278 			printf("failed to initialise rx queue %d\n", q);
279 			return retval;
280 		}
281 	}
282 
283 	for (q = 0; q < txRings; q++) {
284 		retval = rte_eth_tx_queue_setup(port, q, txRingSize,
285 					rte_eth_dev_socket_id(port),
286 					txconf);
287 		if (retval < 0) {
288 			printf("failed to initialise tx queue %d\n", q);
289 			return retval;
290 		}
291 	}
292 
293 	retval  = rte_eth_dev_start(port);
294 	if (retval < 0) {
295 		printf("port %d start failed\n", port);
296 		return retval;
297 	}
298 
299 	retval = rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
300 	if (retval < 0) {
301 		printf("port %d MAC address get failed: %s\n", port,
302 		       rte_strerror(-retval));
303 		return retval;
304 	}
305 	printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
306 			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
307 			(unsigned)port,
308 			vmdq_ports_eth_addr[port].addr_bytes[0],
309 			vmdq_ports_eth_addr[port].addr_bytes[1],
310 			vmdq_ports_eth_addr[port].addr_bytes[2],
311 			vmdq_ports_eth_addr[port].addr_bytes[3],
312 			vmdq_ports_eth_addr[port].addr_bytes[4],
313 			vmdq_ports_eth_addr[port].addr_bytes[5]);
314 
315 	/*
316 	 * Set mac for each pool.
317 	 * There is no default mac for the pools in i40e.
318 	 * Remove this once i40e fixes this issue.
319 	 */
320 	for (q = 0; q < num_pools; q++) {
321 		struct rte_ether_addr mac;
322 		mac = pool_addr_template;
323 		mac.addr_bytes[4] = port;
324 		mac.addr_bytes[5] = q;
325 		printf("Port %u vmdq pool %u set mac %02x:%02x:%02x:%02x:%02x:%02x\n",
326 			port, q,
327 			mac.addr_bytes[0], mac.addr_bytes[1],
328 			mac.addr_bytes[2], mac.addr_bytes[3],
329 			mac.addr_bytes[4], mac.addr_bytes[5]);
330 		retval = rte_eth_dev_mac_addr_add(port, &mac,
331 				q + vmdq_pool_base);
332 		if (retval) {
333 			printf("mac addr add failed at pool %d\n", q);
334 			return retval;
335 		}
336 	}
337 
338 	return 0;
339 }
340 
341 /* Check the num_pools parameter and set it if OK */
342 static int
343 vmdq_parse_num_pools(const char *q_arg)
344 {
345 	char *end = NULL;
346 	int n;
347 
348 	/* parse number string */
349 	n = strtol(q_arg, &end, 10);
350 	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
351 		return -1;
352 
353 	if (n > num_vlans) {
354 		printf("num_pools %d > num_vlans %d\n", n, num_vlans);
355 		return -1;
356 	}
357 
358 	num_pools = n;
359 
360 	return 0;
361 }
362 
363 
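/*
 * Parse the hexadecimal port mask, e.g. "-p 0x3" enables ports 0 and 1
 * (bit n of the mask selects port n). Returns the mask, or 0 on error.
 */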
364 static int
365 parse_portmask(const char *portmask)
366 {
367 	char *end = NULL;
368 	unsigned long pm;
369 
370 	/* parse hexadecimal string */
371 	pm = strtoul(portmask, &end, 16);
372 	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
373 		return 0;
374 
375 	if (pm == 0)
376 		return 0;
377 
378 	return pm;
379 }
380 
381 /* Display usage */
382 static void
383 vmdq_usage(const char *prgname)
384 {
385 	printf("%s [EAL options] -- -p PORTMASK\n"
386 	"  --nb-pools NP: number of pools\n"
387 	"  --enable-rss: enable RSS (disabled by default)\n",
388 	       prgname);
389 }
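/*
 * A typical invocation (illustrative only; the binary name and the core/port
 * layout depend on the local build and setup) looks like:
 *   ./<build-dir>/vmdq -l 0-3 -n 4 -- -p 0x3 --nb-pools 8 --enable-rss
 */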
390 
391 /* Parse the arguments given in the command line of the application */
392 static int
393 vmdq_parse_args(int argc, char **argv)
394 {
395 	int opt;
396 	int option_index;
397 	unsigned i;
398 	const char *prgname = argv[0];
399 	static struct option long_option[] = {
400 		{"nb-pools", required_argument, NULL, 0},
401 		{"enable-rss", 0, NULL, 0},
402 		{NULL, 0, 0, 0}
403 	};
404 
405 	/* Parse command line */
406 	while ((opt = getopt_long(argc, argv, "p:", long_option,
407 		&option_index)) != EOF) {
408 		switch (opt) {
409 		/* portmask */
410 		case 'p':
411 			enabled_port_mask = parse_portmask(optarg);
412 			if (enabled_port_mask == 0) {
413 				printf("invalid portmask\n");
414 				vmdq_usage(prgname);
415 				return -1;
416 			}
417 			break;
418 		case 0:
419 			if (!strcmp(long_option[option_index].name,
420 			    "nb-pools")) {
421 				if (vmdq_parse_num_pools(optarg) == -1) {
422 					printf("invalid number of pools\n");
423 					vmdq_usage(prgname);
424 					return -1;
425 				}
426 			}
427 
428 			if (!strcmp(long_option[option_index].name,
429 			    "enable-rss"))
430 				rss_enable = 1;
431 			break;
432 
433 		default:
434 			vmdq_usage(prgname);
435 			return -1;
436 		}
437 	}
438 
439 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
440 		if (enabled_port_mask & (1 << i))
441 			ports[num_ports++] = (uint8_t)i;
442 	}
443 
444 	if (num_ports < 2 || num_ports % 2) {
445 		printf("Current enabled port number is %u, "
446 			"but it should be even and at least 2\n", num_ports);
447 		return -1;
448 	}
449 
450 	return 0;
451 }
452 
453 static void
454 update_mac_address(struct rte_mbuf *m, unsigned dst_port)
455 {
456 	struct rte_ether_hdr *eth;
457 	void *tmp;
458 
459 	eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
460 
461 	/* 02:00:00:00:00:xx */
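	/*
	 * On a little-endian CPU this 64-bit store writes 0x02 into the first
	 * byte of the destination MAC and the destination port id into the
	 * sixth; the two bytes that spill into the source address are
	 * overwritten by the copy just below.
	 */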
462 	tmp = &eth->d_addr.addr_bytes[0];
463 	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);
464 
465 	/* src addr */
466 	rte_ether_addr_copy(&vmdq_ports_eth_addr[dst_port], &eth->s_addr);
467 }
468 
469 /* When we receive a HUP signal, print out our stats */
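/* (e.g. run "kill -HUP <pid>" from another shell to dump the counters) */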
470 static void
471 sighup_handler(int signum)
472 {
473 	unsigned int q = vmdq_queue_base;
474 	for (; q < num_queues; q++) {
475 		if ((q - vmdq_queue_base) % (num_vmdq_queues / num_pools) == 0)
476 			printf("\nPool %u: ", (q - vmdq_queue_base) /
477 			       (num_vmdq_queues / num_pools));
478 		printf("%lu ", rxPackets[q]);
479 	}
480 	printf("\nFinished handling signal %d\n", signum);
481 }
482 
483 /*
484  * Per-lcore worker that does the forwarding, reading from the vmdq
485  * queues assigned to this core and writing to the paired destination port
486  */
487 static int
488 lcore_main(__rte_unused void *dummy)
489 {
490 	const uint16_t lcore_id = (uint16_t)rte_lcore_id();
491 	const uint16_t num_cores = (uint16_t)rte_lcore_count();
492 	uint16_t core_id = 0;
493 	uint16_t startQueue, endQueue;
494 	uint16_t q, i, p;
495 	const uint16_t remainder = (uint16_t)(num_vmdq_queues % num_cores);
496 
497 	for (i = 0; i < num_cores; i++)
498 		if (lcore_ids[i] == lcore_id) {
499 			core_id = i;
500 			break;
501 		}
502 
503 	if (remainder != 0) {
504 		if (core_id < remainder) {
505 			startQueue = (uint16_t)(core_id *
506 					(num_vmdq_queues / num_cores + 1));
507 			endQueue = (uint16_t)(startQueue +
508 					(num_vmdq_queues / num_cores) + 1);
509 		} else {
510 			startQueue = (uint16_t)(core_id *
511 					(num_vmdq_queues / num_cores) +
512 					remainder);
513 			endQueue = (uint16_t)(startQueue +
514 					(num_vmdq_queues / num_cores));
515 		}
516 	} else {
517 		startQueue = (uint16_t)(core_id *
518 				(num_vmdq_queues / num_cores));
519 		endQueue = (uint16_t)(startQueue +
520 				(num_vmdq_queues / num_cores));
521 	}
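	/*
	 * The split above gives the first "remainder" cores one extra queue:
	 * e.g. 10 vmdq queues over 4 cores are read as 3, 3, 2 and 2 queues.
	 */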
522 
523 	/* vmdq queue indices don't always start from zero. */
524 	startQueue += vmdq_queue_base;
525 	endQueue   += vmdq_queue_base;
526 	printf("core %u (lcore %u) reading queues %i-%i\n", (unsigned)core_id,
527 		(unsigned)lcore_id, startQueue, endQueue - 1);
528 
529 	if (startQueue == endQueue) {
530 		printf("lcore %u has nothing to do\n", lcore_id);
531 		return 0;
532 	}
533 
534 	for (;;) {
535 		struct rte_mbuf *buf[MAX_PKT_BURST];
536 		const uint16_t buf_size = RTE_DIM(buf);
537 
538 		for (p = 0; p < num_ports; p++) {
539 			const uint8_t sport = ports[p];
540 			/* 0 <-> 1, 2 <-> 3 etc */
541 			const uint8_t dport = ports[p ^ 1];
542 			if ((sport == INVALID_PORT_ID) || (dport == INVALID_PORT_ID))
543 				continue;
544 
545 			for (q = startQueue; q < endQueue; q++) {
546 				const uint16_t rxCount = rte_eth_rx_burst(sport,
547 					q, buf, buf_size);
548 
549 				if (unlikely(rxCount == 0))
550 					continue;
551 
552 				rxPackets[q] += rxCount;
553 
554 				for (i = 0; i < rxCount; i++)
555 					update_mac_address(buf[i], dport);
556 
557 				const uint16_t txCount = rte_eth_tx_burst(dport,
558 					vmdq_queue_base + core_id,
559 					buf,
560 					rxCount);
561 
562 				if (txCount != rxCount) {
563 					for (i = txCount; i < rxCount; i++)
564 						rte_pktmbuf_free(buf[i]);
565 				}
566 			}
567 		}
568 	}
569 }
570 
571 /*
572  * Update the global variable num_ports and the ports[] array according to the
573  * number of ports in the system, and return the number of valid ports
574  */
575 static unsigned check_ports_num(unsigned nb_ports)
576 {
577 	unsigned valid_num_ports = num_ports;
578 	unsigned portid;
579 
580 	if (num_ports > nb_ports) {
581 		printf("\nSpecified port number(%u) exceeds total system port number(%u)\n",
582 			num_ports, nb_ports);
583 		num_ports = nb_ports;
584 	}
585 
586 	for (portid = 0; portid < num_ports; portid++) {
587 		if (!rte_eth_dev_is_valid_port(ports[portid])) {
588 			printf("\nSpecified port ID(%u) is not valid\n",
589 				ports[portid]);
590 			ports[portid] = INVALID_PORT_ID;
591 			valid_num_ports--;
592 		}
593 	}
594 	return valid_num_ports;
595 }
596 
597 /* Main function, does initialisation and calls the per-lcore functions */
598 int
599 main(int argc, char *argv[])
600 {
601 	struct rte_mempool *mbuf_pool;
602 	unsigned lcore_id, core_id = 0;
603 	int ret;
604 	unsigned nb_ports, valid_num_ports;
605 	uint16_t portid;
606 
607 	signal(SIGHUP, sighup_handler);
608 
609 	/* init EAL */
610 	ret = rte_eal_init(argc, argv);
611 	if (ret < 0)
612 		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
613 	argc -= ret;
614 	argv += ret;
615 
616 	/* parse app arguments */
617 	ret = vmdq_parse_args(argc, argv);
618 	if (ret < 0)
619 		rte_exit(EXIT_FAILURE, "Invalid VMDQ argument\n");
620 
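	/*
	 * Build lcore_ids[]: a contiguous worker index for each enabled EAL
	 * lcore; lcore_main() looks itself up in this array to find its index.
	 */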
621 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
622 		if (rte_lcore_is_enabled(lcore_id))
623 			lcore_ids[core_id++] = lcore_id;
624 
625 	if (rte_lcore_count() > RTE_MAX_LCORE)
626 		rte_exit(EXIT_FAILURE, "Not enough cores\n");
627 
628 	nb_ports = rte_eth_dev_count_avail();
629 
630 	/*
631 	 * Update the global variable num_ports and the global ports[] array,
632 	 * and get the number of valid ports according to the system port count
633 	 */
634 	valid_num_ports = check_ports_num(nb_ports);
635 
636 	if (valid_num_ports < 2 || valid_num_ports % 2) {
637 		printf("Current valid ports number is %u\n", valid_num_ports);
638 		rte_exit(EXIT_FAILURE, "Error: the number of valid ports must be even and at least 2\n");
639 	}
640 
641 	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
642 		NUM_MBUFS_PER_PORT * nb_ports, MBUF_CACHE_SIZE,
643 		0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
644 	if (mbuf_pool == NULL)
645 		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
646 
647 	/* initialize all ports */
648 	RTE_ETH_FOREACH_DEV(portid) {
649 		/* skip ports that are not enabled */
650 		if ((enabled_port_mask & (1 << portid)) == 0) {
651 			printf("\nSkipping disabled port %d\n", portid);
652 			continue;
653 		}
654 		if (port_init(portid, mbuf_pool) != 0)
655 			rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n");
656 	}
657 
658 	/* call lcore_main() on every lcore */
659 	rte_eal_mp_remote_launch(lcore_main, NULL, CALL_MASTER);
660 	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
661 		if (rte_eal_wait_lcore(lcore_id) < 0)
662 			return -1;
663 	}
664 
665 	return 0;
666 }
667