xref: /dpdk/examples/vmdq/main.c (revision fc1f2750a3ec6da919e3c86e59d56f34ec97154b)
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <sys/queue.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdarg.h>
#include <inttypes.h>
#include <getopt.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>

#include "main.h"

#define MAX_QUEUES 128
/*
 * For 10 GbE, 128 queues require roughly
 * 128*512 (RX/TX_queue_nb * RX/TX_ring_descriptors_nb) mbufs per port.
 */
#define NUM_MBUFS_PER_PORT (128*512)
#define MBUF_CACHE_SIZE 64
#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)

#define MAX_PKT_BURST 32

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512

#define INVALID_PORT_ID 0xFF

/* mask of enabled ports */
static uint32_t enabled_port_mask;

/* number of pools (8 by default if the user does not specify any) */
static uint32_t num_queues = 8;
static uint32_t num_pools = 8;

/* empty vmdq configuration structure. Filled in programmatically */
static const struct rte_eth_conf vmdq_conf_default = {
	.rxmode = {
		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
	},

	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
	.rx_adv_conf = {
		/*
		 * should be overridden separately in code with
		 * appropriate values
		 */
		.vmdq_rx_conf = {
			.nb_queue_pools = ETH_8_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
		},
	},
};

static unsigned lcore_ids[RTE_MAX_LCORE];
static uint8_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports; /**< The number of ports specified on the command line */

/* array used for printing out statistics */
volatile unsigned long rxPackets[MAX_QUEUES] = {0};

const uint16_t vlan_tags[] = {
	0,  1,  2,  3,  4,  5,  6,  7,
	8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31,
	32, 33, 34, 35, 36, 37, 38, 39,
	40, 41, 42, 43, 44, 45, 46, 47,
	48, 49, 50, 51, 52, 53, 54, 55,
	56, 57, 58, 59, 60, 61, 62, 63,
};
const uint16_t num_vlans = RTE_DIM(vlan_tags);
static uint16_t num_pf_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
/* pool mac addr template, pool mac addr is like: 52 54 00 12 port# pool# */
static struct ether_addr pool_addr_template = {
	.addr_bytes = {0x52, 0x54, 0x00, 0x12, 0x00, 0x00}
};

/* ethernet addresses of ports */
static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];

#define MAX_QUEUE_NUM_10G 128
#define MAX_QUEUE_NUM_1G 8
#define MAX_POOL_MAP_NUM_10G 64
#define MAX_POOL_MAP_NUM_1G 32
#define MAX_POOL_NUM_10G 64
#define MAX_POOL_NUM_1G 8
/*
 * Build up the correct configuration for VMDQ based on the vlan tags array
 * given above, and determine the queue number and pool map number according
 * to the number of valid pools.
 */
static inline int
get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_pools)
{
	struct rte_eth_vmdq_rx_conf conf;
	unsigned i;

	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
	conf.nb_pool_maps = num_pools;
	conf.enable_default_pool = 0;
	conf.default_pool = 0; /* set explicit value, even if not used */

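	/*
	 * Map each of the first nb_pool_maps VLAN tags to one pool,
	 * assigning tags to pools round-robin; with nb_pool_maps equal to
	 * num_pools every pool receives exactly one VLAN filter entry.
	 */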
	for (i = 0; i < conf.nb_pool_maps; i++) {
		conf.pool_map[i].vlan_id = vlan_tags[i];
		conf.pool_map[i].pools = (1UL << (i % num_pools));
	}

	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
	return 0;
}

/*
 * Initialises a given port using global settings and with the rx buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf *rxconf;
	struct rte_eth_conf port_conf;
	uint16_t rxRings, txRings;
	const uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT;
	const uint16_t txRingSize = RTE_TEST_TX_DESC_DEFAULT;
	int retval;
	uint16_t q;
	uint16_t queues_per_pool;
	uint32_t max_nb_pools;

	/*
	 * The max pool number from dev_info will be used to validate the
	 * number of pools specified on the command line.
	 */
	rte_eth_dev_info_get(port, &dev_info);
	max_nb_pools = (uint32_t)dev_info.max_vmdq_pools;
	/*
	 * Only a subset of the VMDQ pools, given by num_pools on the command
	 * line, is actually used.
	 */
	if (num_pools > max_nb_pools) {
		printf("num_pools %u > max_nb_pools %u\n",
			num_pools, max_nb_pools);
		return -1;
	}
	retval = get_eth_conf(&port_conf, max_nb_pools);
	if (retval < 0)
		return retval;

	/*
	 * NIC queues are divided into pf queues and vmdq queues.
	 */
	/* There is an assumption here that all ports have the same configuration! */
	num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
	queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
	num_vmdq_queues = num_pools * queues_per_pool;
	num_queues = num_pf_queues + num_vmdq_queues;
	vmdq_queue_base = dev_info.vmdq_queue_base;
	vmdq_pool_base  = dev_info.vmdq_pool_base;

	printf("pf queue num: %u, configured vmdq pool num: %u,"
		" each vmdq pool has %u queues\n",
		num_pf_queues, num_pools, queues_per_pool);
	printf("vmdq queue base: %d pool base %d\n",
		vmdq_queue_base, vmdq_pool_base);
	if (port >= rte_eth_dev_count())
		return -1;

	/*
	 * Though in this example we only receive packets from the first queue
	 * of each pool and send packets through the first rte_lcore_count()
	 * vmdq tx queues, all queues including pf queues are set up.
	 * This is because VMDQ queues don't always start from zero, and the
	 * PMD layer doesn't support selectively initialising only a subset
	 * of the rx/tx queues.
	 */
	rxRings = (uint16_t)dev_info.max_rx_queues;
	txRings = (uint16_t)dev_info.max_tx_queues;
	retval = rte_eth_dev_configure(port, rxRings, txRings, &port_conf);
	if (retval != 0)
		return retval;

	rte_eth_dev_info_get(port, &dev_info);
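	/*
	 * Enable drop-on-no-descriptors for every RX queue, so that a queue
	 * that is not being serviced cannot stall RX on the rest of the port.
	 */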
	rxconf = &dev_info.default_rxconf;
	rxconf->rx_drop_en = 1;
	for (q = 0; q < rxRings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
					rte_eth_dev_socket_id(port),
					rxconf,
					mbuf_pool);
		if (retval < 0) {
			printf("initialise rx queue %d failed\n", q);
			return retval;
		}
	}

	for (q = 0; q < txRings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, txRingSize,
					rte_eth_dev_socket_id(port),
					NULL);
		if (retval < 0) {
			printf("initialise tx queue %d failed\n", q);
			return retval;
		}
	}

	retval = rte_eth_dev_start(port);
	if (retval < 0) {
		printf("port %d start failed\n", port);
		return retval;
	}

	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			(unsigned)port,
			vmdq_ports_eth_addr[port].addr_bytes[0],
			vmdq_ports_eth_addr[port].addr_bytes[1],
			vmdq_ports_eth_addr[port].addr_bytes[2],
			vmdq_ports_eth_addr[port].addr_bytes[3],
			vmdq_ports_eth_addr[port].addr_bytes[4],
			vmdq_ports_eth_addr[port].addr_bytes[5]);

	/*
	 * Set a mac for each pool.
	 * There is no default mac for the pools in i40e.
	 * Remove this after i40e fixes this issue.
	 */
	for (q = 0; q < num_pools; q++) {
		struct ether_addr mac;
		mac = pool_addr_template;
		mac.addr_bytes[4] = port;
		mac.addr_bytes[5] = q;
		printf("Port %u vmdq pool %u set mac %02x:%02x:%02x:%02x:%02x:%02x\n",
			port, q,
			mac.addr_bytes[0], mac.addr_bytes[1],
			mac.addr_bytes[2], mac.addr_bytes[3],
			mac.addr_bytes[4], mac.addr_bytes[5]);
		retval = rte_eth_dev_mac_addr_add(port, &mac,
				q + vmdq_pool_base);
		if (retval) {
			printf("mac addr add failed at pool %d\n", q);
			return retval;
		}
	}

	return 0;
}

/* Check the num_pools parameter and set it if OK */
static int
vmdq_parse_num_pools(const char *q_arg)
{
	char *end = NULL;
	int n;

	/* parse number string */
	n = strtol(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (n > num_vlans) {
		printf("num_pools %d > num_vlans %d\n", n, num_vlans);
		return -1;
	}

	num_pools = n;

	return 0;
}


static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string, return 0 (treated as invalid) on error */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	if (pm == 0)
		return 0;

	return pm;
}

/* Display usage */
static void
vmdq_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK\n"
	"  --nb-pools NP: number of pools\n",
	       prgname);
}
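
/*
 * Example invocation (illustrative only; the EAL core mask, memory channels,
 * port mask and pool count depend on the target system):
 *   ./build/vmdq_app -c 0xf -n 4 -- -p 0x3 --nb-pools 8
 */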

/* Parse the arguments (portmask and number of pools) given on the command line */
static int
vmdq_parse_args(int argc, char **argv)
{
	int opt;
	int option_index;
	unsigned i;
	const char *prgname = argv[0];
	static struct option long_option[] = {
		{"nb-pools", required_argument, NULL, 0},
		{NULL, 0, 0, 0}
	};

	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:", long_option,
		&option_index)) != EOF) {
		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				vmdq_usage(prgname);
				return -1;
			}
			break;
		case 0:
			if (vmdq_parse_num_pools(optarg) == -1) {
				printf("invalid number of pools\n");
				vmdq_usage(prgname);
				return -1;
			}
			break;

		default:
			vmdq_usage(prgname);
			return -1;
		}
	}

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (enabled_port_mask & (1 << i))
			ports[num_ports++] = (uint8_t)i;
	}

	if (num_ports < 2 || num_ports % 2) {
		printf("Current enabled port number is %u, "
			"but it should be even and at least 2\n", num_ports);
		return -1;
	}

	return 0;
}

static void
update_mac_address(struct rte_mbuf *m, unsigned dst_port)
{
	struct ether_hdr *eth;
	void *tmp;

	eth = rte_pktmbuf_mtod(m, struct ether_hdr *);

	/* 02:00:00:00:00:xx */
	tmp = &eth->d_addr.addr_bytes[0];
	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);
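	/*
	 * Note: the 64-bit store above also clobbers the first two bytes of
	 * the source address; it is rewritten immediately below.
	 */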

	/* src addr */
	ether_addr_copy(&vmdq_ports_eth_addr[dst_port], &eth->s_addr);
}

#ifndef RTE_EXEC_ENV_BAREMETAL
/* When we receive a HUP signal, print out our stats */
static void
sighup_handler(int signum)
{
	unsigned q;
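	/* Print the per-queue RX counters, grouped one line per pool. */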
	for (q = 0; q < num_queues; q++) {
		if (q % (num_queues/num_pools) == 0)
			printf("\nPool %u: ", q/(num_queues/num_pools));
		printf("%lu ", rxPackets[q]);
	}
	printf("\nFinished handling signal %d\n", signum);
}
#endif

/*
 * Per-lcore worker: reads packets from its share of the VMDQ RX queues on
 * each port and forwards them to the paired port.
 */
static int
lcore_main(__attribute__((__unused__)) void *dummy)
{
	const uint16_t lcore_id = (uint16_t)rte_lcore_id();
	const uint16_t num_cores = (uint16_t)rte_lcore_count();
	uint16_t core_id = 0;
	uint16_t startQueue, endQueue;
	uint16_t q, i, p;
	const uint16_t remainder = (uint16_t)(num_vmdq_queues % num_cores);

	for (i = 0; i < num_cores; i++)
		if (lcore_ids[i] == lcore_id) {
			core_id = i;
			break;
		}

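	/*
	 * Split the VMDQ RX queues as evenly as possible across the cores;
	 * the first 'remainder' cores each take one extra queue.
	 */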
	if (remainder != 0) {
		if (core_id < remainder) {
			startQueue = (uint16_t)(core_id *
					(num_vmdq_queues / num_cores + 1));
			endQueue = (uint16_t)(startQueue +
					(num_vmdq_queues / num_cores) + 1);
		} else {
			startQueue = (uint16_t)(core_id *
					(num_vmdq_queues / num_cores) +
					remainder);
			endQueue = (uint16_t)(startQueue +
					(num_vmdq_queues / num_cores));
		}
	} else {
		startQueue = (uint16_t)(core_id *
				(num_vmdq_queues / num_cores));
		endQueue = (uint16_t)(startQueue +
				(num_vmdq_queues / num_cores));
	}

	/* vmdq queue idx doesn't always start from zero. */
	startQueue += vmdq_queue_base;
	endQueue   += vmdq_queue_base;
	printf("core %u (lcore %u) reading queues %i-%i\n", (unsigned)core_id,
		(unsigned)lcore_id, startQueue, endQueue - 1);

	if (startQueue == endQueue) {
		printf("lcore %u has nothing to do\n", lcore_id);
		return 0;
	}

	for (;;) {
		struct rte_mbuf *buf[MAX_PKT_BURST];
		const uint16_t buf_size = sizeof(buf) / sizeof(buf[0]);

		for (p = 0; p < num_ports; p++) {
			const uint8_t sport = ports[p];
			/* 0 <-> 1, 2 <-> 3 etc */
			const uint8_t dport = ports[p ^ 1];
			if ((sport == INVALID_PORT_ID) || (dport == INVALID_PORT_ID))
				continue;

			for (q = startQueue; q < endQueue; q++) {
				const uint16_t rxCount = rte_eth_rx_burst(sport,
					q, buf, buf_size);

				if (unlikely(rxCount == 0))
					continue;

				rxPackets[q] += rxCount;

				for (i = 0; i < rxCount; i++)
					update_mac_address(buf[i], dport);

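				/*
				 * Each core transmits on its own TX queue
				 * (vmdq_queue_base + core_id), so no locking
				 * is needed between the forwarding cores.
				 */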
				const uint16_t txCount = rte_eth_tx_burst(dport,
					vmdq_queue_base + core_id,
					buf,
					rxCount);

				if (txCount != rxCount) {
					for (i = txCount; i < rxCount; i++)
						rte_pktmbuf_free(buf[i]);
				}
			}
		}
	}
}

/*
 * Update the global variable num_ports and the ports[] array according to the
 * number of ports in the system, and return the number of valid ports.
 */
static unsigned check_ports_num(unsigned nb_ports)
{
	unsigned valid_num_ports = num_ports;
	unsigned portid;

	if (num_ports > nb_ports) {
		printf("\nSpecified port number (%u) exceeds total system port number (%u)\n",
			num_ports, nb_ports);
		num_ports = nb_ports;
	}

	for (portid = 0; portid < num_ports; portid++) {
		if (ports[portid] >= nb_ports) {
			printf("\nSpecified port ID (%u) exceeds max system port ID (%u)\n",
				ports[portid], (nb_ports - 1));
			ports[portid] = INVALID_PORT_ID;
			valid_num_ports--;
		}
	}
	return valid_num_ports;
}

/* Main function, does initialisation and calls the per-lcore functions */
int
MAIN(int argc, char *argv[])
{
	struct rte_mempool *mbuf_pool;
	unsigned lcore_id, core_id = 0;
	int ret;
	unsigned nb_ports, valid_num_ports;
	uint8_t portid;

#ifndef RTE_EXEC_ENV_BAREMETAL
	signal(SIGHUP, sighup_handler);
#endif

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	argc -= ret;
	argv += ret;

	/* parse app arguments */
	ret = vmdq_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid VMDQ argument\n");

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
		if (rte_lcore_is_enabled(lcore_id))
			lcore_ids[core_id++] = lcore_id;

	if (rte_lcore_count() > RTE_MAX_LCORE)
		rte_exit(EXIT_FAILURE, "Not enough cores\n");

	nb_ports = rte_eth_dev_count();
	if (nb_ports > RTE_MAX_ETHPORTS)
		nb_ports = RTE_MAX_ETHPORTS;

	/*
	 * Update the global num_ports and the ports[] array, and get the
	 * number of valid ports according to the number of ports in the
	 * system.
	 */
	valid_num_ports = check_ports_num(nb_ports);

	if (valid_num_ports < 2 || valid_num_ports % 2) {
		printf("Current number of valid ports is %u\n", valid_num_ports);
		rte_exit(EXIT_FAILURE, "The number of valid ports must be even and at least 2\n");
	}

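	/*
	 * Size the mbuf pool for the worst case: every RX queue of every
	 * port keeping a full ring of descriptors (see NUM_MBUFS_PER_PORT).
	 */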
	mbuf_pool = rte_mempool_create("MBUF_POOL", NUM_MBUFS_PER_PORT * nb_ports,
				       MBUF_SIZE, MBUF_CACHE_SIZE,
				       sizeof(struct rte_pktmbuf_pool_private),
				       rte_pktmbuf_pool_init, NULL,
				       rte_pktmbuf_init, NULL,
				       rte_socket_id(), 0);
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	/* initialize all ports */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			continue;
		}
		if (port_init(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n");
	}

	/* call lcore_main() on every lcore */
	rte_eal_mp_remote_launch(lcore_main, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	return 0;
}