/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdint.h>
#include <sys/queue.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdarg.h>
#include <inttypes.h>
#include <getopt.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>

/* basic constants used in application */
#define MAX_QUEUES 1024
/*
 * 1024 queues are required to meet the needs of a large number of vmdq pools.
 * Each port needs (RX/TX queue number * RX/TX ring descriptor number) mbufs.
 */
#define NUM_MBUFS_PER_PORT (MAX_QUEUES * RTE_MAX(RTE_TEST_RX_DESC_DEFAULT, \
						RTE_TEST_TX_DESC_DEFAULT))
#define MBUF_CACHE_SIZE 64

#define MAX_PKT_BURST 32

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512

#define INVALID_PORT_ID 0xFF

/* mask of enabled ports */
static uint32_t enabled_port_mask;
static uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports;

/* number of pools (if user does not specify any, 32 by default) */
static enum rte_eth_nb_pools num_pools = ETH_32_POOLS;
static enum rte_eth_nb_tcs   num_tcs   = ETH_4_TCS;
static uint16_t num_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
static uint8_t rss_enable;

/* empty vmdq+dcb configuration structure. Filled in programmatically */
static const struct rte_eth_conf vmdq_dcb_conf_default = {
	.rxmode = {
		.mq_mode        = ETH_MQ_RX_VMDQ_DCB,
		.split_hdr_size = 0,
		.ignore_offload_bitfield = 1,
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_VMDQ_DCB,
	},
	/*
	 * should be overridden separately in code with
	 * appropriate values
	 */
	.rx_adv_conf = {
		.vmdq_dcb_conf = {
			.nb_queue_pools = ETH_32_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
			.dcb_tc = {0},
		},
		.dcb_rx_conf = {
			.nb_tcs = ETH_4_TCS,
			/** Traffic class each UP mapped to. */
			.dcb_tc = {0},
		},
		.vmdq_rx_conf = {
			.nb_queue_pools = ETH_32_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
		},
	},
	.tx_adv_conf = {
		.vmdq_dcb_tx_conf = {
			.nb_queue_pools = ETH_32_POOLS,
			.dcb_tc = {0},
		},
	},
};

/* array used for printing out statistics */
volatile unsigned long rxPackets[MAX_QUEUES] = {0};

const uint16_t vlan_tags[] = {
	0,  1,  2,  3,  4,  5,  6,  7,
	8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};

const uint16_t num_vlans = RTE_DIM(vlan_tags);
/* pool mac addr template, pool mac addr is like: 52 54 00 12 port# pool# */
static struct ether_addr pool_addr_template = {
	.addr_bytes = {0x52, 0x54, 0x00, 0x12, 0x00, 0x00}
};

/* ethernet addresses of ports */
static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];

/* Builds up the correct configuration for vmdq+dcb based on the vlan tags array
 * given above, and the number of traffic classes available for use. */
static inline int
get_eth_conf(struct rte_eth_conf *eth_conf)
{
	struct rte_eth_vmdq_dcb_conf conf;
	struct rte_eth_vmdq_rx_conf  vmdq_conf;
	struct rte_eth_dcb_rx_conf   dcb_conf;
	struct rte_eth_vmdq_dcb_tx_conf tx_conf;
	uint8_t i;

	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
	vmdq_conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
	tx_conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
	conf.nb_pool_maps = num_pools;
	vmdq_conf.nb_pool_maps = num_pools;
	conf.enable_default_pool = 0;
	vmdq_conf.enable_default_pool = 0;
	conf.default_pool = 0; /* set explicit value, even if not used */
	vmdq_conf.default_pool = 0;

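	/* Map each VLAN tag to its own pool: packets tagged with vlan_tags[i]
	 * are steered to pool i. */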
	for (i = 0; i < conf.nb_pool_maps; i++) {
		conf.pool_map[i].vlan_id = vlan_tags[i];
		vmdq_conf.pool_map[i].vlan_id = vlan_tags[i];
		conf.pool_map[i].pools = 1UL << i;
		vmdq_conf.pool_map[i].pools = 1UL << i;
	}
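	/* Spread the user priorities round-robin over the configured TCs. */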
	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
		conf.dcb_tc[i] = i % num_tcs;
		dcb_conf.dcb_tc[i] = i % num_tcs;
		tx_conf.dcb_tc[i] = i % num_tcs;
	}
	dcb_conf.nb_tcs = (enum rte_eth_nb_tcs)num_tcs;
	(void)(rte_memcpy(eth_conf, &vmdq_dcb_conf_default, sizeof(*eth_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &conf,
			  sizeof(conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &dcb_conf,
			  sizeof(dcb_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &vmdq_conf,
			  sizeof(vmdq_conf)));
	(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &tx_conf,
			  sizeof(tx_conf)));
	if (rss_enable) {
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
							ETH_RSS_UDP |
							ETH_RSS_TCP |
							ETH_RSS_SCTP;
	}
	return 0;
}

/*
 * Initialises a given port using global settings and with the rx buffers
 * coming from the mbuf_pool passed as parameter
 */
static inline int
port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf = {0};
	uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT;
	uint16_t txRingSize = RTE_TEST_TX_DESC_DEFAULT;
	int retval;
	uint16_t q;
	uint16_t queues_per_pool;
	uint32_t max_nb_pools;
	struct rte_eth_txconf txq_conf;

	/*
	 * The max pool number from dev_info will be used to validate the pool
	 * number specified in cmd line
	 */
	rte_eth_dev_info_get(port, &dev_info);
	max_nb_pools = (uint32_t)dev_info.max_vmdq_pools;
	/*
	 * We allow processing only a subset of the VMDQ pools, as specified
	 * by num_pools on the command line.
	 */
	if (num_pools > max_nb_pools) {
		printf("num_pools %d > max_nb_pools %u\n",
			num_pools, max_nb_pools);
		return -1;
	}

	/*
	 * NIC queues are divided into PF queues and VMDQ queues.
	 * It is assumed here that all ports have the same configuration.
	 */
	vmdq_queue_base = dev_info.vmdq_queue_base;
	vmdq_pool_base  = dev_info.vmdq_pool_base;
	printf("vmdq queue base: %d pool base %d\n",
		vmdq_queue_base, vmdq_pool_base);
	if (vmdq_pool_base == 0) {
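		/* pool base 0: the whole port runs in VMDQ mode and every RX
		 * queue belongs to a pool */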
		num_vmdq_queues = dev_info.max_rx_queues;
		num_queues = dev_info.max_rx_queues;
		if (num_tcs != num_vmdq_queues / num_pools) {
			printf("nb_tcs %d is invalid: with nb_pools %d,"
				" nb_tcs * nb_pools must equal %d\n",
				num_tcs, num_pools, num_vmdq_queues);
			return -1;
		}
	} else {
		queues_per_pool = dev_info.vmdq_queue_num /
				  dev_info.max_vmdq_pools;
		if (num_tcs > queues_per_pool) {
			printf("num_tcs %d > num of queues per pool %d\n",
				num_tcs, queues_per_pool);
			return -1;
		}
		num_vmdq_queues = num_pools * queues_per_pool;
		num_queues = vmdq_queue_base + num_vmdq_queues;
		printf("Configured vmdq pool num: %u,"
			" each vmdq pool has %u queues\n",
			num_pools, queues_per_pool);
	}

	if (port >= rte_eth_dev_count())
		return -1;

	retval = get_eth_conf(&port_conf);
	if (retval < 0)
		return retval;

	rte_eth_dev_info_get(port, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	/*
	 * In this example all queues, including the PF queues, are set up.
	 * This is because VMDQ queues don't always start from zero, and the
	 * PMD layer doesn't support selectively initialising only part of
	 * the rx/tx queues.
	 */
	retval = rte_eth_dev_configure(port, num_queues, num_queues, &port_conf);
	if (retval != 0)
		return retval;

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rxRingSize,
				&txRingSize);
	if (retval != 0)
		return retval;
	if (RTE_MAX(rxRingSize, txRingSize) >
	    RTE_MAX(RTE_TEST_RX_DESC_DEFAULT, RTE_TEST_TX_DESC_DEFAULT)) {
		printf("Mbuf pool has an insufficient size for port %u.\n",
			port);
		return -1;
	}

	for (q = 0; q < num_queues; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
					rte_eth_dev_socket_id(port),
					NULL,
					mbuf_pool);
		if (retval < 0) {
			printf("initialize rx queue %d failed\n", q);
			return retval;
		}
	}

	txq_conf = dev_info.default_txconf;
	txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
	txq_conf.offloads = port_conf.txmode.offloads;
	for (q = 0; q < num_queues; q++) {
		retval = rte_eth_tx_queue_setup(port, q, txRingSize,
					rte_eth_dev_socket_id(port),
					&txq_conf);
		if (retval < 0) {
			printf("initialize tx queue %d failed\n", q);
			return retval;
		}
	}

	retval = rte_eth_dev_start(port);
	if (retval < 0) {
		printf("port %d start failed\n", port);
		return retval;
	}

	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			(unsigned)port,
			vmdq_ports_eth_addr[port].addr_bytes[0],
			vmdq_ports_eth_addr[port].addr_bytes[1],
			vmdq_ports_eth_addr[port].addr_bytes[2],
			vmdq_ports_eth_addr[port].addr_bytes[3],
			vmdq_ports_eth_addr[port].addr_bytes[4],
			vmdq_ports_eth_addr[port].addr_bytes[5]);

	/* Set mac for each pool. */
	for (q = 0; q < num_pools; q++) {
		struct ether_addr mac;

		mac = pool_addr_template;
		mac.addr_bytes[4] = port;
		mac.addr_bytes[5] = q;
		printf("Port %u vmdq pool %u set mac %02x:%02x:%02x:%02x:%02x:%02x\n",
			port, q,
			mac.addr_bytes[0], mac.addr_bytes[1],
			mac.addr_bytes[2], mac.addr_bytes[3],
			mac.addr_bytes[4], mac.addr_bytes[5]);
		retval = rte_eth_dev_mac_addr_add(port, &mac,
				q + vmdq_pool_base);
		if (retval) {
			printf("mac addr add failed at pool %d\n", q);
			return retval;
		}
	}

	return 0;
}

/* Check num_pools parameter and set it if OK */
static int
vmdq_parse_num_pools(const char *q_arg)
{
	char *end = NULL;
	int n;

	/* parse number string */
	n = strtol(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;
	if (n != 16 && n != 32)
		return -1;
	if (n == 16)
		num_pools = ETH_16_POOLS;
	else
		num_pools = ETH_32_POOLS;

	return 0;
}

/* Check num_tcs parameter and set it if OK */
static int
vmdq_parse_num_tcs(const char *q_arg)
{
	char *end = NULL;
	int n;

	/* parse number string */
	n = strtol(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (n != 4 && n != 8)
		return -1;
	if (n == 4)
		num_tcs = ETH_4_TCS;
	else
		num_tcs = ETH_8_TCS;

	return 0;
}

static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (pm == 0)
		return -1;

	return pm;
}

/* Display usage */
static void
vmdq_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK\n"
	"  --nb-pools NP: number of pools (16 or 32, default 32)\n"
	"  --nb-tcs NT: number of TCs (4 or 8, default 4)\n"
	"  --enable-rss: enable RSS (disabled by default)\n",
	       prgname);
}

/* Parse the arguments given in the command line of the application */
static int
vmdq_parse_args(int argc, char **argv)
{
	int opt;
	int option_index;
	unsigned i;
	const char *prgname = argv[0];
	static struct option long_option[] = {
		{"nb-pools", required_argument, NULL, 0},
		{"nb-tcs", required_argument, NULL, 0},
		{"enable-rss", 0, NULL, 0},
		{NULL, 0, 0, 0}
	};

	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:", long_option,
		&option_index)) != -1) {
		switch (opt) {
		/* portmask */
		case 'p': {
			int pm = parse_portmask(optarg);

			if (pm < 0) {
				printf("invalid portmask\n");
				vmdq_usage(prgname);
				return -1;
			}
			enabled_port_mask = (uint32_t)pm;
			break;
		}
		case 0:
			if (!strcmp(long_option[option_index].name, "nb-pools")) {
				if (vmdq_parse_num_pools(optarg) == -1) {
					printf("invalid number of pools\n");
					return -1;
				}
			}

			if (!strcmp(long_option[option_index].name, "nb-tcs")) {
				if (vmdq_parse_num_tcs(optarg) == -1) {
					printf("invalid number of tcs\n");
					return -1;
				}
			}

			if (!strcmp(long_option[option_index].name, "enable-rss"))
				rss_enable = 1;
			break;

		default:
			vmdq_usage(prgname);
			return -1;
		}
	}

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (enabled_port_mask & (1 << i))
			ports[num_ports++] = (uint16_t)i;
	}

	if (num_ports < 2 || num_ports % 2) {
		printf("Current enabled port number is %u,"
			" but it should be even and at least 2\n", num_ports);
		return -1;
	}

	return 0;
}

static void
update_mac_address(struct rte_mbuf *m, unsigned dst_port)
{
	struct ether_hdr *eth;
	void *tmp;

	eth = rte_pktmbuf_mtod(m, struct ether_hdr *);

	/* 02:00:00:00:00:xx */
	tmp = &eth->d_addr.addr_bytes[0];
	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);
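	/* the 64-bit store also clobbers the first two bytes of s_addr,
	 * which are rewritten by the copy just below */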

	/* src addr */
	ether_addr_copy(&vmdq_ports_eth_addr[dst_port], &eth->s_addr);
}

/* When we receive a HUP signal, print out our stats */
static void
sighup_handler(int signum)
{
	unsigned q = vmdq_queue_base;

	for (; q < num_queues; q++) {
		if (q % (num_vmdq_queues / num_pools) == 0)
			printf("\nPool %u: ", (q - vmdq_queue_base) /
					      (num_vmdq_queues / num_pools));
		printf("%lu ", rxPackets[q]);
	}
	printf("\nFinished handling signal %d\n", signum);
}

/*
 * Main thread that does the work: reads packets from the RX queues
 * assigned to this core and forwards them to the paired port.
 */
static int
lcore_main(void *arg)
{
	const uintptr_t core_num = (uintptr_t)arg;
	const unsigned num_cores = rte_lcore_count();
	uint16_t startQueue, endQueue;
	uint16_t q, i, p;
	const uint16_t quot = (uint16_t)(num_vmdq_queues / num_cores);
	const uint16_t remainder = (uint16_t)(num_vmdq_queues % num_cores);

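	/* Split the VMDQ queues evenly across the cores; the first 'remainder'
	 * cores each take one extra queue so every queue is polled by exactly
	 * one core. */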
	if (remainder) {
		if (core_num < remainder) {
			startQueue = (uint16_t)(core_num * (quot + 1));
			endQueue = (uint16_t)(startQueue + quot + 1);
		} else {
			startQueue = (uint16_t)(core_num * quot + remainder);
			endQueue = (uint16_t)(startQueue + quot);
		}
	} else {
		startQueue = (uint16_t)(core_num * quot);
		endQueue = (uint16_t)(startQueue + quot);
	}

	/* vmdq queue idx doesn't always start from zero. */
	startQueue += vmdq_queue_base;
	endQueue   += vmdq_queue_base;
	printf("Core %u (lcore %u) reading queues %i-%i\n", (unsigned)core_num,
	       rte_lcore_id(), startQueue, endQueue - 1);

	if (startQueue == endQueue) {
		printf("lcore %u has nothing to do\n", (unsigned)core_num);
		return 0;
	}

	for (;;) {
		struct rte_mbuf *buf[MAX_PKT_BURST];
		const uint16_t buf_size = sizeof(buf) / sizeof(buf[0]);
		for (p = 0; p < num_ports; p++) {
			const uint8_t src = ports[p];
			const uint8_t dst = ports[p ^ 1]; /* 0 <-> 1, 2 <-> 3 etc */

			if ((src == INVALID_PORT_ID) || (dst == INVALID_PORT_ID))
				continue;

			for (q = startQueue; q < endQueue; q++) {
				const uint16_t rxCount = rte_eth_rx_burst(src,
					q, buf, buf_size);

				if (unlikely(rxCount == 0))
					continue;

				rxPackets[q] += rxCount;

				for (i = 0; i < rxCount; i++)
					update_mac_address(buf[i], dst);

				const uint16_t txCount = rte_eth_tx_burst(dst,
					q, buf, rxCount);
				if (txCount != rxCount) {
					for (i = txCount; i < rxCount; i++)
						rte_pktmbuf_free(buf[i]);
				}
			}
		}
	}
}

/*
 * Update the global variable num_ports and the array ports according to the
 * number of ports in the system, and return the number of valid ports.
 */
static unsigned check_ports_num(unsigned nb_ports)
{
	unsigned valid_num_ports = num_ports;
	unsigned portid;

	if (num_ports > nb_ports) {
		printf("\nSpecified port number(%u) exceeds total system port number(%u)\n",
			num_ports, nb_ports);
		num_ports = nb_ports;
	}

	for (portid = 0; portid < num_ports; portid++) {
		if (ports[portid] >= nb_ports) {
			printf("\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
				ports[portid], (nb_ports - 1));
			ports[portid] = INVALID_PORT_ID;
			valid_num_ports--;
		}
	}
	return valid_num_ports;
}

/* Main function, does initialisation and calls the per-lcore functions */
int
main(int argc, char *argv[])
{
	unsigned cores;
	struct rte_mempool *mbuf_pool;
	unsigned lcore_id;
	uintptr_t i;
	int ret;
	unsigned nb_ports, valid_num_ports;
	uint16_t portid;

	signal(SIGHUP, sighup_handler);

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	argc -= ret;
	argv += ret;

	/* parse app arguments */
	ret = vmdq_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid VMDQ argument\n");

	cores = rte_lcore_count();
	if ((cores & (cores - 1)) != 0 || cores > RTE_MAX_LCORE) {
		rte_exit(EXIT_FAILURE, "This program can only run on a number"
				" of cores that is a power of two (1-%d)\n\n",
				RTE_MAX_LCORE);
	}

	nb_ports = rte_eth_dev_count();

	/*
	 * Update the global variable num_ports and the array ports, and get
	 * the number of valid ports according to the ports in the system.
	 */
	valid_num_ports = check_ports_num(nb_ports);

	if (valid_num_ports < 2 || valid_num_ports % 2) {
		printf("Current valid ports number is %u\n", valid_num_ports);
		rte_exit(EXIT_FAILURE, "Error: the number of valid ports must be even and at least 2\n");
	}

	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
		NUM_MBUFS_PER_PORT * nb_ports, MBUF_CACHE_SIZE,
		0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	/* initialize all ports */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			continue;
		}
		if (port_init(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n");
	}

	/* call lcore_main() on every slave lcore */
	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		rte_eal_remote_launch(lcore_main, (void *)i++, lcore_id);
	}
	/* call on master too */
	(void) lcore_main((void *)i);

	return 0;
}