xref: /dpdk/examples/vmdq_dcb/main.c (revision 1cde1b9a9b4dbf31cb5e5ccdfc5da3cb079f43a2)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4 
5 #include <stdint.h>
6 #include <sys/queue.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <stdio.h>
10 #include <assert.h>
11 #include <errno.h>
12 #include <signal.h>
13 #include <stdarg.h>
14 #include <inttypes.h>
15 #include <getopt.h>
16 
17 #include <rte_common.h>
18 #include <rte_log.h>
19 #include <rte_memory.h>
20 #include <rte_memcpy.h>
21 #include <rte_eal.h>
22 #include <rte_launch.h>
23 #include <rte_atomic.h>
24 #include <rte_cycles.h>
25 #include <rte_prefetch.h>
26 #include <rte_lcore.h>
27 #include <rte_per_lcore.h>
28 #include <rte_branch_prediction.h>
29 #include <rte_interrupts.h>
30 #include <rte_random.h>
31 #include <rte_debug.h>
32 #include <rte_ether.h>
33 #include <rte_ethdev.h>
34 #include <rte_mempool.h>
35 #include <rte_mbuf.h>
36 
37 /* basic constants used in application */
38 #define MAX_QUEUES 1024
39 /*
40  * 1024 queues are required to meet the needs of a large number of vmdq_pools.
41  * Each port needs (RX/TX_queue_nb * RX/TX_ring_descriptors_nb) mbufs.
42  */
43 #define NUM_MBUFS_PER_PORT (MAX_QUEUES * RTE_MAX(RTE_TEST_RX_DESC_DEFAULT, \
44 						RTE_TEST_TX_DESC_DEFAULT))
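/*
 * With the default descriptor counts defined below (1024 RX and 1024 TX),
 * NUM_MBUFS_PER_PORT works out to 1024 queues * 1024 descriptors = 1,048,576
 * mbufs per port; main() multiplies this by the number of ports when creating
 * the mbuf pool, so hugepage memory should be budgeted accordingly.
 */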
45 #define MBUF_CACHE_SIZE 64
46 
47 #define MAX_PKT_BURST 32
48 
49 /*
50  * Configurable number of RX/TX ring descriptors
51  */
52 #define RTE_TEST_RX_DESC_DEFAULT 1024
53 #define RTE_TEST_TX_DESC_DEFAULT 1024
54 
55 #define INVALID_PORT_ID 0xFF
56 
57 /* mask of enabled ports */
58 static uint32_t enabled_port_mask;
59 static uint16_t ports[RTE_MAX_ETHPORTS];
60 static unsigned num_ports;
61 
62 /* number of pools (if user does not specify any, 32 by default) */
63 static enum rte_eth_nb_pools num_pools = ETH_32_POOLS;
64 static enum rte_eth_nb_tcs   num_tcs   = ETH_4_TCS;
65 static uint16_t num_queues, num_vmdq_queues;
66 static uint16_t vmdq_pool_base, vmdq_queue_base;
67 static uint8_t rss_enable;
68 
69 /* empty vmdq+dcb configuration structure. Filled in programmatically */
70 static const struct rte_eth_conf vmdq_dcb_conf_default = {
71 	.rxmode = {
72 		.mq_mode        = ETH_MQ_RX_VMDQ_DCB,
73 		.split_hdr_size = 0,
74 	},
75 	.txmode = {
76 		.mq_mode = ETH_MQ_TX_VMDQ_DCB,
77 	},
78 	/*
79 	 * should be overridden separately in code with
80 	 * appropriate values
81 	 */
82 	.rx_adv_conf = {
83 		.vmdq_dcb_conf = {
84 			.nb_queue_pools = ETH_32_POOLS,
85 			.enable_default_pool = 0,
86 			.default_pool = 0,
87 			.nb_pool_maps = 0,
88 			.pool_map = {{0, 0},},
89 			.dcb_tc = {0},
90 		},
91 		.dcb_rx_conf = {
92 				.nb_tcs = ETH_4_TCS,
93 				/** Traffic class each user priority (UP) is mapped to. */
94 				.dcb_tc = {0},
95 		},
96 		.vmdq_rx_conf = {
97 			.nb_queue_pools = ETH_32_POOLS,
98 			.enable_default_pool = 0,
99 			.default_pool = 0,
100 			.nb_pool_maps = 0,
101 			.pool_map = {{0, 0},},
102 		},
103 	},
104 	.tx_adv_conf = {
105 		.vmdq_dcb_tx_conf = {
106 			.nb_queue_pools = ETH_32_POOLS,
107 			.dcb_tc = {0},
108 		},
109 	},
110 };
111 
112 /* array used for printing out statistics */
113 volatile unsigned long rxPackets[MAX_QUEUES] = {0};
114 
115 const uint16_t vlan_tags[] = {
116 	0,  1,  2,  3,  4,  5,  6,  7,
117 	8,  9, 10, 11, 12, 13, 14, 15,
118 	16, 17, 18, 19, 20, 21, 22, 23,
119 	24, 25, 26, 27, 28, 29, 30, 31
120 };
121 
122 const uint16_t num_vlans = RTE_DIM(vlan_tags);
123 /* pool MAC address template; each pool's MAC address is 52 54 00 12 port# pool# */
124 static struct rte_ether_addr pool_addr_template = {
125 	.addr_bytes = {0x52, 0x54, 0x00, 0x12, 0x00, 0x00}
126 };
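/*
 * For example, with this template pool 5 on port 1 ends up with the MAC
 * address 52:54:00:12:01:05; the last two bytes are filled in from the port
 * and pool numbers in port_init() below.
 */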
127 
128 /* ethernet addresses of ports */
129 static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
130 
131 /* Builds up the correct configuration for vmdq+dcb based on the vlan tags array
132  * given above, and the number of traffic classes available for use. */
133 static inline int
134 get_eth_conf(struct rte_eth_conf *eth_conf)
135 {
136 	struct rte_eth_vmdq_dcb_conf conf;
137 	struct rte_eth_vmdq_rx_conf  vmdq_conf;
138 	struct rte_eth_dcb_rx_conf   dcb_conf;
139 	struct rte_eth_vmdq_dcb_tx_conf tx_conf;
140 	uint8_t i;
141 
142 	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
143 	vmdq_conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
144 	tx_conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
145 	conf.nb_pool_maps = num_pools;
146 	vmdq_conf.nb_pool_maps = num_pools;
147 	conf.enable_default_pool = 0;
148 	vmdq_conf.enable_default_pool = 0;
149 	conf.default_pool = 0; /* set explicit value, even if not used */
150 	vmdq_conf.default_pool = 0;
151 
152 	for (i = 0; i < conf.nb_pool_maps; i++) {
153 		conf.pool_map[i].vlan_id = vlan_tags[i];
154 		vmdq_conf.pool_map[i].vlan_id = vlan_tags[i];
155 		conf.pool_map[i].pools = 1UL << i;
156 		vmdq_conf.pool_map[i].pools = 1UL << i;
157 	}
158 	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
159 		conf.dcb_tc[i] = i % num_tcs;
160 		dcb_conf.dcb_tc[i] = i % num_tcs;
161 		tx_conf.dcb_tc[i] = i % num_tcs;
162 	}
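	/*
	 * For illustration: with the default num_tcs == ETH_4_TCS the loop above
	 * maps the 8 user priorities (ETH_DCB_NUM_USER_PRIORITIES) to
	 * dcb_tc = {0, 1, 2, 3, 0, 1, 2, 3}; with --nb-tcs 8 the mapping is the
	 * identity {0, 1, 2, 3, 4, 5, 6, 7}.
	 */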
163 	dcb_conf.nb_tcs = (enum rte_eth_nb_tcs)num_tcs;
164 	(void)(rte_memcpy(eth_conf, &vmdq_dcb_conf_default, sizeof(*eth_conf)));
165 	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &conf,
166 			  sizeof(conf)));
167 	(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &dcb_conf,
168 			  sizeof(dcb_conf)));
169 	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &vmdq_conf,
170 			  sizeof(vmdq_conf)));
171 	(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &tx_conf,
172 			  sizeof(tx_conf)));
173 	if (rss_enable) {
174 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
175 		eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
176 							ETH_RSS_UDP |
177 							ETH_RSS_TCP |
178 							ETH_RSS_SCTP;
179 	}
180 	return 0;
181 }
182 
183 /*
184  * Initialises a given port using global settings and with the rx buffers
185  * coming from the mbuf_pool passed as parameter
186  */
187 static inline int
188 port_init(uint16_t port, struct rte_mempool *mbuf_pool)
189 {
190 	struct rte_eth_dev_info dev_info;
191 	struct rte_eth_conf port_conf = {0};
192 	uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT;
193 	uint16_t txRingSize = RTE_TEST_TX_DESC_DEFAULT;
194 	int retval;
195 	uint16_t q;
196 	uint16_t queues_per_pool;
197 	uint32_t max_nb_pools;
198 	struct rte_eth_txconf txq_conf;
199 	uint64_t rss_hf_tmp;
200 
201 	/*
202 	 * The maximum pool number from dev_info is used to validate the pool
203 	 * count specified on the command line.
204 	 */
205 	retval = rte_eth_dev_info_get(port, &dev_info);
206 	if (retval != 0) {
207 		printf("Error getting device (port %u) info: %s\n",
208 				port, strerror(-retval));
209 
210 		return retval;
211 	}
212 
213 	max_nb_pools = (uint32_t)dev_info.max_vmdq_pools;
214 	/*
215 	 * Allow processing only a subset of the VMDQ pools, as specified by
216 	 * num_pools on the command line.
217 	 */
218 	if (num_pools > max_nb_pools) {
219 		printf("num_pools %d > max_nb_pools %u\n",
220 			num_pools, max_nb_pools);
221 		return -1;
222 	}
223 
224 	/*
225 	 * NIC queues are divided into PF queues and VMDQ queues.
226 	 * It is assumed here that all ports have the same configuration.
227 	 */
228 	vmdq_queue_base = dev_info.vmdq_queue_base;
229 	vmdq_pool_base  = dev_info.vmdq_pool_base;
230 	printf("vmdq queue base: %d pool base %d\n",
231 		vmdq_queue_base, vmdq_pool_base);
232 	if (vmdq_pool_base == 0) {
233 		num_vmdq_queues = dev_info.max_rx_queues;
234 		num_queues = dev_info.max_rx_queues;
235 		if (num_tcs != num_vmdq_queues / num_pools) {
236 			printf("nb_tcs %d is invalid for nb_pools %d:"
237 				" nb_tcs * nb_pools must equal %d\n",
238 				num_tcs, num_pools, num_vmdq_queues);
239 			return -1;
240 		}
241 	} else {
242 		queues_per_pool = dev_info.vmdq_queue_num /
243 				  dev_info.max_vmdq_pools;
244 		if (num_tcs > queues_per_pool) {
245 			printf("num_tcs %d > num of queues per pool %d\n",
246 				num_tcs, queues_per_pool);
247 			return -1;
248 		}
249 		num_vmdq_queues = num_pools * queues_per_pool;
250 		num_queues = vmdq_queue_base + num_vmdq_queues;
251 		printf("Configured vmdq pool num: %u,"
252 			" each vmdq pool has %u queues\n",
253 			num_pools, queues_per_pool);
254 	}
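	/*
	 * Worked example (numbers are illustrative; actual values come from
	 * dev_info): a NIC exposing 128 VMDQ queues over 32 pools gives
	 * queues_per_pool = 128 / 32 = 4, so with --nb-pools 32 we get
	 * num_vmdq_queues = 32 * 4 = 128 and num_queues = vmdq_queue_base + 128.
	 */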
255 
256 	if (!rte_eth_dev_is_valid_port(port))
257 		return -1;
258 
259 	retval = get_eth_conf(&port_conf);
260 	if (retval < 0)
261 		return retval;
262 
263 	retval = rte_eth_dev_info_get(port, &dev_info);
264 	if (retval != 0) {
265 		printf("Error getting device (port %u) info: %s\n",
266 				port, strerror(-retval));
267 
268 		return retval;
269 	}
270 
271 	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
272 		port_conf.txmode.offloads |=
273 			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
274 
275 	rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
276 	port_conf.rx_adv_conf.rss_conf.rss_hf &=
277 		dev_info.flow_type_rss_offloads;
278 	if (port_conf.rx_adv_conf.rss_conf.rss_hf != rss_hf_tmp) {
279 		printf("Port %u modified RSS hash function based on hardware support,"
280 			" requested:%#"PRIx64" configured:%#"PRIx64"\n",
281 			port,
282 			rss_hf_tmp,
283 			port_conf.rx_adv_conf.rss_conf.rss_hf);
284 	}
285 
286 	/*
287 	 * In this example all queues, including the PF queues, are set up.
288 	 * This is because VMDQ queues don't always start from zero, and the
289 	 * PMD layer doesn't support selectively initialising only some of the
290 	 * rx/tx queues.
291 	 */
292 	retval = rte_eth_dev_configure(port, num_queues, num_queues, &port_conf);
293 	if (retval != 0)
294 		return retval;
295 
296 	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rxRingSize,
297 				&txRingSize);
298 	if (retval != 0)
299 		return retval;
300 	if (RTE_MAX(rxRingSize, txRingSize) >
301 	    RTE_MAX(RTE_TEST_RX_DESC_DEFAULT, RTE_TEST_TX_DESC_DEFAULT)) {
302 		printf("Mbuf pool has an insufficient size for port %u.\n",
303 			port);
304 		return -1;
305 	}
306 
307 	for (q = 0; q < num_queues; q++) {
308 		retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
309 					rte_eth_dev_socket_id(port),
310 					NULL,
311 					mbuf_pool);
312 		if (retval < 0) {
313 			printf("initialize rx queue %d failed\n", q);
314 			return retval;
315 		}
316 	}
317 
318 	txq_conf = dev_info.default_txconf;
319 	txq_conf.offloads = port_conf.txmode.offloads;
320 	for (q = 0; q < num_queues; q++) {
321 		retval = rte_eth_tx_queue_setup(port, q, txRingSize,
322 					rte_eth_dev_socket_id(port),
323 					&txq_conf);
324 		if (retval < 0) {
325 			printf("initialize tx queue %d failed\n", q);
326 			return retval;
327 		}
328 	}
329 
330 	retval  = rte_eth_dev_start(port);
331 	if (retval < 0) {
332 		printf("port %d start failed\n", port);
333 		return retval;
334 	}
335 
336 	retval = rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
337 	if (retval < 0) {
338 		printf("port %d MAC address get failed: %s\n", port,
339 		       rte_strerror(-retval));
340 		return retval;
341 	}
342 	printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
343 			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
344 			(unsigned)port,
345 			vmdq_ports_eth_addr[port].addr_bytes[0],
346 			vmdq_ports_eth_addr[port].addr_bytes[1],
347 			vmdq_ports_eth_addr[port].addr_bytes[2],
348 			vmdq_ports_eth_addr[port].addr_bytes[3],
349 			vmdq_ports_eth_addr[port].addr_bytes[4],
350 			vmdq_ports_eth_addr[port].addr_bytes[5]);
351 
352 	/* Set the MAC address for each pool. */
353 	for (q = 0; q < num_pools; q++) {
354 		struct rte_ether_addr mac;
355 
356 		mac = pool_addr_template;
357 		mac.addr_bytes[4] = port;
358 		mac.addr_bytes[5] = q;
359 		printf("Port %u vmdq pool %u set mac %02x:%02x:%02x:%02x:%02x:%02x\n",
360 			port, q,
361 			mac.addr_bytes[0], mac.addr_bytes[1],
362 			mac.addr_bytes[2], mac.addr_bytes[3],
363 			mac.addr_bytes[4], mac.addr_bytes[5]);
364 		retval = rte_eth_dev_mac_addr_add(port, &mac,
365 				q + vmdq_pool_base);
366 		if (retval) {
367 			printf("mac addr add failed at pool %d\n", q);
368 			return retval;
369 		}
370 	}
371 
372 	return 0;
373 }
374 
375 /* Check num_pools parameter and set it if OK */
376 static int
377 vmdq_parse_num_pools(const char *q_arg)
378 {
379 	char *end = NULL;
380 	int n;
381 
382 	/* parse number string */
383 	n = strtol(q_arg, &end, 10);
384 	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
385 		return -1;
386 	if (n != 16 && n != 32)
387 		return -1;
388 	if (n == 16)
389 		num_pools = ETH_16_POOLS;
390 	else
391 		num_pools = ETH_32_POOLS;
392 
393 	return 0;
394 }
395 
396 /* Check num_tcs parameter and set it if OK */
397 static int
398 vmdq_parse_num_tcs(const char *q_arg)
399 {
400 	char *end = NULL;
401 	int n;
402 
403 	/* parse number string */
404 	n = strtol(q_arg, &end, 10);
405 	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
406 		return -1;
407 
408 	if (n != 4 && n != 8)
409 		return -1;
410 	if (n == 4)
411 		num_tcs = ETH_4_TCS;
412 	else
413 		num_tcs = ETH_8_TCS;
414 
415 	return 0;
416 }
417 
418 static int
419 parse_portmask(const char *portmask)
420 {
421 	char *end = NULL;
422 	unsigned long pm;
423 
424 	/* parse hexadecimal string */
425 	pm = strtoul(portmask, &end, 16);
426 	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
427 		return 0;
428 
429 	/* 0 is itself an invalid mask, so returning 0 above also signals a parse
430 	 * error; a -1 return would not be caught by the unsigned caller check. */
431 
432 	return pm;
433 }
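/*
 * For example, "-p 0x3" enables ports 0 and 1, and "-p 0xc" enables ports 2
 * and 3; the mask is parsed as hexadecimal, one bit per port ID.
 */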
434 
435 /* Display usage */
436 static void
437 vmdq_usage(const char *prgname)
438 {
439 	printf("%s [EAL options] -- -p PORTMASK\n"
440 	"  --nb-pools NP: number of pools (default 32, or 16)\n"
441 	"  --nb-tcs NT: number of TCs (default 4, or 8)\n"
442 	"  --enable-rss: enable RSS (disabled by default)\n",
443 	       prgname);
444 }
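/*
 * A typical invocation (the binary name and the lcore/port numbers are
 * illustrative) would be:
 *   ./dpdk-vmdq_dcb -l 0-3 -n 4 -- -p 0x3 --nb-pools 32 --nb-tcs 4
 * which forwards between ports 0 and 1 using 32 pools of 4 traffic classes.
 */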
445 
446 /* Parse the arguments given in the command line of the application */
447 static int
448 vmdq_parse_args(int argc, char **argv)
449 {
450 	int opt;
451 	int option_index;
452 	unsigned i;
453 	const char *prgname = argv[0];
454 	static struct option long_option[] = {
455 		{"nb-pools", required_argument, NULL, 0},
456 		{"nb-tcs", required_argument, NULL, 0},
457 		{"enable-rss", 0, NULL, 0},
458 		{NULL, 0, 0, 0}
459 	};
460 
461 	/* Parse command line */
462 	while ((opt = getopt_long(argc, argv, "p:", long_option,
463 		&option_index)) != EOF) {
464 		switch (opt) {
465 		/* portmask */
466 		case 'p':
467 			enabled_port_mask = parse_portmask(optarg);
468 			if (enabled_port_mask == 0) {
469 				printf("invalid portmask\n");
470 				vmdq_usage(prgname);
471 				return -1;
472 			}
473 			break;
474 		case 0:
475 			if (!strcmp(long_option[option_index].name, "nb-pools")) {
476 				if (vmdq_parse_num_pools(optarg) == -1) {
477 					printf("invalid number of pools\n");
478 					return -1;
479 				}
480 			}
481 
482 			if (!strcmp(long_option[option_index].name, "nb-tcs")) {
483 				if (vmdq_parse_num_tcs(optarg) == -1) {
484 					printf("invalid number of tcs\n");
485 					return -1;
486 				}
487 			}
488 
489 			if (!strcmp(long_option[option_index].name, "enable-rss"))
490 				rss_enable = 1;
491 			break;
492 
493 		default:
494 			vmdq_usage(prgname);
495 			return -1;
496 		}
497 	}
498 
499 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
500 		if (enabled_port_mask & (1 << i))
501 			ports[num_ports++] = (uint16_t)i;
502 	}
503 
504 	if (num_ports < 2 || num_ports % 2) {
505 		printf("The number of enabled ports is %u,"
506 			" but it must be even and at least 2\n", num_ports);
507 		return -1;
508 	}
509 
510 	return 0;
511 }
512 
513 static void
514 update_mac_address(struct rte_mbuf *m, unsigned dst_port)
515 {
516 	struct rte_ether_hdr *eth;
517 	void *tmp;
518 
519 	eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
520 
521 	/* 02:00:00:00:00:xx */
522 	tmp = &eth->d_addr.addr_bytes[0];
523 	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);
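	/*
	 * Note: this single 64-bit store writes all six bytes of the destination
	 * MAC (02:00:00:00:00:<dst_port> on a little-endian CPU) plus the first
	 * two bytes of s_addr, which are immediately overwritten by the copy
	 * below; the constant layout assumes a little-endian host.
	 */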
524 
525 	/* src addr */
526 	rte_ether_addr_copy(&vmdq_ports_eth_addr[dst_port], &eth->s_addr);
527 }
528 
529 /* When we receive a HUP signal, print out our stats */
530 static void
531 sighup_handler(int signum)
532 {
533 	unsigned q = vmdq_queue_base;
534 
535 	for (; q < num_queues; q++) {
536 		if (q % (num_vmdq_queues / num_pools) == 0)
537 			printf("\nPool %u: ", (q - vmdq_queue_base) /
538 					      (num_vmdq_queues / num_pools));
539 		printf("%lu ", rxPackets[q]);
540 	}
541 	printf("\nFinished handling signal %d\n", signum);
542 }
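/*
 * The per-queue counters above can be dumped at any time by sending SIGHUP to
 * the running process, e.g. "kill -HUP <pid>" from another shell.
 */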
543 
544 /*
545  * Per-lcore worker that does the forwarding work, reading packets from one
546  * port of a pair and writing them to the other
547  */
548 static int
549 lcore_main(void *arg)
550 {
551 	const uintptr_t core_num = (uintptr_t)arg;
552 	const unsigned num_cores = rte_lcore_count();
553 	uint16_t startQueue, endQueue;
554 	uint16_t q, i, p;
555 	const uint16_t quot = (uint16_t)(num_vmdq_queues / num_cores);
556 	const uint16_t remainder = (uint16_t)(num_vmdq_queues % num_cores);
557 
558 
559 	if (remainder) {
560 		if (core_num < remainder) {
561 			startQueue = (uint16_t)(core_num * (quot + 1));
562 			endQueue = (uint16_t)(startQueue + quot + 1);
563 		} else {
564 			startQueue = (uint16_t)(core_num * quot + remainder);
565 			endQueue = (uint16_t)(startQueue + quot);
566 		}
567 	} else {
568 		startQueue = (uint16_t)(core_num * quot);
569 		endQueue = (uint16_t)(startQueue + quot);
570 	}
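	/*
	 * Illustrative split: with 128 VMDQ queues on 4 lcores, quot = 32 and
	 * remainder = 0, so lcore n polls queues [32n, 32n + 31] (offset by
	 * vmdq_queue_base below). If the queue count is not a multiple of the
	 * lcore count, the first 'remainder' lcores each take one extra queue.
	 */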
571 
572 	/* VMDQ queue indexes don't always start from zero. */
573 	startQueue += vmdq_queue_base;
574 	endQueue   += vmdq_queue_base;
575 	printf("Core %u(lcore %u) reading queues %i-%i\n", (unsigned)core_num,
576 	       rte_lcore_id(), startQueue, endQueue - 1);
577 
578 	if (startQueue == endQueue) {
579 		printf("lcore %u has nothing to do\n", (unsigned)core_num);
580 		return 0;
581 	}
582 
583 	for (;;) {
584 		struct rte_mbuf *buf[MAX_PKT_BURST];
585 		const uint16_t buf_size = sizeof(buf) / sizeof(buf[0]);
586 		for (p = 0; p < num_ports; p++) {
587 			const uint8_t src = ports[p];
588 			const uint8_t dst = ports[p ^ 1]; /* 0 <-> 1, 2 <-> 3 etc */
589 
590 			if ((src == INVALID_PORT_ID) || (dst == INVALID_PORT_ID))
591 				continue;
592 
593 			for (q = startQueue; q < endQueue; q++) {
594 				const uint16_t rxCount = rte_eth_rx_burst(src,
595 					q, buf, buf_size);
596 
597 				if (unlikely(rxCount == 0))
598 					continue;
599 
600 				rxPackets[q] += rxCount;
601 
602 				for (i = 0; i < rxCount; i++)
603 					update_mac_address(buf[i], dst);
604 
605 				const uint16_t txCount = rte_eth_tx_burst(dst,
606 					q, buf, rxCount);
607 				if (txCount != rxCount) {
608 					for (i = txCount; i < rxCount; i++)
609 						rte_pktmbuf_free(buf[i]);
610 				}
611 			}
612 		}
613 	}
614 }
615 
616 /*
617  * Update the global variable num_ports and the array ports according to the
618  * number of ports in the system, and return the number of valid ports
619  */
620 static unsigned check_ports_num(unsigned nb_ports)
621 {
622 	unsigned valid_num_ports = num_ports;
623 	unsigned portid;
624 
625 	if (num_ports > nb_ports) {
626 		printf("\nSpecified port number(%u) exceeds total system port number(%u)\n",
627 			num_ports, nb_ports);
628 		num_ports = nb_ports;
629 	}
630 
631 	for (portid = 0; portid < num_ports; portid++) {
632 		if (!rte_eth_dev_is_valid_port(ports[portid])) {
633 			printf("\nSpecified port ID(%u) is not valid\n",
634 				ports[portid]);
635 			ports[portid] = INVALID_PORT_ID;
636 			valid_num_ports--;
637 		}
638 	}
639 	return valid_num_ports;
640 }
641 
642 
643 /* Main function, does initialisation and calls the per-lcore functions */
644 int
645 main(int argc, char *argv[])
646 {
647 	unsigned cores;
648 	struct rte_mempool *mbuf_pool;
649 	unsigned lcore_id;
650 	uintptr_t i;
651 	int ret;
652 	unsigned nb_ports, valid_num_ports;
653 	uint16_t portid;
654 
655 	signal(SIGHUP, sighup_handler);
656 
657 	/* init EAL */
658 	ret = rte_eal_init(argc, argv);
659 	if (ret < 0)
660 		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
661 	argc -= ret;
662 	argv += ret;
663 
664 	/* parse app arguments */
665 	ret = vmdq_parse_args(argc, argv);
666 	if (ret < 0)
667 		rte_exit(EXIT_FAILURE, "Invalid VMDQ argument\n");
668 
669 	cores = rte_lcore_count();
670 	if ((cores & (cores - 1)) != 0 || cores > RTE_MAX_LCORE) {
671 		rte_exit(EXIT_FAILURE, "This program can only run on a"
672 				" power-of-two number of lcores (1-%d)\n\n", RTE_MAX_LCORE);
673 	}
674 
675 	nb_ports = rte_eth_dev_count_avail();
676 
677 	/*
678 	 * Update the global variable num_ports and the global array ports, and
679 	 * compute valid_num_ports from the number of ports in the system
680 	 */
681 	valid_num_ports = check_ports_num(nb_ports);
682 
683 	if (valid_num_ports < 2 || valid_num_ports % 2) {
684 		printf("Current valid ports number is %u\n", valid_num_ports);
685 		rte_exit(EXIT_FAILURE, "Error: the number of valid ports must be even and at least 2\n");
686 	}
687 
688 	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
689 		NUM_MBUFS_PER_PORT * nb_ports, MBUF_CACHE_SIZE,
690 		0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
691 	if (mbuf_pool == NULL)
692 		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
693 
694 	/* initialize all ports */
695 	RTE_ETH_FOREACH_DEV(portid) {
696 		/* skip ports that are not enabled */
697 		if ((enabled_port_mask & (1 << portid)) == 0) {
698 			printf("\nSkipping disabled port %d\n", portid);
699 			continue;
700 		}
701 		if (port_init(portid, mbuf_pool) != 0)
702 			rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n");
703 	}
704 
705 	/* call lcore_main() on every slave lcore */
706 	i = 0;
707 	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
708 		rte_eal_remote_launch(lcore_main, (void*)i++, lcore_id);
709 	}
710 	/* call on master too */
711 	(void) lcore_main((void*)i);
712 
713 	return 0;
714 }
715