xref: /dpdk/examples/vmdq_dcb/main.c (revision b53d106d34b5c638f5a2cbdfee0da5bd42d4383f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4 
5 #include <stdint.h>
6 #include <sys/queue.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <stdio.h>
10 #include <assert.h>
11 #include <errno.h>
12 #include <signal.h>
13 #include <stdarg.h>
14 #include <inttypes.h>
15 #include <getopt.h>
16 
17 #include <rte_common.h>
18 #include <rte_log.h>
19 #include <rte_memory.h>
20 #include <rte_memcpy.h>
21 #include <rte_eal.h>
22 #include <rte_launch.h>
23 #include <rte_cycles.h>
24 #include <rte_prefetch.h>
25 #include <rte_lcore.h>
26 #include <rte_per_lcore.h>
27 #include <rte_branch_prediction.h>
28 #include <rte_interrupts.h>
29 #include <rte_random.h>
30 #include <rte_debug.h>
31 #include <rte_ether.h>
32 #include <rte_ethdev.h>
33 #include <rte_mempool.h>
34 #include <rte_mbuf.h>
35 
/* basic constants used in application */
#define MAX_QUEUES 1024

/*
 * Configurable number of RX/TX ring descriptors
 * (defined before NUM_MBUFS_PER_PORT, which expands them)
 */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 1024

/*
 * 1024 queues require to meet the needs of a large number of vmdq_pools.
 * (RX/TX_queue_nb * RX/TX_ring_descriptors_nb) per port.
 */
#define NUM_MBUFS_PER_PORT (MAX_QUEUES * RTE_MAX(RTE_TEST_RX_DESC_DEFAULT, \
						RTE_TEST_TX_DESC_DEFAULT))
/* per-lcore cache size used when creating the mbuf mempool */
#define MBUF_CACHE_SIZE 64

/* maximum number of packets handled in one rx/tx burst */
#define MAX_PKT_BURST 32

/* sentinel used in ports[] to mark a port id that failed validation */
#define INVALID_PORT_ID 0xFF
55 
/* mask of enabled ports (set from the -p command-line option) */
static uint32_t enabled_port_mask;
/* port ids of the enabled ports, filled in by vmdq_parse_args() */
static uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports;

/* number of pools (if user does not specify any, 32 by default) */
static enum rte_eth_nb_pools num_pools = RTE_ETH_32_POOLS;
/* number of DCB traffic classes, 4 by default (--nb-tcs can select 8) */
static enum rte_eth_nb_tcs   num_tcs   = RTE_ETH_4_TCS;
/* total queue count and VMDQ-only queue count, computed in port_init() */
static uint16_t num_queues, num_vmdq_queues;
/* first pool id / queue id owned by VMDQ, taken from device info */
static uint16_t vmdq_pool_base, vmdq_queue_base;
/* non-zero when --enable-rss was given on the command line */
static uint8_t rss_enable;
67 
/* Empty vmdq+dcb configuration structure. Filled in programmatically. 8< */
/*
 * Template copied by get_eth_conf(); the rx_adv_conf / tx_adv_conf sections
 * below are placeholders that get_eth_conf() overwrites with values derived
 * from num_pools / num_tcs.
 */
static const struct rte_eth_conf vmdq_dcb_conf_default = {
	.rxmode = {
		.mq_mode        = RTE_ETH_MQ_RX_VMDQ_DCB,
		.split_hdr_size = 0,
	},
	.txmode = {
		.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB,
	},
	/*
	 * should be overridden separately in code with
	 * appropriate values
	 */
	.rx_adv_conf = {
		.vmdq_dcb_conf = {
			.nb_queue_pools = RTE_ETH_32_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
			.dcb_tc = {0},
		},
		.dcb_rx_conf = {
				.nb_tcs = RTE_ETH_4_TCS,
				/** Traffic class each UP mapped to. */
				.dcb_tc = {0},
		},
		.vmdq_rx_conf = {
			.nb_queue_pools = RTE_ETH_32_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
		},
	},
	.tx_adv_conf = {
		.vmdq_dcb_tx_conf = {
			.nb_queue_pools = RTE_ETH_32_POOLS,
			.dcb_tc = {0},
		},
	},
};
/* >8 End of empty vmdq+dcb configuration structure. */
111 
/* array used for printing out statistics; indexed by rx queue id,
 * incremented by the forwarding lcores and read from sighup_handler() */
volatile unsigned long rxPackets[MAX_QUEUES] = {0};

/* Dividing up the possible user priority values. 8< */
/* one VLAN tag per possible pool; pool i filters vlan_tags[i] */
const uint16_t vlan_tags[] = {
	0,  1,  2,  3,  4,  5,  6,  7,
	8,  9, 10, 11,	12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};

const uint16_t num_vlans = RTE_DIM(vlan_tags);
/* pool mac addr template, pool mac addr is like: 52 54 00 12 port# pool# */
static struct rte_ether_addr pool_addr_template = {
	.addr_bytes = {0x52, 0x54, 0x00, 0x12, 0x00, 0x00}
};

/* ethernet addresses of ports, filled in by port_init() and used as the
 * source MAC when forwarding (see update_mac_address()) */
static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
131 
132 /* Builds up the correct configuration for vmdq+dcb based on the vlan tags array
133  * given above, and the number of traffic classes available for use. */
134 static inline int
135 get_eth_conf(struct rte_eth_conf *eth_conf)
136 {
137 	struct rte_eth_vmdq_dcb_conf conf;
138 	struct rte_eth_vmdq_rx_conf  vmdq_conf;
139 	struct rte_eth_dcb_rx_conf   dcb_conf;
140 	struct rte_eth_vmdq_dcb_tx_conf tx_conf;
141 	uint8_t i;
142 
143 	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
144 	vmdq_conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
145 	tx_conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
146 	conf.nb_pool_maps = num_pools;
147 	vmdq_conf.nb_pool_maps = num_pools;
148 	conf.enable_default_pool = 0;
149 	vmdq_conf.enable_default_pool = 0;
150 	conf.default_pool = 0; /* set explicit value, even if not used */
151 	vmdq_conf.default_pool = 0;
152 
153 	for (i = 0; i < conf.nb_pool_maps; i++) {
154 		conf.pool_map[i].vlan_id = vlan_tags[i];
155 		vmdq_conf.pool_map[i].vlan_id = vlan_tags[i];
156 		conf.pool_map[i].pools = 1UL << i;
157 		vmdq_conf.pool_map[i].pools = 1UL << i;
158 	}
159 	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
160 		conf.dcb_tc[i] = i % num_tcs;
161 		dcb_conf.dcb_tc[i] = i % num_tcs;
162 		tx_conf.dcb_tc[i] = i % num_tcs;
163 	}
164 	dcb_conf.nb_tcs = (enum rte_eth_nb_tcs)num_tcs;
165 	(void)(rte_memcpy(eth_conf, &vmdq_dcb_conf_default, sizeof(*eth_conf)));
166 	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &conf,
167 			  sizeof(conf)));
168 	(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &dcb_conf,
169 			  sizeof(dcb_conf)));
170 	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &vmdq_conf,
171 			  sizeof(vmdq_conf)));
172 	(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &tx_conf,
173 			  sizeof(tx_conf)));
174 	if (rss_enable) {
175 		eth_conf->rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;
176 		eth_conf->rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP |
177 							RTE_ETH_RSS_UDP |
178 							RTE_ETH_RSS_TCP |
179 							RTE_ETH_RSS_SCTP;
180 	}
181 	return 0;
182 }
183 /* >8 End of dividing up the possible user priority values. */
184 
/*
 * Initialises a given port using global settings and with the rx buffers
 * coming from the mbuf_pool passed as parameter
 *
 * Also derives the global queue/pool layout (vmdq_queue_base, vmdq_pool_base,
 * num_queues, num_vmdq_queues) from the device capabilities, so it must run
 * before lcore_main() / sighup_handler() use those globals.
 * Returns 0 on success, a negative value on any failure.
 */
static inline int
port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf = {0};
	uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT;
	uint16_t txRingSize = RTE_TEST_TX_DESC_DEFAULT;
	int retval;
	uint16_t q;
	uint16_t queues_per_pool;
	uint32_t max_nb_pools;
	struct rte_eth_txconf txq_conf;
	uint64_t rss_hf_tmp;

	/*
	 * The max pool number from dev_info will be used to validate the pool
	 * number specified in cmd line
	 */
	retval = rte_eth_dev_info_get(port, &dev_info);
	if (retval != 0) {
		printf("Error during getting device (port %u) info: %s\n",
				port, strerror(-retval));

		return retval;
	}

	max_nb_pools = (uint32_t)dev_info.max_vmdq_pools;
	/*
	 * We allow to process part of VMDQ pools specified by num_pools in
	 * command line.
	 */
	if (num_pools > max_nb_pools) {
		printf("num_pools %d >max_nb_pools %d\n",
			num_pools, max_nb_pools);
		return -1;
	}

	/*
	 * NIC queues are divided into pf queues and vmdq queues.
	 * There is assumption here all ports have the same configuration!
	*/
	vmdq_queue_base = dev_info.vmdq_queue_base;
	vmdq_pool_base  = dev_info.vmdq_pool_base;
	printf("vmdq queue base: %d pool base %d\n",
		vmdq_queue_base, vmdq_pool_base);
	if (vmdq_pool_base == 0) {
		/* Whole device is VMDQ: every rx queue belongs to a pool, so
		 * num_tcs * num_pools must exactly cover max_rx_queues. */
		num_vmdq_queues = dev_info.max_rx_queues;
		num_queues = dev_info.max_rx_queues;
		if (num_tcs != num_vmdq_queues / num_pools) {
			printf("nb_tcs %d is invalid considering with"
				" nb_pools %d, nb_tcs * nb_pools should = %d\n",
				num_tcs, num_pools, num_vmdq_queues);
			return -1;
		}
	} else {
		/* PF owns queues [0, vmdq_queue_base); VMDQ queues follow. */
		queues_per_pool = dev_info.vmdq_queue_num /
				  dev_info.max_vmdq_pools;
		if (num_tcs > queues_per_pool) {
			printf("num_tcs %d > num of queues per pool %d\n",
				num_tcs, queues_per_pool);
			return -1;
		}
		num_vmdq_queues = num_pools * queues_per_pool;
		num_queues = vmdq_queue_base + num_vmdq_queues;
		printf("Configured vmdq pool num: %u,"
			" each vmdq pool has %u queues\n",
			num_pools, queues_per_pool);
	}

	if (!rte_eth_dev_is_valid_port(port))
		return -1;

	/* Build the VMDQ+DCB configuration from the global settings. */
	retval = get_eth_conf(&port_conf);
	if (retval < 0)
		return retval;

	/* Re-read device info: needed below for tx offload capabilities,
	 * RSS offload mask and the default tx queue configuration. */
	retval = rte_eth_dev_info_get(port, &dev_info);
	if (retval != 0) {
		printf("Error during getting device (port %u) info: %s\n",
				port, strerror(-retval));

		return retval;
	}

	/* Enable fast mbuf free on tx when the device supports it. */
	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

	/* Trim the requested RSS hash functions to what the device offers
	 * and warn if anything was dropped. */
	rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
	port_conf.rx_adv_conf.rss_conf.rss_hf &=
		dev_info.flow_type_rss_offloads;
	if (port_conf.rx_adv_conf.rss_conf.rss_hf != rss_hf_tmp) {
		printf("Port %u modified RSS hash function based on hardware support,"
			"requested:%#"PRIx64" configured:%#"PRIx64"\n",
			port,
			rss_hf_tmp,
			port_conf.rx_adv_conf.rss_conf.rss_hf);
	}

	/*
	 * Though in this example, all queues including pf queues are setup.
	 * This is because VMDQ queues doesn't always start from zero, and the
	 * PMD layer doesn't support selectively initialising part of rx/tx
	 * queues.
	 */
	retval = rte_eth_dev_configure(port, num_queues, num_queues, &port_conf);
	if (retval != 0)
		return retval;

	/* The device may round the descriptor counts; make sure the adjusted
	 * values still fit the mbuf pool sized from the compile-time defaults. */
	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rxRingSize,
				&txRingSize);
	if (retval != 0)
		return retval;
	if (RTE_MAX(rxRingSize, txRingSize) >
	    RTE_MAX(RTE_TEST_RX_DESC_DEFAULT, RTE_TEST_TX_DESC_DEFAULT)) {
		printf("Mbuf pool has an insufficient size for port %u.\n",
			port);
		return -1;
	}

	for (q = 0; q < num_queues; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
					rte_eth_dev_socket_id(port),
					NULL,
					mbuf_pool);
		if (retval < 0) {
			printf("initialize rx queue %d failed\n", q);
			return retval;
		}
	}

	/* Use the driver's default tx config plus the offloads chosen above. */
	txq_conf = dev_info.default_txconf;
	txq_conf.offloads = port_conf.txmode.offloads;
	for (q = 0; q < num_queues; q++) {
		retval = rte_eth_tx_queue_setup(port, q, txRingSize,
					rte_eth_dev_socket_id(port),
					&txq_conf);
		if (retval < 0) {
			printf("initialize tx queue %d failed\n", q);
			return retval;
		}
	}

	retval  = rte_eth_dev_start(port);
	if (retval < 0) {
		printf("port %d start failed\n", port);
		return retval;
	}

	/* Cache the port MAC; it becomes the source address when forwarding. */
	retval = rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	if (retval < 0) {
		printf("port %d MAC address get failed: %s\n", port,
		       rte_strerror(-retval));
		return retval;
	}
	printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			(unsigned)port,
			RTE_ETHER_ADDR_BYTES(&vmdq_ports_eth_addr[port]));

	/* Set mac for each pool. 8< */
	/* Per-pool MAC follows the 52:54:00:12:<port>:<pool> template, so
	 * traffic addressed to a pool's MAC lands in that pool's queues. */
	for (q = 0; q < num_pools; q++) {
		struct rte_ether_addr mac;

		mac = pool_addr_template;
		mac.addr_bytes[4] = port;
		mac.addr_bytes[5] = q;
		printf("Port %u vmdq pool %u set mac " RTE_ETHER_ADDR_PRT_FMT "\n",
			port, q, RTE_ETHER_ADDR_BYTES(&mac));
		retval = rte_eth_dev_mac_addr_add(port, &mac,
				q + vmdq_pool_base);
		if (retval) {
			printf("mac addr add failed at pool %d\n", q);
			return retval;
		}
	}
	/* >8 End of set mac for each pool. */

	return 0;
}
369 
370 /* Check num_pools parameter and set it if OK*/
371 static int
372 vmdq_parse_num_pools(const char *q_arg)
373 {
374 	char *end = NULL;
375 	int n;
376 
377 	/* parse number string */
378 	n = strtol(q_arg, &end, 10);
379 	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
380 		return -1;
381 	if (n != 16 && n != 32)
382 		return -1;
383 	if (n == 16)
384 		num_pools = RTE_ETH_16_POOLS;
385 	else
386 		num_pools = RTE_ETH_32_POOLS;
387 
388 	return 0;
389 }
390 
391 /* Check num_tcs parameter and set it if OK*/
392 static int
393 vmdq_parse_num_tcs(const char *q_arg)
394 {
395 	char *end = NULL;
396 	int n;
397 
398 	/* parse number string */
399 	n = strtol(q_arg, &end, 10);
400 	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
401 		return -1;
402 
403 	if (n != 4 && n != 8)
404 		return -1;
405 	if (n == 4)
406 		num_tcs = RTE_ETH_4_TCS;
407 	else
408 		num_tcs = RTE_ETH_8_TCS;
409 
410 	return 0;
411 }
412 
/*
 * Parse the hexadecimal port mask given on the command line.
 *
 * Returns the mask on success, or 0 when the string is empty, contains
 * non-hex characters, or does not fit in the 32-bit enabled_port_mask
 * (callers treat 0 as "invalid portmask").
 */
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	errno = 0;
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	/* reject values that overflowed or exceed the 32-bit mask width */
	if (errno == ERANGE || pm > UINT32_MAX)
		return 0;

	return pm;
}
426 
/* Display usage
 *
 * Prints the application-specific (post "--") command-line options.
 * Fixes the previously unbalanced ']' after PORTMASK in the synopsis line.
 */
static void
vmdq_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK\n"
	"  --nb-pools NP: number of pools (32 default, 16)\n"
	"  --nb-tcs NP: number of TCs (4 default, 8)\n"
	"  --enable-rss: enable RSS (disabled by default)\n",
	       prgname);
}
437 
/*  Parse the argument (num_pools) given in the command line of the application
 *
 * Recognises -p PORTMASK plus the long options --nb-pools, --nb-tcs and
 * --enable-rss, filling the corresponding globals. Also populates ports[]
 * and num_ports from the port mask. Returns 0 on success, -1 on any
 * invalid option or an unusable port count.
 */
static int
vmdq_parse_args(int argc, char **argv)
{
	int opt;
	int option_index;
	unsigned i;
	const char *prgname = argv[0];
	static struct option long_option[] = {
		{"nb-pools", required_argument, NULL, 0},
		{"nb-tcs", required_argument, NULL, 0},
		{"enable-rss", 0, NULL, 0},
		{NULL, 0, 0, 0}
	};

	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:", long_option,
		&option_index)) != EOF) {
		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				vmdq_usage(prgname);
				return -1;
			}
			break;
		/* getopt_long returns 0 for any long option; dispatch on name */
		case 0:
			if (!strcmp(long_option[option_index].name, "nb-pools")) {
				if (vmdq_parse_num_pools(optarg) == -1) {
					printf("invalid number of pools\n");
					return -1;
				}
			}

			if (!strcmp(long_option[option_index].name, "nb-tcs")) {
				if (vmdq_parse_num_tcs(optarg) == -1) {
					printf("invalid number of tcs\n");
					return -1;
				}
			}

			if (!strcmp(long_option[option_index].name, "enable-rss"))
				rss_enable = 1;
			break;

		default:
			vmdq_usage(prgname);
			return -1;
		}
	}

	/* expand the port mask into the ports[] array */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (enabled_port_mask & (1 << i))
			ports[num_ports++] = (uint8_t)i;
	}

	/* ports are forwarded in pairs (p <-> p^1), so an even count >= 2
	 * is required */
	if (num_ports < 2 || num_ports % 2) {
		printf("Current enabled port number is %u,"
			" but it should be even and at least 2\n", num_ports);
		return -1;
	}

	return 0;
}
504 
/*
 * Rewrite the ethernet header of a packet about to be forwarded:
 * destination becomes the locally-administered address 02:00:00:00:00:<port>,
 * source becomes the MAC of the transmitting port.
 */
static void
update_mac_address(struct rte_mbuf *m, unsigned dst_port)
{
	struct rte_ether_hdr *eth;
	void *tmp;

	eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	/* 02:00:00:00:00:xx */
	tmp = &eth->dst_addr.addr_bytes[0];
	/*
	 * NOTE(review): this 8-byte store writes 2 bytes past the 6-byte
	 * dst_addr into src_addr and assumes a little-endian layout for the
	 * byte order shown above; src_addr is fully overwritten just below,
	 * so the spill is harmless on LE targets — confirm if porting to BE.
	 */
	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);

	/* src addr */
	rte_ether_addr_copy(&vmdq_ports_eth_addr[dst_port], &eth->src_addr);
}
520 
/* When we receive a HUP signal, print out our stats
 *
 * Walks the VMDQ queue range and prints the rx packet counter of each
 * queue, grouped by pool.
 * NOTE(review): printf is not async-signal-safe; fine for this example's
 * manual diagnostics but not a pattern for production signal handlers.
 */
static void
sighup_handler(int signum)
{
	unsigned q = vmdq_queue_base;

	for (; q < num_queues; q++) {
		/* first queue of each pool: start a new "Pool N:" line */
		if (q % (num_vmdq_queues / num_pools) == 0)
			printf("\nPool %u: ", (q - vmdq_queue_base) /
					      (num_vmdq_queues / num_pools));
		printf("%lu ", rxPackets[q]);
	}
	printf("\nFinished handling signal %d\n", signum);
}
535 
/*
 * Main thread that does the work, reading from INPUT_PORT
 * and writing to OUTPUT_PORT
 *
 * arg is the 0-based worker index (not the lcore id). Each worker owns a
 * contiguous slice of the VMDQ queue range and forwards packets between
 * paired ports forever (never returns unless its slice is empty).
 */
static int
lcore_main(void *arg)
{
	const uintptr_t core_num = (uintptr_t)arg;
	const unsigned num_cores = rte_lcore_count();
	uint16_t startQueue, endQueue;
	uint16_t q, i, p;
	/* split num_vmdq_queues across cores; the first `remainder` cores
	 * each take one extra queue */
	const uint16_t quot = (uint16_t)(num_vmdq_queues / num_cores);
	const uint16_t remainder = (uint16_t)(num_vmdq_queues % num_cores);


	if (remainder) {
		if (core_num < remainder) {
			startQueue = (uint16_t)(core_num * (quot + 1));
			endQueue = (uint16_t)(startQueue + quot + 1);
		} else {
			startQueue = (uint16_t)(core_num * quot + remainder);
			endQueue = (uint16_t)(startQueue + quot);
		}
	} else {
		startQueue = (uint16_t)(core_num * quot);
		endQueue = (uint16_t)(startQueue + quot);
	}

	/* vmdq queue idx doesn't always start from zero.*/
	startQueue += vmdq_queue_base;
	endQueue   += vmdq_queue_base;
	printf("Core %u(lcore %u) reading queues %i-%i\n", (unsigned)core_num,
	       rte_lcore_id(), startQueue, endQueue - 1);

	/* more cores than queues: this worker got an empty slice */
	if (startQueue == endQueue) {
		printf("lcore %u has nothing to do\n", (unsigned)core_num);
		return 0;
	}

	for (;;) {
		struct rte_mbuf *buf[MAX_PKT_BURST];
		const uint16_t buf_size = RTE_DIM(buf);
		for (p = 0; p < num_ports; p++) {
			const uint8_t src = ports[p];
			const uint8_t dst = ports[p ^ 1]; /* 0 <-> 1, 2 <-> 3 etc */

			/* skip pairs containing a port that failed validation */
			if ((src == INVALID_PORT_ID) || (dst == INVALID_PORT_ID))
				continue;

			for (q = startQueue; q < endQueue; q++) {
				const uint16_t rxCount = rte_eth_rx_burst(src,
					q, buf, buf_size);

				if (unlikely(rxCount == 0))
					continue;

				rxPackets[q] += rxCount;

				for (i = 0; i < rxCount; i++)
					update_mac_address(buf[i], dst);

				const uint16_t txCount = rte_eth_tx_burst(dst,
					q, buf, rxCount);
				/* free any packets the tx queue refused */
				if (txCount != rxCount) {
					for (i = txCount; i < rxCount; i++)
						rte_pktmbuf_free(buf[i]);
				}
			}
		}
	}
}
607 
608 /*
609  * Update the global var NUM_PORTS and array PORTS according to system ports number
610  * and return valid ports number
611  */
612 static unsigned check_ports_num(unsigned nb_ports)
613 {
614 	unsigned valid_num_ports = num_ports;
615 	unsigned portid;
616 
617 	if (num_ports > nb_ports) {
618 		printf("\nSpecified port number(%u) exceeds total system port number(%u)\n",
619 			num_ports, nb_ports);
620 		num_ports = nb_ports;
621 	}
622 
623 	for (portid = 0; portid < num_ports; portid++) {
624 		if (!rte_eth_dev_is_valid_port(ports[portid])) {
625 			printf("\nSpecified port ID(%u) is not valid\n",
626 				ports[portid]);
627 			ports[portid] = INVALID_PORT_ID;
628 			valid_num_ports--;
629 		}
630 	}
631 	return valid_num_ports;
632 }
633 
634 
/* Main function, does initialisation and calls the per-lcore functions
 *
 * Flow: install SIGHUP stats handler -> EAL init -> app arg parsing ->
 * core/port validation -> mempool creation -> per-port init -> launch
 * lcore_main on every worker lcore and on the main lcore.
 */
int
main(int argc, char *argv[])
{
	unsigned cores;
	struct rte_mempool *mbuf_pool;
	unsigned lcore_id;
	uintptr_t i;
	int ret;
	unsigned nb_ports, valid_num_ports;
	uint16_t portid;

	/* SIGHUP dumps per-queue rx statistics (see sighup_handler) */
	signal(SIGHUP, sighup_handler);

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	argc -= ret;
	argv += ret;

	/* parse app arguments */
	ret = vmdq_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid VMDQ argument\n");

	cores = rte_lcore_count();
	/*
	 * NOTE(review): (cores & (cores - 1)) != 0 enforces a power-of-two
	 * core count, while the error message says "even" — confirm which
	 * constraint is intended.
	 */
	if ((cores & (cores - 1)) != 0 || cores > RTE_MAX_LCORE) {
		rte_exit(EXIT_FAILURE,"This program can only run on an even"
				" number of cores(1-%d)\n\n", RTE_MAX_LCORE);
	}

	nb_ports = rte_eth_dev_count_avail();

	/*
	 * Update the global var NUM_PORTS and global array PORTS
	 * and get value of var VALID_NUM_PORTS according to system ports number
	 */
	valid_num_ports = check_ports_num(nb_ports);

	/* forwarding happens in pairs, so an even count >= 2 is required */
	if (valid_num_ports < 2 || valid_num_ports % 2) {
		printf("Current valid ports number is %u\n", valid_num_ports);
		rte_exit(EXIT_FAILURE, "Error with valid ports number is not even or less than 2\n");
	}

	/* one shared mempool sized for the worst-case descriptor usage of
	 * every port (see NUM_MBUFS_PER_PORT) */
	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
		NUM_MBUFS_PER_PORT * nb_ports, MBUF_CACHE_SIZE,
		0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(portid) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			continue;
		}
		if (port_init(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n");
	}

	/* call lcore_main() on every worker lcore; each gets a distinct
	 * 0-based worker index passed through the void* argument */
	i = 0;
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		rte_eal_remote_launch(lcore_main, (void*)i++, lcore_id);
	}
	/* call on main too */
	(void) lcore_main((void*)i);

	/* clean up the EAL */
	rte_eal_cleanup();

	return 0;
}
710