/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdint.h>
#include <sys/queue.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdarg.h>
#include <inttypes.h>
#include <getopt.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>

/* basic constants used in application */
#define MAX_QUEUES 1024
/*
 * 1024 queues are required to meet the needs of a large number of vmdq pools.
 * The mbuf count is (RX/TX_queue_nb * RX/TX_ring_descriptors_nb) per port.
 */
#define NUM_MBUFS_PER_PORT (MAX_QUEUES * RTE_MAX(RTE_TEST_RX_DESC_DEFAULT, \
						RTE_TEST_TX_DESC_DEFAULT))
#define MBUF_CACHE_SIZE 64

#define MAX_PKT_BURST 32

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 1024

#define INVALID_PORT_ID 0xFF

/* mask of enabled ports */
static uint32_t enabled_port_mask;
static uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports;

/* number of pools (if user does not specify any, 32 by default) */
static enum rte_eth_nb_pools num_pools = ETH_32_POOLS;
static enum rte_eth_nb_tcs   num_tcs   = ETH_4_TCS;
static uint16_t num_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
static uint8_t rss_enable;

/* empty vmdq+dcb configuration structure. Filled in programmatically */
static const struct rte_eth_conf vmdq_dcb_conf_default = {
	.rxmode = {
		.mq_mode        = ETH_MQ_RX_VMDQ_DCB,
		.split_hdr_size = 0,
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_VMDQ_DCB,
	},
	/*
	 * should be overridden separately in code with
	 * appropriate values
	 */
	.rx_adv_conf = {
		.vmdq_dcb_conf = {
			.nb_queue_pools = ETH_32_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
			.dcb_tc = {0},
		},
		.dcb_rx_conf = {
			.nb_tcs = ETH_4_TCS,
			/** Traffic class each UP is mapped to. */
			.dcb_tc = {0},
		},
		.vmdq_rx_conf = {
			.nb_queue_pools = ETH_32_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
		},
	},
	.tx_adv_conf = {
		.vmdq_dcb_tx_conf = {
			.nb_queue_pools = ETH_32_POOLS,
			.dcb_tc = {0},
		},
	},
};

/* array used for printing out statistics */
volatile unsigned long rxPackets[MAX_QUEUES] = {0};

const uint16_t vlan_tags[] = {
	0,  1,  2,  3,  4,  5,  6,  7,
	8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};

const uint16_t num_vlans = RTE_DIM(vlan_tags);
/* pool mac addr template, pool mac addr is like: 52 54 00 12 port# pool# */
static struct rte_ether_addr pool_addr_template = {
	.addr_bytes = {0x52, 0x54, 0x00, 0x12, 0x00, 0x00}
};

/* ethernet addresses of ports */
static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];

/* Builds up the correct configuration for vmdq+dcb based on the vlan tags array
 * given above, and the number of traffic classes available for use. */
static inline int
get_eth_conf(struct rte_eth_conf *eth_conf)
{
	struct rte_eth_vmdq_dcb_conf conf;
	struct rte_eth_vmdq_rx_conf  vmdq_conf;
	struct rte_eth_dcb_rx_conf   dcb_conf;
	struct rte_eth_vmdq_dcb_tx_conf tx_conf;
	uint8_t i;

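	/*
	 * Mirror the user-selected pool and TC counts into both the VMDQ+DCB
	 * and the plain VMDQ/DCB structures; the driver uses whichever one
	 * matches the configured rxmode.mq_mode.
	 */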
	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
	vmdq_conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
	tx_conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
	conf.nb_pool_maps = num_pools;
	vmdq_conf.nb_pool_maps = num_pools;
	conf.enable_default_pool = 0;
	vmdq_conf.enable_default_pool = 0;
	conf.default_pool = 0; /* set explicit value, even if not used */
	vmdq_conf.default_pool = 0;

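	/* Map each VLAN tag to exactly one pool: vlan_tags[i] -> pool i. */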
	for (i = 0; i < conf.nb_pool_maps; i++) {
		conf.pool_map[i].vlan_id = vlan_tags[i];
		vmdq_conf.pool_map[i].vlan_id = vlan_tags[i];
		conf.pool_map[i].pools = 1UL << i;
		vmdq_conf.pool_map[i].pools = 1UL << i;
	}
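	/* Spread the 8 user priorities round-robin across the configured TCs. */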
	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
		conf.dcb_tc[i] = i % num_tcs;
		dcb_conf.dcb_tc[i] = i % num_tcs;
		tx_conf.dcb_tc[i] = i % num_tcs;
	}
	dcb_conf.nb_tcs = (enum rte_eth_nb_tcs)num_tcs;
	(void)(rte_memcpy(eth_conf, &vmdq_dcb_conf_default, sizeof(*eth_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &conf,
			  sizeof(conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &dcb_conf,
			  sizeof(dcb_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &vmdq_conf,
			  sizeof(vmdq_conf)));
	(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &tx_conf,
			  sizeof(tx_conf)));
	if (rss_enable) {
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
							ETH_RSS_UDP |
							ETH_RSS_TCP |
							ETH_RSS_SCTP;
	}
	return 0;
}

/*
 * Initialises a given port using global settings and with the rx buffers
 * coming from the mbuf_pool passed as parameter
 */
static inline int
port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf = {0};
	uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT;
	uint16_t txRingSize = RTE_TEST_TX_DESC_DEFAULT;
	int retval;
	uint16_t q;
	uint16_t queues_per_pool;
	uint32_t max_nb_pools;
	struct rte_eth_txconf txq_conf;
	uint64_t rss_hf_tmp;

	/*
	 * The max pool number from dev_info will be used to validate the pool
	 * number specified in the command line.
	 */
	rte_eth_dev_info_get(port, &dev_info);
	max_nb_pools = (uint32_t)dev_info.max_vmdq_pools;
	/*
	 * We allow processing of only a subset of the VMDQ pools, as specified
	 * by num_pools on the command line.
	 */
	if (num_pools > max_nb_pools) {
		printf("num_pools %d > max_nb_pools %u\n",
			num_pools, max_nb_pools);
		return -1;
	}

	/*
	 * NIC queues are divided into pf queues and vmdq queues.
	 * The assumption here is that all ports have the same configuration!
	 */
	vmdq_queue_base = dev_info.vmdq_queue_base;
	vmdq_pool_base  = dev_info.vmdq_pool_base;
	printf("vmdq queue base: %d pool base %d\n",
		vmdq_queue_base, vmdq_pool_base);
	if (vmdq_pool_base == 0) {
		num_vmdq_queues = dev_info.max_rx_queues;
		num_queues = dev_info.max_rx_queues;
		if (num_tcs != num_vmdq_queues / num_pools) {
			printf("nb_tcs %d is invalid with nb_pools %d,"
				" nb_tcs * nb_pools should equal %d\n",
				num_tcs, num_pools, num_vmdq_queues);
			return -1;
		}
	} else {
		queues_per_pool = dev_info.vmdq_queue_num /
				  dev_info.max_vmdq_pools;
		if (num_tcs > queues_per_pool) {
			printf("num_tcs %d > num of queues per pool %d\n",
				num_tcs, queues_per_pool);
			return -1;
		}
		num_vmdq_queues = num_pools * queues_per_pool;
		num_queues = vmdq_queue_base + num_vmdq_queues;
		printf("Configured vmdq pool num: %u,"
			" each vmdq pool has %u queues\n",
			num_pools, queues_per_pool);
	}

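	/* Validate the port and build its configuration from the global settings. */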
	if (!rte_eth_dev_is_valid_port(port))
		return -1;

	retval = get_eth_conf(&port_conf);
	if (retval < 0)
		return retval;

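	/* Enable the mbuf fast-free TX offload when the device supports it. */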
	rte_eth_dev_info_get(port, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;

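	/* Restrict the requested RSS hash types to those the device supports. */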
	rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
	port_conf.rx_adv_conf.rss_conf.rss_hf &=
		dev_info.flow_type_rss_offloads;
	if (port_conf.rx_adv_conf.rss_conf.rss_hf != rss_hf_tmp) {
		printf("Port %u modified RSS hash function based on hardware support,"
			" requested:%#"PRIx64" configured:%#"PRIx64"\n",
			port,
			rss_hf_tmp,
			port_conf.rx_adv_conf.rss_conf.rss_hf);
	}

	/*
	 * In this example all queues, including the pf queues, are set up.
	 * This is because VMDQ queues don't always start from zero, and the
	 * PMD layer doesn't support selectively initialising only part of the
	 * rx/tx queues.
	 */
	retval = rte_eth_dev_configure(port, num_queues, num_queues, &port_conf);
	if (retval != 0)
		return retval;

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rxRingSize,
				&txRingSize);
	if (retval != 0)
		return retval;
	if (RTE_MAX(rxRingSize, txRingSize) >
	    RTE_MAX(RTE_TEST_RX_DESC_DEFAULT, RTE_TEST_TX_DESC_DEFAULT)) {
		printf("Mbuf pool has an insufficient size for port %u.\n",
			port);
		return -1;
	}

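	/* Set up every RX queue, all drawing buffers from the shared mbuf pool. */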
	for (q = 0; q < num_queues; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
					rte_eth_dev_socket_id(port),
					NULL,
					mbuf_pool);
		if (retval < 0) {
			printf("initialize rx queue %d failed\n", q);
			return retval;
		}
	}

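	/* TX queues inherit the device default config plus the offloads enabled above. */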
	txq_conf = dev_info.default_txconf;
	txq_conf.offloads = port_conf.txmode.offloads;
	for (q = 0; q < num_queues; q++) {
		retval = rte_eth_tx_queue_setup(port, q, txRingSize,
					rte_eth_dev_socket_id(port),
					&txq_conf);
		if (retval < 0) {
			printf("initialize tx queue %d failed\n", q);
			return retval;
		}
	}

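	/* All queues are configured; start the port. */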
	retval = rte_eth_dev_start(port);
	if (retval < 0) {
		printf("port %d start failed\n", port);
		return retval;
	}

	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			(unsigned)port,
			vmdq_ports_eth_addr[port].addr_bytes[0],
			vmdq_ports_eth_addr[port].addr_bytes[1],
			vmdq_ports_eth_addr[port].addr_bytes[2],
			vmdq_ports_eth_addr[port].addr_bytes[3],
			vmdq_ports_eth_addr[port].addr_bytes[4],
			vmdq_ports_eth_addr[port].addr_bytes[5]);

	/* Set mac for each pool. */
	for (q = 0; q < num_pools; q++) {
		struct rte_ether_addr mac;

		mac = pool_addr_template;
		mac.addr_bytes[4] = port;
		mac.addr_bytes[5] = q;
		printf("Port %u vmdq pool %u set mac %02x:%02x:%02x:%02x:%02x:%02x\n",
			port, q,
			mac.addr_bytes[0], mac.addr_bytes[1],
			mac.addr_bytes[2], mac.addr_bytes[3],
			mac.addr_bytes[4], mac.addr_bytes[5]);
		retval = rte_eth_dev_mac_addr_add(port, &mac,
				q + vmdq_pool_base);
		if (retval) {
			printf("mac addr add failed at pool %d\n", q);
			return retval;
		}
	}

	return 0;
}

/* Check num_pools parameter and set it if OK */
static int
vmdq_parse_num_pools(const char *q_arg)
{
	char *end = NULL;
	int n;

	/* parse number string */
	n = strtol(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;
	if (n != 16 && n != 32)
		return -1;
	if (n == 16)
		num_pools = ETH_16_POOLS;
	else
		num_pools = ETH_32_POOLS;

	return 0;
}

/* Check num_tcs parameter and set it if OK */
static int
vmdq_parse_num_tcs(const char *q_arg)
{
	char *end = NULL;
	int n;

	/* parse number string */
	n = strtol(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (n != 4 && n != 8)
		return -1;
	if (n == 4)
		num_tcs = ETH_4_TCS;
	else
		num_tcs = ETH_8_TCS;

	return 0;
}

static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	return pm;
}

/* Display usage */
static void
vmdq_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK\n"
	"  --nb-pools NP: number of pools (16 or 32, default 32)\n"
	"  --nb-tcs NT: number of TCs (4 or 8, default 4)\n"
	"  --enable-rss: enable RSS (disabled by default)\n",
	       prgname);
}

/* Parse the arguments given in the command line of the application */
static int
vmdq_parse_args(int argc, char **argv)
{
	int opt;
	int option_index;
	unsigned i;
	const char *prgname = argv[0];
	static struct option long_option[] = {
		{"nb-pools", required_argument, NULL, 0},
		{"nb-tcs", required_argument, NULL, 0},
		{"enable-rss", no_argument, NULL, 0},
		{NULL, 0, 0, 0}
	};

	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:", long_option,
		&option_index)) != EOF) {
		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				vmdq_usage(prgname);
				return -1;
			}
			break;
		case 0:
			if (!strcmp(long_option[option_index].name, "nb-pools")) {
				if (vmdq_parse_num_pools(optarg) == -1) {
					printf("invalid number of pools\n");
					return -1;
				}
			}

			if (!strcmp(long_option[option_index].name, "nb-tcs")) {
				if (vmdq_parse_num_tcs(optarg) == -1) {
					printf("invalid number of tcs\n");
					return -1;
				}
			}

			if (!strcmp(long_option[option_index].name, "enable-rss"))
				rss_enable = 1;
			break;

		default:
			vmdq_usage(prgname);
			return -1;
		}
	}

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (enabled_port_mask & (1 << i))
			ports[num_ports++] = (uint16_t)i;
	}

	if (num_ports < 2 || num_ports % 2) {
		printf("Current enabled port number is %u,"
			" but it should be even and at least 2\n", num_ports);
		return -1;
	}

	return 0;
}

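/*
 * Write 02:00:00:00:00:<dst_port> into the destination MAC and the output
 * port's own address into the source MAC before forwarding.
 */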
static void
update_mac_address(struct rte_mbuf *m, unsigned dst_port)
{
	struct rte_ether_hdr *eth;
	void *tmp;

	eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	/* 02:00:00:00:00:xx */
	tmp = &eth->d_addr.addr_bytes[0];
	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);

	/* src addr */
	rte_ether_addr_copy(&vmdq_ports_eth_addr[dst_port], &eth->s_addr);
}

/* When we receive a HUP signal, print out our stats */
static void
sighup_handler(int signum)
{
	unsigned q = vmdq_queue_base;

	for (; q < num_queues; q++) {
		if (q % (num_vmdq_queues / num_pools) == 0)
			printf("\nPool %u: ", (q - vmdq_queue_base) /
					      (num_vmdq_queues / num_pools));
		printf("%lu ", rxPackets[q]);
	}
	printf("\nFinished handling signal %d\n", signum);
}

/*
 * Main per-lcore worker: reads from its share of the VMDQ RX queues on each
 * enabled port and forwards the packets to the paired output port.
 */
static int
lcore_main(void *arg)
{
	const uintptr_t core_num = (uintptr_t)arg;
	const unsigned num_cores = rte_lcore_count();
	uint16_t startQueue, endQueue;
	uint16_t q, i, p;
	const uint16_t quot = (uint16_t)(num_vmdq_queues / num_cores);
	const uint16_t remainder = (uint16_t)(num_vmdq_queues % num_cores);

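	/*
	 * Divide the VMDQ queues evenly across the cores; the first
	 * 'remainder' cores each take one extra queue.
	 */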
	if (remainder) {
		if (core_num < remainder) {
			startQueue = (uint16_t)(core_num * (quot + 1));
			endQueue = (uint16_t)(startQueue + quot + 1);
		} else {
			startQueue = (uint16_t)(core_num * quot + remainder);
			endQueue = (uint16_t)(startQueue + quot);
		}
	} else {
		startQueue = (uint16_t)(core_num * quot);
		endQueue = (uint16_t)(startQueue + quot);
	}

	/* vmdq queue idx doesn't always start from zero. */
	startQueue += vmdq_queue_base;
	endQueue   += vmdq_queue_base;
	printf("Core %u (lcore %u) reading queues %i-%i\n", (unsigned)core_num,
	       rte_lcore_id(), startQueue, endQueue - 1);

	if (startQueue == endQueue) {
		printf("lcore %u has nothing to do\n", (unsigned)core_num);
		return 0;
	}

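	/*
	 * Forwarding loop: poll each assigned queue on every enabled port and
	 * forward the burst to the paired port (0 <-> 1, 2 <-> 3, ...).
	 */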
	for (;;) {
		struct rte_mbuf *buf[MAX_PKT_BURST];
		const uint16_t buf_size = sizeof(buf) / sizeof(buf[0]);
		for (p = 0; p < num_ports; p++) {
			const uint8_t src = ports[p];
			const uint8_t dst = ports[p ^ 1]; /* 0 <-> 1, 2 <-> 3 etc */

			if ((src == INVALID_PORT_ID) || (dst == INVALID_PORT_ID))
				continue;

			for (q = startQueue; q < endQueue; q++) {
				const uint16_t rxCount = rte_eth_rx_burst(src,
					q, buf, buf_size);

				if (unlikely(rxCount == 0))
					continue;

				rxPackets[q] += rxCount;

				for (i = 0; i < rxCount; i++)
					update_mac_address(buf[i], dst);

				const uint16_t txCount = rte_eth_tx_burst(dst,
					q, buf, rxCount);
				if (txCount != rxCount) {
					for (i = txCount; i < rxCount; i++)
						rte_pktmbuf_free(buf[i]);
				}
			}
		}
	}
}

/*
 * Update the global variable num_ports and the array ports[] according to the
 * number of ports in the system, and return the number of valid ports.
 */
static unsigned check_ports_num(unsigned nb_ports)
{
	unsigned valid_num_ports = num_ports;
	unsigned portid;

	if (num_ports > nb_ports) {
		printf("\nSpecified port number(%u) exceeds total system port number(%u)\n",
			num_ports, nb_ports);
		num_ports = nb_ports;
	}

	for (portid = 0; portid < num_ports; portid++) {
		if (!rte_eth_dev_is_valid_port(ports[portid])) {
			printf("\nSpecified port ID(%u) is not valid\n",
				ports[portid]);
			ports[portid] = INVALID_PORT_ID;
			valid_num_ports--;
		}
	}
	return valid_num_ports;
}


/* Main function, does initialisation and calls the per-lcore functions */
int
main(int argc, char *argv[])
{
	unsigned cores;
	struct rte_mempool *mbuf_pool;
	unsigned lcore_id;
	uintptr_t i;
	int ret;
	unsigned nb_ports, valid_num_ports;
	uint16_t portid;

	signal(SIGHUP, sighup_handler);

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	argc -= ret;
	argv += ret;

	/* parse app arguments */
	ret = vmdq_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid VMDQ argument\n");

	cores = rte_lcore_count();
	if ((cores & (cores - 1)) != 0 || cores > RTE_MAX_LCORE) {
		rte_exit(EXIT_FAILURE, "This program can only run on a"
				" power-of-two number of cores (1-%d)\n\n",
				RTE_MAX_LCORE);
	}

	nb_ports = rte_eth_dev_count_avail();

	/*
	 * Update the global variable num_ports and the global array ports[],
	 * and get the number of valid ports in the system.
	 */
	valid_num_ports = check_ports_num(nb_ports);

	if (valid_num_ports < 2 || valid_num_ports % 2) {
		printf("Current valid ports number is %u\n", valid_num_ports);
		rte_exit(EXIT_FAILURE,
			"Error: the number of valid ports must be even and at least 2\n");
	}

	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
		NUM_MBUFS_PER_PORT * nb_ports, MBUF_CACHE_SIZE,
		0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(portid) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			continue;
		}
		if (port_init(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n");
	}

	/* call lcore_main() on every slave lcore */
	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		rte_eal_remote_launch(lcore_main, (void *)i++, lcore_id);
	}
	/* call on master too */
	(void) lcore_main((void *)i);

	return 0;
}