xref: /dpdk/examples/vmdq_dcb/main.c (revision 089e5ed727a15da2729cfee9b63533dd120bd04c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4 
5 #include <stdint.h>
6 #include <sys/queue.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <stdio.h>
10 #include <assert.h>
11 #include <errno.h>
12 #include <signal.h>
13 #include <stdarg.h>
14 #include <inttypes.h>
15 #include <getopt.h>
16 
17 #include <rte_common.h>
18 #include <rte_log.h>
19 #include <rte_memory.h>
20 #include <rte_memcpy.h>
21 #include <rte_eal.h>
22 #include <rte_launch.h>
23 #include <rte_atomic.h>
24 #include <rte_cycles.h>
25 #include <rte_prefetch.h>
26 #include <rte_lcore.h>
27 #include <rte_per_lcore.h>
28 #include <rte_branch_prediction.h>
29 #include <rte_interrupts.h>
30 #include <rte_random.h>
31 #include <rte_debug.h>
32 #include <rte_ether.h>
33 #include <rte_ethdev.h>
34 #include <rte_mempool.h>
35 #include <rte_mbuf.h>
36 
37 /* basic constants used in application */
38 #define MAX_QUEUES 1024
39 /*
40  * 1024 queues are required to meet the needs of a large number of VMDQ pools:
41  * each port needs (RX/TX_queue_nb * RX/TX_ring_descriptors_nb) mbufs.
42  */
43 #define NUM_MBUFS_PER_PORT (MAX_QUEUES * RTE_MAX(RTE_TEST_RX_DESC_DEFAULT, \
44 						RTE_TEST_TX_DESC_DEFAULT))
45 #define MBUF_CACHE_SIZE 64
46 
47 #define MAX_PKT_BURST 32
48 
49 /*
50  * Configurable number of RX/TX ring descriptors
51  */
52 #define RTE_TEST_RX_DESC_DEFAULT 1024
53 #define RTE_TEST_TX_DESC_DEFAULT 1024
54 
55 #define INVALID_PORT_ID 0xFF
56 
57 /* mask of enabled ports */
58 static uint32_t enabled_port_mask;
59 static uint16_t ports[RTE_MAX_ETHPORTS];
60 static unsigned num_ports;
61 
62 /* number of pools (32 by default, if the user does not specify otherwise) */
63 static enum rte_eth_nb_pools num_pools = ETH_32_POOLS;
64 static enum rte_eth_nb_tcs   num_tcs   = ETH_4_TCS;
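/* Queue/pool layout; filled in from the device capabilities in port_init() */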
65 static uint16_t num_queues, num_vmdq_queues;
66 static uint16_t vmdq_pool_base, vmdq_queue_base;
67 static uint8_t rss_enable;
68 
69 /* empty vmdq+dcb configuration structure. Filled in programmatically */
70 static const struct rte_eth_conf vmdq_dcb_conf_default = {
71 	.rxmode = {
72 		.mq_mode        = ETH_MQ_RX_VMDQ_DCB,
73 		.split_hdr_size = 0,
74 	},
75 	.txmode = {
76 		.mq_mode = ETH_MQ_TX_VMDQ_DCB,
77 	},
78 	/*
79 	 * should be overridden separately in code with
80 	 * appropriate values
81 	 */
82 	.rx_adv_conf = {
83 		.vmdq_dcb_conf = {
84 			.nb_queue_pools = ETH_32_POOLS,
85 			.enable_default_pool = 0,
86 			.default_pool = 0,
87 			.nb_pool_maps = 0,
88 			.pool_map = {{0, 0},},
89 			.dcb_tc = {0},
90 		},
91 		.dcb_rx_conf = {
92 				.nb_tcs = ETH_4_TCS,
93 				/** Traffic class each user priority is mapped to. */
94 				.dcb_tc = {0},
95 		},
96 		.vmdq_rx_conf = {
97 			.nb_queue_pools = ETH_32_POOLS,
98 			.enable_default_pool = 0,
99 			.default_pool = 0,
100 			.nb_pool_maps = 0,
101 			.pool_map = {{0, 0},},
102 		},
103 	},
104 	.tx_adv_conf = {
105 		.vmdq_dcb_tx_conf = {
106 			.nb_queue_pools = ETH_32_POOLS,
107 			.dcb_tc = {0},
108 		},
109 	},
110 };
111 
112 /* array used for printing out statistics */
113 volatile unsigned long rxPackets[MAX_QUEUES] = {0};
114 
115 const uint16_t vlan_tags[] = {
116 	0,  1,  2,  3,  4,  5,  6,  7,
117 	8,  9, 10, 11, 12, 13, 14, 15,
118 	16, 17, 18, 19, 20, 21, 22, 23,
119 	24, 25, 26, 27, 28, 29, 30, 31
120 };
121 
122 const uint16_t num_vlans = RTE_DIM(vlan_tags);
123 /* pool MAC address template; each pool's MAC is 52:54:00:12:<port#>:<pool#> */
124 static struct rte_ether_addr pool_addr_template = {
125 	.addr_bytes = {0x52, 0x54, 0x00, 0x12, 0x00, 0x00}
126 };
127 
128 /* ethernet addresses of ports */
129 static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
130 
131 /* Builds up the correct configuration for vmdq+dcb based on the vlan tags array
132  * given above, and the number of traffic classes available for use. */
133 static inline int
134 get_eth_conf(struct rte_eth_conf *eth_conf)
135 {
136 	struct rte_eth_vmdq_dcb_conf conf;
137 	struct rte_eth_vmdq_rx_conf  vmdq_conf;
138 	struct rte_eth_dcb_rx_conf   dcb_conf;
139 	struct rte_eth_vmdq_dcb_tx_conf tx_conf;
140 	uint8_t i;
141 
142 	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
143 	vmdq_conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
144 	tx_conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
145 	conf.nb_pool_maps = num_pools;
146 	vmdq_conf.nb_pool_maps = num_pools;
147 	conf.enable_default_pool = 0;
148 	vmdq_conf.enable_default_pool = 0;
149 	conf.default_pool = 0; /* set explicit value, even if not used */
150 	vmdq_conf.default_pool = 0;
151 
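	/*
	 * Map each VLAN tag to its own pool: packets tagged with vlan_tags[i]
	 * are steered to pool i (bit i set in the pools bitmask).
	 */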
152 	for (i = 0; i < conf.nb_pool_maps; i++) {
153 		conf.pool_map[i].vlan_id = vlan_tags[i];
154 		vmdq_conf.pool_map[i].vlan_id = vlan_tags[i];
155 		conf.pool_map[i].pools = 1UL << i;
156 		vmdq_conf.pool_map[i].pools = 1UL << i;
157 	}
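	/*
	 * Spread the 8 user priorities round-robin over the configured number
	 * of traffic classes (4 or 8).
	 */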
158 	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
159 		conf.dcb_tc[i] = i % num_tcs;
160 		dcb_conf.dcb_tc[i] = i % num_tcs;
161 		tx_conf.dcb_tc[i] = i % num_tcs;
162 	}
163 	dcb_conf.nb_tcs = (enum rte_eth_nb_tcs)num_tcs;
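	/*
	 * Start from the default template, then overwrite the advanced RX/TX
	 * sections with the values computed above.
	 */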
164 	(void)(rte_memcpy(eth_conf, &vmdq_dcb_conf_default, sizeof(*eth_conf)));
165 	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &conf,
166 			  sizeof(conf)));
167 	(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &dcb_conf,
168 			  sizeof(dcb_conf)));
169 	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &vmdq_conf,
170 			  sizeof(vmdq_conf)));
171 	(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &tx_conf,
172 			  sizeof(tx_conf)));
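	/*
	 * If requested, switch to combined VMDQ+DCB+RSS RX mode and hash on
	 * IP/UDP/TCP/SCTP; the hash types are trimmed to what the port
	 * supports later, in port_init().
	 */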
173 	if (rss_enable) {
174 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
175 		eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
176 							ETH_RSS_UDP |
177 							ETH_RSS_TCP |
178 							ETH_RSS_SCTP;
179 	}
180 	return 0;
181 }
182 
183 /*
184  * Initialises a given port using global settings, with the RX buffers
185  * coming from the mbuf_pool passed as a parameter
186  */
187 static inline int
188 port_init(uint16_t port, struct rte_mempool *mbuf_pool)
189 {
190 	struct rte_eth_dev_info dev_info;
191 	struct rte_eth_conf port_conf = {0};
192 	uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT;
193 	uint16_t txRingSize = RTE_TEST_TX_DESC_DEFAULT;
194 	int retval;
195 	uint16_t q;
196 	uint16_t queues_per_pool;
197 	uint32_t max_nb_pools;
198 	struct rte_eth_txconf txq_conf;
199 	uint64_t rss_hf_tmp;
200 
201 	/*
202 	 * The max pool number from dev_info is used to validate the pool
203 	 * number specified on the command line
204 	 */
205 	retval = rte_eth_dev_info_get(port, &dev_info);
206 	if (retval != 0) {
207 		printf("Error during getting device (port %u) info: %s\n",
208 				port, strerror(-retval));
209 
210 		return retval;
211 	}
212 
213 	max_nb_pools = (uint32_t)dev_info.max_vmdq_pools;
214 	/*
215 	 * We allow processing only a subset of the VMDQ pools, as specified
216 	 * by num_pools on the command line.
217 	 */
218 	if (num_pools > max_nb_pools) {
219 		printf("num_pools %d > max_nb_pools %d\n",
220 			num_pools, max_nb_pools);
221 		return -1;
222 	}
223 
224 	/*
225 	 * NIC queues are divided into PF queues and VMDQ queues.
226 	 * The assumption here is that all ports have the same configuration!
227 	 */
228 	vmdq_queue_base = dev_info.vmdq_queue_base;
229 	vmdq_pool_base  = dev_info.vmdq_pool_base;
230 	printf("vmdq queue base: %d pool base %d\n",
231 		vmdq_queue_base, vmdq_pool_base);
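	/*
	 * A pool base of 0 means the whole device runs in VMDQ mode (no
	 * dedicated PF queues), so every RX queue belongs to a pool and
	 * nb_tcs * nb_pools must cover them all. Otherwise only the queue
	 * range starting at vmdq_queue_base is used for the pools.
	 */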
232 	if (vmdq_pool_base == 0) {
233 		num_vmdq_queues = dev_info.max_rx_queues;
234 		num_queues = dev_info.max_rx_queues;
235 		if (num_tcs != num_vmdq_queues / num_pools) {
236 			printf("nb_tcs %d is invalid: with nb_pools %d,"
237 				" nb_tcs * nb_pools should equal %d\n",
238 				num_tcs, num_pools, num_vmdq_queues);
239 			return -1;
240 		}
241 	} else {
242 		queues_per_pool = dev_info.vmdq_queue_num /
243 				  dev_info.max_vmdq_pools;
244 		if (num_tcs > queues_per_pool) {
245 			printf("num_tcs %d > num of queues per pool %d\n",
246 				num_tcs, queues_per_pool);
247 			return -1;
248 		}
249 		num_vmdq_queues = num_pools * queues_per_pool;
250 		num_queues = vmdq_queue_base + num_vmdq_queues;
251 		printf("Configured vmdq pool num: %u,"
252 			" each vmdq pool has %u queues\n",
253 			num_pools, queues_per_pool);
254 	}
255 
256 	if (!rte_eth_dev_is_valid_port(port))
257 		return -1;
258 
259 	retval = get_eth_conf(&port_conf);
260 	if (retval < 0)
261 		return retval;
262 
263 	retval = rte_eth_dev_info_get(port, &dev_info);
264 	if (retval != 0) {
265 		printf("Error during getting device (port %u) info: %s\n",
266 				port, strerror(-retval));
267 
268 		return retval;
269 	}
270 
271 	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
272 		port_conf.txmode.offloads |=
273 			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
274 
275 	rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
276 	port_conf.rx_adv_conf.rss_conf.rss_hf &=
277 		dev_info.flow_type_rss_offloads;
278 	if (port_conf.rx_adv_conf.rss_conf.rss_hf != rss_hf_tmp) {
279 		printf("Port %u modified RSS hash function based on hardware support,"
280 			" requested:%#"PRIx64" configured:%#"PRIx64"\n",
281 			port,
282 			rss_hf_tmp,
283 			port_conf.rx_adv_conf.rss_conf.rss_hf);
284 	}
285 
286 	/*
287 	 * In this example all queues, including the PF queues, are set up.
288 	 * This is because the VMDQ queues don't always start from zero, and the
289 	 * PMD layer doesn't support selectively initialising only part of the
290 	 * rx/tx queues.
291 	 */
292 	retval = rte_eth_dev_configure(port, num_queues, num_queues, &port_conf);
293 	if (retval != 0)
294 		return retval;
295 
296 	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rxRingSize,
297 				&txRingSize);
298 	if (retval != 0)
299 		return retval;
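	/*
	 * The mbuf pool was sized from the default descriptor counts; bail out
	 * if the driver adjusted the rings to something larger than that.
	 */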
300 	if (RTE_MAX(rxRingSize, txRingSize) >
301 	    RTE_MAX(RTE_TEST_RX_DESC_DEFAULT, RTE_TEST_TX_DESC_DEFAULT)) {
302 		printf("Mbuf pool has an insufficient size for port %u.\n",
303 			port);
304 		return -1;
305 	}
306 
307 	for (q = 0; q < num_queues; q++) {
308 		retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
309 					rte_eth_dev_socket_id(port),
310 					NULL,
311 					mbuf_pool);
312 		if (retval < 0) {
313 			printf("initialize rx queue %d failed\n", q);
314 			return retval;
315 		}
316 	}
317 
318 	txq_conf = dev_info.default_txconf;
319 	txq_conf.offloads = port_conf.txmode.offloads;
320 	for (q = 0; q < num_queues; q++) {
321 		retval = rte_eth_tx_queue_setup(port, q, txRingSize,
322 					rte_eth_dev_socket_id(port),
323 					&txq_conf);
324 		if (retval < 0) {
325 			printf("initialize tx queue %d failed\n", q);
326 			return retval;
327 		}
328 	}
329 
330 	retval  = rte_eth_dev_start(port);
331 	if (retval < 0) {
332 		printf("port %d start failed\n", port);
333 		return retval;
334 	}
335 
336 	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
337 	printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
338 			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
339 			(unsigned)port,
340 			vmdq_ports_eth_addr[port].addr_bytes[0],
341 			vmdq_ports_eth_addr[port].addr_bytes[1],
342 			vmdq_ports_eth_addr[port].addr_bytes[2],
343 			vmdq_ports_eth_addr[port].addr_bytes[3],
344 			vmdq_ports_eth_addr[port].addr_bytes[4],
345 			vmdq_ports_eth_addr[port].addr_bytes[5]);
346 
347 	/* Set a MAC address for each pool. */
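	/*
	 * Frames whose destination MAC is 52:54:00:12:<port>:<pool> are
	 * steered to that pool; the address is registered against the pool's
	 * absolute index (q + vmdq_pool_base).
	 */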
348 	for (q = 0; q < num_pools; q++) {
349 		struct rte_ether_addr mac;
350 
351 		mac = pool_addr_template;
352 		mac.addr_bytes[4] = port;
353 		mac.addr_bytes[5] = q;
354 		printf("Port %u vmdq pool %u set mac %02x:%02x:%02x:%02x:%02x:%02x\n",
355 			port, q,
356 			mac.addr_bytes[0], mac.addr_bytes[1],
357 			mac.addr_bytes[2], mac.addr_bytes[3],
358 			mac.addr_bytes[4], mac.addr_bytes[5]);
359 		retval = rte_eth_dev_mac_addr_add(port, &mac,
360 				q + vmdq_pool_base);
361 		if (retval) {
362 			printf("mac addr add failed at pool %d\n", q);
363 			return retval;
364 		}
365 	}
366 
367 	return 0;
368 }
369 
370 /* Check the num_pools parameter and set it if OK */
371 static int
372 vmdq_parse_num_pools(const char *q_arg)
373 {
374 	char *end = NULL;
375 	int n;
376 
377 	/* parse number string */
378 	n = strtol(q_arg, &end, 10);
379 	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
380 		return -1;
381 	if (n != 16 && n != 32)
382 		return -1;
383 	if (n == 16)
384 		num_pools = ETH_16_POOLS;
385 	else
386 		num_pools = ETH_32_POOLS;
387 
388 	return 0;
389 }
390 
391 /* Check the num_tcs parameter and set it if OK */
392 static int
393 vmdq_parse_num_tcs(const char *q_arg)
394 {
395 	char *end = NULL;
396 	int n;
397 
398 	/* parse number string */
399 	n = strtol(q_arg, &end, 10);
400 	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
401 		return -1;
402 
403 	if (n != 4 && n != 8)
404 		return -1;
405 	if (n == 4)
406 		num_tcs = ETH_4_TCS;
407 	else
408 		num_tcs = ETH_8_TCS;
409 
410 	return 0;
411 }
412 
413 static int
414 parse_portmask(const char *portmask)
415 {
416 	char *end = NULL;
417 	unsigned long pm;
418 
419 	/* parse hexadecimal string */
420 	pm = strtoul(portmask, &end, 16);
421 	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
422 		return -1;
423 
424 	if (pm == 0)
425 		return -1;
426 
427 	return pm;
428 }
429 
430 /* Display usage */
431 static void
432 vmdq_usage(const char *prgname)
433 {
434 	printf("%s [EAL options] -- -p PORTMASK\n"
435 	"  --nb-pools NP: number of pools (default 32, or 16)\n"
436 	"  --nb-tcs NT: number of TCs (default 4, or 8)\n"
437 	"  --enable-rss: enable RSS (disabled by default)\n",
438 	       prgname);
439 }
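/*
 * Example invocation (binary name and EAL options are illustrative only):
 *   ./vmdq_dcb -l 0-3 -n 4 -- -p 0x3 --nb-tcs 4 --nb-pools 32
 */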
440 
441 /* Parse the arguments given on the command line of the application */
442 static int
443 vmdq_parse_args(int argc, char **argv)
444 {
445 	int opt;
446 	int option_index;
447 	unsigned i;
448 	const char *prgname = argv[0];
449 	static struct option long_option[] = {
450 		{"nb-pools", required_argument, NULL, 0},
451 		{"nb-tcs", required_argument, NULL, 0},
452 		{"enable-rss", 0, NULL, 0},
453 		{NULL, 0, 0, 0}
454 	};
455 
456 	/* Parse command line */
457 	while ((opt = getopt_long(argc, argv, "p:", long_option,
458 		&option_index)) != EOF) {
459 		switch (opt) {
460 		/* portmask */
461 		case 'p':
462 			enabled_port_mask = parse_portmask(optarg);
463 			if (enabled_port_mask == 0) {
464 				printf("invalid portmask\n");
465 				vmdq_usage(prgname);
466 				return -1;
467 			}
468 			break;
469 		case 0:
470 			if (!strcmp(long_option[option_index].name, "nb-pools")) {
471 				if (vmdq_parse_num_pools(optarg) == -1) {
472 					printf("invalid number of pools\n");
473 					return -1;
474 				}
475 			}
476 
477 			if (!strcmp(long_option[option_index].name, "nb-tcs")) {
478 				if (vmdq_parse_num_tcs(optarg) == -1) {
479 					printf("invalid number of tcs\n");
480 					return -1;
481 				}
482 			}
483 
484 			if (!strcmp(long_option[option_index].name, "enable-rss"))
485 				rss_enable = 1;
486 			break;
487 
488 		default:
489 			vmdq_usage(prgname);
490 			return -1;
491 		}
492 	}
493 
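	/*
	 * Collect the enabled port ids from the port mask; ports are later
	 * forwarded in pairs (0 <-> 1, 2 <-> 3, ...), hence the even-count
	 * check below.
	 */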
494 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
495 		if (enabled_port_mask & (1 << i))
496 			ports[num_ports++] = (uint8_t)i;
497 	}
498 
499 	if (num_ports < 2 || num_ports % 2) {
500 		printf("The number of enabled ports is %u,"
501 			" but it should be even and at least 2\n", num_ports);
502 		return -1;
503 	}
504 
505 	return 0;
506 }
507 
508 static void
509 update_mac_address(struct rte_mbuf *m, unsigned dst_port)
510 {
511 	struct rte_ether_hdr *eth;
512 	void *tmp;
513 
514 	eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
515 
516 	/* 02:00:00:00:00:xx */
517 	tmp = &eth->d_addr.addr_bytes[0];
518 	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);
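	/*
	 * Note: this is a single 8-byte store starting at d_addr, so on
	 * little-endian targets the two surplus bytes spill into s_addr and
	 * are overwritten by the copy below.
	 */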
519 
520 	/* src addr */
521 	rte_ether_addr_copy(&vmdq_ports_eth_addr[dst_port], &eth->s_addr);
522 }
523 
524 /* When we receive a HUP signal, print out our stats */
525 static void
526 sighup_handler(int signum)
527 {
528 	unsigned q = vmdq_queue_base;
529 
530 	for (; q < num_queues; q++) {
531 		if (q % (num_vmdq_queues / num_pools) == 0)
532 			printf("\nPool %u: ", (q - vmdq_queue_base) /
533 					      (num_vmdq_queues / num_pools));
534 		printf("%lu ", rxPackets[q]);
535 	}
536 	printf("\nFinished handling signal %d\n", signum);
537 }
538 
539 /*
540  * Main per-lcore loop that does the work, reading packets from one port
541  * of a pair and writing them to the other
542  */
543 static int
544 lcore_main(void *arg)
545 {
546 	const uintptr_t core_num = (uintptr_t)arg;
547 	const unsigned num_cores = rte_lcore_count();
548 	uint16_t startQueue, endQueue;
549 	uint16_t q, i, p;
550 	const uint16_t quot = (uint16_t)(num_vmdq_queues / num_cores);
551 	const uint16_t remainder = (uint16_t)(num_vmdq_queues % num_cores);
552 
553 
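	/*
	 * Split the VMDQ queues evenly across the forwarding cores: every core
	 * handles 'quot' queues and the first 'remainder' cores take one extra.
	 */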
554 	if (remainder) {
555 		if (core_num < remainder) {
556 			startQueue = (uint16_t)(core_num * (quot + 1));
557 			endQueue = (uint16_t)(startQueue + quot + 1);
558 		} else {
559 			startQueue = (uint16_t)(core_num * quot + remainder);
560 			endQueue = (uint16_t)(startQueue + quot);
561 		}
562 	} else {
563 		startQueue = (uint16_t)(core_num * quot);
564 		endQueue = (uint16_t)(startQueue + quot);
565 	}
566 
567 	/* VMDQ queue indices don't always start from zero. */
568 	startQueue += vmdq_queue_base;
569 	endQueue   += vmdq_queue_base;
570 	printf("Core %u(lcore %u) reading queues %i-%i\n", (unsigned)core_num,
571 	       rte_lcore_id(), startQueue, endQueue - 1);
572 
573 	if (startQueue == endQueue) {
574 		printf("lcore %u has nothing to do\n", (unsigned)core_num);
575 		return 0;
576 	}
577 
578 	for (;;) {
579 		struct rte_mbuf *buf[MAX_PKT_BURST];
580 		const uint16_t buf_size = sizeof(buf) / sizeof(buf[0]);
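		/*
		 * Ports are handled in pairs: packets received on port N are
		 * forwarded out of port N ^ 1 on the same queue index, after
		 * their MAC addresses have been rewritten.
		 */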
581 		for (p = 0; p < num_ports; p++) {
582 			const uint8_t src = ports[p];
583 			const uint8_t dst = ports[p ^ 1]; /* 0 <-> 1, 2 <-> 3 etc */
584 
585 			if ((src == INVALID_PORT_ID) || (dst == INVALID_PORT_ID))
586 				continue;
587 
588 			for (q = startQueue; q < endQueue; q++) {
589 				const uint16_t rxCount = rte_eth_rx_burst(src,
590 					q, buf, buf_size);
591 
592 				if (unlikely(rxCount == 0))
593 					continue;
594 
595 				rxPackets[q] += rxCount;
596 
597 				for (i = 0; i < rxCount; i++)
598 					update_mac_address(buf[i], dst);
599 
600 				const uint16_t txCount = rte_eth_tx_burst(dst,
601 					q, buf, rxCount);
602 				if (txCount != rxCount) {
603 					for (i = txCount; i < rxCount; i++)
604 						rte_pktmbuf_free(buf[i]);
605 				}
606 			}
607 		}
608 	}
609 }
610 
611 /*
612  * Update the global variable num_ports and the array ports according to the
613  * number of ports in the system, and return the number of valid ports
614  */
615 static unsigned check_ports_num(unsigned nb_ports)
616 {
617 	unsigned valid_num_ports = num_ports;
618 	unsigned portid;
619 
620 	if (num_ports > nb_ports) {
621 		printf("\nSpecified port number (%u) exceeds total system port number (%u)\n",
622 			num_ports, nb_ports);
623 		num_ports = nb_ports;
624 	}
625 
626 	for (portid = 0; portid < num_ports; portid++) {
627 		if (!rte_eth_dev_is_valid_port(ports[portid])) {
628 			printf("\nSpecified port ID (%u) is not valid\n",
629 				ports[portid]);
630 			ports[portid] = INVALID_PORT_ID;
631 			valid_num_ports--;
632 		}
633 	}
634 	return valid_num_ports;
635 }
636 
637 
638 /* Main function, does initialisation and calls the per-lcore functions */
639 int
640 main(int argc, char *argv[])
641 {
642 	unsigned cores;
643 	struct rte_mempool *mbuf_pool;
644 	unsigned lcore_id;
645 	uintptr_t i;
646 	int ret;
647 	unsigned nb_ports, valid_num_ports;
648 	uint16_t portid;
649 
650 	signal(SIGHUP, sighup_handler);
651 
652 	/* init EAL */
653 	ret = rte_eal_init(argc, argv);
654 	if (ret < 0)
655 		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
656 	argc -= ret;
657 	argv += ret;
658 
659 	/* parse app arguments */
660 	ret = vmdq_parse_args(argc, argv);
661 	if (ret < 0)
662 		rte_exit(EXIT_FAILURE, "Invalid VMDQ argument\n");
663 
664 	cores = rte_lcore_count();
665 	if ((cores & (cores - 1)) != 0 || cores > RTE_MAX_LCORE) {
666 		rte_exit(EXIT_FAILURE, "This program can only run on a power-of-two"
667 				" number of cores (1-%d)\n\n", RTE_MAX_LCORE);
668 	}
669 
670 	nb_ports = rte_eth_dev_count_avail();
671 
672 	/*
673 	 * Update the global variable num_ports and the global array ports,
674 	 * and get the number of valid ports according to the system port count
675 	 */
676 	valid_num_ports = check_ports_num(nb_ports);
677 
678 	if (valid_num_ports < 2 || valid_num_ports % 2) {
679 		printf("The current number of valid ports is %u\n", valid_num_ports);
680 		rte_exit(EXIT_FAILURE, "The number of valid ports must be even and at least 2\n");
681 	}
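	/*
	 * One pool shared by all ports; sized for the worst case of every
	 * queue on every port keeping a full ring of descriptors.
	 */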
682 
683 	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
684 		NUM_MBUFS_PER_PORT * nb_ports, MBUF_CACHE_SIZE,
685 		0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
686 	if (mbuf_pool == NULL)
687 		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
688 
689 	/* initialize all ports */
690 	RTE_ETH_FOREACH_DEV(portid) {
691 		/* skip ports that are not enabled */
692 		if ((enabled_port_mask & (1 << portid)) == 0) {
693 			printf("\nSkipping disabled port %d\n", portid);
694 			continue;
695 		}
696 		if (port_init(portid, mbuf_pool) != 0)
697 			rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n");
698 	}
699 
700 	/* call lcore_main() on every slave lcore */
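	/*
	 * Each worker receives a sequential core index (not the lcore id),
	 * which lcore_main() uses to pick its share of the VMDQ queues.
	 */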
701 	i = 0;
702 	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
703 		rte_eal_remote_launch(lcore_main, (void*)i++, lcore_id);
704 	}
705 	/* call on master too */
706 	(void) lcore_main((void*)i);
707 
708 	return 0;
709 }
710