xref: /dpdk/examples/vmdq_dcb/main.c (revision c9902a15bd005b6d4fe072cf7b60fe4ee679155f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4 
5 #include <stdint.h>
6 #include <sys/queue.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <stdio.h>
10 #include <assert.h>
11 #include <errno.h>
12 #include <signal.h>
13 #include <stdarg.h>
14 #include <inttypes.h>
15 #include <getopt.h>
16 
17 #include <rte_common.h>
18 #include <rte_log.h>
19 #include <rte_memory.h>
20 #include <rte_memcpy.h>
21 #include <rte_eal.h>
22 #include <rte_launch.h>
23 #include <rte_atomic.h>
24 #include <rte_cycles.h>
25 #include <rte_prefetch.h>
26 #include <rte_lcore.h>
27 #include <rte_per_lcore.h>
28 #include <rte_branch_prediction.h>
29 #include <rte_interrupts.h>
30 #include <rte_random.h>
31 #include <rte_debug.h>
32 #include <rte_ether.h>
33 #include <rte_ethdev.h>
34 #include <rte_mempool.h>
35 #include <rte_mbuf.h>
36 
/* basic constants used in application */
#define MAX_QUEUES 1024
/*
 * 1024 queues require to meet the needs of a large number of vmdq_pools.
 * (RX/TX_queue_nb * RX/TX_ring_descriptors_nb) per port.
 */
#define NUM_MBUFS_PER_PORT (MAX_QUEUES * RTE_MAX(RTE_TEST_RX_DESC_DEFAULT, \
						RTE_TEST_TX_DESC_DEFAULT))
/* per-lcore mbuf cache size for the mempool */
#define MBUF_CACHE_SIZE 64

/* max packets handled per rte_eth_rx_burst()/rte_eth_tx_burst() call */
#define MAX_PKT_BURST 32

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 1024

/* sentinel stored in ports[] for ports that failed validation */
#define INVALID_PORT_ID 0xFF

/* mask of enabled ports */
static uint32_t enabled_port_mask;
/* port ids selected by the mask, and how many of them were selected */
static uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports;

/* number of pools (if user does not specify any, 32 by default) */
static enum rte_eth_nb_pools num_pools = ETH_32_POOLS;
/* number of DCB traffic classes (4 by default, 8 selectable via --nb-tcs) */
static enum rte_eth_nb_tcs   num_tcs   = ETH_4_TCS;
/* totals discovered in port_init(): all device queues vs. the VMDQ subset */
static uint16_t num_queues, num_vmdq_queues;
/* first pool index / first queue index owned by VMDQ on the device */
static uint16_t vmdq_pool_base, vmdq_queue_base;
/* non-zero when --enable-rss was given on the command line */
static uint8_t rss_enable;
68 
/* Empty vmdq+dcb configuration structure. Filled in programmatically. 8< */
/* Template copied into each port's rte_eth_conf by get_eth_conf(); the
 * rx_adv_conf/tx_adv_conf members below are then overwritten with the
 * values derived from the command line. */
static const struct rte_eth_conf vmdq_dcb_conf_default = {
	.rxmode = {
		.mq_mode        = ETH_MQ_RX_VMDQ_DCB,
		.split_hdr_size = 0,
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_VMDQ_DCB,
	},
	/*
	 * should be overridden separately in code with
	 * appropriate values
	 */
	.rx_adv_conf = {
		.vmdq_dcb_conf = {
			.nb_queue_pools = ETH_32_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
			.dcb_tc = {0},
		},
		.dcb_rx_conf = {
				.nb_tcs = ETH_4_TCS,
				/** Traffic class each UP mapped to. */
				.dcb_tc = {0},
		},
		.vmdq_rx_conf = {
			.nb_queue_pools = ETH_32_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
		},
	},
	.tx_adv_conf = {
		.vmdq_dcb_tx_conf = {
			.nb_queue_pools = ETH_32_POOLS,
			.dcb_tc = {0},
		},
	},
};
/* >8 End of empty vmdq+dcb configuration structure. */
112 
/* array used for printing out statistics */
/* per-queue RX packet counters, updated by lcore_main() and dumped by
 * sighup_handler() */
volatile unsigned long rxPackets[MAX_QUEUES] = {0};

/* Dividing up the possible user priority values. 8< */
/* one VLAN tag per pool; 32 entries cover the largest pool count (ETH_32_POOLS) */
const uint16_t vlan_tags[] = {
	0,  1,  2,  3,  4,  5,  6,  7,
	8,  9, 10, 11,	12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};

const uint16_t num_vlans = RTE_DIM(vlan_tags);
/* pool mac addr template, pool mac addr is like: 52 54 00 12 port# pool# */
static struct rte_ether_addr pool_addr_template = {
	.addr_bytes = {0x52, 0x54, 0x00, 0x12, 0x00, 0x00}
};

/* ethernet addresses of ports */
static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
133 /* Builds up the correct configuration for vmdq+dcb based on the vlan tags array
134  * given above, and the number of traffic classes available for use. */
135 static inline int
136 get_eth_conf(struct rte_eth_conf *eth_conf)
137 {
138 	struct rte_eth_vmdq_dcb_conf conf;
139 	struct rte_eth_vmdq_rx_conf  vmdq_conf;
140 	struct rte_eth_dcb_rx_conf   dcb_conf;
141 	struct rte_eth_vmdq_dcb_tx_conf tx_conf;
142 	uint8_t i;
143 
144 	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
145 	vmdq_conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
146 	tx_conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
147 	conf.nb_pool_maps = num_pools;
148 	vmdq_conf.nb_pool_maps = num_pools;
149 	conf.enable_default_pool = 0;
150 	vmdq_conf.enable_default_pool = 0;
151 	conf.default_pool = 0; /* set explicit value, even if not used */
152 	vmdq_conf.default_pool = 0;
153 
154 	for (i = 0; i < conf.nb_pool_maps; i++) {
155 		conf.pool_map[i].vlan_id = vlan_tags[i];
156 		vmdq_conf.pool_map[i].vlan_id = vlan_tags[i];
157 		conf.pool_map[i].pools = 1UL << i;
158 		vmdq_conf.pool_map[i].pools = 1UL << i;
159 	}
160 	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
161 		conf.dcb_tc[i] = i % num_tcs;
162 		dcb_conf.dcb_tc[i] = i % num_tcs;
163 		tx_conf.dcb_tc[i] = i % num_tcs;
164 	}
165 	dcb_conf.nb_tcs = (enum rte_eth_nb_tcs)num_tcs;
166 	(void)(rte_memcpy(eth_conf, &vmdq_dcb_conf_default, sizeof(*eth_conf)));
167 	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &conf,
168 			  sizeof(conf)));
169 	(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &dcb_conf,
170 			  sizeof(dcb_conf)));
171 	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &vmdq_conf,
172 			  sizeof(vmdq_conf)));
173 	(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &tx_conf,
174 			  sizeof(tx_conf)));
175 	if (rss_enable) {
176 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
177 		eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
178 							ETH_RSS_UDP |
179 							ETH_RSS_TCP |
180 							ETH_RSS_SCTP;
181 	}
182 	return 0;
183 }
184 /* >8 End of dividing up the possible user priority values. */
185 
/*
 * Initialises a given port using global settings and with the rx buffers
 * coming from the mbuf_pool passed as parameter.
 *
 * Side effects: sets the globals vmdq_queue_base, vmdq_pool_base,
 * num_vmdq_queues and num_queues from the device info, configures and
 * starts the port, and records its MAC in vmdq_ports_eth_addr[].
 * Returns 0 on success, negative on any failure.
 */
static inline int
port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf = {0};
	uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT;
	uint16_t txRingSize = RTE_TEST_TX_DESC_DEFAULT;
	int retval;
	uint16_t q;
	uint16_t queues_per_pool;
	uint32_t max_nb_pools;
	struct rte_eth_txconf txq_conf;
	uint64_t rss_hf_tmp;

	/*
	 * The max pool number from dev_info will be used to validate the pool
	 * number specified in cmd line
	 */
	retval = rte_eth_dev_info_get(port, &dev_info);
	if (retval != 0) {
		printf("Error during getting device (port %u) info: %s\n",
				port, strerror(-retval));

		return retval;
	}

	max_nb_pools = (uint32_t)dev_info.max_vmdq_pools;
	/*
	 * We allow to process part of VMDQ pools specified by num_pools in
	 * command line.
	 */
	if (num_pools > max_nb_pools) {
		printf("num_pools %d >max_nb_pools %d\n",
			num_pools, max_nb_pools);
		return -1;
	}

	/*
	 * NIC queues are divided into pf queues and vmdq queues.
	 * There is assumption here all ports have the same configuration!
	*/
	vmdq_queue_base = dev_info.vmdq_queue_base;
	vmdq_pool_base  = dev_info.vmdq_pool_base;
	printf("vmdq queue base: %d pool base %d\n",
		vmdq_queue_base, vmdq_pool_base);
	if (vmdq_pool_base == 0) {
		/* pool base 0: treat every RX queue as a VMDQ queue, and
		 * require nb_tcs * nb_pools to exactly cover them */
		num_vmdq_queues = dev_info.max_rx_queues;
		num_queues = dev_info.max_rx_queues;
		if (num_tcs != num_vmdq_queues / num_pools) {
			printf("nb_tcs %d is invalid considering with"
				" nb_pools %d, nb_tcs * nb_pools should = %d\n",
				num_tcs, num_pools, num_vmdq_queues);
			return -1;
		}
	} else {
		/* device reserves queues below vmdq_queue_base for the PF;
		 * size the VMDQ region from the per-pool queue count */
		queues_per_pool = dev_info.vmdq_queue_num /
				  dev_info.max_vmdq_pools;
		if (num_tcs > queues_per_pool) {
			printf("num_tcs %d > num of queues per pool %d\n",
				num_tcs, queues_per_pool);
			return -1;
		}
		num_vmdq_queues = num_pools * queues_per_pool;
		num_queues = vmdq_queue_base + num_vmdq_queues;
		printf("Configured vmdq pool num: %u,"
			" each vmdq pool has %u queues\n",
			num_pools, queues_per_pool);
	}

	if (!rte_eth_dev_is_valid_port(port))
		return -1;

	retval = get_eth_conf(&port_conf);
	if (retval < 0)
		return retval;

	/* re-read dev_info for the offload capabilities and default txconf */
	retval = rte_eth_dev_info_get(port, &dev_info);
	if (retval != 0) {
		printf("Error during getting device (port %u) info: %s\n",
				port, strerror(-retval));

		return retval;
	}

	/* enable fast mbuf free on TX when the hardware supports it */
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	/* trim the requested RSS hash functions to what the device supports,
	 * warning when anything had to be dropped */
	rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
	port_conf.rx_adv_conf.rss_conf.rss_hf &=
		dev_info.flow_type_rss_offloads;
	if (port_conf.rx_adv_conf.rss_conf.rss_hf != rss_hf_tmp) {
		printf("Port %u modified RSS hash function based on hardware support,"
			"requested:%#"PRIx64" configured:%#"PRIx64"\n",
			port,
			rss_hf_tmp,
			port_conf.rx_adv_conf.rss_conf.rss_hf);
	}

	/*
	 * Though in this example, all queues including pf queues are setup.
	 * This is because VMDQ queues doesn't always start from zero, and the
	 * PMD layer doesn't support selectively initialising part of rx/tx
	 * queues.
	 */
	retval = rte_eth_dev_configure(port, num_queues, num_queues, &port_conf);
	if (retval != 0)
		return retval;

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rxRingSize,
				&txRingSize);
	if (retval != 0)
		return retval;
	/* the mempool was sized for the default descriptor counts; refuse to
	 * run if the device adjusted either ring above that */
	if (RTE_MAX(rxRingSize, txRingSize) >
	    RTE_MAX(RTE_TEST_RX_DESC_DEFAULT, RTE_TEST_TX_DESC_DEFAULT)) {
		printf("Mbuf pool has an insufficient size for port %u.\n",
			port);
		return -1;
	}

	for (q = 0; q < num_queues; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
					rte_eth_dev_socket_id(port),
					NULL,
					mbuf_pool);
		if (retval < 0) {
			printf("initialize rx queue %d failed\n", q);
			return retval;
		}
	}

	txq_conf = dev_info.default_txconf;
	txq_conf.offloads = port_conf.txmode.offloads;
	for (q = 0; q < num_queues; q++) {
		retval = rte_eth_tx_queue_setup(port, q, txRingSize,
					rte_eth_dev_socket_id(port),
					&txq_conf);
		if (retval < 0) {
			printf("initialize tx queue %d failed\n", q);
			return retval;
		}
	}

	retval  = rte_eth_dev_start(port);
	if (retval < 0) {
		printf("port %d start failed\n", port);
		return retval;
	}

	retval = rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	if (retval < 0) {
		printf("port %d MAC address get failed: %s\n", port,
		       rte_strerror(-retval));
		return retval;
	}
	printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			(unsigned)port,
			RTE_ETHER_ADDR_BYTES(&vmdq_ports_eth_addr[port]));

	/* Set mac for each pool. 8< */
	/* pool MACs follow pool_addr_template: 52:54:00:12:<port>:<pool> */
	for (q = 0; q < num_pools; q++) {
		struct rte_ether_addr mac;

		mac = pool_addr_template;
		mac.addr_bytes[4] = port;
		mac.addr_bytes[5] = q;
		printf("Port %u vmdq pool %u set mac " RTE_ETHER_ADDR_PRT_FMT "\n",
			port, q, RTE_ETHER_ADDR_BYTES(&mac));
		retval = rte_eth_dev_mac_addr_add(port, &mac,
				q + vmdq_pool_base);
		if (retval) {
			printf("mac addr add failed at pool %d\n", q);
			return retval;
		}
	}
	/* >8 End of set mac for each pool. */

	return 0;
}
370 
371 /* Check num_pools parameter and set it if OK*/
372 static int
373 vmdq_parse_num_pools(const char *q_arg)
374 {
375 	char *end = NULL;
376 	int n;
377 
378 	/* parse number string */
379 	n = strtol(q_arg, &end, 10);
380 	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
381 		return -1;
382 	if (n != 16 && n != 32)
383 		return -1;
384 	if (n == 16)
385 		num_pools = ETH_16_POOLS;
386 	else
387 		num_pools = ETH_32_POOLS;
388 
389 	return 0;
390 }
391 
392 /* Check num_tcs parameter and set it if OK*/
393 static int
394 vmdq_parse_num_tcs(const char *q_arg)
395 {
396 	char *end = NULL;
397 	int n;
398 
399 	/* parse number string */
400 	n = strtol(q_arg, &end, 10);
401 	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
402 		return -1;
403 
404 	if (n != 4 && n != 8)
405 		return -1;
406 	if (n == 4)
407 		num_tcs = ETH_4_TCS;
408 	else
409 		num_tcs = ETH_8_TCS;
410 
411 	return 0;
412 }
413 
/* Parse the hexadecimal port mask from the command line.
 * Returns the mask value, or 0 when the string is empty, contains
 * trailing garbage, or does not fit in the 32-bit enabled_port_mask
 * (0 is treated as invalid by the caller). */
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	errno = 0;
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;
	/* reject overflowed values: previously ULONG_MAX truncated to -1,
	 * which the caller read back as mask 0xFFFFFFFF */
	if (errno == ERANGE || pm > 0xFFFFFFFFUL)
		return 0;

	return pm;
}
427 
/* Display usage */
/* Prints command-line help to stdout; prgname is argv[0].
 * (Fixed: the usage line previously read "-p PORTMASK]" with an
 * unbalanced bracket.) */
static void
vmdq_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK\n"
	"  --nb-pools NP: number of pools (32 default, 16)\n"
	"  --nb-tcs NP: number of TCs (4 default, 8)\n"
	"  --enable-rss: enable RSS (disabled by default)\n",
	       prgname);
}
438 
439 /*  Parse the argument (num_pools) given in the command line of the application */
440 static int
441 vmdq_parse_args(int argc, char **argv)
442 {
443 	int opt;
444 	int option_index;
445 	unsigned i;
446 	const char *prgname = argv[0];
447 	static struct option long_option[] = {
448 		{"nb-pools", required_argument, NULL, 0},
449 		{"nb-tcs", required_argument, NULL, 0},
450 		{"enable-rss", 0, NULL, 0},
451 		{NULL, 0, 0, 0}
452 	};
453 
454 	/* Parse command line */
455 	while ((opt = getopt_long(argc, argv, "p:", long_option,
456 		&option_index)) != EOF) {
457 		switch (opt) {
458 		/* portmask */
459 		case 'p':
460 			enabled_port_mask = parse_portmask(optarg);
461 			if (enabled_port_mask == 0) {
462 				printf("invalid portmask\n");
463 				vmdq_usage(prgname);
464 				return -1;
465 			}
466 			break;
467 		case 0:
468 			if (!strcmp(long_option[option_index].name, "nb-pools")) {
469 				if (vmdq_parse_num_pools(optarg) == -1) {
470 					printf("invalid number of pools\n");
471 					return -1;
472 				}
473 			}
474 
475 			if (!strcmp(long_option[option_index].name, "nb-tcs")) {
476 				if (vmdq_parse_num_tcs(optarg) == -1) {
477 					printf("invalid number of tcs\n");
478 					return -1;
479 				}
480 			}
481 
482 			if (!strcmp(long_option[option_index].name, "enable-rss"))
483 				rss_enable = 1;
484 			break;
485 
486 		default:
487 			vmdq_usage(prgname);
488 			return -1;
489 		}
490 	}
491 
492 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
493 		if (enabled_port_mask & (1 << i))
494 			ports[num_ports++] = (uint8_t)i;
495 	}
496 
497 	if (num_ports < 2 || num_ports % 2) {
498 		printf("Current enabled port number is %u,"
499 			" but it should be even and at least 2\n", num_ports);
500 		return -1;
501 	}
502 
503 	return 0;
504 }
505 
/* Rewrite the Ethernet header of mbuf m for forwarding out dst_port:
 * destination becomes the synthetic address 02:00:00:00:00:<dst_port>,
 * source becomes the MAC recorded for dst_port in port_init(). */
static void
update_mac_address(struct rte_mbuf *m, unsigned dst_port)
{
	struct rte_ether_hdr *eth;
	void *tmp;

	eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	/* 02:00:00:00:00:xx */
	/* NOTE(review): this single 8-byte store covers the 6-byte d_addr
	 * plus the first 2 bytes of s_addr; those spill bytes are then
	 * overwritten by the copy below, so the result is correct, but the
	 * uint64_t write through a cast pointer relies on the header being
	 * suitably aligned and on little-endian byte layout — confirm for
	 * the target platforms. */
	tmp = &eth->d_addr.addr_bytes[0];
	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);

	/* src addr */
	rte_ether_addr_copy(&vmdq_ports_eth_addr[dst_port], &eth->s_addr);
}
521 
522 /* When we receive a HUP signal, print out our stats */
523 static void
524 sighup_handler(int signum)
525 {
526 	unsigned q = vmdq_queue_base;
527 
528 	for (; q < num_queues; q++) {
529 		if (q % (num_vmdq_queues / num_pools) == 0)
530 			printf("\nPool %u: ", (q - vmdq_queue_base) /
531 					      (num_vmdq_queues / num_pools));
532 		printf("%lu ", rxPackets[q]);
533 	}
534 	printf("\nFinished handling signal %d\n", signum);
535 }
536 
/*
 * Main thread that does the work, reading from INPUT_PORT
 * and writing to OUTPUT_PORT.
 *
 * arg carries a 0-based worker index assigned at launch time (not the
 * lcore id). The VMDQ queue range is divided evenly across all lcores;
 * each worker forwards packets between port pairs (0<->1, 2<->3, ...)
 * on its share of the queues. Never returns once the forwarding loop
 * is entered.
 */
static int
lcore_main(void *arg)
{
	const uintptr_t core_num = (uintptr_t)arg;
	const unsigned num_cores = rte_lcore_count();
	uint16_t startQueue, endQueue;
	uint16_t q, i, p;
	/* even split of the vmdq queues; the first 'remainder' cores each
	 * take one extra queue */
	const uint16_t quot = (uint16_t)(num_vmdq_queues / num_cores);
	const uint16_t remainder = (uint16_t)(num_vmdq_queues % num_cores);


	if (remainder) {
		if (core_num < remainder) {
			startQueue = (uint16_t)(core_num * (quot + 1));
			endQueue = (uint16_t)(startQueue + quot + 1);
		} else {
			startQueue = (uint16_t)(core_num * quot + remainder);
			endQueue = (uint16_t)(startQueue + quot);
		}
	} else {
		startQueue = (uint16_t)(core_num * quot);
		endQueue = (uint16_t)(startQueue + quot);
	}

	/* vmdq queue idx doesn't always start from zero.*/
	startQueue += vmdq_queue_base;
	endQueue   += vmdq_queue_base;
	printf("Core %u(lcore %u) reading queues %i-%i\n", (unsigned)core_num,
	       rte_lcore_id(), startQueue, endQueue - 1);

	/* empty range: more cores than vmdq queues */
	if (startQueue == endQueue) {
		printf("lcore %u has nothing to do\n", (unsigned)core_num);
		return 0;
	}

	/* forwarding loop — runs until the process is killed */
	for (;;) {
		struct rte_mbuf *buf[MAX_PKT_BURST];
		const uint16_t buf_size = RTE_DIM(buf);
		for (p = 0; p < num_ports; p++) {
			const uint8_t src = ports[p];
			const uint8_t dst = ports[p ^ 1]; /* 0 <-> 1, 2 <-> 3 etc */

			if ((src == INVALID_PORT_ID) || (dst == INVALID_PORT_ID))
				continue;

			for (q = startQueue; q < endQueue; q++) {
				const uint16_t rxCount = rte_eth_rx_burst(src,
					q, buf, buf_size);

				if (unlikely(rxCount == 0))
					continue;

				/* per-queue stats, dumped by sighup_handler() */
				rxPackets[q] += rxCount;

				for (i = 0; i < rxCount; i++)
					update_mac_address(buf[i], dst);

				const uint16_t txCount = rte_eth_tx_burst(dst,
					q, buf, rxCount);
				/* free any mbufs the TX ring did not accept */
				if (txCount != rxCount) {
					for (i = txCount; i < rxCount; i++)
						rte_pktmbuf_free(buf[i]);
				}
			}
		}
	}
}
608 
609 /*
610  * Update the global var NUM_PORTS and array PORTS according to system ports number
611  * and return valid ports number
612  */
613 static unsigned check_ports_num(unsigned nb_ports)
614 {
615 	unsigned valid_num_ports = num_ports;
616 	unsigned portid;
617 
618 	if (num_ports > nb_ports) {
619 		printf("\nSpecified port number(%u) exceeds total system port number(%u)\n",
620 			num_ports, nb_ports);
621 		num_ports = nb_ports;
622 	}
623 
624 	for (portid = 0; portid < num_ports; portid++) {
625 		if (!rte_eth_dev_is_valid_port(ports[portid])) {
626 			printf("\nSpecified port ID(%u) is not valid\n",
627 				ports[portid]);
628 			ports[portid] = INVALID_PORT_ID;
629 			valid_num_ports--;
630 		}
631 	}
632 	return valid_num_ports;
633 }
634 
635 
/* Main function, does initialisation and calls the per-lcore functions */
int
main(int argc, char *argv[])
{
	unsigned cores;
	struct rte_mempool *mbuf_pool;
	unsigned lcore_id;
	uintptr_t i;
	int ret;
	unsigned nb_ports, valid_num_ports;
	uint16_t portid;

	/* dump per-queue stats on SIGHUP (see sighup_handler) */
	signal(SIGHUP, sighup_handler);

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	argc -= ret;
	argv += ret;

	/* parse app arguments */
	ret = vmdq_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid VMDQ argument\n");

	cores = rte_lcore_count();
	/* NOTE(review): the (cores & (cores - 1)) test actually requires a
	 * power-of-two core count, not merely an even one as the message
	 * says — confirm which restriction is intended. */
	if ((cores & (cores - 1)) != 0 || cores > RTE_MAX_LCORE) {
		rte_exit(EXIT_FAILURE,"This program can only run on an even"
				" number of cores(1-%d)\n\n", RTE_MAX_LCORE);
	}

	nb_ports = rte_eth_dev_count_avail();

	/*
	 * Update the global var NUM_PORTS and global array PORTS
	 * and get value of var VALID_NUM_PORTS according to system ports number
	 */
	valid_num_ports = check_ports_num(nb_ports);

	if (valid_num_ports < 2 || valid_num_ports % 2) {
		printf("Current valid ports number is %u\n", valid_num_ports);
		rte_exit(EXIT_FAILURE, "Error with valid ports number is not even or less than 2\n");
	}

	/* one mempool shared by all ports, sized from the default ring
	 * descriptor counts (see NUM_MBUFS_PER_PORT) */
	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
		NUM_MBUFS_PER_PORT * nb_ports, MBUF_CACHE_SIZE,
		0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(portid) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			continue;
		}
		if (port_init(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n");
	}

	/* call lcore_main() on every worker lcore, passing a 0-based worker
	 * index (not the lcore id) as the argument */
	i = 0;
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		rte_eal_remote_launch(lcore_main, (void*)i++, lcore_id);
	}
	/* call on main too */
	(void) lcore_main((void*)i);

	/* clean up the EAL */
	rte_eal_cleanup();

	return 0;
}
711