/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdint.h>
#include <sys/queue.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdarg.h>
#include <inttypes.h>
#include <getopt.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>

/* basic constants used in application */
#define MAX_QUEUES 1024
/*
 * 1024 queues are required to meet the needs of a large number of vmdq_pools:
 * (RX/TX_queue_nb * RX/TX_ring_descriptors_nb) per port.
 */
#define NUM_MBUFS_PER_PORT (MAX_QUEUES * RTE_MAX(RX_DESC_DEFAULT, \
						TX_DESC_DEFAULT))
#define MBUF_CACHE_SIZE 64

#define MAX_PKT_BURST 32

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RX_DESC_DEFAULT 1024
#define TX_DESC_DEFAULT 1024

#define INVALID_PORT_ID 0xFF

/* mask of enabled ports */
static uint32_t enabled_port_mask;
static uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports;

/* number of pools (if the user does not specify any, 32 by default) */
static enum rte_eth_nb_pools num_pools = RTE_ETH_32_POOLS;
static enum rte_eth_nb_tcs num_tcs = RTE_ETH_4_TCS;
static uint16_t num_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
static uint8_t rss_enable;

/* Empty vmdq+dcb configuration structure. Filled in programmatically. 8< */
static const struct rte_eth_conf vmdq_dcb_conf_default = {
	.rxmode = {
		.mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB,
	},
	.txmode = {
		.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB,
	},
	/*
	 * should be overridden separately in code with
	 * appropriate values
	 */
	.rx_adv_conf = {
		.vmdq_dcb_conf = {
			.nb_queue_pools = RTE_ETH_32_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
			.dcb_tc = {0},
		},
		.dcb_rx_conf = {
			.nb_tcs = RTE_ETH_4_TCS,
			/** Traffic class each UP mapped to. */
			.dcb_tc = {0},
		},
		.vmdq_rx_conf = {
			.nb_queue_pools = RTE_ETH_32_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
		},
	},
	.tx_adv_conf = {
		.vmdq_dcb_tx_conf = {
			.nb_queue_pools = RTE_ETH_32_POOLS,
			.dcb_tc = {0},
		},
	},
};
/* >8 End of empty vmdq+dcb configuration structure. */

/* array used for printing out statistics */
volatile unsigned long rxPackets[MAX_QUEUES] = {0};

/* Dividing up the possible user priority values. 8< */
const uint16_t vlan_tags[] = {
	0, 1, 2, 3, 4, 5, 6, 7,
	8, 9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};

const uint16_t num_vlans = RTE_DIM(vlan_tags);
/* pool mac addr template, pool mac addr is like: 52 54 00 12 port# pool# */
static struct rte_ether_addr pool_addr_template = {
	.addr_bytes = {0x52, 0x54, 0x00, 0x12, 0x00, 0x00}
};

/* ethernet addresses of ports */
static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];

/* Builds up the correct configuration for vmdq+dcb based on the vlan tags array
 * given above, and the number of traffic classes available for use. */
static inline int
get_eth_conf(struct rte_eth_conf *eth_conf)
{
	struct rte_eth_vmdq_dcb_conf conf;
	struct rte_eth_vmdq_rx_conf vmdq_conf;
	struct rte_eth_dcb_rx_conf dcb_conf;
	struct rte_eth_vmdq_dcb_tx_conf tx_conf;
	uint8_t i;

	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
	vmdq_conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
	tx_conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
	conf.nb_pool_maps = num_pools;
	vmdq_conf.nb_pool_maps = num_pools;
	conf.enable_default_pool = 0;
	vmdq_conf.enable_default_pool = 0;
	conf.default_pool = 0; /* set explicit value, even if not used */
	vmdq_conf.default_pool = 0;

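	/* One pool per VLAN: pool i receives traffic tagged with vlan_tags[i]. */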
	for (i = 0; i < conf.nb_pool_maps; i++) {
		conf.pool_map[i].vlan_id = vlan_tags[i];
		vmdq_conf.pool_map[i].vlan_id = vlan_tags[i];
		conf.pool_map[i].pools = 1UL << i;
		vmdq_conf.pool_map[i].pools = 1UL << i;
	}
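	/* Spread the 8 user priorities round-robin over the configured TCs. */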
	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
		conf.dcb_tc[i] = i % num_tcs;
		dcb_conf.dcb_tc[i] = i % num_tcs;
		tx_conf.dcb_tc[i] = i % num_tcs;
	}
	dcb_conf.nb_tcs = (enum rte_eth_nb_tcs)num_tcs;
	(void)(rte_memcpy(eth_conf, &vmdq_dcb_conf_default, sizeof(*eth_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &conf,
			sizeof(conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &dcb_conf,
			sizeof(dcb_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &vmdq_conf,
			sizeof(vmdq_conf)));
	(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &tx_conf,
			sizeof(tx_conf)));
	if (rss_enable) {
		eth_conf->rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP |
					RTE_ETH_RSS_UDP |
					RTE_ETH_RSS_TCP |
					RTE_ETH_RSS_SCTP;
	}
	return 0;
}
/* >8 End of dividing up the possible user priority values. */

/*
 * Initialises a given port using global settings and with the rx buffers
 * coming from the mbuf_pool passed as parameter
 */
static inline int
port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf = {0};
	uint16_t rxRingSize = RX_DESC_DEFAULT;
	uint16_t txRingSize = TX_DESC_DEFAULT;
	int retval;
	uint16_t q;
	uint16_t queues_per_pool;
	uint32_t max_nb_pools;
	struct rte_eth_txconf txq_conf;
	uint64_t rss_hf_tmp;

	/*
	 * The max pool number from dev_info will be used to validate the pool
	 * number specified on the command line.
	 */
	retval = rte_eth_dev_info_get(port, &dev_info);
	if (retval != 0) {
		printf("Error during getting device (port %u) info: %s\n",
			port, strerror(-retval));

		return retval;
	}

	max_nb_pools = (uint32_t)dev_info.max_vmdq_pools;
	/*
	 * Only the number of VMDQ pools requested on the command line
	 * (num_pools) is used; it may be smaller than the device maximum.
	 */
	if (num_pools > max_nb_pools) {
		printf("num_pools %d > max_nb_pools %d\n",
			num_pools, max_nb_pools);
		return -1;
	}

	/*
	 * NIC queues are divided into pf queues and vmdq queues.
	 * The assumption here is that all ports have the same configuration!
	 */
	vmdq_queue_base = dev_info.vmdq_queue_base;
	vmdq_pool_base = dev_info.vmdq_pool_base;
	printf("vmdq queue base: %d pool base %d\n",
		vmdq_queue_base, vmdq_pool_base);
	if (vmdq_pool_base == 0) {
		num_vmdq_queues = dev_info.max_rx_queues;
		num_queues = dev_info.max_rx_queues;
		if (num_tcs != num_vmdq_queues / num_pools) {
			printf("nb_tcs %d is invalid: with nb_pools %d,"
				" nb_tcs * nb_pools should equal %d\n",
				num_tcs, num_pools, num_vmdq_queues);
			return -1;
		}
	} else {
		queues_per_pool = dev_info.vmdq_queue_num /
			dev_info.max_vmdq_pools;
		if (num_tcs > queues_per_pool) {
			printf("num_tcs %d > num of queues per pool %d\n",
				num_tcs, queues_per_pool);
			return -1;
		}
		num_vmdq_queues = num_pools * queues_per_pool;
		num_queues = vmdq_queue_base + num_vmdq_queues;
		printf("Configured vmdq pool num: %u,"
			" each vmdq pool has %u queues\n",
			num_pools, queues_per_pool);
	}

	if (!rte_eth_dev_is_valid_port(port))
		return -1;

	retval = get_eth_conf(&port_conf);
	if (retval < 0)
		return retval;

	retval = rte_eth_dev_info_get(port, &dev_info);
	if (retval != 0) {
		printf("Error during getting device (port %u) info: %s\n",
			port, strerror(-retval));

		return retval;
	}

	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

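	/* Restrict the requested RSS hash types to those the port supports. */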
	rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
	port_conf.rx_adv_conf.rss_conf.rss_hf &=
		dev_info.flow_type_rss_offloads;
	if (port_conf.rx_adv_conf.rss_conf.rss_hf != rss_hf_tmp) {
		printf("Port %u modified RSS hash function based on hardware support,"
			" requested:%#"PRIx64" configured:%#"PRIx64"\n",
			port,
			rss_hf_tmp,
			port_conf.rx_adv_conf.rss_conf.rss_hf);
	}

	/*
	 * In this example all queues, including the pf queues, are set up.
	 * This is because VMDQ queues don't always start from zero, and the
	 * PMD layer doesn't support selectively initialising only part of
	 * the rx/tx queues.
	 */
	retval = rte_eth_dev_configure(port, num_queues, num_queues, &port_conf);
	if (retval != 0)
		return retval;

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rxRingSize,
				&txRingSize);
	if (retval != 0)
		return retval;
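	/*
	 * The PMD may round the ring sizes up; if they end up larger than the
	 * defaults used to size the mbuf pool, the pool would be too small.
	 */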
	if (RTE_MAX(rxRingSize, txRingSize) >
			RTE_MAX(RX_DESC_DEFAULT, TX_DESC_DEFAULT)) {
		printf("Mbuf pool has an insufficient size for port %u.\n",
			port);
		return -1;
	}

	for (q = 0; q < num_queues; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
				rte_eth_dev_socket_id(port),
				NULL,
				mbuf_pool);
		if (retval < 0) {
			printf("initialize rx queue %d failed\n", q);
			return retval;
		}
	}

	txq_conf = dev_info.default_txconf;
	txq_conf.offloads = port_conf.txmode.offloads;
	for (q = 0; q < num_queues; q++) {
		retval = rte_eth_tx_queue_setup(port, q, txRingSize,
				rte_eth_dev_socket_id(port),
				&txq_conf);
		if (retval < 0) {
			printf("initialize tx queue %d failed\n", q);
			return retval;
		}
	}

	retval = rte_eth_dev_start(port);
	if (retval < 0) {
		printf("port %d start failed\n", port);
		return retval;
	}

	retval = rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	if (retval < 0) {
		printf("port %d MAC address get failed: %s\n", port,
			rte_strerror(-retval));
		return retval;
	}
	printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
		" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
		(unsigned)port,
		RTE_ETHER_ADDR_BYTES(&vmdq_ports_eth_addr[port]));

	/* Set mac for each pool. 8< */
	for (q = 0; q < num_pools; q++) {
		struct rte_ether_addr mac;

		mac = pool_addr_template;
		mac.addr_bytes[4] = port;
		mac.addr_bytes[5] = q;
		printf("Port %u vmdq pool %u set mac " RTE_ETHER_ADDR_PRT_FMT "\n",
			port, q, RTE_ETHER_ADDR_BYTES(&mac));
		retval = rte_eth_dev_mac_addr_add(port, &mac,
				q + vmdq_pool_base);
		if (retval) {
			printf("mac addr add failed at pool %d\n", q);
			return retval;
		}
	}
	/* >8 End of set mac for each pool. */

	return 0;
}

/* Check num_pools parameter and set it if OK */
static int
vmdq_parse_num_pools(const char *q_arg)
{
	char *end = NULL;
	int n;

	/* parse number string */
	n = strtol(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;
	if (n != 16 && n != 32)
		return -1;
	if (n == 16)
		num_pools = RTE_ETH_16_POOLS;
	else
		num_pools = RTE_ETH_32_POOLS;

	return 0;
}

/* Check num_tcs parameter and set it if OK */
static int
vmdq_parse_num_tcs(const char *q_arg)
{
	char *end = NULL;
	int n;

	/* parse number string */
	n = strtol(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (n != 4 && n != 8)
		return -1;
	if (n == 4)
		num_tcs = RTE_ETH_4_TCS;
	else
		num_tcs = RTE_ETH_8_TCS;

	return 0;
}

static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	return pm;
}

/* Display usage */
static void
vmdq_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK\n"
		"  --nb-pools NP: number of pools (32 default, 16)\n"
		"  --nb-tcs NT: number of TCs (4 default, 8)\n"
		"  --enable-rss: enable RSS (disabled by default)\n",
		prgname);
}

/* Parse the application (non-EAL) arguments given on the command line */
static int
vmdq_parse_args(int argc, char **argv)
{
	int opt;
	int option_index;
	unsigned i;
	const char *prgname = argv[0];
	static struct option long_option[] = {
		{"nb-pools", required_argument, NULL, 0},
		{"nb-tcs", required_argument, NULL, 0},
		{"enable-rss", 0, NULL, 0},
		{NULL, 0, 0, 0}
	};

	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:", long_option,
			&option_index)) != EOF) {
		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				vmdq_usage(prgname);
				return -1;
			}
			break;
		case 0:
			if (!strcmp(long_option[option_index].name, "nb-pools")) {
				if (vmdq_parse_num_pools(optarg) == -1) {
					printf("invalid number of pools\n");
					return -1;
				}
			}

			if (!strcmp(long_option[option_index].name, "nb-tcs")) {
				if (vmdq_parse_num_tcs(optarg) == -1) {
					printf("invalid number of tcs\n");
					return -1;
				}
			}

			if (!strcmp(long_option[option_index].name, "enable-rss"))
				rss_enable = 1;
			break;

		default:
			vmdq_usage(prgname);
			return -1;
		}
	}

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (enabled_port_mask & (1 << i))
			ports[num_ports++] = (uint16_t)i;
	}

	if (num_ports < 2 || num_ports % 2) {
		printf("Current enabled port number is %u,"
			" but it should be even and at least 2\n", num_ports);
		return -1;
	}

	return 0;
}

static void
update_mac_address(struct rte_mbuf *m, unsigned dst_port)
{
	struct rte_ether_hdr *eth;
	void *tmp;

	eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	/* dst addr: 02:00:00:00:00:xx, where xx is the destination port id */
	tmp = &eth->dst_addr.addr_bytes[0];
	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);
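	/*
	 * Note: this 8-byte store assumes a little-endian CPU and spills two
	 * bytes into src_addr, which is overwritten just below.
	 */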

	/* src addr */
	rte_ether_addr_copy(&vmdq_ports_eth_addr[dst_port], &eth->src_addr);
}

/* When we receive a HUP signal, print out our stats */
static void
sighup_handler(int signum)
{
	unsigned q = vmdq_queue_base;

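	/* One output line per pool: a new pool starts every (num_vmdq_queues / num_pools) queues. */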
	for (; q < num_queues; q++) {
		if (q % (num_vmdq_queues / num_pools) == 0)
			printf("\nPool %u: ", (q - vmdq_queue_base) /
				(num_vmdq_queues / num_pools));
		printf("%lu ", rxPackets[q]);
	}
	printf("\nFinished handling signal %d\n", signum);
}

/*
 * Per-lcore worker function that does the forwarding work, reading packets
 * from one port of each pair and writing them to the other.
 */
static int
lcore_main(void *arg)
{
	const uintptr_t core_num = (uintptr_t)arg;
	const unsigned num_cores = rte_lcore_count();
	uint16_t startQueue, endQueue;
	uint16_t q, i, p;
	const uint16_t quot = (uint16_t)(num_vmdq_queues / num_cores);
	const uint16_t remainder = (uint16_t)(num_vmdq_queues % num_cores);

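	/*
	 * Divide the VMDQ queues as evenly as possible among the lcores:
	 * the first 'remainder' cores each poll one extra queue.
	 */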
	if (remainder) {
		if (core_num < remainder) {
			startQueue = (uint16_t)(core_num * (quot + 1));
			endQueue = (uint16_t)(startQueue + quot + 1);
		} else {
			startQueue = (uint16_t)(core_num * quot + remainder);
			endQueue = (uint16_t)(startQueue + quot);
		}
	} else {
		startQueue = (uint16_t)(core_num * quot);
		endQueue = (uint16_t)(startQueue + quot);
	}

	/* vmdq queue idx doesn't always start from zero. */
	startQueue += vmdq_queue_base;
	endQueue += vmdq_queue_base;
	printf("Core %u(lcore %u) reading queues %i-%i\n", (unsigned)core_num,
		rte_lcore_id(), startQueue, endQueue - 1);

	if (startQueue == endQueue) {
		printf("lcore %u has nothing to do\n", (unsigned)core_num);
		return 0;
	}

	for (;;) {
		struct rte_mbuf *buf[MAX_PKT_BURST];
		const uint16_t buf_size = RTE_DIM(buf);
		for (p = 0; p < num_ports; p++) {
			const uint16_t src = ports[p];
			const uint16_t dst = ports[p ^ 1]; /* 0 <-> 1, 2 <-> 3 etc */

			if ((src == INVALID_PORT_ID) || (dst == INVALID_PORT_ID))
				continue;

			for (q = startQueue; q < endQueue; q++) {
				const uint16_t rxCount = rte_eth_rx_burst(src,
					q, buf, buf_size);

				if (unlikely(rxCount == 0))
					continue;

				rxPackets[q] += rxCount;

				for (i = 0; i < rxCount; i++)
					update_mac_address(buf[i], dst);

				const uint16_t txCount = rte_eth_tx_burst(dst,
					q, buf, rxCount);
				if (txCount != rxCount) {
					for (i = txCount; i < rxCount; i++)
						rte_pktmbuf_free(buf[i]);
				}
			}
		}
	}
}

/*
 * Update the global variable num_ports and the array ports[] according to the
 * number of ports in the system, and return the number of valid ports.
 */
static unsigned check_ports_num(unsigned nb_ports)
{
	unsigned valid_num_ports = num_ports;
	unsigned portid;

	if (num_ports > nb_ports) {
		printf("\nSpecified port number(%u) exceeds total system port number(%u)\n",
			num_ports, nb_ports);
		num_ports = nb_ports;
	}

	for (portid = 0; portid < num_ports; portid++) {
		if (!rte_eth_dev_is_valid_port(ports[portid])) {
			printf("\nSpecified port ID(%u) is not valid\n",
				ports[portid]);
			ports[portid] = INVALID_PORT_ID;
			valid_num_ports--;
		}
	}
	return valid_num_ports;
}

/* Main function, does initialisation and calls the per-lcore functions */
int
main(int argc, char *argv[])
{
	unsigned cores;
	struct rte_mempool *mbuf_pool;
	unsigned lcore_id;
	uintptr_t i;
	int ret;
	unsigned nb_ports, valid_num_ports;
	uint16_t portid;

	signal(SIGHUP, sighup_handler);

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	argc -= ret;
	argv += ret;

	/* parse app arguments */
	ret = vmdq_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid VMDQ argument\n");

	cores = rte_lcore_count();
	if ((cores & (cores - 1)) != 0 || cores > RTE_MAX_LCORE) {
		rte_exit(EXIT_FAILURE, "This program can only run on a power-of-two"
			" number of cores (1-%d)\n\n", RTE_MAX_LCORE);
	}

	nb_ports = rte_eth_dev_count_avail();

	/*
	 * Update the global variable num_ports and the global array ports[],
	 * and get the value of valid_num_ports according to the number of
	 * ports in the system.
	 */
	valid_num_ports = check_ports_num(nb_ports);

	if (valid_num_ports < 2 || valid_num_ports % 2) {
		printf("Current valid ports number is %u\n", valid_num_ports);
		rte_exit(EXIT_FAILURE, "Error: the number of valid ports must be even and at least 2\n");
	}

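	/*
	 * Size the mbuf pool so that every queue on every port can keep a full
	 * ring of descriptors populated (NUM_MBUFS_PER_PORT mbufs per port).
	 */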
	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
		NUM_MBUFS_PER_PORT * nb_ports, MBUF_CACHE_SIZE,
		0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(portid) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			continue;
		}
		if (port_init(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n");
	}

	/* call lcore_main() on every worker lcore */
	i = 0;
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		rte_eal_remote_launch(lcore_main, (void *)i++, lcore_id);
	}
	/* call on main too */
	(void) lcore_main((void *)i);

	/* clean up the EAL */
	rte_eal_cleanup();

	return 0;
}