/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdint.h>
#include <sys/queue.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdarg.h>
#include <inttypes.h>
#include <getopt.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>

/* basic constants used in application */
#define MAX_QUEUES 1024
/*
 * 1024 queues are required to meet the needs of a large number of vmdq_pools.
 * The mbuf pool is sized as (RX/TX queue count * RX/TX ring descriptor count)
 * per port.
 */
#define NUM_MBUFS_PER_PORT (MAX_QUEUES * RTE_MAX(RTE_TEST_RX_DESC_DEFAULT, \
						RTE_TEST_TX_DESC_DEFAULT))
#define MBUF_CACHE_SIZE 64

#define MAX_PKT_BURST 32

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 1024

#define INVALID_PORT_ID 0xFF

/* mask of enabled ports */
static uint32_t enabled_port_mask;
static uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports;

/* number of pools (32 by default, if the user does not specify any) */
static enum rte_eth_nb_pools num_pools = ETH_32_POOLS;
static enum rte_eth_nb_tcs num_tcs = ETH_4_TCS;
static uint16_t num_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
static uint8_t rss_enable;

/* Empty vmdq+dcb configuration structure. Filled in programmatically. 8< */
static const struct rte_eth_conf vmdq_dcb_conf_default = {
	.rxmode = {
		.mq_mode = ETH_MQ_RX_VMDQ_DCB,
		.split_hdr_size = 0,
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_VMDQ_DCB,
	},
	/*
	 * should be overridden separately in code with
	 * appropriate values
	 */
	.rx_adv_conf = {
		.vmdq_dcb_conf = {
			.nb_queue_pools = ETH_32_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
			.dcb_tc = {0},
		},
		.dcb_rx_conf = {
			.nb_tcs = ETH_4_TCS,
			/** Traffic class each UP mapped to. */
			.dcb_tc = {0},
		},
		.vmdq_rx_conf = {
			.nb_queue_pools = ETH_32_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
		},
	},
	.tx_adv_conf = {
		.vmdq_dcb_tx_conf = {
			.nb_queue_pools = ETH_32_POOLS,
			.dcb_tc = {0},
		},
	},
};
/* >8 End of empty vmdq+dcb configuration structure. */

/* array used for printing out statistics */
volatile unsigned long rxPackets[MAX_QUEUES] = {0};

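/*
 * Note on traffic steering (derived from get_eth_conf() below): the VLAN tag
 * of an incoming packet selects the VMDQ pool (vlan_tags[i] is mapped to
 * pool i), while the VLAN user priority selects the traffic class, i.e. the
 * queue within that pool. With 32 pools every tag below maps to its own pool;
 * with 16 pools only the first 16 entries are used.
 */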
/* Dividing up the possible user priority values. 8< */
const uint16_t vlan_tags[] = {
	0,  1,  2,  3,  4,  5,  6,  7,
	8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};

const uint16_t num_vlans = RTE_DIM(vlan_tags);
/* pool mac addr template, pool mac addr is like: 52 54 00 12 port# pool# */
static struct rte_ether_addr pool_addr_template = {
	.addr_bytes = {0x52, 0x54, 0x00, 0x12, 0x00, 0x00}
};

/* ethernet addresses of ports */
static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];

/* Builds up the correct configuration for vmdq+dcb based on the vlan tags array
 * given above, and the number of traffic classes available for use. */
static inline int
get_eth_conf(struct rte_eth_conf *eth_conf)
{
	struct rte_eth_vmdq_dcb_conf conf;
	struct rte_eth_vmdq_rx_conf vmdq_conf;
	struct rte_eth_dcb_rx_conf dcb_conf;
	struct rte_eth_vmdq_dcb_tx_conf tx_conf;
	uint8_t i;

	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
	vmdq_conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
	tx_conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
	conf.nb_pool_maps = num_pools;
	vmdq_conf.nb_pool_maps = num_pools;
	conf.enable_default_pool = 0;
	vmdq_conf.enable_default_pool = 0;
	conf.default_pool = 0; /* set explicit value, even if not used */
	vmdq_conf.default_pool = 0;

	for (i = 0; i < conf.nb_pool_maps; i++) {
		conf.pool_map[i].vlan_id = vlan_tags[i];
		vmdq_conf.pool_map[i].vlan_id = vlan_tags[i];
		conf.pool_map[i].pools = 1UL << i;
		vmdq_conf.pool_map[i].pools = 1UL << i;
	}
	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
		conf.dcb_tc[i] = i % num_tcs;
		dcb_conf.dcb_tc[i] = i % num_tcs;
		tx_conf.dcb_tc[i] = i % num_tcs;
	}
	dcb_conf.nb_tcs = (enum rte_eth_nb_tcs)num_tcs;
	(void)(rte_memcpy(eth_conf, &vmdq_dcb_conf_default, sizeof(*eth_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &conf,
			sizeof(conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &dcb_conf,
			sizeof(dcb_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &vmdq_conf,
			sizeof(vmdq_conf)));
	(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &tx_conf,
			sizeof(tx_conf)));
	if (rss_enable) {
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
							ETH_RSS_UDP |
							ETH_RSS_TCP |
							ETH_RSS_SCTP;
	}
	return 0;
}
/* >8 End of dividing up the possible user priority values. */

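/*
 * Illustrative sizing (actual numbers depend on the NIC): on a device that
 * exposes 128 VMDQ queues over a maximum of 32 pools, queues_per_pool is
 * 128 / 32 = 4, so running with --nb-pools 32 --nb-tcs 4 uses all 128 VMDQ
 * queues, one per traffic class in each pool. If the PF reserves queues ahead
 * of the VMDQ range, vmdq_queue_base is non-zero and num_queues counts those
 * PF queues as well.
 */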
/*
 * Initialises a given port using global settings and with the rx buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline int
port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf = {0};
	uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT;
	uint16_t txRingSize = RTE_TEST_TX_DESC_DEFAULT;
	int retval;
	uint16_t q;
	uint16_t queues_per_pool;
	uint32_t max_nb_pools;
	struct rte_eth_txconf txq_conf;
	uint64_t rss_hf_tmp;

	/*
	 * The max pool number from dev_info will be used to validate the pool
	 * number specified on the command line.
	 */
	retval = rte_eth_dev_info_get(port, &dev_info);
	if (retval != 0) {
		printf("Error during getting device (port %u) info: %s\n",
			port, strerror(-retval));

		return retval;
	}

	max_nb_pools = (uint32_t)dev_info.max_vmdq_pools;
	/*
	 * We allow processing only part of the VMDQ pools, as specified by
	 * num_pools on the command line.
	 */
	if (num_pools > max_nb_pools) {
		printf("num_pools %d > max_nb_pools %d\n",
			num_pools, max_nb_pools);
		return -1;
	}

	/*
	 * NIC queues are divided into pf queues and vmdq queues.
	 * It is assumed here that all ports have the same configuration.
	 */
	vmdq_queue_base = dev_info.vmdq_queue_base;
	vmdq_pool_base = dev_info.vmdq_pool_base;
	printf("vmdq queue base: %d pool base %d\n",
		vmdq_queue_base, vmdq_pool_base);
	if (vmdq_pool_base == 0) {
		num_vmdq_queues = dev_info.max_rx_queues;
		num_queues = dev_info.max_rx_queues;
		if (num_tcs != num_vmdq_queues / num_pools) {
			printf("nb_tcs %d is invalid considering nb_pools %d;"
				" nb_tcs * nb_pools should equal %d\n",
				num_tcs, num_pools, num_vmdq_queues);
			return -1;
		}
	} else {
		queues_per_pool = dev_info.vmdq_queue_num /
			dev_info.max_vmdq_pools;
		if (num_tcs > queues_per_pool) {
			printf("num_tcs %d > num of queues per pool %d\n",
				num_tcs, queues_per_pool);
			return -1;
		}
		num_vmdq_queues = num_pools * queues_per_pool;
		num_queues = vmdq_queue_base + num_vmdq_queues;
		printf("Configured vmdq pool num: %u,"
			" each vmdq pool has %u queues\n",
			num_pools, queues_per_pool);
	}

	if (!rte_eth_dev_is_valid_port(port))
		return -1;

	retval = get_eth_conf(&port_conf);
	if (retval < 0)
		return retval;

	retval = rte_eth_dev_info_get(port, &dev_info);
	if (retval != 0) {
		printf("Error during getting device (port %u) info: %s\n",
			port, strerror(-retval));

		return retval;
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
	port_conf.rx_adv_conf.rss_conf.rss_hf &=
		dev_info.flow_type_rss_offloads;
	if (port_conf.rx_adv_conf.rss_conf.rss_hf != rss_hf_tmp) {
		printf("Port %u modified RSS hash function based on hardware support,"
			" requested:%#"PRIx64" configured:%#"PRIx64"\n",
			port,
			rss_hf_tmp,
			port_conf.rx_adv_conf.rss_conf.rss_hf);
	}

	/*
	 * Note that in this example all queues, including pf queues, are set
	 * up. This is because VMDQ queues don't always start from zero, and
	 * the PMD layer doesn't support selectively initialising only part of
	 * the rx/tx queues.
	 */
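	/*
	 * For instance (illustrative only): if vmdq_queue_base is 64 because
	 * the PF owns queues 0-63, and 128 VMDQ queues are in use, all 192
	 * queues are configured below even though only queues 64-191 carry
	 * VMDQ traffic.
	 */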
	retval = rte_eth_dev_configure(port, num_queues, num_queues, &port_conf);
	if (retval != 0)
		return retval;

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rxRingSize,
				&txRingSize);
	if (retval != 0)
		return retval;
	if (RTE_MAX(rxRingSize, txRingSize) >
	    RTE_MAX(RTE_TEST_RX_DESC_DEFAULT, RTE_TEST_TX_DESC_DEFAULT)) {
		printf("Mbuf pool has an insufficient size for port %u.\n",
			port);
		return -1;
	}

	for (q = 0; q < num_queues; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
					rte_eth_dev_socket_id(port),
					NULL,
					mbuf_pool);
		if (retval < 0) {
			printf("initialize rx queue %d failed\n", q);
			return retval;
		}
	}

	txq_conf = dev_info.default_txconf;
	txq_conf.offloads = port_conf.txmode.offloads;
	for (q = 0; q < num_queues; q++) {
		retval = rte_eth_tx_queue_setup(port, q, txRingSize,
					rte_eth_dev_socket_id(port),
					&txq_conf);
		if (retval < 0) {
			printf("initialize tx queue %d failed\n", q);
			return retval;
		}
	}

	retval = rte_eth_dev_start(port);
	if (retval < 0) {
		printf("port %d start failed\n", port);
		return retval;
	}

	retval = rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	if (retval < 0) {
		printf("port %d MAC address get failed: %s\n", port,
			rte_strerror(-retval));
		return retval;
	}
	printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
		" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
		(unsigned)port,
		vmdq_ports_eth_addr[port].addr_bytes[0],
		vmdq_ports_eth_addr[port].addr_bytes[1],
		vmdq_ports_eth_addr[port].addr_bytes[2],
		vmdq_ports_eth_addr[port].addr_bytes[3],
		vmdq_ports_eth_addr[port].addr_bytes[4],
		vmdq_ports_eth_addr[port].addr_bytes[5]);

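	/*
	 * Each pool gets its own MAC filter so the NIC can steer matching
	 * unicast traffic into it: the address is pool_addr_template with the
	 * port number in byte 4 and the pool index in byte 5, i.e. frames
	 * sent to 52:54:00:12:<port>:<pool> land in that pool's queues.
	 */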
	/* Set mac for each pool. 8< */
	for (q = 0; q < num_pools; q++) {
		struct rte_ether_addr mac;

		mac = pool_addr_template;
		mac.addr_bytes[4] = port;
		mac.addr_bytes[5] = q;
		printf("Port %u vmdq pool %u set mac %02x:%02x:%02x:%02x:%02x:%02x\n",
			port, q,
			mac.addr_bytes[0], mac.addr_bytes[1],
			mac.addr_bytes[2], mac.addr_bytes[3],
			mac.addr_bytes[4], mac.addr_bytes[5]);
		retval = rte_eth_dev_mac_addr_add(port, &mac,
				q + vmdq_pool_base);
		if (retval) {
			printf("mac addr add failed at pool %d\n", q);
			return retval;
		}
	}
	/* >8 End of set mac for each pool. */

	return 0;
}

/* Check the num_pools parameter and set it if OK */
static int
vmdq_parse_num_pools(const char *q_arg)
{
	char *end = NULL;
	int n;

	/* parse number string */
	n = strtol(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;
	if (n != 16 && n != 32)
		return -1;
	if (n == 16)
		num_pools = ETH_16_POOLS;
	else
		num_pools = ETH_32_POOLS;

	return 0;
}

/* Check the num_tcs parameter and set it if OK */
static int
vmdq_parse_num_tcs(const char *q_arg)
{
	char *end = NULL;
	int n;

	/* parse number string */
	n = strtol(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (n != 4 && n != 8)
		return -1;
	if (n == 4)
		num_tcs = ETH_4_TCS;
	else
		num_tcs = ETH_8_TCS;

	return 0;
}

static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	return pm;
}

/* Display usage */
static void
vmdq_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK\n"
		"  --nb-pools NP: number of pools (32 default, 16)\n"
		"  --nb-tcs NP: number of TCs (4 default, 8)\n"
		"  --enable-rss: enable RSS (disabled by default)\n",
		prgname);
}

/* Parse the arguments given in the command line of the application */
static int
vmdq_parse_args(int argc, char **argv)
{
	int opt;
	int option_index;
	unsigned i;
	const char *prgname = argv[0];
	static struct option long_option[] = {
		{"nb-pools", required_argument, NULL, 0},
		{"nb-tcs", required_argument, NULL, 0},
		{"enable-rss", 0, NULL, 0},
		{NULL, 0, 0, 0}
	};

	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:", long_option,
			&option_index)) != EOF) {
		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				vmdq_usage(prgname);
				return -1;
			}
			break;
		case 0:
			if (!strcmp(long_option[option_index].name, "nb-pools")) {
				if (vmdq_parse_num_pools(optarg) == -1) {
					printf("invalid number of pools\n");
					return -1;
				}
			}

			if (!strcmp(long_option[option_index].name, "nb-tcs")) {
				if (vmdq_parse_num_tcs(optarg) == -1) {
					printf("invalid number of tcs\n");
					return -1;
				}
			}

			if (!strcmp(long_option[option_index].name, "enable-rss"))
				rss_enable = 1;
			break;

		default:
			vmdq_usage(prgname);
			return -1;
		}
	}

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (enabled_port_mask & (1 << i))
			ports[num_ports++] = (uint8_t)i;
	}

	if (num_ports < 2 || num_ports % 2) {
		printf("Current enabled port number is %u,"
			" but it should be even and at least 2\n", num_ports);
		return -1;
	}

	return 0;
}

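/*
 * The forwarding path rewrites both MAC addresses of each packet before it is
 * transmitted: the destination becomes 02:00:00:00:00:<dst_port> and the
 * source is set to the output port's own MAC address.
 */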
static void
update_mac_address(struct rte_mbuf *m, unsigned dst_port)
{
	struct rte_ether_hdr *eth;
	void *tmp;

	eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	/* 02:00:00:00:00:xx */
	tmp = &eth->d_addr.addr_bytes[0];
	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);

	/* src addr */
	rte_ether_addr_copy(&vmdq_ports_eth_addr[dst_port], &eth->s_addr);
}

/* When we receive a HUP signal, print out our stats */
static void
sighup_handler(int signum)
{
	unsigned q = vmdq_queue_base;

	for (; q < num_queues; q++) {
		if (q % (num_vmdq_queues / num_pools) == 0)
			printf("\nPool %u: ", (q - vmdq_queue_base) /
				(num_vmdq_queues / num_pools));
		printf("%lu ", rxPackets[q]);
	}
	printf("\nFinished handling signal %d\n", signum);
}

/*
 * Main thread that does the work, reading from the input port
 * and writing to the paired output port
 */
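/*
 * Queues are divided evenly among the forwarding cores: each core polls a
 * contiguous slice of the VMDQ queue range. For example (illustrative
 * numbers), with 128 VMDQ queues and 4 cores, quot is 32 and remainder is 0,
 * so core n reads queues n*32 .. n*32+31, offset by vmdq_queue_base. When the
 * queue count does not divide evenly, the first 'remainder' cores each take
 * one extra queue.
 */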
static int
lcore_main(void *arg)
{
	const uintptr_t core_num = (uintptr_t)arg;
	const unsigned num_cores = rte_lcore_count();
	uint16_t startQueue, endQueue;
	uint16_t q, i, p;
	const uint16_t quot = (uint16_t)(num_vmdq_queues / num_cores);
	const uint16_t remainder = (uint16_t)(num_vmdq_queues % num_cores);

	if (remainder) {
		if (core_num < remainder) {
			startQueue = (uint16_t)(core_num * (quot + 1));
			endQueue = (uint16_t)(startQueue + quot + 1);
		} else {
			startQueue = (uint16_t)(core_num * quot + remainder);
			endQueue = (uint16_t)(startQueue + quot);
		}
	} else {
		startQueue = (uint16_t)(core_num * quot);
		endQueue = (uint16_t)(startQueue + quot);
	}

	/* VMDQ queue indexes don't always start from zero. */
	startQueue += vmdq_queue_base;
	endQueue += vmdq_queue_base;
	printf("Core %u(lcore %u) reading queues %i-%i\n", (unsigned)core_num,
		rte_lcore_id(), startQueue, endQueue - 1);

	if (startQueue == endQueue) {
		printf("lcore %u has nothing to do\n", (unsigned)core_num);
		return 0;
	}

	for (;;) {
		struct rte_mbuf *buf[MAX_PKT_BURST];
		const uint16_t buf_size = RTE_DIM(buf);
		for (p = 0; p < num_ports; p++) {
			const uint8_t src = ports[p];
			const uint8_t dst = ports[p ^ 1]; /* 0 <-> 1, 2 <-> 3 etc */

			if ((src == INVALID_PORT_ID) || (dst == INVALID_PORT_ID))
				continue;

			for (q = startQueue; q < endQueue; q++) {
				const uint16_t rxCount = rte_eth_rx_burst(src,
					q, buf, buf_size);

				if (unlikely(rxCount == 0))
					continue;

				rxPackets[q] += rxCount;

				for (i = 0; i < rxCount; i++)
					update_mac_address(buf[i], dst);

				const uint16_t txCount = rte_eth_tx_burst(dst,
					q, buf, rxCount);
				if (txCount != rxCount) {
					for (i = txCount; i < rxCount; i++)
						rte_pktmbuf_free(buf[i]);
				}
			}
		}
	}
}

/*
 * Update the global variable num_ports and the ports[] array according to the
 * number of ports in the system, and return the number of valid ports.
 */
static unsigned check_ports_num(unsigned nb_ports)
{
	unsigned valid_num_ports = num_ports;
	unsigned portid;

	if (num_ports > nb_ports) {
		printf("\nSpecified port number(%u) exceeds total system port number(%u)\n",
			num_ports, nb_ports);
		num_ports = nb_ports;
	}

	for (portid = 0; portid < num_ports; portid++) {
		if (!rte_eth_dev_is_valid_port(ports[portid])) {
			printf("\nSpecified port ID(%u) is not valid\n",
				ports[portid]);
			ports[portid] = INVALID_PORT_ID;
			valid_num_ports--;
		}
	}
	return valid_num_ports;
}

/* Main function, does initialisation and calls the per-lcore functions */
int
main(int argc, char *argv[])
{
	unsigned cores;
	struct rte_mempool *mbuf_pool;
	unsigned lcore_id;
	uintptr_t i;
	int ret;
	unsigned nb_ports, valid_num_ports;
	uint16_t portid;

	signal(SIGHUP, sighup_handler);

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	argc -= ret;
	argv += ret;

	/* parse app arguments */
	ret = vmdq_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid VMDQ argument\n");

	cores = rte_lcore_count();
	if ((cores & (cores - 1)) != 0 || cores > RTE_MAX_LCORE) {
		rte_exit(EXIT_FAILURE, "This program can only run on a"
			" power-of-2 number of cores (1-%d)\n\n", RTE_MAX_LCORE);
	}

	nb_ports = rte_eth_dev_count_avail();

	/*
	 * Update the global variable num_ports and the global array ports[],
	 * and get the value of valid_num_ports according to the number of
	 * ports in the system.
	 */
	valid_num_ports = check_ports_num(nb_ports);

	if (valid_num_ports < 2 || valid_num_ports % 2) {
		printf("Current valid ports number is %u\n", valid_num_ports);
		rte_exit(EXIT_FAILURE, "Number of valid ports must be even and at least 2\n");
	}

	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
		NUM_MBUFS_PER_PORT * nb_ports, MBUF_CACHE_SIZE,
		0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(portid) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			continue;
		}
		if (port_init(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n");
	}

	/* call lcore_main() on every worker lcore */
	i = 0;
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		rte_eal_remote_launch(lcore_main, (void *)i++, lcore_id);
	}
	/* call on main lcore too */
	(void)lcore_main((void *)i);

	/* clean up the EAL */
	rte_eal_cleanup();

	return 0;
}