/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdint.h>
#include <sys/queue.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdarg.h>
#include <inttypes.h>
#include <getopt.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>

/* basic constants used in application */
#define MAX_QUEUES 1024
/*
 * 1024 queues are required to meet the needs of a large number of vmdq_pools.
 * (RX/TX_queue_nb * RX/TX_ring_descriptors_nb) per port.
 */
#define NUM_MBUFS_PER_PORT (MAX_QUEUES * RTE_MAX(RTE_TEST_RX_DESC_DEFAULT, \
						RTE_TEST_TX_DESC_DEFAULT))
#define MBUF_CACHE_SIZE 64

#define MAX_PKT_BURST 32

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512

#define INVALID_PORT_ID 0xFF

/* mask of enabled ports */
static uint32_t enabled_port_mask;
static uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports;

/* number of pools (32 by default, if the user does not specify any) */
static enum rte_eth_nb_pools num_pools = ETH_32_POOLS;
static enum rte_eth_nb_tcs num_tcs = ETH_4_TCS;
static uint16_t num_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
static uint8_t rss_enable;

/* empty vmdq+dcb configuration structure. Filled in programmatically */
static const struct rte_eth_conf vmdq_dcb_conf_default = {
	.rxmode = {
		.mq_mode        = ETH_MQ_RX_VMDQ_DCB,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_VMDQ_DCB,
	},
	/*
	 * should be overridden separately in code with
	 * appropriate values
	 */
	.rx_adv_conf = {
		.vmdq_dcb_conf = {
			.nb_queue_pools = ETH_32_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
			.dcb_tc = {0},
		},
		.dcb_rx_conf = {
			.nb_tcs = ETH_4_TCS,
			/** Traffic class each UP mapped to. */
			.dcb_tc = {0},
		},
		.vmdq_rx_conf = {
			.nb_queue_pools = ETH_32_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
		},
	},
	.tx_adv_conf = {
		.vmdq_dcb_tx_conf = {
			.nb_queue_pools = ETH_32_POOLS,
			.dcb_tc = {0},
		},
	},
};

/* array used for printing out statistics */
volatile unsigned long rxPackets[MAX_QUEUES] = {0};

const uint16_t vlan_tags[] = {
	0, 1, 2, 3, 4, 5, 6, 7,
	8, 9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};

const uint16_t num_vlans = RTE_DIM(vlan_tags);
/* pool mac addr template, pool mac addr is like: 52 54 00 12 port# pool# */
static struct ether_addr pool_addr_template = {
	.addr_bytes = {0x52, 0x54, 0x00, 0x12, 0x00, 0x00}
};

/* ethernet addresses of ports */
static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];

/* Builds up the correct configuration for vmdq+dcb based on the vlan tags array
 * given above, and the number of traffic classes available for use. */
static inline int
get_eth_conf(struct rte_eth_conf *eth_conf)
{
	struct rte_eth_vmdq_dcb_conf conf;
	struct rte_eth_vmdq_rx_conf vmdq_conf;
	struct rte_eth_dcb_rx_conf dcb_conf;
	struct rte_eth_vmdq_dcb_tx_conf tx_conf;
	uint8_t i;

	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
	vmdq_conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
	tx_conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
	conf.nb_pool_maps = num_pools;
	vmdq_conf.nb_pool_maps = num_pools;
	conf.enable_default_pool = 0;
	vmdq_conf.enable_default_pool = 0;
	conf.default_pool = 0; /* set explicit value, even if not used */
	vmdq_conf.default_pool = 0;

	for (i = 0; i < conf.nb_pool_maps; i++) {
		conf.pool_map[i].vlan_id = vlan_tags[i];
		vmdq_conf.pool_map[i].vlan_id = vlan_tags[i];
		conf.pool_map[i].pools = 1UL << i;
		vmdq_conf.pool_map[i].pools = 1UL << i;
	}
	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
		conf.dcb_tc[i] = i % num_tcs;
		dcb_conf.dcb_tc[i] = i % num_tcs;
		tx_conf.dcb_tc[i] = i % num_tcs;
	}
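	/*
	 * For example, with the default 32 pools and 4 TCs, the loops above
	 * map VLAN tag vlan_tags[i] to pool i (pools bitmask 1UL << i) and
	 * user priority i to traffic class i % 4, i.e. TCs 0,1,2,3,0,1,2,3.
	 */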
	dcb_conf.nb_tcs = (enum rte_eth_nb_tcs)num_tcs;
	(void)(rte_memcpy(eth_conf, &vmdq_dcb_conf_default, sizeof(*eth_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &conf,
			  sizeof(conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &dcb_conf,
			  sizeof(dcb_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &vmdq_conf,
			  sizeof(vmdq_conf)));
	(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &tx_conf,
			  sizeof(tx_conf)));
	if (rss_enable) {
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
							ETH_RSS_UDP |
							ETH_RSS_TCP |
							ETH_RSS_SCTP;
	}
	return 0;
}

/*
 * Initialises a given port using global settings and with the rx buffers
 * coming from the mbuf_pool passed as parameter
 */
static inline int
port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf = {0};
	uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT;
	uint16_t txRingSize = RTE_TEST_TX_DESC_DEFAULT;
	int retval;
	uint16_t q;
	uint16_t queues_per_pool;
	uint32_t max_nb_pools;

	/*
	 * The max pool number from dev_info will be used to validate the pool
	 * number specified in the command line.
	 */
	rte_eth_dev_info_get(port, &dev_info);
	max_nb_pools = (uint32_t)dev_info.max_vmdq_pools;
	/*
	 * We allow processing only part of the VMDQ pools, as specified by
	 * num_pools on the command line.
	 */
	if (num_pools > max_nb_pools) {
		printf("num_pools %d > max_nb_pools %d\n",
			num_pools, max_nb_pools);
		return -1;
	}

	/*
	 * NIC queues are divided into pf queues and vmdq queues.
	 * There is an assumption here that all ports have the same configuration!
	 */
	vmdq_queue_base = dev_info.vmdq_queue_base;
	vmdq_pool_base = dev_info.vmdq_pool_base;
	printf("vmdq queue base: %d pool base %d\n",
		vmdq_queue_base, vmdq_pool_base);
	if (vmdq_pool_base == 0) {
		num_vmdq_queues = dev_info.max_rx_queues;
		num_queues = dev_info.max_rx_queues;
		if (num_tcs != num_vmdq_queues / num_pools) {
			printf("nb_tcs %d is invalid with nb_pools %d:"
				" nb_tcs * nb_pools should equal %d\n",
				num_tcs, num_pools, num_vmdq_queues);
			return -1;
		}
	} else {
		queues_per_pool = dev_info.vmdq_queue_num /
				  dev_info.max_vmdq_pools;
		if (num_tcs > queues_per_pool) {
			printf("num_tcs %d > num of queues per pool %d\n",
				num_tcs, queues_per_pool);
			return -1;
		}
		num_vmdq_queues = num_pools * queues_per_pool;
		num_queues = vmdq_queue_base + num_vmdq_queues;
		printf("Configured vmdq pool num: %u,"
			" each vmdq pool has %u queues\n",
			num_pools, queues_per_pool);
	}

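	/*
	 * Worked example (the figures are illustrative and depend on the NIC):
	 * if dev_info reports 128 vmdq queues and 32 vmdq pools, then
	 * queues_per_pool is 4, so --nb-tcs 4 is accepted while --nb-tcs 8
	 * is rejected by the check above.
	 */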
	if (port >= rte_eth_dev_count())
		return -1;

	retval = get_eth_conf(&port_conf);
	if (retval < 0)
		return retval;

	/*
	 * In this example all queues, including pf queues, are set up.
	 * This is because VMDQ queues don't always start from zero, and the
	 * PMD layer doesn't support selectively initialising only some of
	 * the rx/tx queues.
	 */
	retval = rte_eth_dev_configure(port, num_queues, num_queues, &port_conf);
	if (retval != 0)
		return retval;

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rxRingSize,
						  &txRingSize);
	if (retval != 0)
		return retval;
	if (RTE_MAX(rxRingSize, txRingSize) >
	    RTE_MAX(RTE_TEST_RX_DESC_DEFAULT, RTE_TEST_TX_DESC_DEFAULT)) {
		printf("Mbuf pool has an insufficient size for port %u.\n",
			port);
		return -1;
	}

	for (q = 0; q < num_queues; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
						rte_eth_dev_socket_id(port),
						NULL,
						mbuf_pool);
		if (retval < 0) {
			printf("initialize rx queue %d failed\n", q);
			return retval;
		}
	}

	for (q = 0; q < num_queues; q++) {
		retval = rte_eth_tx_queue_setup(port, q, txRingSize,
						rte_eth_dev_socket_id(port),
						NULL);
		if (retval < 0) {
			printf("initialize tx queue %d failed\n", q);
			return retval;
		}
	}

	retval = rte_eth_dev_start(port);
	if (retval < 0) {
		printf("port %d start failed\n", port);
		return retval;
	}

	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
	       " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
	       (unsigned)port,
	       vmdq_ports_eth_addr[port].addr_bytes[0],
	       vmdq_ports_eth_addr[port].addr_bytes[1],
	       vmdq_ports_eth_addr[port].addr_bytes[2],
	       vmdq_ports_eth_addr[port].addr_bytes[3],
	       vmdq_ports_eth_addr[port].addr_bytes[4],
	       vmdq_ports_eth_addr[port].addr_bytes[5]);

	/* Set mac for each pool. */
	for (q = 0; q < num_pools; q++) {
		struct ether_addr mac;

		mac = pool_addr_template;
		mac.addr_bytes[4] = port;
		mac.addr_bytes[5] = q;
		printf("Port %u vmdq pool %u set mac %02x:%02x:%02x:%02x:%02x:%02x\n",
			port, q,
			mac.addr_bytes[0], mac.addr_bytes[1],
			mac.addr_bytes[2], mac.addr_bytes[3],
			mac.addr_bytes[4], mac.addr_bytes[5]);
		retval = rte_eth_dev_mac_addr_add(port, &mac,
						  q + vmdq_pool_base);
		if (retval) {
			printf("mac addr add failed at pool %d\n", q);
			return retval;
		}
	}

	return 0;
}

/* Check the num_pools parameter and set it if OK */
static int
vmdq_parse_num_pools(const char *q_arg)
{
	char *end = NULL;
	int n;

	/* parse number string */
	n = strtol(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;
	if (n != 16 && n != 32)
		return -1;
	if (n == 16)
		num_pools = ETH_16_POOLS;
	else
		num_pools = ETH_32_POOLS;

	return 0;
}

/* Check the num_tcs parameter and set it if OK */
static int
vmdq_parse_num_tcs(const char *q_arg)
{
	char *end = NULL;
	int n;

	/* parse number string */
	n = strtol(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (n != 4 && n != 8)
		return -1;
	if (n == 4)
		num_tcs = ETH_4_TCS;
	else
		num_tcs = ETH_8_TCS;

	return 0;
}

static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (pm == 0)
		return -1;

	return pm;
}
/* Display usage */
static void
vmdq_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK\n"
	       " --nb-pools NP: number of pools (default 32, or 16)\n"
	       " --nb-tcs NT: number of TCs (default 4, or 8)\n"
	       " --enable-rss: enable RSS (disabled by default)\n",
	       prgname);
}

/* Parse the arguments given in the command line of the application */
static int
vmdq_parse_args(int argc, char **argv)
{
	int opt;
	int option_index;
	unsigned i;
	const char *prgname = argv[0];
	static struct option long_option[] = {
		{"nb-pools", required_argument, NULL, 0},
		{"nb-tcs", required_argument, NULL, 0},
		{"enable-rss", 0, NULL, 0},
		{NULL, 0, 0, 0}
	};

	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:", long_option,
				  &option_index)) != EOF) {
		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				vmdq_usage(prgname);
				return -1;
			}
			break;
		case 0:
			if (!strcmp(long_option[option_index].name, "nb-pools")) {
				if (vmdq_parse_num_pools(optarg) == -1) {
					printf("invalid number of pools\n");
					return -1;
				}
			}

			if (!strcmp(long_option[option_index].name, "nb-tcs")) {
				if (vmdq_parse_num_tcs(optarg) == -1) {
					printf("invalid number of tcs\n");
					return -1;
				}
			}

			if (!strcmp(long_option[option_index].name, "enable-rss"))
				rss_enable = 1;
			break;

		default:
			vmdq_usage(prgname);
			return -1;
		}
	}

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (enabled_port_mask & (1 << i))
			ports[num_ports++] = (uint8_t)i;
	}

	if (num_ports < 2 || num_ports % 2) {
		printf("Current enabled port number is %u,"
			" but it should be even and at least 2\n", num_ports);
		return -1;
	}

	return 0;
}

static void
update_mac_address(struct rte_mbuf *m, unsigned dst_port)
{
	struct ether_hdr *eth;
	void *tmp;

	eth = rte_pktmbuf_mtod(m, struct ether_hdr *);

	/* 02:00:00:00:00:xx */
	tmp = &eth->d_addr.addr_bytes[0];
	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);

	/* src addr */
	ether_addr_copy(&vmdq_ports_eth_addr[dst_port], &eth->s_addr);
}

/* When we receive a HUP signal, print out our stats */
static void
sighup_handler(int signum)
{
	unsigned q = vmdq_queue_base;

	for (; q < num_queues; q++) {
		if (q % (num_vmdq_queues / num_pools) == 0)
			printf("\nPool %u: ", (q - vmdq_queue_base) /
					      (num_vmdq_queues / num_pools));
		printf("%lu ", rxPackets[q]);
	}
	printf("\nFinished handling signal %d\n", signum);
}

/*
 * Main per-lcore worker: reads packets from its share of the VMDQ RX queues
 * on each port and forwards them to the paired port (0 <-> 1, 2 <-> 3, ...).
 */
static int
lcore_main(void *arg)
{
	const uintptr_t core_num = (uintptr_t)arg;
	const unsigned num_cores = rte_lcore_count();
	uint16_t startQueue, endQueue;
	uint16_t q, i, p;
	const uint16_t quot = (uint16_t)(num_vmdq_queues / num_cores);
	const uint16_t remainder = (uint16_t)(num_vmdq_queues % num_cores);

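	/*
	 * Split the VMDQ queues as evenly as possible across the cores: the
	 * first 'remainder' cores each poll quot + 1 queues, the rest poll
	 * quot queues.
	 */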
	if (remainder) {
		if (core_num < remainder) {
			startQueue = (uint16_t)(core_num * (quot + 1));
			endQueue = (uint16_t)(startQueue + quot + 1);
		} else {
			startQueue = (uint16_t)(core_num * quot + remainder);
			endQueue = (uint16_t)(startQueue + quot);
		}
	} else {
		startQueue = (uint16_t)(core_num * quot);
		endQueue = (uint16_t)(startQueue + quot);
	}

	/* vmdq queue idx doesn't always start from zero. */
	startQueue += vmdq_queue_base;
	endQueue += vmdq_queue_base;
	printf("Core %u(lcore %u) reading queues %i-%i\n", (unsigned)core_num,
		rte_lcore_id(), startQueue, endQueue - 1);

	if (startQueue == endQueue) {
		printf("lcore %u has nothing to do\n", (unsigned)core_num);
		return 0;
	}

	for (;;) {
		struct rte_mbuf *buf[MAX_PKT_BURST];
		const uint16_t buf_size = sizeof(buf) / sizeof(buf[0]);
		for (p = 0; p < num_ports; p++) {
			const uint8_t src = ports[p];
			const uint8_t dst = ports[p ^ 1]; /* 0 <-> 1, 2 <-> 3 etc */

			if ((src == INVALID_PORT_ID) || (dst == INVALID_PORT_ID))
				continue;

			for (q = startQueue; q < endQueue; q++) {
				const uint16_t rxCount = rte_eth_rx_burst(src,
					q, buf, buf_size);

				if (unlikely(rxCount == 0))
					continue;

				rxPackets[q] += rxCount;

				for (i = 0; i < rxCount; i++)
					update_mac_address(buf[i], dst);

				const uint16_t txCount = rte_eth_tx_burst(dst,
					q, buf, rxCount);
				if (txCount != rxCount) {
					for (i = txCount; i < rxCount; i++)
						rte_pktmbuf_free(buf[i]);
				}
			}
		}
	}
}

/*
 * Update the global num_ports variable and the ports array according to the
 * number of ports in the system, and return the number of valid ports.
 */
static unsigned check_ports_num(unsigned nb_ports)
{
	unsigned valid_num_ports = num_ports;
	unsigned portid;

	if (num_ports > nb_ports) {
		printf("\nSpecified port number(%u) exceeds total system port number(%u)\n",
			num_ports, nb_ports);
		num_ports = nb_ports;
	}

	for (portid = 0; portid < num_ports; portid++) {
		if (ports[portid] >= nb_ports) {
			printf("\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
				ports[portid], (nb_ports - 1));
			ports[portid] = INVALID_PORT_ID;
			valid_num_ports--;
		}
	}
	return valid_num_ports;
}

/* Main function, does initialisation and calls the per-lcore functions */
int
main(int argc, char *argv[])
{
	unsigned cores;
	struct rte_mempool *mbuf_pool;
	unsigned lcore_id;
	uintptr_t i;
	int ret;
	unsigned nb_ports, valid_num_ports;
	uint16_t portid;

	signal(SIGHUP, sighup_handler);

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	argc -= ret;
	argv += ret;

	/* parse app arguments */
	ret = vmdq_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid VMDQ argument\n");

	cores = rte_lcore_count();
	if ((cores & (cores - 1)) != 0 || cores > RTE_MAX_LCORE) {
		rte_exit(EXIT_FAILURE, "This program can only run on a"
			" power-of-2 number of cores (1-%d)\n\n", RTE_MAX_LCORE);
	}

	nb_ports = rte_eth_dev_count();

	/*
	 * Update the global num_ports variable and the global ports array,
	 * and get the number of valid ports according to the system port count.
	 */
	valid_num_ports = check_ports_num(nb_ports);

	if (valid_num_ports < 2 || valid_num_ports % 2) {
		printf("Current valid ports number is %u\n", valid_num_ports);
		rte_exit(EXIT_FAILURE, "Error: the number of valid ports must be even and at least 2\n");
	}

	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
		NUM_MBUFS_PER_PORT * nb_ports, MBUF_CACHE_SIZE,
		0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	/* initialize all ports */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			continue;
		}
		if (port_init(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n");
	}

	/* call lcore_main() on every slave lcore */
	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		rte_eal_remote_launch(lcore_main, (void *)i++, lcore_id);
	}
	/* call on master too */
	(void)lcore_main((void *)i);

	return 0;
}
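/*
 * Example invocation (a sketch; the binary name, lcore list and portmask are
 * assumptions and depend on the local build and NIC layout):
 *
 *   ./vmdq_dcb_app -l 0-3 -n 4 -- -p 0x3 --nb-pools 32 --nb-tcs 4 --enable-rss
 *
 * This runs with 4 lcores (a power of 2) and 2 ports (an even number, at
 * least 2), which satisfies the checks above. Send SIGHUP to the process to
 * print the per-pool RX packet counters.
 */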