/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdint.h>
#include <sys/queue.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdarg.h>
#include <inttypes.h>
#include <getopt.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>

#define MAX_QUEUES 1024
/*
 * 1024 queues require to meet the needs of a large number of vmdq_pools.
 * (RX/TX_queue_nb * RX/TX_ring_descriptors_nb) per port.
 *
 * NOTE: the descriptor-count macros used below are defined further down;
 * this is fine because macros expand at the point of use.
 */
#define NUM_MBUFS_PER_PORT (MAX_QUEUES * RTE_MAX(RTE_TEST_RX_DESC_DEFAULT, \
						RTE_TEST_TX_DESC_DEFAULT))
#define MBUF_CACHE_SIZE 64

/* Max packets fetched/sent per rx/tx burst in the forwarding loop. */
#define MAX_PKT_BURST 32

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 1024

/* Marker stored into ports[] when a port id fails validation. */
#define INVALID_PORT_ID 0xFF

/* mask of enabled ports (set from the -p command-line option) */
static uint32_t enabled_port_mask;

/* number of pools (if user does not specify any, 8 by default) */
static uint32_t num_queues = 8;
static uint32_t num_pools = 8;
/* non-zero when --enable-rss was given; RX mq_mode becomes VMDQ+RSS */
static uint8_t rss_enable;

/* Default structure for VMDq. 8< */

/* empty vmdq configuration structure. Filled in programmatically */
static const struct rte_eth_conf vmdq_conf_default = {
	.rxmode = {
		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
		.split_hdr_size = 0,
	},

	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
	.rx_adv_conf = {
		/*
		 * should be overridden separately in code with
		 * appropriate values
		 */
		.vmdq_rx_conf = {
			.nb_queue_pools = ETH_8_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
		},
	},
};
/* >8 End of Empty vdmq configuration structure. */

/* lcore ids of the enabled cores, in enable order (filled in main()) */
static unsigned lcore_ids[RTE_MAX_LCORE];
/* enabled port ids, in ascending order (filled in vmdq_parse_args()) */
static uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports; /**< The number of ports specified in command line */

/* array used for printing out statistics; indexed by rx queue id,
 * incremented by the forwarding lcores, read by the SIGHUP handler */
volatile unsigned long rxPackets[MAX_QUEUES] = {0};

/* vlan_tags 8< */
const uint16_t vlan_tags[] = {
	0,  1,  2,  3,  4,  5,  6,  7,
	8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31,
	32, 33, 34, 35, 36, 37, 38, 39,
	40, 41, 42, 43, 44, 45, 46, 47,
	48, 49, 50, 51, 52, 53, 54, 55,
	56, 57, 58, 59, 60, 61, 62, 63,
};
/* >8 End of vlan_tags. */

/* number of entries in vlan_tags[]; upper bound for the pool count */
const uint16_t num_vlans = RTE_DIM(vlan_tags);
static uint16_t num_pf_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;

/* Pool mac address template. 8< */

/* pool mac addr template, pool mac addr is like: 52 54 00 12 port# pool# */
static struct rte_ether_addr pool_addr_template = {
	.addr_bytes = {0x52, 0x54, 0x00, 0x12, 0x00, 0x00}
};
/* >8 End of mac addr template.
*/

/* ethernet addresses of ports */
static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];

#define MAX_QUEUE_NUM_10G 128
#define MAX_QUEUE_NUM_1G 8
#define MAX_POOL_MAP_NUM_10G 64
#define MAX_POOL_MAP_NUM_1G 32
#define MAX_POOL_NUM_10G 64
#define MAX_POOL_NUM_1G 8
/*
 * Builds up the correct configuration for vmdq based on the vlan tags array
 * given above, and determine the queue number and pool map number according to
 * valid pool number
 */

/* Building correct configruration for vdmq. 8< */
static inline int
get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_pools)
{
	/*
	 * Fill *eth_conf with vmdq_conf_default, then overwrite the
	 * vmdq_rx_conf part with a per-pool VLAN map built from vlan_tags[].
	 * Note: the num_pools parameter intentionally shadows the global of
	 * the same name (the caller passes the device's max pool count).
	 */
	struct rte_eth_vmdq_rx_conf conf;
	unsigned i;

	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
	conf.nb_pool_maps = num_pools;
	conf.enable_default_pool = 0;
	conf.default_pool = 0; /* set explicit value, even if not used */

	/* map vlan_tags[i] to pool (i % num_pools), i.e. pool i here */
	for (i = 0; i < conf.nb_pool_maps; i++) {
		conf.pool_map[i].vlan_id = vlan_tags[i];
		conf.pool_map[i].pools = (1UL << (i % num_pools));
	}

	/* copy the defaults first, then apply the vmdq rx config on top */
	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
	if (rss_enable) {
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
							ETH_RSS_UDP |
							ETH_RSS_TCP |
							ETH_RSS_SCTP;
	}
	return 0;
}

/*
 * Initialises a given port using global settings and with the rx buffers
 * coming from the mbuf_pool passed as parameter.
 * Returns 0 on success, a negative errno-style value otherwise.
 */
static inline int
port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf *rxconf;
	struct rte_eth_txconf *txconf;
	struct rte_eth_conf port_conf;
	uint16_t rxRings, txRings;
	uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT;
	uint16_t txRingSize = RTE_TEST_TX_DESC_DEFAULT;
	int retval;
	uint16_t q;
	uint16_t queues_per_pool;
	uint32_t max_nb_pools;
	uint64_t rss_hf_tmp;

	/*
	 * The max pool number from dev_info will be used to validate the pool
	 * number specified in cmd line
	 */
	retval = rte_eth_dev_info_get(port, &dev_info);
	if (retval != 0) {
		printf("Error during getting device (port %u) info: %s\n",
				port, strerror(-retval));
		return retval;
	}

	max_nb_pools = (uint32_t)dev_info.max_vmdq_pools;
	/*
	 * We allow to process part of VMDQ pools specified by num_pools in
	 * command line.
	 */
	if (num_pools > max_nb_pools) {
		printf("num_pools %d >max_nb_pools %d\n",
			num_pools, max_nb_pools);
		return -1;
	}
	/*
	 * The full pool map (max_nb_pools entries) is configured even when
	 * fewer pools are used; only num_pools of them get traffic.
	 */
	retval = get_eth_conf(&port_conf, max_nb_pools);
	if (retval < 0)
		return retval;

	/*
	 * NIC queues are divided into pf queues and vmdq queues.
	 */
	/* There is assumption here all ports have the same configuration! */
	num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
	queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
	num_vmdq_queues = num_pools * queues_per_pool;
	num_queues = num_pf_queues + num_vmdq_queues;
	vmdq_queue_base = dev_info.vmdq_queue_base;
	vmdq_pool_base = dev_info.vmdq_pool_base;

	printf("pf queue num: %u, configured vmdq pool num: %u,"
		" each vmdq pool has %u queues\n",
		num_pf_queues, num_pools, queues_per_pool);
	printf("vmdq queue base: %d pool base %d\n",
		vmdq_queue_base, vmdq_pool_base);
	if (!rte_eth_dev_is_valid_port(port))
		return -1;

	/* trim the requested RSS hash functions to what the HW supports */
	rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
	port_conf.rx_adv_conf.rss_conf.rss_hf &=
		dev_info.flow_type_rss_offloads;
	if (port_conf.rx_adv_conf.rss_conf.rss_hf != rss_hf_tmp) {
		printf("Port %u modified RSS hash function based on hardware support,"
			"requested:%#"PRIx64" configured:%#"PRIx64"\n",
			port,
			rss_hf_tmp,
			port_conf.rx_adv_conf.rss_conf.rss_hf);
	}

	/*
	 * Though in this example, we only receive packets from the first queue
	 * of each pool and send packets through first rte_lcore_count() tx
	 * queues of vmdq queues, all queues including pf queues are setup.
	 * This is because VMDQ queues doesn't always start from zero, and the
	 * PMD layer doesn't support selectively initialising part of rx/tx
	 * queues.
	 */
	rxRings = (uint16_t)dev_info.max_rx_queues;
	txRings = (uint16_t)dev_info.max_tx_queues;

	/* re-read dev_info before consulting the tx offload capabilities */
	retval = rte_eth_dev_info_get(port, &dev_info);
	if (retval != 0) {
		printf("Error during getting device (port %u) info: %s\n",
				port, strerror(-retval));
		return retval;
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	retval = rte_eth_dev_configure(port, rxRings, txRings, &port_conf);
	if (retval != 0)
		return retval;

	/* the PMD may clamp/raise the descriptor counts we asked for */
	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rxRingSize,
				&txRingSize);
	if (retval != 0)
		return retval;
	/* mbuf pool was sized from the DEFAULT descriptor counts; reject
	 * adjusted ring sizes that would exceed what the pool can feed */
	if (RTE_MAX(rxRingSize, txRingSize) > RTE_MAX(RTE_TEST_RX_DESC_DEFAULT,
			RTE_TEST_TX_DESC_DEFAULT)) {
		printf("Mbuf pool has an insufficient size for port %u.\n",
			port);
		return -1;
	}

	rxconf = &dev_info.default_rxconf;
	rxconf->rx_drop_en = 1; /* drop instead of back-pressuring the NIC */
	txconf = &dev_info.default_txconf;
	txconf->offloads = port_conf.txmode.offloads;
	for (q = 0; q < rxRings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
					rte_eth_dev_socket_id(port),
					rxconf,
					mbuf_pool);
		if (retval < 0) {
			printf("initialise rx queue %d failed\n", q);
			return retval;
		}
	}

	for (q = 0; q < txRings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, txRingSize,
					rte_eth_dev_socket_id(port),
					txconf);
		if (retval < 0) {
			printf("initialise tx queue %d failed\n", q);
			return retval;
		}
	}

	retval = rte_eth_dev_start(port);
	if (retval < 0) {
		printf("port %d start failed\n", port);
		return retval;
	}

	retval = rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	if (retval < 0) {
		printf("port %d MAC address get failed: %s\n", port,
			rte_strerror(-retval));
		return retval;
	}
	printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			(unsigned)port,
			vmdq_ports_eth_addr[port].addr_bytes[0],
			vmdq_ports_eth_addr[port].addr_bytes[1],
			vmdq_ports_eth_addr[port].addr_bytes[2],
			vmdq_ports_eth_addr[port].addr_bytes[3],
			vmdq_ports_eth_addr[port].addr_bytes[4],
			vmdq_ports_eth_addr[port].addr_bytes[5]);

	/*
	 * Set mac for each pool.
	 * There is no default mac for the pools in i40.
	 * Removes this after i40e fixes this issue.
	 */
	for (q = 0; q < num_pools; q++) {
		struct rte_ether_addr mac;
		mac = pool_addr_template;
		/* bytes 4/5 encode port and pool ids (assumed < 256 each) */
		mac.addr_bytes[4] = port;
		mac.addr_bytes[5] = q;
		printf("Port %u vmdq pool %u set mac %02x:%02x:%02x:%02x:%02x:%02x\n",
			port, q,
			mac.addr_bytes[0], mac.addr_bytes[1],
			mac.addr_bytes[2], mac.addr_bytes[3],
			mac.addr_bytes[4], mac.addr_bytes[5]);
		retval = rte_eth_dev_mac_addr_add(port, &mac,
				q + vmdq_pool_base);
		if (retval) {
			printf("mac addr add failed at pool %d\n", q);
			return retval;
		}
	}

	return 0;
}
/* >8 End of get_eth_conf.
*/ 353 354 /* Check num_pools parameter and set it if OK*/ 355 static int 356 vmdq_parse_num_pools(const char *q_arg) 357 { 358 char *end = NULL; 359 int n; 360 361 /* parse number string */ 362 n = strtol(q_arg, &end, 10); 363 if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0')) 364 return -1; 365 366 if (num_pools > num_vlans) { 367 printf("num_pools %d > num_vlans %d\n", num_pools, num_vlans); 368 return -1; 369 } 370 371 num_pools = n; 372 373 return 0; 374 } 375 376 377 static int 378 parse_portmask(const char *portmask) 379 { 380 char *end = NULL; 381 unsigned long pm; 382 383 /* parse hexadecimal string */ 384 pm = strtoul(portmask, &end, 16); 385 if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0')) 386 return 0; 387 388 return pm; 389 } 390 391 /* Display usage */ 392 static void 393 vmdq_usage(const char *prgname) 394 { 395 printf("%s [EAL options] -- -p PORTMASK]\n" 396 " --nb-pools NP: number of pools\n" 397 " --enable-rss: enable RSS (disabled by default)\n", 398 prgname); 399 } 400 401 /* Parse the argument (num_pools) given in the command line of the application */ 402 static int 403 vmdq_parse_args(int argc, char **argv) 404 { 405 int opt; 406 int option_index; 407 unsigned i; 408 const char *prgname = argv[0]; 409 static struct option long_option[] = { 410 {"nb-pools", required_argument, NULL, 0}, 411 {"enable-rss", 0, NULL, 0}, 412 {NULL, 0, 0, 0} 413 }; 414 415 /* Parse command line */ 416 while ((opt = getopt_long(argc, argv, "p:", long_option, 417 &option_index)) != EOF) { 418 switch (opt) { 419 /* portmask */ 420 case 'p': 421 enabled_port_mask = parse_portmask(optarg); 422 if (enabled_port_mask == 0) { 423 printf("invalid portmask\n"); 424 vmdq_usage(prgname); 425 return -1; 426 } 427 break; 428 case 0: 429 if (!strcmp(long_option[option_index].name, 430 "nb-pools")) { 431 if (vmdq_parse_num_pools(optarg) == -1) { 432 printf("invalid number of pools\n"); 433 vmdq_usage(prgname); 434 return -1; 435 } 436 } 437 438 if 
(!strcmp(long_option[option_index].name, 439 "enable-rss")) 440 rss_enable = 1; 441 break; 442 443 default: 444 vmdq_usage(prgname); 445 return -1; 446 } 447 } 448 449 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 450 if (enabled_port_mask & (1 << i)) 451 ports[num_ports++] = (uint8_t)i; 452 } 453 454 if (num_ports < 2 || num_ports % 2) { 455 printf("Current enabled port number is %u," 456 "but it should be even and at least 2\n", num_ports); 457 return -1; 458 } 459 460 return 0; 461 } 462 463 static void 464 update_mac_address(struct rte_mbuf *m, unsigned dst_port) 465 { 466 struct rte_ether_hdr *eth; 467 void *tmp; 468 469 eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *); 470 471 /* 02:00:00:00:00:xx */ 472 tmp = ð->d_addr.addr_bytes[0]; 473 *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40); 474 475 /* src addr */ 476 rte_ether_addr_copy(&vmdq_ports_eth_addr[dst_port], ð->s_addr); 477 } 478 479 /* When we receive a HUP signal, print out our stats */ 480 static void 481 sighup_handler(int signum) 482 { 483 unsigned int q = vmdq_queue_base; 484 for (; q < num_queues; q++) { 485 if ((q - vmdq_queue_base) % (num_vmdq_queues / num_pools) == 0) 486 printf("\nPool %u: ", (q - vmdq_queue_base) / 487 (num_vmdq_queues / num_pools)); 488 printf("%lu ", rxPackets[q]); 489 } 490 printf("\nFinished handling signal %d\n", signum); 491 } 492 493 /* 494 * Main thread that does the work, reading from INPUT_PORT 495 * and writing to OUTPUT_PORT 496 */ 497 static int 498 lcore_main(__rte_unused void *dummy) 499 { 500 const uint16_t lcore_id = (uint16_t)rte_lcore_id(); 501 const uint16_t num_cores = (uint16_t)rte_lcore_count(); 502 uint16_t core_id = 0; 503 uint16_t startQueue, endQueue; 504 uint16_t q, i, p; 505 const uint16_t remainder = (uint16_t)(num_vmdq_queues % num_cores); 506 507 for (i = 0; i < num_cores; i++) 508 if (lcore_ids[i] == lcore_id) { 509 core_id = i; 510 break; 511 } 512 513 if (remainder != 0) { 514 if (core_id < remainder) { 515 startQueue = 
(uint16_t)(core_id * 516 (num_vmdq_queues / num_cores + 1)); 517 endQueue = (uint16_t)(startQueue + 518 (num_vmdq_queues / num_cores) + 1); 519 } else { 520 startQueue = (uint16_t)(core_id * 521 (num_vmdq_queues / num_cores) + 522 remainder); 523 endQueue = (uint16_t)(startQueue + 524 (num_vmdq_queues / num_cores)); 525 } 526 } else { 527 startQueue = (uint16_t)(core_id * 528 (num_vmdq_queues / num_cores)); 529 endQueue = (uint16_t)(startQueue + 530 (num_vmdq_queues / num_cores)); 531 } 532 533 /* vmdq queue idx doesn't always start from zero.*/ 534 startQueue += vmdq_queue_base; 535 endQueue += vmdq_queue_base; 536 printf("core %u(lcore %u) reading queues %i-%i\n", (unsigned)core_id, 537 (unsigned)lcore_id, startQueue, endQueue - 1); 538 539 if (startQueue == endQueue) { 540 printf("lcore %u has nothing to do\n", lcore_id); 541 return 0; 542 } 543 544 for (;;) { 545 struct rte_mbuf *buf[MAX_PKT_BURST]; 546 const uint16_t buf_size = RTE_DIM(buf); 547 548 for (p = 0; p < num_ports; p++) { 549 const uint8_t sport = ports[p]; 550 /* 0 <-> 1, 2 <-> 3 etc */ 551 const uint8_t dport = ports[p ^ 1]; 552 if ((sport == INVALID_PORT_ID) || (dport == INVALID_PORT_ID)) 553 continue; 554 555 for (q = startQueue; q < endQueue; q++) { 556 const uint16_t rxCount = rte_eth_rx_burst(sport, 557 q, buf, buf_size); 558 559 if (unlikely(rxCount == 0)) 560 continue; 561 562 rxPackets[q] += rxCount; 563 564 for (i = 0; i < rxCount; i++) 565 update_mac_address(buf[i], dport); 566 567 const uint16_t txCount = rte_eth_tx_burst(dport, 568 vmdq_queue_base + core_id, 569 buf, 570 rxCount); 571 572 if (txCount != rxCount) { 573 for (i = txCount; i < rxCount; i++) 574 rte_pktmbuf_free(buf[i]); 575 } 576 } 577 } 578 } 579 } 580 581 /* 582 * Update the global var NUM_PORTS and array PORTS according to system ports number 583 * and return valid ports number 584 */ 585 static unsigned check_ports_num(unsigned nb_ports) 586 { 587 unsigned valid_num_ports = num_ports; 588 unsigned portid; 589 590 if 
(num_ports > nb_ports) { 591 printf("\nSpecified port number(%u) exceeds total system port number(%u)\n", 592 num_ports, nb_ports); 593 num_ports = nb_ports; 594 } 595 596 for (portid = 0; portid < num_ports; portid++) { 597 if (!rte_eth_dev_is_valid_port(ports[portid])) { 598 printf("\nSpecified port ID(%u) is not valid\n", 599 ports[portid]); 600 ports[portid] = INVALID_PORT_ID; 601 valid_num_ports--; 602 } 603 } 604 return valid_num_ports; 605 } 606 607 /* Main function, does initialisation and calls the per-lcore functions */ 608 int 609 main(int argc, char *argv[]) 610 { 611 struct rte_mempool *mbuf_pool; 612 unsigned lcore_id, core_id = 0; 613 int ret; 614 unsigned nb_ports, valid_num_ports; 615 uint16_t portid; 616 617 signal(SIGHUP, sighup_handler); 618 619 /* init EAL */ 620 ret = rte_eal_init(argc, argv); 621 if (ret < 0) 622 rte_exit(EXIT_FAILURE, "Error with EAL initialization\n"); 623 argc -= ret; 624 argv += ret; 625 626 /* parse app arguments */ 627 ret = vmdq_parse_args(argc, argv); 628 if (ret < 0) 629 rte_exit(EXIT_FAILURE, "Invalid VMDQ argument\n"); 630 631 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) 632 if (rte_lcore_is_enabled(lcore_id)) 633 lcore_ids[core_id++] = lcore_id; 634 635 if (rte_lcore_count() > RTE_MAX_LCORE) 636 rte_exit(EXIT_FAILURE, "Not enough cores\n"); 637 638 nb_ports = rte_eth_dev_count_avail(); 639 640 /* 641 * Update the global var NUM_PORTS and global array PORTS 642 * and get value of var VALID_NUM_PORTS according to system ports number 643 */ 644 valid_num_ports = check_ports_num(nb_ports); 645 646 if (valid_num_ports < 2 || valid_num_ports % 2) { 647 printf("Current valid ports number is %u\n", valid_num_ports); 648 rte_exit(EXIT_FAILURE, "Error with valid ports number is not even or less than 2\n"); 649 } 650 651 mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", 652 NUM_MBUFS_PER_PORT * nb_ports, MBUF_CACHE_SIZE, 653 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id()); 654 if (mbuf_pool == NULL) 655 
rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n"); 656 657 /* initialize all ports */ 658 RTE_ETH_FOREACH_DEV(portid) { 659 /* skip ports that are not enabled */ 660 if ((enabled_port_mask & (1 << portid)) == 0) { 661 printf("\nSkipping disabled port %d\n", portid); 662 continue; 663 } 664 if (port_init(portid, mbuf_pool) != 0) 665 rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n"); 666 } 667 668 /* call lcore_main() on every lcore */ 669 rte_eal_mp_remote_launch(lcore_main, NULL, CALL_MAIN); 670 RTE_LCORE_FOREACH_WORKER(lcore_id) { 671 if (rte_eal_wait_lcore(lcore_id) < 0) 672 return -1; 673 } 674 675 /* clean up the EAL */ 676 rte_eal_cleanup(); 677 678 return 0; 679 } 680