/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <signal.h>
#include <getopt.h>

#include <rte_eal.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_ring.h>
#include <rte_reorder.h>

#define RX_DESC_PER_QUEUE 1024
#define TX_DESC_PER_QUEUE 1024

#define MAX_PKTS_BURST 32
#define REORDER_BUFFER_SIZE 8192
#define MBUF_PER_POOL 65535
#define MBUF_POOL_CACHE_SIZE 250

#define RING_SIZE 16384

/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_REORDERAPP RTE_LOGTYPE_USER1

unsigned int portmask;
unsigned int disable_reorder;
unsigned int insight_worker;
volatile uint8_t quit_signal;

static struct rte_mempool *mbuf_pool;

static struct rte_eth_conf port_conf_default;

struct worker_thread_args {
	struct rte_ring *ring_in;
	struct rte_ring *ring_out;
};

struct send_thread_args {
	struct rte_ring *ring_in;
	struct rte_reorder_buffer *buffer;
};

volatile struct app_stats {
	struct {
		uint64_t rx_pkts;
		uint64_t enqueue_pkts;
		uint64_t enqueue_failed_pkts;
	} rx __rte_cache_aligned;

	struct {
		uint64_t dequeue_pkts;
		uint64_t enqueue_pkts;
		uint64_t enqueue_failed_pkts;
	} wkr __rte_cache_aligned;

	struct {
		uint64_t dequeue_pkts;
		/* Too early pkts transmitted directly w/o reordering */
		uint64_t early_pkts_txtd_woro;
		/* Too early pkts failed from direct transmit */
		uint64_t early_pkts_tx_failed_woro;
		uint64_t ro_tx_pkts;
		uint64_t ro_tx_failed_pkts;
	} tx __rte_cache_aligned;
} app_stats;

/* per worker lcore stats */
struct wkr_stats_per {
	uint64_t deq_pkts;
	uint64_t enq_pkts;
	uint64_t enq_failed_pkts;
} __rte_cache_aligned;

static struct wkr_stats_per wkr_stats[RTE_MAX_LCORE] = { {0} };

/**
 * Get the last enabled lcore ID
 *
 * @return
 *   The last enabled lcore ID.
 */
static unsigned int
get_last_lcore_id(void)
{
	int i;

	for (i = RTE_MAX_LCORE - 1; i >= 0; i--)
		if (rte_lcore_is_enabled(i))
			return i;
	return 0;
}

/**
 * Get the previous enabled lcore ID
 * @param id
 *   The current lcore ID
 * @return
 *   The previous enabled lcore ID or the current lcore
 *   ID if it is the first available core.
 */
static unsigned int
get_previous_lcore_id(unsigned int id)
{
	int i;

	for (i = id - 1; i >= 0; i--)
		if (rte_lcore_is_enabled(i))
			return i;
	return id;
}

static inline void
pktmbuf_free_bulk(struct rte_mbuf *mbuf_table[], unsigned n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		rte_pktmbuf_free(mbuf_table[i]);
}

/* display usage */
static void
print_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK [--disable-reorder] [--insight-worker]\n"
			"  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
			"  --disable-reorder: transmit packets without reordering\n"
			"  --insight-worker: print per-worker statistics\n",
			prgname);
}

static int
parse_portmask(const char *portmask)
{
	unsigned long pm;
	char *end = NULL;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	return pm;
}

/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt;
	int option_index;
	char **argvopt;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{"disable-reorder", 0, 0, 0},
		{"insight-worker", 0, 0, 0},
		{NULL, 0, 0, 0}
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "p:",
					lgopts, &option_index)) != EOF) {
		switch (opt) {
		/* portmask */
		case 'p':
			portmask = parse_portmask(optarg);
			if (portmask == 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;
		/* long options */
		case 0:
			if (!strcmp(lgopts[option_index].name, "disable-reorder")) {
				printf("reorder disabled\n");
				disable_reorder = 1;
			}
			if (!strcmp(lgopts[option_index].name,
						"insight-worker")) {
				printf("print all worker statistics\n");
				insight_worker = 1;
			}
			break;
		default:
			print_usage(prgname);
			return -1;
		}
	}
	if (optind <= 1) {
		print_usage(prgname);
		return -1;
	}

	argv[optind-1] = prgname;
	optind = 1; /* reset getopt lib */
	return 0;
}

/*
 * Tx buffer error callback
 */
static void
flush_tx_error_callback(struct rte_mbuf **unsent, uint16_t count,
		void *userdata __rte_unused) {

	/* free the mbufs which failed from transmit */
	app_stats.tx.ro_tx_failed_pkts += count;
	RTE_LOG_DP(DEBUG, REORDERAPP, "%s:Packet loss with tx_burst\n", __func__);
	pktmbuf_free_bulk(unsent, count);

}

static inline int
free_tx_buffers(struct rte_eth_dev_tx_buffer *tx_buffer[]) {
	uint16_t port_id;

	/* free buffers for all ports */
	RTE_ETH_FOREACH_DEV(port_id) {
		/* skip ports that are not enabled */
		if ((portmask & (1 << port_id)) == 0)
			continue;

		rte_free(tx_buffer[port_id]);
	}
	return 0;
}

static inline int
configure_tx_buffers(struct rte_eth_dev_tx_buffer *tx_buffer[])
{
	uint16_t port_id;
	int ret;

	/* initialize buffers for all ports */
	RTE_ETH_FOREACH_DEV(port_id) {
		/* skip ports that are not enabled */
		if ((portmask & (1 << port_id)) == 0)
			continue;

		/* Initialize TX buffers */
		tx_buffer[port_id] = rte_zmalloc_socket("tx_buffer",
				RTE_ETH_TX_BUFFER_SIZE(MAX_PKTS_BURST), 0,
				rte_eth_dev_socket_id(port_id));
		if (tx_buffer[port_id] == NULL)
			rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
					port_id);

		rte_eth_tx_buffer_init(tx_buffer[port_id],
				MAX_PKTS_BURST);

		ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[port_id],
				flush_tx_error_callback, NULL);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"Cannot set error callback for tx buffer on port %u\n",
				port_id);
	}
	return 0;
}

static inline int
configure_eth_port(uint16_t port_id)
{
	struct rte_ether_addr addr;
	const uint16_t rxRings = 1, txRings = 1;
	int ret;
	uint16_t q;
	uint16_t nb_rxd = RX_DESC_PER_QUEUE;
	uint16_t nb_txd = TX_DESC_PER_QUEUE;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;
	struct rte_eth_conf port_conf = port_conf_default;

	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0) {
		printf("Error during getting device (port %u) info: %s\n",
				port_id, strerror(-ret));
		return ret;
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	ret = rte_eth_dev_configure(port_id, rxRings, txRings, &port_conf);
	if (ret != 0)
		return ret;

	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
	if (ret != 0)
		return ret;

	for (q = 0; q < rxRings; q++) {
		ret = rte_eth_rx_queue_setup(port_id, q, nb_rxd,
				rte_eth_dev_socket_id(port_id), NULL,
				mbuf_pool);
		if (ret < 0)
			return ret;
	}

	txconf = dev_info.default_txconf;
	txconf.offloads = port_conf.txmode.offloads;
	for (q = 0; q < txRings; q++) {
		ret = rte_eth_tx_queue_setup(port_id, q, nb_txd,
				rte_eth_dev_socket_id(port_id), &txconf);
		if (ret < 0)
			return ret;
	}

	ret = rte_eth_dev_start(port_id);
	if (ret < 0)
		return ret;

	ret = rte_eth_macaddr_get(port_id, &addr);
	if (ret != 0) {
		printf("Failed to get MAC address (port %u): %s\n",
				port_id, rte_strerror(-ret));
		return ret;
	}

	printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			port_id,
			addr.addr_bytes[0], addr.addr_bytes[1],
			addr.addr_bytes[2], addr.addr_bytes[3],
			addr.addr_bytes[4], addr.addr_bytes[5]);

	ret = rte_eth_promiscuous_enable(port_id);
	if (ret != 0)
		return ret;

	return 0;
}

static void
print_stats(void)
{
	uint16_t i;
	struct rte_eth_stats eth_stats;
	unsigned int lcore_id, last_lcore_id, main_lcore_id, end_w_lcore_id;

	last_lcore_id = get_last_lcore_id();
	main_lcore_id = rte_get_main_lcore();
	end_w_lcore_id = get_previous_lcore_id(last_lcore_id);

	printf("\nRX thread stats:\n");
	printf(" - Pkts rxd: %"PRIu64"\n",
			app_stats.rx.rx_pkts);
	printf(" - Pkts enqd to workers ring: %"PRIu64"\n",
			app_stats.rx.enqueue_pkts);

	for (lcore_id = 0; lcore_id <= end_w_lcore_id; lcore_id++) {
		if (insight_worker
			&& rte_lcore_is_enabled(lcore_id)
			&& lcore_id != main_lcore_id) {
			printf("\nWorker thread stats on core [%u]:\n",
					lcore_id);
			printf(" - Pkts deqd from workers ring: %"PRIu64"\n",
					wkr_stats[lcore_id].deq_pkts);
			printf(" - Pkts enqd to tx ring: %"PRIu64"\n",
					wkr_stats[lcore_id].enq_pkts);
			printf(" - Pkts enq to tx failed: %"PRIu64"\n",
					wkr_stats[lcore_id].enq_failed_pkts);
		}

		app_stats.wkr.dequeue_pkts += wkr_stats[lcore_id].deq_pkts;
		app_stats.wkr.enqueue_pkts += wkr_stats[lcore_id].enq_pkts;
		app_stats.wkr.enqueue_failed_pkts +=
				wkr_stats[lcore_id].enq_failed_pkts;
	}

	printf("\nWorker thread stats:\n");
	printf(" - Pkts deqd from workers ring: %"PRIu64"\n",
			app_stats.wkr.dequeue_pkts);
	printf(" - Pkts enqd to tx ring: %"PRIu64"\n",
			app_stats.wkr.enqueue_pkts);
	printf(" - Pkts enq to tx failed: %"PRIu64"\n",
			app_stats.wkr.enqueue_failed_pkts);

	printf("\nTX stats:\n");
	printf(" - Pkts deqd from tx ring: %"PRIu64"\n",
			app_stats.tx.dequeue_pkts);
	printf(" - Ro Pkts transmitted: %"PRIu64"\n",
			app_stats.tx.ro_tx_pkts);
	printf(" - Ro Pkts tx failed: %"PRIu64"\n",
			app_stats.tx.ro_tx_failed_pkts);
	printf(" - Pkts transmitted w/o reorder: %"PRIu64"\n",
			app_stats.tx.early_pkts_txtd_woro);
	printf(" - Pkts tx failed w/o reorder: %"PRIu64"\n",
			app_stats.tx.early_pkts_tx_failed_woro);

	RTE_ETH_FOREACH_DEV(i) {
		rte_eth_stats_get(i, &eth_stats);
		printf("\nPort %u stats:\n", i);
		printf(" - Pkts in: %"PRIu64"\n", eth_stats.ipackets);
		printf(" - Pkts out: %"PRIu64"\n", eth_stats.opackets);
		printf(" - In Errs: %"PRIu64"\n", eth_stats.ierrors);
		printf(" - Out Errs: %"PRIu64"\n", eth_stats.oerrors);
		printf(" - Mbuf Errs: %"PRIu64"\n", eth_stats.rx_nombuf);
	}
}

static void
int_handler(int sig_num)
{
	printf("Exiting on signal %d\n", sig_num);
	quit_signal = 1;
}

/**
 * This thread receives mbufs from the ports and assigns each one an internal
 * sequence number (stored in the mbuf) to keep track of its order of arrival.
 * The mbufs are then passed to the worker threads via the rx_to_workers
 * ring.
 */
static int
rx_thread(struct rte_ring *ring_out)
{
	uint32_t seqn = 0;
	uint16_t i, ret = 0;
	uint16_t nb_rx_pkts;
	uint16_t port_id;
	struct rte_mbuf *pkts[MAX_PKTS_BURST];

	RTE_LOG(INFO, REORDERAPP, "%s() started on lcore %u\n", __func__,
			rte_lcore_id());

	while (!quit_signal) {

		RTE_ETH_FOREACH_DEV(port_id) {
			if ((portmask & (1 << port_id)) != 0) {

				/* receive packets */
				nb_rx_pkts = rte_eth_rx_burst(port_id, 0,
						pkts, MAX_PKTS_BURST);
				if (nb_rx_pkts == 0) {
					RTE_LOG_DP(DEBUG, REORDERAPP,
						"%s():Received zero packets\n", __func__);
					continue;
				}
				app_stats.rx.rx_pkts += nb_rx_pkts;

				/* mark sequence number */
				for (i = 0; i < nb_rx_pkts; )
					*rte_reorder_seqn(pkts[i++]) = seqn++;

				/* enqueue to rx_to_workers ring */
				ret = rte_ring_enqueue_burst(ring_out,
						(void *)pkts, nb_rx_pkts, NULL);
				app_stats.rx.enqueue_pkts += ret;
				if (unlikely(ret < nb_rx_pkts)) {
					app_stats.rx.enqueue_failed_pkts +=
							(nb_rx_pkts-ret);
					pktmbuf_free_bulk(&pkts[ret], nb_rx_pkts - ret);
				}
			}
		}
	}
	return 0;
}

/**
 * This thread takes bursts of packets from the rx_to_workers ring,
 * changes the input port value to the output port value,
 * and feeds the packets to the workers_to_tx ring.
 */
static int
worker_thread(void *args_ptr)
{
	const uint16_t nb_ports = rte_eth_dev_count_avail();
	uint16_t i, ret = 0;
	uint16_t burst_size = 0;
	struct worker_thread_args *args;
	struct rte_mbuf *burst_buffer[MAX_PKTS_BURST] = { NULL };
	struct rte_ring *ring_in, *ring_out;
	const unsigned xor_val = (nb_ports > 1);
	unsigned int core_id = rte_lcore_id();

	args = (struct worker_thread_args *) args_ptr;
	ring_in = args->ring_in;
	ring_out = args->ring_out;

	RTE_LOG(INFO, REORDERAPP, "%s() started on lcore %u\n", __func__,
			core_id);

	while (!quit_signal) {

		/* dequeue the mbufs from rx_to_workers ring */
		burst_size = rte_ring_dequeue_burst(ring_in,
				(void *)burst_buffer, MAX_PKTS_BURST, NULL);
		if (unlikely(burst_size == 0))
			continue;

		wkr_stats[core_id].deq_pkts += burst_size;

		/* just do some operation on mbuf */
		for (i = 0; i < burst_size;)
			burst_buffer[i++]->port ^= xor_val;

		/* enqueue the modified mbufs to workers_to_tx ring */
		ret = rte_ring_enqueue_burst(ring_out, (void *)burst_buffer,
				burst_size, NULL);
		wkr_stats[core_id].enq_pkts += ret;
		if (unlikely(ret < burst_size)) {
			/* Return the mbufs to their respective pool, dropping packets */
			wkr_stats[core_id].enq_failed_pkts += burst_size - ret;
			pktmbuf_free_bulk(&burst_buffer[ret], burst_size - ret);
		}
	}
	return 0;
}

/**
 * Dequeue mbufs from the workers_to_tx ring and reorder them before
 * transmitting.
 */
static int
send_thread(struct send_thread_args *args)
{
	int ret;
	unsigned int i, dret;
	uint16_t nb_dq_mbufs;
	uint8_t outp;
	unsigned sent;
	struct rte_mbuf *mbufs[MAX_PKTS_BURST];
	struct rte_mbuf *rombufs[MAX_PKTS_BURST] = {NULL};
	static struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];

	RTE_LOG(INFO, REORDERAPP, "%s() started on lcore %u\n", __func__, rte_lcore_id());

	configure_tx_buffers(tx_buffer);

	while (!quit_signal) {

		/* dequeue the mbufs from workers_to_tx ring */
		nb_dq_mbufs = rte_ring_dequeue_burst(args->ring_in,
				(void *)mbufs, MAX_PKTS_BURST, NULL);

		if (unlikely(nb_dq_mbufs == 0))
			continue;

		app_stats.tx.dequeue_pkts += nb_dq_mbufs;

		for (i = 0; i < nb_dq_mbufs; i++) {
			/* send dequeued mbufs for reordering */
			ret = rte_reorder_insert(args->buffer, mbufs[i]);

			if (ret == -1 && rte_errno == ERANGE) {
				/* Too early pkts should be transmitted out directly */
				RTE_LOG_DP(DEBUG, REORDERAPP,
						"%s():Cannot reorder early packet "
						"direct enqueuing to TX\n", __func__);
				outp = mbufs[i]->port;
				if ((portmask & (1 << outp)) == 0) {
					rte_pktmbuf_free(mbufs[i]);
					continue;
				}
				if (rte_eth_tx_burst(outp, 0, &mbufs[i], 1) != 1) {
					rte_pktmbuf_free(mbufs[i]);
					app_stats.tx.early_pkts_tx_failed_woro++;
				} else
					app_stats.tx.early_pkts_txtd_woro++;
			} else if (ret == -1 && rte_errno == ENOSPC) {
				/*
				 * Early pkts just outside of window should be dropped
				 */
				rte_pktmbuf_free(mbufs[i]);
			}
		}

		/*
		 * drain MAX_PKTS_BURST of reordered
		 * mbufs for transmit
		 */
		dret = rte_reorder_drain(args->buffer, rombufs, MAX_PKTS_BURST);
		for (i = 0; i < dret; i++) {

			struct rte_eth_dev_tx_buffer *outbuf;
			uint8_t outp1;

			outp1 = rombufs[i]->port;
			/* skip ports that are not enabled */
			if ((portmask & (1 << outp1)) == 0) {
				rte_pktmbuf_free(rombufs[i]);
				continue;
			}

			outbuf = tx_buffer[outp1];
			sent = rte_eth_tx_buffer(outp1, 0, outbuf, rombufs[i]);
			if (sent)
				app_stats.tx.ro_tx_pkts += sent;
		}
	}

	free_tx_buffers(tx_buffer);

	return 0;
}

/**
 * Dequeue mbufs from the workers_to_tx ring and transmit them
 */
static int
tx_thread(struct rte_ring *ring_in)
{
	uint32_t i, dqnum;
	uint8_t outp;
	unsigned sent;
	struct rte_mbuf *mbufs[MAX_PKTS_BURST];
	struct rte_eth_dev_tx_buffer *outbuf;
	static struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];

	RTE_LOG(INFO, REORDERAPP, "%s() started on lcore %u\n", __func__,
			rte_lcore_id());

	configure_tx_buffers(tx_buffer);

	while (!quit_signal) {

		/* dequeue the mbufs from workers_to_tx ring */
		dqnum = rte_ring_dequeue_burst(ring_in,
				(void *)mbufs, MAX_PKTS_BURST, NULL);

		if (unlikely(dqnum == 0))
			continue;

		app_stats.tx.dequeue_pkts += dqnum;

		for (i = 0; i < dqnum; i++) {
			outp = mbufs[i]->port;
			/* skip ports that are not enabled */
			if ((portmask & (1 << outp)) == 0) {
				rte_pktmbuf_free(mbufs[i]);
				continue;
			}

			outbuf = tx_buffer[outp];
			sent = rte_eth_tx_buffer(outp, 0, outbuf, mbufs[i]);
			if (sent)
				app_stats.tx.ro_tx_pkts += sent;
		}
	}

	return 0;
}

int
main(int argc, char **argv)
{
	int ret;
	unsigned nb_ports;
	unsigned int lcore_id, last_lcore_id, main_lcore_id;
	uint16_t port_id;
	uint16_t nb_ports_available;
	struct worker_thread_args worker_args = {NULL, NULL};
	struct send_thread_args send_args = {NULL, NULL};
	struct rte_ring *rx_to_workers;
	struct rte_ring *workers_to_tx;

	/* catch ctrl-c so we can print on exit */
	signal(SIGINT, int_handler);

	/* Initialize EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");

	argc -= ret;
	argv += ret;

	/* Parse the application specific arguments */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid packet_ordering arguments\n");

	/* Check if we have enough cores */
	if (rte_lcore_count() < 3)
		rte_exit(EXIT_FAILURE, "Error, This application needs at "
				"least 3 logical cores to run:\n"
				"1 lcore for packet RX\n"
				"1 lcore for packet TX\n"
				"and at least 1 lcore for worker threads\n");

	nb_ports = rte_eth_dev_count_avail();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "Error: no ethernet ports detected\n");
	if (nb_ports != 1 && (nb_ports & 1))
		rte_exit(EXIT_FAILURE, "Error: number of ports must be even, except "
				"when using a single port\n");

	mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", MBUF_PER_POOL,
			MBUF_POOL_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
			rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

	nb_ports_available = nb_ports;

	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(port_id) {
		/* skip ports that are not enabled */
		if ((portmask & (1 << port_id)) == 0) {
			printf("\nSkipping disabled port %d\n", port_id);
			nb_ports_available--;
			continue;
		}
		/* init port */
		printf("Initializing port %u... done\n", port_id);

		if (configure_eth_port(port_id) != 0)
			rte_exit(EXIT_FAILURE, "Cannot initialize port %"PRIu8"\n",
					port_id);
	}

	if (!nb_ports_available) {
		rte_exit(EXIT_FAILURE,
			"All available ports are disabled. Please set portmask.\n");
	}

	/* Create rings for inter core communication */
	rx_to_workers = rte_ring_create("rx_to_workers", RING_SIZE, rte_socket_id(),
			RING_F_SP_ENQ);
	if (rx_to_workers == NULL)
		rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

	workers_to_tx = rte_ring_create("workers_to_tx", RING_SIZE, rte_socket_id(),
			RING_F_SC_DEQ);
	if (workers_to_tx == NULL)
		rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

	if (!disable_reorder) {
		send_args.buffer = rte_reorder_create("PKT_RO", rte_socket_id(),
				REORDER_BUFFER_SIZE);
		if (send_args.buffer == NULL)
			rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
	}

	last_lcore_id = get_last_lcore_id();
	main_lcore_id = rte_get_main_lcore();

	worker_args.ring_in = rx_to_workers;
	worker_args.ring_out = workers_to_tx;

	/* Start worker_thread() on all the available worker cores but the last 1 */
	for (lcore_id = 0; lcore_id <= get_previous_lcore_id(last_lcore_id); lcore_id++)
		if (rte_lcore_is_enabled(lcore_id) && lcore_id != main_lcore_id)
			rte_eal_remote_launch(worker_thread, (void *)&worker_args,
					lcore_id);

	if (disable_reorder) {
		/* Start tx_thread() on the last worker core */
		rte_eal_remote_launch((lcore_function_t *)tx_thread, workers_to_tx,
				last_lcore_id);
	} else {
		send_args.ring_in = workers_to_tx;
		/* Start send_thread() on the last worker core */
		rte_eal_remote_launch((lcore_function_t *)send_thread,
				(void *)&send_args, last_lcore_id);
	}

	/* Start rx_thread() on the main core */
	rx_thread(rx_to_workers);

	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	print_stats();
	return 0;
}
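
/*
 * Usage sketch (not part of the original source): the binary name
 * "dpdk-packet_ordering" and the core/port layout below are assumptions,
 * shown only to illustrate how the options parsed in parse_args() map onto
 * the threads launched in main(). A run on four lcores with ports 0 and 1
 * enabled in the hexadecimal portmask could look like:
 *
 *   ./dpdk-packet_ordering -l 0-3 -n 4 -- -p 0x3 --insight-worker
 *
 * With this layout the main lcore (0) runs rx_thread(), the last enabled
 * lcore (3) runs send_thread() (or tx_thread() when --disable-reorder is
 * given), and the remaining lcores (1 and 2) run worker_thread().
 */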