1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation 3 */ 4 5 #include <stdio.h> 6 #include <stdlib.h> 7 #include <string.h> 8 #include <stdint.h> 9 #include <inttypes.h> 10 #include <sys/types.h> 11 #include <sys/queue.h> 12 #include <netinet/in.h> 13 #include <setjmp.h> 14 #include <stdarg.h> 15 #include <ctype.h> 16 #include <errno.h> 17 #include <getopt.h> 18 #include <signal.h> 19 20 #include <rte_common.h> 21 #include <rte_log.h> 22 #include <rte_malloc.h> 23 #include <rte_memory.h> 24 #include <rte_memcpy.h> 25 #include <rte_eal.h> 26 #include <rte_launch.h> 27 #include <rte_atomic.h> 28 #include <rte_cycles.h> 29 #include <rte_prefetch.h> 30 #include <rte_lcore.h> 31 #include <rte_per_lcore.h> 32 #include <rte_branch_prediction.h> 33 #include <rte_interrupts.h> 34 #include <rte_random.h> 35 #include <rte_debug.h> 36 #include <rte_ether.h> 37 #include <rte_ethdev.h> 38 #include <rte_mempool.h> 39 #include <rte_mbuf.h> 40 #include <rte_timer.h> 41 #include <rte_keepalive.h> 42 43 #include "shm.h" 44 45 #define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1 46 47 #define NB_MBUF 8192 48 49 #define MAX_PKT_BURST 32 50 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ 51 52 /* 53 * Configurable number of RX/TX ring descriptors 54 */ 55 #define RTE_TEST_RX_DESC_DEFAULT 1024 56 #define RTE_TEST_TX_DESC_DEFAULT 1024 57 static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; 58 static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; 59 60 /* ethernet addresses of ports */ 61 static struct ether_addr l2fwd_ports_eth_addr[RTE_MAX_ETHPORTS]; 62 63 /* mask of enabled ports */ 64 static uint32_t l2fwd_enabled_port_mask; 65 66 /* list of enabled ports */ 67 static uint32_t l2fwd_dst_ports[RTE_MAX_ETHPORTS]; 68 69 static unsigned int l2fwd_rx_queue_per_lcore = 1; 70 71 #define MAX_RX_QUEUE_PER_LCORE 16 72 #define MAX_TX_QUEUE_PER_PORT 16 73 struct lcore_queue_conf { 74 unsigned n_rx_port; 75 unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE]; 76 } __rte_cache_aligned; 77 struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE]; 78 79 struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS]; 80 81 static struct rte_eth_conf port_conf = { 82 .rxmode = { 83 .split_hdr_size = 0, 84 .offloads = DEV_RX_OFFLOAD_CRC_STRIP, 85 }, 86 .txmode = { 87 .mq_mode = ETH_MQ_TX_NONE, 88 }, 89 }; 90 91 struct rte_mempool *l2fwd_pktmbuf_pool = NULL; 92 93 /* Per-port statistics struct */ 94 struct l2fwd_port_statistics { 95 uint64_t tx; 96 uint64_t rx; 97 uint64_t dropped; 98 } __rte_cache_aligned; 99 struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS]; 100 101 /* A tsc-based timer responsible for triggering statistics printout */ 102 #define TIMER_MILLISECOND 1 103 #define MAX_TIMER_PERIOD 86400 /* 1 day max */ 104 static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000; /* 10 seconds */ 105 static int64_t check_period = 5; /* default check cycle is 5ms */ 106 107 /* Keepalive structure */ 108 struct rte_keepalive *rte_global_keepalive_info; 109 110 /* Termination signalling */ 111 static int terminate_signal_received; 112 113 /* Termination signal handler */ 114 static void handle_sigterm(__rte_unused int value) 115 { 116 terminate_signal_received = 1; 117 } 118 119 /* Print out statistics on packets dropped */ 120 static void 121 print_stats(__attribute__((unused)) struct rte_timer *ptr_timer, 122 __attribute__((unused)) void *ptr_data) 123 { 124 uint64_t total_packets_dropped, total_packets_tx, total_packets_rx; 125 uint16_t portid; 126 127 total_packets_dropped = 0; 128 total_packets_tx = 0; 129 total_packets_rx = 0; 130 131 const char clr[] = { 27, '[', '2', 'J', '\0' }; 132 const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' }; 133 134 /* Clear screen and move to top left */ 135 printf("%s%s", clr, topLeft); 136 137 printf("\nPort statistics ===================================="); 138 139 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) { 140 /* skip disabled ports */ 141 if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) 142 continue; 143 printf("\nStatistics for port %u ------------------------------" 144 "\nPackets sent: %24"PRIu64 145 "\nPackets received: %20"PRIu64 146 "\nPackets dropped: %21"PRIu64, 147 portid, 148 port_statistics[portid].tx, 149 port_statistics[portid].rx, 150 port_statistics[portid].dropped); 151 152 total_packets_dropped += port_statistics[portid].dropped; 153 total_packets_tx += port_statistics[portid].tx; 154 total_packets_rx += port_statistics[portid].rx; 155 } 156 printf("\nAggregate statistics ===============================" 157 "\nTotal packets sent: %18"PRIu64 158 "\nTotal packets received: %14"PRIu64 159 "\nTotal packets dropped: %15"PRIu64, 160 total_packets_tx, 161 total_packets_rx, 162 total_packets_dropped); 163 printf("\n====================================================\n"); 164 } 165 166 static void 167 l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid) 168 { 169 struct ether_hdr *eth; 170 void *tmp; 171 int sent; 172 unsigned dst_port; 173 struct rte_eth_dev_tx_buffer *buffer; 174 175 dst_port = l2fwd_dst_ports[portid]; 176 eth = rte_pktmbuf_mtod(m, struct ether_hdr *); 177 178 /* 02:00:00:00:00:xx */ 179 tmp = ð->d_addr.addr_bytes[0]; 180 *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40); 181 182 /* src addr */ 183 ether_addr_copy(&l2fwd_ports_eth_addr[dst_port], ð->s_addr); 184 185 buffer = tx_buffer[dst_port]; 186 sent = rte_eth_tx_buffer(dst_port, 0, buffer, m); 187 if (sent) 188 port_statistics[dst_port].tx += sent; 189 } 190 191 /* main processing loop */ 192 static void 193 l2fwd_main_loop(void) 194 { 195 struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; 196 struct rte_mbuf *m; 197 int sent; 198 unsigned lcore_id; 199 uint64_t prev_tsc, diff_tsc, cur_tsc; 200 unsigned i, j, portid, nb_rx; 201 struct lcore_queue_conf *qconf; 202 const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) 203 / US_PER_S * BURST_TX_DRAIN_US; 204 struct rte_eth_dev_tx_buffer *buffer; 205 206 prev_tsc = 0; 207 208 lcore_id = rte_lcore_id(); 209 qconf = &lcore_queue_conf[lcore_id]; 210 211 if (qconf->n_rx_port == 0) { 212 RTE_LOG(INFO, L2FWD, "lcore %u has nothing to do\n", lcore_id); 213 return; 214 } 215 216 RTE_LOG(INFO, L2FWD, "entering main loop on lcore %u\n", lcore_id); 217 218 for (i = 0; i < qconf->n_rx_port; i++) { 219 220 portid = qconf->rx_port_list[i]; 221 RTE_LOG(INFO, L2FWD, " -- lcoreid=%u portid=%u\n", lcore_id, 222 portid); 223 } 224 225 uint64_t tsc_initial = rte_rdtsc(); 226 uint64_t tsc_lifetime = (rand()&0x07) * rte_get_tsc_hz(); 227 228 while (!terminate_signal_received) { 229 /* Keepalive heartbeat */ 230 rte_keepalive_mark_alive(rte_global_keepalive_info); 231 232 cur_tsc = rte_rdtsc(); 233 234 /* 235 * Die randomly within 7 secs for demo purposes if 236 * keepalive enabled 237 */ 238 if (check_period > 0 && cur_tsc - tsc_initial > tsc_lifetime) 239 break; 240 241 /* 242 * TX burst queue drain 243 */ 244 diff_tsc = cur_tsc - prev_tsc; 245 if (unlikely(diff_tsc > drain_tsc)) { 246 247 for (i = 0; i < qconf->n_rx_port; i++) { 248 249 portid = l2fwd_dst_ports[qconf->rx_port_list[i]]; 250 buffer = tx_buffer[portid]; 251 252 sent = rte_eth_tx_buffer_flush(portid, 0, buffer); 253 if (sent) 254 port_statistics[portid].tx += sent; 255 256 } 257 258 prev_tsc = cur_tsc; 259 } 260 261 /* 262 * Read packet from RX queues 263 */ 264 for (i = 0; i < qconf->n_rx_port; i++) { 265 266 portid = qconf->rx_port_list[i]; 267 nb_rx = rte_eth_rx_burst(portid, 0, 268 pkts_burst, MAX_PKT_BURST); 269 270 port_statistics[portid].rx += nb_rx; 271 272 for (j = 0; j < nb_rx; j++) { 273 m = pkts_burst[j]; 274 rte_prefetch0(rte_pktmbuf_mtod(m, void *)); 275 l2fwd_simple_forward(m, portid); 276 } 277 } 278 } 279 } 280 281 static int 282 l2fwd_launch_one_lcore(__attribute__((unused)) void *dummy) 283 { 284 l2fwd_main_loop(); 285 return 0; 286 } 287 288 /* display usage */ 289 static void 290 l2fwd_usage(const char *prgname) 291 { 292 printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n" 293 " -p PORTMASK: hexadecimal bitmask of ports to configure\n" 294 " -q NQ: number of queue (=ports) per lcore (default is 1)\n" 295 " -K PERIOD: Keepalive check period (5 default; 86400 max)\n" 296 " -T PERIOD: statistics will be refreshed each PERIOD seconds (0 to disable, 10 default, 86400 maximum)\n", 297 prgname); 298 } 299 300 static int 301 l2fwd_parse_portmask(const char *portmask) 302 { 303 char *end = NULL; 304 unsigned long pm; 305 306 /* parse hexadecimal string */ 307 pm = strtoul(portmask, &end, 16); 308 if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0')) 309 return -1; 310 311 if (pm == 0) 312 return -1; 313 314 return pm; 315 } 316 317 static unsigned int 318 l2fwd_parse_nqueue(const char *q_arg) 319 { 320 char *end = NULL; 321 unsigned long n; 322 323 /* parse hexadecimal string */ 324 n = strtoul(q_arg, &end, 10); 325 if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0')) 326 return 0; 327 if (n == 0) 328 return 0; 329 if (n >= MAX_RX_QUEUE_PER_LCORE) 330 return 0; 331 332 return n; 333 } 334 335 static int 336 l2fwd_parse_timer_period(const char *q_arg) 337 { 338 char *end = NULL; 339 int n; 340 341 /* parse number string */ 342 n = strtol(q_arg, &end, 10); 343 if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0')) 344 return -1; 345 if (n >= MAX_TIMER_PERIOD) 346 return -1; 347 348 return n; 349 } 350 351 static int 352 l2fwd_parse_check_period(const char *q_arg) 353 { 354 char *end = NULL; 355 int n; 356 357 /* parse number string */ 358 n = strtol(q_arg, &end, 10); 359 if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0')) 360 return -1; 361 if (n >= MAX_TIMER_PERIOD) 362 return -1; 363 364 return n; 365 } 366 367 /* Parse the argument given in the command line of the application */ 368 static int 369 l2fwd_parse_args(int argc, char **argv) 370 { 371 int opt, ret; 372 char **argvopt; 373 int option_index; 374 char *prgname = argv[0]; 375 static struct option lgopts[] = { 376 {NULL, 0, 0, 0} 377 }; 378 379 argvopt = argv; 380 381 while ((opt = getopt_long(argc, argvopt, "p:q:T:K:", 382 lgopts, &option_index)) != EOF) { 383 384 switch (opt) { 385 /* portmask */ 386 case 'p': 387 l2fwd_enabled_port_mask = l2fwd_parse_portmask(optarg); 388 if (l2fwd_enabled_port_mask == 0) { 389 printf("invalid portmask\n"); 390 l2fwd_usage(prgname); 391 return -1; 392 } 393 break; 394 395 /* nqueue */ 396 case 'q': 397 l2fwd_rx_queue_per_lcore = l2fwd_parse_nqueue(optarg); 398 if (l2fwd_rx_queue_per_lcore == 0) { 399 printf("invalid queue number\n"); 400 l2fwd_usage(prgname); 401 return -1; 402 } 403 break; 404 405 /* timer period */ 406 case 'T': 407 timer_period = l2fwd_parse_timer_period(optarg) 408 * (int64_t)(1000 * TIMER_MILLISECOND); 409 if (timer_period < 0) { 410 printf("invalid timer period\n"); 411 l2fwd_usage(prgname); 412 return -1; 413 } 414 break; 415 416 /* Check period */ 417 case 'K': 418 check_period = l2fwd_parse_check_period(optarg); 419 if (check_period < 0) { 420 printf("invalid check period\n"); 421 l2fwd_usage(prgname); 422 return -1; 423 } 424 break; 425 426 /* long options */ 427 case 0: 428 l2fwd_usage(prgname); 429 return -1; 430 431 default: 432 l2fwd_usage(prgname); 433 return -1; 434 } 435 } 436 437 if (optind >= 0) 438 argv[optind-1] = prgname; 439 440 ret = optind-1; 441 optind = 1; /* reset getopt lib */ 442 return ret; 443 } 444 445 /* Check the link status of all ports in up to 9s, and print them finally */ 446 static void 447 check_all_ports_link_status(uint32_t port_mask) 448 { 449 #define CHECK_INTERVAL 100 /* 100ms */ 450 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 451 uint16_t portid; 452 uint8_t count, all_ports_up, print_flag = 0; 453 struct rte_eth_link link; 454 455 printf("\nChecking link status"); 456 fflush(stdout); 457 for (count = 0; count <= MAX_CHECK_TIME; count++) { 458 all_ports_up = 1; 459 RTE_ETH_FOREACH_DEV(portid) { 460 if ((port_mask & (1 << portid)) == 0) 461 continue; 462 memset(&link, 0, sizeof(link)); 463 rte_eth_link_get_nowait(portid, &link); 464 /* print link status if flag set */ 465 if (print_flag == 1) { 466 if (link.link_status) 467 printf( 468 "Port%d Link Up. Speed %u Mbps - %s\n", 469 portid, link.link_speed, 470 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 471 ("full-duplex") : ("half-duplex\n")); 472 else 473 printf("Port %d Link Down\n", portid); 474 continue; 475 } 476 /* clear all_ports_up flag if any link down */ 477 if (link.link_status == ETH_LINK_DOWN) { 478 all_ports_up = 0; 479 break; 480 } 481 } 482 /* after finally printing all link status, get out */ 483 if (print_flag == 1) 484 break; 485 486 if (all_ports_up == 0) { 487 printf("."); 488 fflush(stdout); 489 rte_delay_ms(CHECK_INTERVAL); 490 } 491 492 /* set the print_flag if all ports up or timeout */ 493 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 494 print_flag = 1; 495 printf("done\n"); 496 } 497 } 498 } 499 500 static void 501 dead_core(__rte_unused void *ptr_data, const int id_core) 502 { 503 if (terminate_signal_received) 504 return; 505 printf("Dead core %i - restarting..\n", id_core); 506 if (rte_eal_get_lcore_state(id_core) == FINISHED) { 507 rte_eal_wait_lcore(id_core); 508 rte_eal_remote_launch(l2fwd_launch_one_lcore, NULL, id_core); 509 } else { 510 printf("..false positive!\n"); 511 } 512 } 513 514 static void 515 relay_core_state(void *ptr_data, const int id_core, 516 const enum rte_keepalive_state core_state, uint64_t last_alive) 517 { 518 rte_keepalive_relayed_state((struct rte_keepalive_shm *)ptr_data, 519 id_core, core_state, last_alive); 520 } 521 522 int 523 main(int argc, char **argv) 524 { 525 struct lcore_queue_conf *qconf; 526 int ret; 527 uint16_t nb_ports; 528 uint16_t nb_ports_available = 0; 529 uint16_t portid, last_port; 530 unsigned lcore_id, rx_lcore_id; 531 unsigned nb_ports_in_mask = 0; 532 struct sigaction signal_handler; 533 struct rte_keepalive_shm *ka_shm; 534 535 memset(&signal_handler, 0, sizeof(signal_handler)); 536 terminate_signal_received = 0; 537 signal_handler.sa_handler = &handle_sigterm; 538 if (sigaction(SIGINT, &signal_handler, NULL) == -1 || 539 sigaction(SIGTERM, &signal_handler, NULL) == -1) 540 rte_exit(EXIT_FAILURE, "SIGNAL\n"); 541 542 543 /* init EAL */ 544 ret = rte_eal_init(argc, argv); 545 if (ret < 0) 546 rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n"); 547 argc -= ret; 548 argv += ret; 549 550 l2fwd_enabled_port_mask = 0; 551 552 /* parse application arguments (after the EAL ones) */ 553 ret = l2fwd_parse_args(argc, argv); 554 if (ret < 0) 555 rte_exit(EXIT_FAILURE, "Invalid L2FWD arguments\n"); 556 557 /* create the mbuf pool */ 558 l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 32, 559 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id()); 560 if (l2fwd_pktmbuf_pool == NULL) 561 rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n"); 562 563 nb_ports = rte_eth_dev_count_avail(); 564 if (nb_ports == 0) 565 rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n"); 566 567 /* reset l2fwd_dst_ports */ 568 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) 569 l2fwd_dst_ports[portid] = 0; 570 last_port = 0; 571 572 /* 573 * Each logical core is assigned a dedicated TX queue on each port. 574 */ 575 RTE_ETH_FOREACH_DEV(portid) { 576 /* skip ports that are not enabled */ 577 if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) 578 continue; 579 580 if (nb_ports_in_mask % 2) { 581 l2fwd_dst_ports[portid] = last_port; 582 l2fwd_dst_ports[last_port] = portid; 583 } else 584 last_port = portid; 585 586 nb_ports_in_mask++; 587 } 588 if (nb_ports_in_mask % 2) { 589 printf("Notice: odd number of ports in portmask.\n"); 590 l2fwd_dst_ports[last_port] = last_port; 591 } 592 593 rx_lcore_id = 1; 594 qconf = NULL; 595 596 /* Initialize the port/queue configuration of each logical core */ 597 RTE_ETH_FOREACH_DEV(portid) { 598 /* skip ports that are not enabled */ 599 if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) 600 continue; 601 602 /* get the lcore_id for this port */ 603 while (rte_lcore_is_enabled(rx_lcore_id) == 0 || 604 lcore_queue_conf[rx_lcore_id].n_rx_port == 605 l2fwd_rx_queue_per_lcore) { 606 rx_lcore_id++; 607 if (rx_lcore_id >= RTE_MAX_LCORE) 608 rte_exit(EXIT_FAILURE, "Not enough cores\n"); 609 } 610 611 if (qconf != &lcore_queue_conf[rx_lcore_id]) 612 /* Assigned a new logical core in the loop above. */ 613 qconf = &lcore_queue_conf[rx_lcore_id]; 614 615 qconf->rx_port_list[qconf->n_rx_port] = portid; 616 qconf->n_rx_port++; 617 printf("Lcore %u: RX port %u\n", 618 rx_lcore_id, portid); 619 } 620 621 /* Initialise each port */ 622 RTE_ETH_FOREACH_DEV(portid) { 623 struct rte_eth_dev_info dev_info; 624 struct rte_eth_rxconf rxq_conf; 625 struct rte_eth_txconf txq_conf; 626 struct rte_eth_conf local_port_conf = port_conf; 627 628 /* skip ports that are not enabled */ 629 if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) { 630 printf("Skipping disabled port %u\n", portid); 631 continue; 632 } 633 nb_ports_available++; 634 635 /* init port */ 636 printf("Initializing port %u... ", portid); 637 fflush(stdout); 638 rte_eth_dev_info_get(portid, &dev_info); 639 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE) 640 local_port_conf.txmode.offloads |= 641 DEV_TX_OFFLOAD_MBUF_FAST_FREE; 642 ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf); 643 if (ret < 0) 644 rte_exit(EXIT_FAILURE, 645 "Cannot configure device: err=%d, port=%u\n", 646 ret, portid); 647 648 ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, 649 &nb_txd); 650 if (ret < 0) 651 rte_exit(EXIT_FAILURE, 652 "Cannot adjust number of descriptors: err=%d, port=%u\n", 653 ret, portid); 654 655 rte_eth_macaddr_get(portid, &l2fwd_ports_eth_addr[portid]); 656 657 /* init one RX queue */ 658 fflush(stdout); 659 rxq_conf = dev_info.default_rxconf; 660 rxq_conf.offloads = local_port_conf.rxmode.offloads; 661 ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd, 662 rte_eth_dev_socket_id(portid), 663 &rxq_conf, 664 l2fwd_pktmbuf_pool); 665 if (ret < 0) 666 rte_exit(EXIT_FAILURE, 667 "rte_eth_rx_queue_setup:err=%d, port=%u\n", 668 ret, portid); 669 670 /* init one TX queue on each port */ 671 fflush(stdout); 672 txq_conf = dev_info.default_txconf; 673 txq_conf.offloads = local_port_conf.txmode.offloads; 674 ret = rte_eth_tx_queue_setup(portid, 0, nb_txd, 675 rte_eth_dev_socket_id(portid), 676 &txq_conf); 677 if (ret < 0) 678 rte_exit(EXIT_FAILURE, 679 "rte_eth_tx_queue_setup:err=%d, port=%u\n", 680 ret, portid); 681 682 /* Initialize TX buffers */ 683 tx_buffer[portid] = rte_zmalloc_socket("tx_buffer", 684 RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0, 685 rte_eth_dev_socket_id(portid)); 686 if (tx_buffer[portid] == NULL) 687 rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n", 688 portid); 689 690 rte_eth_tx_buffer_init(tx_buffer[portid], MAX_PKT_BURST); 691 692 ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[portid], 693 rte_eth_tx_buffer_count_callback, 694 &port_statistics[portid].dropped); 695 if (ret < 0) 696 rte_exit(EXIT_FAILURE, 697 "Cannot set error callback for tx buffer on port %u\n", 698 portid); 699 700 /* Start device */ 701 ret = rte_eth_dev_start(portid); 702 if (ret < 0) 703 rte_exit(EXIT_FAILURE, 704 "rte_eth_dev_start:err=%d, port=%u\n", 705 ret, portid); 706 707 rte_eth_promiscuous_enable(portid); 708 709 printf("Port %u, MAC address: " 710 "%02X:%02X:%02X:%02X:%02X:%02X\n\n", 711 portid, 712 l2fwd_ports_eth_addr[portid].addr_bytes[0], 713 l2fwd_ports_eth_addr[portid].addr_bytes[1], 714 l2fwd_ports_eth_addr[portid].addr_bytes[2], 715 l2fwd_ports_eth_addr[portid].addr_bytes[3], 716 l2fwd_ports_eth_addr[portid].addr_bytes[4], 717 l2fwd_ports_eth_addr[portid].addr_bytes[5]); 718 719 /* initialize port stats */ 720 memset(&port_statistics, 0, sizeof(port_statistics)); 721 } 722 723 if (!nb_ports_available) { 724 rte_exit(EXIT_FAILURE, 725 "All available ports are disabled. Please set portmask.\n"); 726 } 727 728 check_all_ports_link_status(l2fwd_enabled_port_mask); 729 730 struct rte_timer hb_timer, stats_timer; 731 732 rte_timer_subsystem_init(); 733 rte_timer_init(&stats_timer); 734 735 ka_shm = NULL; 736 if (check_period > 0) { 737 ka_shm = rte_keepalive_shm_create(); 738 if (ka_shm == NULL) 739 rte_exit(EXIT_FAILURE, 740 "rte_keepalive_shm_create() failed"); 741 rte_global_keepalive_info = 742 rte_keepalive_create(&dead_core, ka_shm); 743 if (rte_global_keepalive_info == NULL) 744 rte_exit(EXIT_FAILURE, "init_keep_alive() failed"); 745 rte_keepalive_register_relay_callback(rte_global_keepalive_info, 746 relay_core_state, ka_shm); 747 rte_timer_init(&hb_timer); 748 if (rte_timer_reset(&hb_timer, 749 (check_period * rte_get_timer_hz()) / 1000, 750 PERIODICAL, 751 rte_lcore_id(), 752 (void(*)(struct rte_timer*, void*)) 753 &rte_keepalive_dispatch_pings, 754 rte_global_keepalive_info 755 ) != 0 ) 756 rte_exit(EXIT_FAILURE, "Keepalive setup failure.\n"); 757 } 758 if (timer_period > 0) { 759 if (rte_timer_reset(&stats_timer, 760 (timer_period * rte_get_timer_hz()) / 1000, 761 PERIODICAL, 762 rte_lcore_id(), 763 &print_stats, NULL 764 ) != 0 ) 765 rte_exit(EXIT_FAILURE, "Stats setup failure.\n"); 766 } 767 /* launch per-lcore init on every slave lcore */ 768 RTE_LCORE_FOREACH_SLAVE(lcore_id) { 769 struct lcore_queue_conf *qconf = &lcore_queue_conf[lcore_id]; 770 771 if (qconf->n_rx_port == 0) 772 RTE_LOG(INFO, L2FWD, 773 "lcore %u has nothing to do\n", 774 lcore_id 775 ); 776 else { 777 rte_eal_remote_launch( 778 l2fwd_launch_one_lcore, 779 NULL, 780 lcore_id 781 ); 782 rte_keepalive_register_core(rte_global_keepalive_info, 783 lcore_id); 784 } 785 } 786 while (!terminate_signal_received) { 787 rte_timer_manage(); 788 rte_delay_ms(5); 789 } 790 791 RTE_LCORE_FOREACH_SLAVE(lcore_id) { 792 if (rte_eal_wait_lcore(lcore_id) < 0) 793 return -1; 794 } 795 796 if (ka_shm != NULL) 797 rte_keepalive_shm_cleanup(ka_shm); 798 return 0; 799 } 800