1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation 3 */ 4 5 #include <stdio.h> 6 #include <stdlib.h> 7 #include <string.h> 8 #include <stdint.h> 9 #include <inttypes.h> 10 #include <sys/types.h> 11 #include <sys/queue.h> 12 #include <netinet/in.h> 13 #include <setjmp.h> 14 #include <stdarg.h> 15 #include <ctype.h> 16 #include <errno.h> 17 #include <getopt.h> 18 #include <signal.h> 19 20 #include <rte_common.h> 21 #include <rte_log.h> 22 #include <rte_malloc.h> 23 #include <rte_memory.h> 24 #include <rte_memcpy.h> 25 #include <rte_eal.h> 26 #include <rte_launch.h> 27 #include <rte_atomic.h> 28 #include <rte_cycles.h> 29 #include <rte_prefetch.h> 30 #include <rte_lcore.h> 31 #include <rte_per_lcore.h> 32 #include <rte_branch_prediction.h> 33 #include <rte_interrupts.h> 34 #include <rte_random.h> 35 #include <rte_debug.h> 36 #include <rte_ether.h> 37 #include <rte_ethdev.h> 38 #include <rte_mempool.h> 39 #include <rte_mbuf.h> 40 #include <rte_timer.h> 41 #include <rte_keepalive.h> 42 43 #include "shm.h" 44 45 #define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1 46 47 #define NB_MBUF_PER_PORT 3000 48 49 #define MAX_PKT_BURST 32 50 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ 51 52 /* 53 * Configurable number of RX/TX ring descriptors 54 */ 55 #define RTE_TEST_RX_DESC_DEFAULT 1024 56 #define RTE_TEST_TX_DESC_DEFAULT 1024 57 static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; 58 static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; 59 60 /* ethernet addresses of ports */ 61 static struct rte_ether_addr l2fwd_ports_eth_addr[RTE_MAX_ETHPORTS]; 62 63 /* mask of enabled ports */ 64 static uint32_t l2fwd_enabled_port_mask; 65 66 /* list of enabled ports */ 67 static uint32_t l2fwd_dst_ports[RTE_MAX_ETHPORTS]; 68 69 static unsigned int l2fwd_rx_queue_per_lcore = 1; 70 71 #define MAX_RX_QUEUE_PER_LCORE 16 72 #define MAX_TX_QUEUE_PER_PORT 16 73 struct lcore_queue_conf { 74 unsigned n_rx_port; 75 unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE]; 76 } __rte_cache_aligned; 77 struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE]; 78 79 struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS]; 80 81 static struct rte_eth_conf port_conf = { 82 .rxmode = { 83 .split_hdr_size = 0, 84 }, 85 .txmode = { 86 .mq_mode = ETH_MQ_TX_NONE, 87 }, 88 }; 89 90 struct rte_mempool *l2fwd_pktmbuf_pool = NULL; 91 92 /* Per-port statistics struct */ 93 struct l2fwd_port_statistics { 94 uint64_t tx; 95 uint64_t rx; 96 uint64_t dropped; 97 } __rte_cache_aligned; 98 struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS]; 99 100 /* A tsc-based timer responsible for triggering statistics printout */ 101 #define TIMER_MILLISECOND 1 102 #define MAX_TIMER_PERIOD 86400 /* 1 day max */ 103 static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000; /* 10 seconds */ 104 static int64_t check_period = 5; /* default check cycle is 5ms */ 105 106 /* Keepalive structure */ 107 struct rte_keepalive *rte_global_keepalive_info; 108 109 /* Termination signalling */ 110 static int terminate_signal_received; 111 112 /* Termination signal handler */ 113 static void handle_sigterm(__rte_unused int value) 114 { 115 terminate_signal_received = 1; 116 } 117 118 /* Print out statistics on packets dropped */ 119 static void 120 print_stats(__rte_unused struct rte_timer *ptr_timer, 121 __rte_unused void *ptr_data) 122 { 123 uint64_t total_packets_dropped, total_packets_tx, total_packets_rx; 124 uint16_t portid; 125 126 total_packets_dropped = 0; 127 total_packets_tx = 0; 128 total_packets_rx = 0; 129 130 const char clr[] = { 27, '[', '2', 'J', '\0' }; 131 const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' }; 132 133 /* Clear screen and move to top left */ 134 printf("%s%s", clr, topLeft); 135 136 printf("\nPort statistics ===================================="); 137 138 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) { 139 /* skip disabled ports */ 140 if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) 141 continue; 142 printf("\nStatistics for port %u ------------------------------" 143 "\nPackets sent: %24"PRIu64 144 "\nPackets received: %20"PRIu64 145 "\nPackets dropped: %21"PRIu64, 146 portid, 147 port_statistics[portid].tx, 148 port_statistics[portid].rx, 149 port_statistics[portid].dropped); 150 151 total_packets_dropped += port_statistics[portid].dropped; 152 total_packets_tx += port_statistics[portid].tx; 153 total_packets_rx += port_statistics[portid].rx; 154 } 155 printf("\nAggregate statistics ===============================" 156 "\nTotal packets sent: %18"PRIu64 157 "\nTotal packets received: %14"PRIu64 158 "\nTotal packets dropped: %15"PRIu64, 159 total_packets_tx, 160 total_packets_rx, 161 total_packets_dropped); 162 printf("\n====================================================\n"); 163 164 fflush(stdout); 165 } 166 167 static void 168 l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid) 169 { 170 struct rte_ether_hdr *eth; 171 void *tmp; 172 int sent; 173 unsigned dst_port; 174 struct rte_eth_dev_tx_buffer *buffer; 175 176 dst_port = l2fwd_dst_ports[portid]; 177 eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *); 178 179 /* 02:00:00:00:00:xx */ 180 tmp = ð->dst_addr.addr_bytes[0]; 181 *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40); 182 183 /* src addr */ 184 rte_ether_addr_copy(&l2fwd_ports_eth_addr[dst_port], ð->src_addr); 185 186 buffer = tx_buffer[dst_port]; 187 sent = rte_eth_tx_buffer(dst_port, 0, buffer, m); 188 if (sent) 189 port_statistics[dst_port].tx += sent; 190 } 191 192 /* main processing loop */ 193 static void 194 l2fwd_main_loop(void) 195 { 196 struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; 197 struct rte_mbuf *m; 198 int sent; 199 unsigned lcore_id; 200 uint64_t prev_tsc, diff_tsc, cur_tsc; 201 unsigned i, j, portid, nb_rx; 202 struct lcore_queue_conf *qconf; 203 const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) 204 / US_PER_S * BURST_TX_DRAIN_US; 205 struct rte_eth_dev_tx_buffer *buffer; 206 207 prev_tsc = 0; 208 209 lcore_id = rte_lcore_id(); 210 qconf = &lcore_queue_conf[lcore_id]; 211 212 if (qconf->n_rx_port == 0) { 213 RTE_LOG(INFO, L2FWD, "lcore %u has nothing to do\n", lcore_id); 214 return; 215 } 216 217 RTE_LOG(INFO, L2FWD, "entering main loop on lcore %u\n", lcore_id); 218 219 for (i = 0; i < qconf->n_rx_port; i++) { 220 221 portid = qconf->rx_port_list[i]; 222 RTE_LOG(INFO, L2FWD, " -- lcoreid=%u portid=%u\n", lcore_id, 223 portid); 224 } 225 226 uint64_t tsc_initial = rte_rdtsc(); 227 uint64_t tsc_lifetime = (rand()&0x07) * rte_get_tsc_hz(); 228 229 while (!terminate_signal_received) { 230 /* Keepalive heartbeat. 8< */ 231 rte_keepalive_mark_alive(rte_global_keepalive_info); 232 233 cur_tsc = rte_rdtsc(); 234 235 /* 236 * Die randomly within 7 secs for demo purposes if 237 * keepalive enabled 238 */ 239 if (check_period > 0 && cur_tsc - tsc_initial > tsc_lifetime) 240 break; 241 /* >8 End of keepalive heartbeat. */ 242 243 /* 244 * TX burst queue drain 245 */ 246 diff_tsc = cur_tsc - prev_tsc; 247 if (unlikely(diff_tsc > drain_tsc)) { 248 249 for (i = 0; i < qconf->n_rx_port; i++) { 250 251 portid = l2fwd_dst_ports[qconf->rx_port_list[i]]; 252 buffer = tx_buffer[portid]; 253 254 sent = rte_eth_tx_buffer_flush(portid, 0, buffer); 255 if (sent) 256 port_statistics[portid].tx += sent; 257 258 } 259 260 prev_tsc = cur_tsc; 261 } 262 263 /* 264 * Read packet from RX queues 265 */ 266 for (i = 0; i < qconf->n_rx_port; i++) { 267 268 portid = qconf->rx_port_list[i]; 269 nb_rx = rte_eth_rx_burst(portid, 0, 270 pkts_burst, MAX_PKT_BURST); 271 272 port_statistics[portid].rx += nb_rx; 273 274 for (j = 0; j < nb_rx; j++) { 275 m = pkts_burst[j]; 276 rte_prefetch0(rte_pktmbuf_mtod(m, void *)); 277 l2fwd_simple_forward(m, portid); 278 } 279 } 280 } 281 } 282 283 static int 284 l2fwd_launch_one_lcore(__rte_unused void *dummy) 285 { 286 l2fwd_main_loop(); 287 return 0; 288 } 289 290 /* display usage */ 291 static void 292 l2fwd_usage(const char *prgname) 293 { 294 printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n" 295 " -p PORTMASK: hexadecimal bitmask of ports to configure\n" 296 " -q NQ: number of queue (=ports) per lcore (default is 1)\n" 297 " -K PERIOD: Keepalive check period (5 default; 86400 max)\n" 298 " -T PERIOD: statistics will be refreshed each PERIOD seconds (0 to disable, 10 default, 86400 maximum)\n", 299 prgname); 300 } 301 302 static int 303 l2fwd_parse_portmask(const char *portmask) 304 { 305 char *end = NULL; 306 unsigned long pm; 307 308 /* parse hexadecimal string */ 309 pm = strtoul(portmask, &end, 16); 310 if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0')) 311 return 0; 312 313 return pm; 314 } 315 316 static unsigned int 317 l2fwd_parse_nqueue(const char *q_arg) 318 { 319 char *end = NULL; 320 unsigned long n; 321 322 /* parse hexadecimal string */ 323 n = strtoul(q_arg, &end, 10); 324 if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0')) 325 return 0; 326 if (n == 0) 327 return 0; 328 if (n >= MAX_RX_QUEUE_PER_LCORE) 329 return 0; 330 331 return n; 332 } 333 334 static int 335 l2fwd_parse_timer_period(const char *q_arg) 336 { 337 char *end = NULL; 338 int n; 339 340 /* parse number string */ 341 n = strtol(q_arg, &end, 10); 342 if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0')) 343 return -1; 344 if (n >= MAX_TIMER_PERIOD) 345 return -1; 346 347 return n; 348 } 349 350 static int 351 l2fwd_parse_check_period(const char *q_arg) 352 { 353 char *end = NULL; 354 int n; 355 356 /* parse number string */ 357 n = strtol(q_arg, &end, 10); 358 if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0')) 359 return -1; 360 if (n >= MAX_TIMER_PERIOD) 361 return -1; 362 363 return n; 364 } 365 366 /* Parse the argument given in the command line of the application */ 367 static int 368 l2fwd_parse_args(int argc, char **argv) 369 { 370 int opt, ret; 371 char **argvopt; 372 int option_index; 373 char *prgname = argv[0]; 374 static struct option lgopts[] = { 375 {NULL, 0, 0, 0} 376 }; 377 378 argvopt = argv; 379 380 while ((opt = getopt_long(argc, argvopt, "p:q:T:K:", 381 lgopts, &option_index)) != EOF) { 382 383 switch (opt) { 384 /* portmask */ 385 case 'p': 386 l2fwd_enabled_port_mask = l2fwd_parse_portmask(optarg); 387 if (l2fwd_enabled_port_mask == 0) { 388 printf("invalid portmask\n"); 389 l2fwd_usage(prgname); 390 return -1; 391 } 392 break; 393 394 /* nqueue */ 395 case 'q': 396 l2fwd_rx_queue_per_lcore = l2fwd_parse_nqueue(optarg); 397 if (l2fwd_rx_queue_per_lcore == 0) { 398 printf("invalid queue number\n"); 399 l2fwd_usage(prgname); 400 return -1; 401 } 402 break; 403 404 /* timer period */ 405 case 'T': 406 timer_period = l2fwd_parse_timer_period(optarg) 407 * (int64_t)(1000 * TIMER_MILLISECOND); 408 if (timer_period < 0) { 409 printf("invalid timer period\n"); 410 l2fwd_usage(prgname); 411 return -1; 412 } 413 break; 414 415 /* Check period */ 416 case 'K': 417 check_period = l2fwd_parse_check_period(optarg); 418 if (check_period < 0) { 419 printf("invalid check period\n"); 420 l2fwd_usage(prgname); 421 return -1; 422 } 423 break; 424 425 /* long options */ 426 case 0: 427 l2fwd_usage(prgname); 428 return -1; 429 430 default: 431 l2fwd_usage(prgname); 432 return -1; 433 } 434 } 435 436 if (optind >= 0) 437 argv[optind-1] = prgname; 438 439 ret = optind-1; 440 optind = 1; /* reset getopt lib */ 441 return ret; 442 } 443 444 /* Check the link status of all ports in up to 9s, and print them finally */ 445 static void 446 check_all_ports_link_status(uint32_t port_mask) 447 { 448 #define CHECK_INTERVAL 100 /* 100ms */ 449 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 450 uint16_t portid; 451 uint8_t count, all_ports_up, print_flag = 0; 452 struct rte_eth_link link; 453 int ret; 454 char link_status_text[RTE_ETH_LINK_MAX_STR_LEN]; 455 456 printf("\nChecking link status"); 457 fflush(stdout); 458 for (count = 0; count <= MAX_CHECK_TIME; count++) { 459 all_ports_up = 1; 460 RTE_ETH_FOREACH_DEV(portid) { 461 if ((port_mask & (1 << portid)) == 0) 462 continue; 463 memset(&link, 0, sizeof(link)); 464 ret = rte_eth_link_get_nowait(portid, &link); 465 if (ret < 0) { 466 all_ports_up = 0; 467 if (print_flag == 1) 468 printf("Port %u link get failed: %s\n", 469 portid, rte_strerror(-ret)); 470 continue; 471 } 472 /* print link status if flag set */ 473 if (print_flag == 1) { 474 rte_eth_link_to_str(link_status_text, 475 sizeof(link_status_text), &link); 476 printf("Port %d %s\n", portid, 477 link_status_text); 478 continue; 479 } 480 /* clear all_ports_up flag if any link down */ 481 if (link.link_status == ETH_LINK_DOWN) { 482 all_ports_up = 0; 483 break; 484 } 485 } 486 /* after finally printing all link status, get out */ 487 if (print_flag == 1) 488 break; 489 490 if (all_ports_up == 0) { 491 printf("."); 492 fflush(stdout); 493 rte_delay_ms(CHECK_INTERVAL); 494 } 495 496 /* set the print_flag if all ports up or timeout */ 497 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 498 print_flag = 1; 499 printf("done\n"); 500 } 501 } 502 } 503 504 static void 505 dead_core(__rte_unused void *ptr_data, const int id_core) 506 { 507 if (terminate_signal_received) 508 return; 509 printf("Dead core %i - restarting..\n", id_core); 510 if (rte_eal_get_lcore_state(id_core) == FINISHED) { 511 rte_eal_wait_lcore(id_core); 512 rte_eal_remote_launch(l2fwd_launch_one_lcore, NULL, id_core); 513 } else { 514 printf("..false positive!\n"); 515 } 516 } 517 518 static void 519 relay_core_state(void *ptr_data, const int id_core, 520 const enum rte_keepalive_state core_state, uint64_t last_alive) 521 { 522 rte_keepalive_relayed_state((struct rte_keepalive_shm *)ptr_data, 523 id_core, core_state, last_alive); 524 } 525 526 int 527 main(int argc, char **argv) 528 { 529 struct lcore_queue_conf *qconf; 530 int ret; 531 uint16_t nb_ports; 532 uint16_t nb_ports_available = 0; 533 uint16_t portid, last_port; 534 unsigned lcore_id, rx_lcore_id; 535 unsigned nb_ports_in_mask = 0; 536 unsigned int total_nb_mbufs; 537 struct sigaction signal_handler; 538 struct rte_keepalive_shm *ka_shm; 539 540 memset(&signal_handler, 0, sizeof(signal_handler)); 541 terminate_signal_received = 0; 542 signal_handler.sa_handler = &handle_sigterm; 543 if (sigaction(SIGINT, &signal_handler, NULL) == -1 || 544 sigaction(SIGTERM, &signal_handler, NULL) == -1) 545 rte_exit(EXIT_FAILURE, "SIGNAL\n"); 546 547 548 /* init EAL */ 549 ret = rte_eal_init(argc, argv); 550 if (ret < 0) 551 rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n"); 552 argc -= ret; 553 argv += ret; 554 555 l2fwd_enabled_port_mask = 0; 556 557 /* parse application arguments (after the EAL ones) */ 558 ret = l2fwd_parse_args(argc, argv); 559 if (ret < 0) 560 rte_exit(EXIT_FAILURE, "Invalid L2FWD arguments\n"); 561 562 nb_ports = rte_eth_dev_count_avail(); 563 if (nb_ports == 0) 564 rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n"); 565 566 /* create the mbuf pool */ 567 total_nb_mbufs = NB_MBUF_PER_PORT * nb_ports; 568 569 l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", 570 total_nb_mbufs, 32, 0, RTE_MBUF_DEFAULT_BUF_SIZE, 571 rte_socket_id()); 572 if (l2fwd_pktmbuf_pool == NULL) 573 rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n"); 574 575 /* reset l2fwd_dst_ports */ 576 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) 577 l2fwd_dst_ports[portid] = 0; 578 last_port = 0; 579 580 /* 581 * Each logical core is assigned a dedicated TX queue on each port. 582 */ 583 RTE_ETH_FOREACH_DEV(portid) { 584 /* skip ports that are not enabled */ 585 if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) 586 continue; 587 588 if (nb_ports_in_mask % 2) { 589 l2fwd_dst_ports[portid] = last_port; 590 l2fwd_dst_ports[last_port] = portid; 591 } else 592 last_port = portid; 593 594 nb_ports_in_mask++; 595 } 596 if (nb_ports_in_mask % 2) { 597 printf("Notice: odd number of ports in portmask.\n"); 598 l2fwd_dst_ports[last_port] = last_port; 599 } 600 601 rx_lcore_id = 1; 602 qconf = NULL; 603 604 /* Initialize the port/queue configuration of each logical core */ 605 RTE_ETH_FOREACH_DEV(portid) { 606 /* skip ports that are not enabled */ 607 if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) 608 continue; 609 610 /* get the lcore_id for this port */ 611 while (rte_lcore_is_enabled(rx_lcore_id) == 0 || 612 lcore_queue_conf[rx_lcore_id].n_rx_port == 613 l2fwd_rx_queue_per_lcore) { 614 rx_lcore_id++; 615 if (rx_lcore_id >= RTE_MAX_LCORE) 616 rte_exit(EXIT_FAILURE, "Not enough cores\n"); 617 } 618 619 if (qconf != &lcore_queue_conf[rx_lcore_id]) 620 /* Assigned a new logical core in the loop above. */ 621 qconf = &lcore_queue_conf[rx_lcore_id]; 622 623 qconf->rx_port_list[qconf->n_rx_port] = portid; 624 qconf->n_rx_port++; 625 printf("Lcore %u: RX port %u\n", 626 rx_lcore_id, portid); 627 } 628 629 /* Initialise each port */ 630 RTE_ETH_FOREACH_DEV(portid) { 631 struct rte_eth_dev_info dev_info; 632 struct rte_eth_rxconf rxq_conf; 633 struct rte_eth_txconf txq_conf; 634 struct rte_eth_conf local_port_conf = port_conf; 635 636 /* skip ports that are not enabled */ 637 if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) { 638 printf("Skipping disabled port %u\n", portid); 639 continue; 640 } 641 nb_ports_available++; 642 643 /* init port */ 644 printf("Initializing port %u... ", portid); 645 fflush(stdout); 646 647 ret = rte_eth_dev_info_get(portid, &dev_info); 648 if (ret != 0) 649 rte_exit(EXIT_FAILURE, 650 "Error during getting device (port %u) info: %s\n", 651 portid, strerror(-ret)); 652 653 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE) 654 local_port_conf.txmode.offloads |= 655 DEV_TX_OFFLOAD_MBUF_FAST_FREE; 656 ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf); 657 if (ret < 0) 658 rte_exit(EXIT_FAILURE, 659 "Cannot configure device: err=%d, port=%u\n", 660 ret, portid); 661 662 ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, 663 &nb_txd); 664 if (ret < 0) 665 rte_exit(EXIT_FAILURE, 666 "Cannot adjust number of descriptors: err=%d, port=%u\n", 667 ret, portid); 668 669 ret = rte_eth_macaddr_get(portid, 670 &l2fwd_ports_eth_addr[portid]); 671 if (ret < 0) 672 rte_exit(EXIT_FAILURE, 673 "Cannot mac address: err=%d, port=%u\n", 674 ret, portid); 675 676 /* init one RX queue */ 677 fflush(stdout); 678 rxq_conf = dev_info.default_rxconf; 679 rxq_conf.offloads = local_port_conf.rxmode.offloads; 680 ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd, 681 rte_eth_dev_socket_id(portid), 682 &rxq_conf, 683 l2fwd_pktmbuf_pool); 684 if (ret < 0) 685 rte_exit(EXIT_FAILURE, 686 "rte_eth_rx_queue_setup:err=%d, port=%u\n", 687 ret, portid); 688 689 /* init one TX queue on each port */ 690 fflush(stdout); 691 txq_conf = dev_info.default_txconf; 692 txq_conf.offloads = local_port_conf.txmode.offloads; 693 ret = rte_eth_tx_queue_setup(portid, 0, nb_txd, 694 rte_eth_dev_socket_id(portid), 695 &txq_conf); 696 if (ret < 0) 697 rte_exit(EXIT_FAILURE, 698 "rte_eth_tx_queue_setup:err=%d, port=%u\n", 699 ret, portid); 700 701 /* Initialize TX buffers */ 702 tx_buffer[portid] = rte_zmalloc_socket("tx_buffer", 703 RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0, 704 rte_eth_dev_socket_id(portid)); 705 if (tx_buffer[portid] == NULL) 706 rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n", 707 portid); 708 709 rte_eth_tx_buffer_init(tx_buffer[portid], MAX_PKT_BURST); 710 711 ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[portid], 712 rte_eth_tx_buffer_count_callback, 713 &port_statistics[portid].dropped); 714 if (ret < 0) 715 rte_exit(EXIT_FAILURE, 716 "Cannot set error callback for tx buffer on port %u\n", 717 portid); 718 719 /* Start device */ 720 ret = rte_eth_dev_start(portid); 721 if (ret < 0) 722 rte_exit(EXIT_FAILURE, 723 "rte_eth_dev_start:err=%d, port=%u\n", 724 ret, portid); 725 726 ret = rte_eth_promiscuous_enable(portid); 727 if (ret != 0) 728 rte_exit(EXIT_FAILURE, 729 "rte_eth_promiscuous_enable:err=%s, port=%u\n", 730 rte_strerror(-ret), portid); 731 732 printf("Port %u, MAC address: " 733 RTE_ETHER_ADDR_PRT_FMT "\n\n", 734 portid, 735 RTE_ETHER_ADDR_BYTES(&l2fwd_ports_eth_addr[portid])); 736 737 /* initialize port stats */ 738 memset(&port_statistics, 0, sizeof(port_statistics)); 739 } 740 741 if (!nb_ports_available) { 742 rte_exit(EXIT_FAILURE, 743 "All available ports are disabled. Please set portmask.\n"); 744 } 745 746 check_all_ports_link_status(l2fwd_enabled_port_mask); 747 748 struct rte_timer hb_timer, stats_timer; 749 750 rte_timer_subsystem_init(); 751 rte_timer_init(&stats_timer); 752 753 ka_shm = NULL; 754 if (check_period > 0) { 755 ka_shm = rte_keepalive_shm_create(); 756 if (ka_shm == NULL) 757 rte_exit(EXIT_FAILURE, 758 "rte_keepalive_shm_create() failed"); 759 /* Initialize keepalive functionality. 8< */ 760 rte_global_keepalive_info = 761 rte_keepalive_create(&dead_core, ka_shm); 762 if (rte_global_keepalive_info == NULL) 763 rte_exit(EXIT_FAILURE, "init_keep_alive() failed"); 764 /* >8 End of initializing keepalive functionality. */ 765 rte_keepalive_register_relay_callback(rte_global_keepalive_info, 766 relay_core_state, ka_shm); 767 rte_timer_init(&hb_timer); 768 if (rte_timer_reset(&hb_timer, 769 (check_period * rte_get_timer_hz()) / 1000, 770 PERIODICAL, 771 rte_lcore_id(), 772 (void(*)(struct rte_timer*, void*)) 773 &rte_keepalive_dispatch_pings, 774 rte_global_keepalive_info 775 ) != 0 ) 776 rte_exit(EXIT_FAILURE, "Keepalive setup failure.\n"); 777 } 778 if (timer_period > 0) { 779 /* Issues the pings keepalive_dispatch_pings(). 8< */ 780 if (rte_timer_reset(&stats_timer, 781 (timer_period * rte_get_timer_hz()) / 1000, 782 PERIODICAL, 783 rte_lcore_id(), 784 &print_stats, NULL 785 ) != 0 ) 786 rte_exit(EXIT_FAILURE, "Stats setup failure.\n"); 787 /* >8 End of issuing the pings keepalive_dispatch_pings(). */ 788 } 789 /* launch per-lcore init on every worker lcore */ 790 RTE_LCORE_FOREACH_WORKER(lcore_id) { 791 struct lcore_queue_conf *qconf = &lcore_queue_conf[lcore_id]; 792 793 if (qconf->n_rx_port == 0) 794 RTE_LOG(INFO, L2FWD, 795 "lcore %u has nothing to do\n", 796 lcore_id 797 ); 798 else { 799 rte_eal_remote_launch( 800 l2fwd_launch_one_lcore, 801 NULL, 802 lcore_id 803 ); 804 rte_keepalive_register_core(rte_global_keepalive_info, 805 lcore_id); 806 } 807 } 808 while (!terminate_signal_received) { 809 rte_timer_manage(); 810 rte_delay_ms(5); 811 } 812 813 RTE_LCORE_FOREACH_WORKER(lcore_id) { 814 if (rte_eal_wait_lcore(lcore_id) < 0) 815 return -1; 816 } 817 818 if (ka_shm != NULL) 819 rte_keepalive_shm_cleanup(ka_shm); 820 821 /* clean up the EAL */ 822 rte_eal_cleanup(); 823 824 return 0; 825 } 826