/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <netinet/in.h>
#include <setjmp.h>
#include <stdarg.h>
#include <ctype.h>
#include <errno.h>
#include <getopt.h>
#include <signal.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_timer.h>
#include <rte_keepalive.h>

#include "shm.h"

#define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1

#define NB_MBUF_PER_PORT 3000

#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 1024
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;

/* ethernet addresses of ports */
static struct rte_ether_addr l2fwd_ports_eth_addr[RTE_MAX_ETHPORTS];

/* mask of enabled ports */
static uint32_t l2fwd_enabled_port_mask;

/* list of enabled ports */
static uint32_t l2fwd_dst_ports[RTE_MAX_ETHPORTS];

static unsigned int l2fwd_rx_queue_per_lcore = 1;

#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
struct lcore_queue_conf {
	unsigned n_rx_port;
	unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE];
} __rte_cache_aligned;
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];

struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];

static struct rte_eth_conf port_conf = {
	.rxmode = {
		.split_hdr_size = 0,
	},
	.txmode = {
		.mq_mode = RTE_ETH_MQ_TX_NONE,
	},
};

struct rte_mempool *l2fwd_pktmbuf_pool = NULL;

/* Per-port statistics struct */
struct l2fwd_port_statistics {
	uint64_t tx;
	uint64_t rx;
	uint64_t dropped;
} __rte_cache_aligned;
struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS];

/* A tsc-based timer responsible for triggering statistics printout */
#define TIMER_MILLISECOND 1
#define MAX_TIMER_PERIOD 86400 /* 1 day max */
static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000; /* 10 seconds */
static int64_t check_period = 5; /* default check cycle is 5ms */

/* Keepalive structure */
struct rte_keepalive *rte_global_keepalive_info;

/* Termination signalling */
static int terminate_signal_received;

/* Termination signal handler */
static void handle_sigterm(__rte_unused int value)
{
	terminate_signal_received = 1;
}

/* Print out statistics on packets dropped */
static void
print_stats(__rte_unused struct rte_timer *ptr_timer,
	__rte_unused void *ptr_data)
{
	uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
	uint16_t portid;

	total_packets_dropped = 0;
	total_packets_tx = 0;
	total_packets_rx = 0;

	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	/* Clear screen and move to top left */
	printf("%s%s", clr, topLeft);

	printf("\nPort statistics ====================================");

	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
		/* skip disabled ports */
		if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
			continue;
		printf("\nStatistics for port %u ------------------------------"
			   "\nPackets sent: %24"PRIu64
			   "\nPackets received: %20"PRIu64
			   "\nPackets dropped: %21"PRIu64,
			   portid,
			   port_statistics[portid].tx,
			   port_statistics[portid].rx,
			   port_statistics[portid].dropped);

		total_packets_dropped += port_statistics[portid].dropped;
		total_packets_tx += port_statistics[portid].tx;
		total_packets_rx += port_statistics[portid].rx;
	}
	printf("\nAggregate statistics ==============================="
		   "\nTotal packets sent: %18"PRIu64
		   "\nTotal packets received: %14"PRIu64
		   "\nTotal packets dropped: %15"PRIu64,
		   total_packets_tx,
		   total_packets_rx,
		   total_packets_dropped);
	printf("\n====================================================\n");

	fflush(stdout);
}

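/*
 * Forward a packet to the port paired with its RX port: rewrite the
 * destination MAC to the locally administered address
 * 02:00:00:00:00:<dst_port> and the source MAC to the TX port's own
 * address, then queue the packet on that port's TX buffer.
 */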
static void
l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)
{
	struct rte_ether_hdr *eth;
	void *tmp;
	int sent;
	unsigned dst_port;
	struct rte_eth_dev_tx_buffer *buffer;

	dst_port = l2fwd_dst_ports[portid];
	eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	/* 02:00:00:00:00:xx */
	tmp = &eth->dst_addr.addr_bytes[0];
	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);

	/* src addr */
	rte_ether_addr_copy(&l2fwd_ports_eth_addr[dst_port], &eth->src_addr);

	buffer = tx_buffer[dst_port];
	sent = rte_eth_tx_buffer(dst_port, 0, buffer, m);
	if (sent)
		port_statistics[dst_port].tx += sent;
}

/* main processing loop */
static void
l2fwd_main_loop(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct rte_mbuf *m;
	int sent;
	unsigned lcore_id;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	unsigned i, j, portid, nb_rx;
	struct lcore_queue_conf *qconf;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
			/ US_PER_S * BURST_TX_DRAIN_US;
	struct rte_eth_dev_tx_buffer *buffer;

	prev_tsc = 0;

	lcore_id = rte_lcore_id();
	qconf = &lcore_queue_conf[lcore_id];

	if (qconf->n_rx_port == 0) {
		RTE_LOG(INFO, L2FWD, "lcore %u has nothing to do\n", lcore_id);
		return;
	}

	RTE_LOG(INFO, L2FWD, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->n_rx_port; i++) {

		portid = qconf->rx_port_list[i];
		RTE_LOG(INFO, L2FWD, " -- lcoreid=%u portid=%u\n", lcore_id,
			portid);
	}

	uint64_t tsc_initial = rte_rdtsc();
	uint64_t tsc_lifetime = (rand()&0x07) * rte_get_tsc_hz();

	while (!terminate_signal_received) {
		/* Keepalive heartbeat. 8< */
		rte_keepalive_mark_alive(rte_global_keepalive_info);

		cur_tsc = rte_rdtsc();

		/*
		 * Die randomly within 7 secs for demo purposes if
		 * keepalive enabled
		 */
		if (check_period > 0 && cur_tsc - tsc_initial > tsc_lifetime)
			break;
		/* >8 End of keepalive heartbeat. */

		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {

			for (i = 0; i < qconf->n_rx_port; i++) {

				portid = l2fwd_dst_ports[qconf->rx_port_list[i]];
				buffer = tx_buffer[portid];

				sent = rte_eth_tx_buffer_flush(portid, 0, buffer);
				if (sent)
					port_statistics[portid].tx += sent;

			}

			prev_tsc = cur_tsc;
		}

		/*
		 * Read packet from RX queues
		 */
		for (i = 0; i < qconf->n_rx_port; i++) {

			portid = qconf->rx_port_list[i];
			nb_rx = rte_eth_rx_burst(portid, 0,
						 pkts_burst, MAX_PKT_BURST);

			port_statistics[portid].rx += nb_rx;

			for (j = 0; j < nb_rx; j++) {
				m = pkts_burst[j];
				rte_prefetch0(rte_pktmbuf_mtod(m, void *));
				l2fwd_simple_forward(m, portid);
			}
		}
	}
}

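/*
 * Per-lcore entry point: a thin wrapper with the int (*)(void *)
 * signature expected by rte_eal_remote_launch().
 */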
static int
l2fwd_launch_one_lcore(__rte_unused void *dummy)
{
	l2fwd_main_loop();
	return 0;
}

/* display usage */
static void
l2fwd_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK [-q NQ] [-K PERIOD] [-T PERIOD]\n"
	       "  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
	       "  -q NQ: number of queues (=ports) per lcore (default is 1)\n"
	       "  -K PERIOD: keepalive check period in milliseconds (5 default; 86400 max)\n"
	       "  -T PERIOD: statistics will be refreshed each PERIOD seconds (0 to disable, 10 default, 86400 maximum)\n",
	       prgname);
}

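/*
 * Command line parsing helpers. Each returns a sanitised value, or
 * 0 / -1 when the argument is empty, non-numeric or out of range,
 * which l2fwd_parse_args() rejects as an invalid option.
 */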
static int
l2fwd_parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	return pm;
}

static unsigned int
l2fwd_parse_nqueue(const char *q_arg)
{
	char *end = NULL;
	unsigned long n;

	/* parse decimal string */
	n = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;
	if (n == 0)
		return 0;
	if (n >= MAX_RX_QUEUE_PER_LCORE)
		return 0;

	return n;
}

static int
l2fwd_parse_timer_period(const char *q_arg)
{
	char *end = NULL;
	int n;

	/* parse number string */
	n = strtol(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;
	if (n >= MAX_TIMER_PERIOD)
		return -1;

	return n;
}

static int
l2fwd_parse_check_period(const char *q_arg)
{
	char *end = NULL;
	int n;

	/* parse number string */
	n = strtol(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;
	if (n >= MAX_TIMER_PERIOD)
		return -1;

	return n;
}

/* Parse the argument given in the command line of the application */
static int
l2fwd_parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{NULL, 0, 0, 0}
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "p:q:T:K:",
				  lgopts, &option_index)) != EOF) {

		switch (opt) {
		/* portmask */
		case 'p':
			l2fwd_enabled_port_mask = l2fwd_parse_portmask(optarg);
			if (l2fwd_enabled_port_mask == 0) {
				printf("invalid portmask\n");
				l2fwd_usage(prgname);
				return -1;
			}
			break;

		/* nqueue */
		case 'q':
			l2fwd_rx_queue_per_lcore = l2fwd_parse_nqueue(optarg);
			if (l2fwd_rx_queue_per_lcore == 0) {
				printf("invalid queue number\n");
				l2fwd_usage(prgname);
				return -1;
			}
			break;

		/* timer period */
		case 'T':
			timer_period = l2fwd_parse_timer_period(optarg)
				* (int64_t)(1000 * TIMER_MILLISECOND);
			if (timer_period < 0) {
				printf("invalid timer period\n");
				l2fwd_usage(prgname);
				return -1;
			}
			break;

		/* Check period */
		case 'K':
			check_period = l2fwd_parse_check_period(optarg);
			if (check_period < 0) {
				printf("invalid check period\n");
				l2fwd_usage(prgname);
				return -1;
			}
			break;

		/* long options */
		case 0:
			l2fwd_usage(prgname);
			return -1;

		default:
			l2fwd_usage(prgname);
			return -1;
		}
	}

	if (optind >= 0)
		argv[optind-1] = prgname;

	ret = optind-1;
	optind = 1; /* reset getopt lib */
	return ret;
}

/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint16_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;
	int ret;
	char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];

	printf("\nChecking link status");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			ret = rte_eth_link_get_nowait(portid, &link);
			if (ret < 0) {
				all_ports_up = 0;
				if (print_flag == 1)
					printf("Port %u link get failed: %s\n",
						portid, rte_strerror(-ret));
				continue;
			}
			/* print link status if flag set */
			if (print_flag == 1) {
				rte_eth_link_to_str(link_status_text,
					sizeof(link_status_text), &link);
				printf("Port %d %s\n", portid,
				       link_status_text);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == RTE_ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}

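/*
 * Keepalive callback invoked when a core is reported dead. If the lcore
 * has really stopped (state FINISHED), reap it and relaunch the
 * forwarding loop on it; otherwise treat the report as a false positive.
 */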
static void
dead_core(__rte_unused void *ptr_data, const int id_core)
{
	if (terminate_signal_received)
		return;
	printf("Dead core %i - restarting..\n", id_core);
	if (rte_eal_get_lcore_state(id_core) == FINISHED) {
		rte_eal_wait_lcore(id_core);
		rte_eal_remote_launch(l2fwd_launch_one_lcore, NULL, id_core);
	} else {
		printf("..false positive!\n");
	}
}

/*
 * Relay each core-state change into the keepalive shared-memory region
 * so that an external monitor process can observe core liveness.
 */
static void
relay_core_state(void *ptr_data, const int id_core,
	const enum rte_keepalive_state core_state, uint64_t last_alive)
{
	rte_keepalive_relayed_state((struct rte_keepalive_shm *)ptr_data,
		id_core, core_state, last_alive);
}

int
main(int argc, char **argv)
{
	struct lcore_queue_conf *qconf;
	int ret;
	uint16_t nb_ports;
	uint16_t nb_ports_available = 0;
	uint16_t portid, last_port;
	unsigned lcore_id, rx_lcore_id;
	unsigned nb_ports_in_mask = 0;
	unsigned int total_nb_mbufs;
	struct sigaction signal_handler;
	struct rte_keepalive_shm *ka_shm;

	memset(&signal_handler, 0, sizeof(signal_handler));
	terminate_signal_received = 0;
	signal_handler.sa_handler = &handle_sigterm;
	if (sigaction(SIGINT, &signal_handler, NULL) == -1 ||
			sigaction(SIGTERM, &signal_handler, NULL) == -1)
		rte_exit(EXIT_FAILURE, "SIGNAL\n");

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
	argc -= ret;
	argv += ret;

	l2fwd_enabled_port_mask = 0;

	/* parse application arguments (after the EAL ones) */
	ret = l2fwd_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid L2FWD arguments\n");

	nb_ports = rte_eth_dev_count_avail();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");

	/* create the mbuf pool */
	total_nb_mbufs = NB_MBUF_PER_PORT * nb_ports;

	l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool",
		total_nb_mbufs, 32, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
		rte_socket_id());
	if (l2fwd_pktmbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");

	/* reset l2fwd_dst_ports */
	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
		l2fwd_dst_ports[portid] = 0;
	last_port = 0;

	/*
	 * Pair the enabled ports: each port forwards to the other member of
	 * its pair, and with an odd number of enabled ports the last port
	 * forwards to itself.
	 */
	RTE_ETH_FOREACH_DEV(portid) {
		/* skip ports that are not enabled */
		if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
			continue;

		if (nb_ports_in_mask % 2) {
			l2fwd_dst_ports[portid] = last_port;
			l2fwd_dst_ports[last_port] = portid;
		} else
			last_port = portid;

		nb_ports_in_mask++;
	}
	if (nb_ports_in_mask % 2) {
		printf("Notice: odd number of ports in portmask.\n");
		l2fwd_dst_ports[last_port] = last_port;
	}

	rx_lcore_id = 1;
	qconf = NULL;

	/* Initialize the port/queue configuration of each logical core */
	RTE_ETH_FOREACH_DEV(portid) {
		/* skip ports that are not enabled */
		if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
			continue;

		/* get the lcore_id for this port */
		while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
		       lcore_queue_conf[rx_lcore_id].n_rx_port ==
		       l2fwd_rx_queue_per_lcore) {
			rx_lcore_id++;
			if (rx_lcore_id >= RTE_MAX_LCORE)
				rte_exit(EXIT_FAILURE, "Not enough cores\n");
		}

		if (qconf != &lcore_queue_conf[rx_lcore_id])
			/* Assigned a new logical core in the loop above. */
			qconf = &lcore_queue_conf[rx_lcore_id];

		qconf->rx_port_list[qconf->n_rx_port] = portid;
		qconf->n_rx_port++;
		printf("Lcore %u: RX port %u\n",
			rx_lcore_id, portid);
	}

	/* Initialise each port */
	RTE_ETH_FOREACH_DEV(portid) {
		struct rte_eth_dev_info dev_info;
		struct rte_eth_rxconf rxq_conf;
		struct rte_eth_txconf txq_conf;
		struct rte_eth_conf local_port_conf = port_conf;

		/* skip ports that are not enabled */
		if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) {
			printf("Skipping disabled port %u\n", portid);
			continue;
		}
		nb_ports_available++;

		/* init port */
		printf("Initializing port %u... ", portid);
		fflush(stdout);

		ret = rte_eth_dev_info_get(portid, &dev_info);
		if (ret != 0)
			rte_exit(EXIT_FAILURE,
				"Error during getting device (port %u) info: %s\n",
				portid, strerror(-ret));

		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
			local_port_conf.txmode.offloads |=
				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
		ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"Cannot configure device: err=%d, port=%u\n",
				ret, portid);

		ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
						       &nb_txd);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"Cannot adjust number of descriptors: err=%d, port=%u\n",
				ret, portid);

		ret = rte_eth_macaddr_get(portid,
					  &l2fwd_ports_eth_addr[portid]);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"Cannot get MAC address: err=%d, port=%u\n",
				ret, portid);

		/* init one RX queue */
		fflush(stdout);
		rxq_conf = dev_info.default_rxconf;
		rxq_conf.offloads = local_port_conf.rxmode.offloads;
		ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
					     rte_eth_dev_socket_id(portid),
					     &rxq_conf,
					     l2fwd_pktmbuf_pool);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"rte_eth_rx_queue_setup:err=%d, port=%u\n",
				ret, portid);

		/* init one TX queue on each port */
		fflush(stdout);
		txq_conf = dev_info.default_txconf;
		txq_conf.offloads = local_port_conf.txmode.offloads;
		ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
				rte_eth_dev_socket_id(portid),
				&txq_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"rte_eth_tx_queue_setup:err=%d, port=%u\n",
				ret, portid);

		/* Initialize TX buffers */
		tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
				RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0,
				rte_eth_dev_socket_id(portid));
		if (tx_buffer[portid] == NULL)
			rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
					portid);

		rte_eth_tx_buffer_init(tx_buffer[portid], MAX_PKT_BURST);

		ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[portid],
				rte_eth_tx_buffer_count_callback,
				&port_statistics[portid].dropped);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"Cannot set error callback for tx buffer on port %u\n",
				portid);

		/* Start device */
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"rte_eth_dev_start:err=%d, port=%u\n",
				ret, portid);

		ret = rte_eth_promiscuous_enable(portid);
		if (ret != 0)
			rte_exit(EXIT_FAILURE,
				"rte_eth_promiscuous_enable:err=%s, port=%u\n",
				rte_strerror(-ret), portid);

		printf("Port %u, MAC address: "
			RTE_ETHER_ADDR_PRT_FMT "\n\n",
			portid,
			RTE_ETHER_ADDR_BYTES(&l2fwd_ports_eth_addr[portid]));

		/* initialize port stats */
		memset(&port_statistics, 0, sizeof(port_statistics));
	}

	if (!nb_ports_available) {
		rte_exit(EXIT_FAILURE,
			"All available ports are disabled. Please set portmask.\n");
	}

	check_all_ports_link_status(l2fwd_enabled_port_mask);

	struct rte_timer hb_timer, stats_timer;

	rte_timer_subsystem_init();
	rte_timer_init(&stats_timer);

	ka_shm = NULL;
	if (check_period > 0) {
		ka_shm = rte_keepalive_shm_create();
		if (ka_shm == NULL)
			rte_exit(EXIT_FAILURE,
				"rte_keepalive_shm_create() failed");
		/* Initialize keepalive functionality. 8< */
		rte_global_keepalive_info =
			rte_keepalive_create(&dead_core, ka_shm);
		if (rte_global_keepalive_info == NULL)
			rte_exit(EXIT_FAILURE, "init_keep_alive() failed");
		/* >8 End of initializing keepalive functionality. */
		rte_keepalive_register_relay_callback(rte_global_keepalive_info,
			relay_core_state, ka_shm);
		rte_timer_init(&hb_timer);
		/* Issues the pings keepalive_dispatch_pings(). 8< */
		if (rte_timer_reset(&hb_timer,
				(check_period * rte_get_timer_hz()) / 1000,
				PERIODICAL,
				rte_lcore_id(),
				(void(*)(struct rte_timer*, void*))
				&rte_keepalive_dispatch_pings,
				rte_global_keepalive_info
				) != 0 )
			rte_exit(EXIT_FAILURE, "Keepalive setup failure.\n");
		/* >8 End of issuing the pings keepalive_dispatch_pings(). */
	}
	if (timer_period > 0) {
		/* Periodic statistics printout. */
		if (rte_timer_reset(&stats_timer,
				(timer_period * rte_get_timer_hz()) / 1000,
				PERIODICAL,
				rte_lcore_id(),
				&print_stats, NULL
				) != 0 )
			rte_exit(EXIT_FAILURE, "Stats setup failure.\n");
	}
	/* launch per-lcore init on every worker lcore */
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		struct lcore_queue_conf *qconf = &lcore_queue_conf[lcore_id];

		if (qconf->n_rx_port == 0)
			RTE_LOG(INFO, L2FWD,
				"lcore %u has nothing to do\n",
				lcore_id
				);
		else {
			rte_eal_remote_launch(
				l2fwd_launch_one_lcore,
				NULL,
				lcore_id
				);
			rte_keepalive_register_core(rte_global_keepalive_info,
				lcore_id);
		}
	}
	while (!terminate_signal_received) {
		rte_timer_manage();
		rte_delay_ms(5);
	}

	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	if (ka_shm != NULL)
		rte_keepalive_shm_cleanup(ka_shm);

	/* clean up the EAL */
	rte_eal_cleanup();

	return 0;
}