1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2017 Intel Corporation 3 */ 4 5 #include <stdarg.h> 6 #include <stdio.h> 7 #include <stdlib.h> 8 #include <signal.h> 9 #include <string.h> 10 #include <time.h> 11 #include <fcntl.h> 12 #include <sys/mman.h> 13 #include <sys/types.h> 14 #include <errno.h> 15 #include <stdbool.h> 16 17 #include <sys/queue.h> 18 #include <sys/stat.h> 19 20 #include <stdint.h> 21 #include <unistd.h> 22 #include <inttypes.h> 23 24 #include <rte_common.h> 25 #include <rte_errno.h> 26 #include <rte_byteorder.h> 27 #include <rte_log.h> 28 #include <rte_debug.h> 29 #include <rte_cycles.h> 30 #include <rte_memory.h> 31 #include <rte_memcpy.h> 32 #include <rte_launch.h> 33 #include <rte_eal.h> 34 #include <rte_alarm.h> 35 #include <rte_per_lcore.h> 36 #include <rte_lcore.h> 37 #include <rte_atomic.h> 38 #include <rte_branch_prediction.h> 39 #include <rte_mempool.h> 40 #include <rte_malloc.h> 41 #include <rte_mbuf.h> 42 #include <rte_mbuf_pool_ops.h> 43 #include <rte_interrupts.h> 44 #include <rte_pci.h> 45 #include <rte_ether.h> 46 #include <rte_ethdev.h> 47 #include <rte_dev.h> 48 #include <rte_string_fns.h> 49 #ifdef RTE_LIBRTE_IXGBE_PMD 50 #include <rte_pmd_ixgbe.h> 51 #endif 52 #ifdef RTE_LIBRTE_PDUMP 53 #include <rte_pdump.h> 54 #endif 55 #include <rte_flow.h> 56 #include <rte_metrics.h> 57 #ifdef RTE_LIBRTE_BITRATE 58 #include <rte_bitrate.h> 59 #endif 60 #ifdef RTE_LIBRTE_LATENCY_STATS 61 #include <rte_latencystats.h> 62 #endif 63 64 #include "testpmd.h" 65 66 uint16_t verbose_level = 0; /**< Silent by default. */ 67 int testpmd_logtype; /**< Log type for testpmd logs */ 68 69 /* use master core for command line ? */ 70 uint8_t interactive = 0; 71 uint8_t auto_start = 0; 72 uint8_t tx_first; 73 char cmdline_filename[PATH_MAX] = {0}; 74 75 /* 76 * NUMA support configuration. 77 * When set, the NUMA support attempts to dispatch the allocation of the 78 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the 79 * probed ports among the CPU sockets 0 and 1. 80 * Otherwise, all memory is allocated from CPU socket 0. 81 */ 82 uint8_t numa_support = 1; /**< numa enabled by default */ 83 84 /* 85 * In UMA mode,all memory is allocated from socket 0 if --socket-num is 86 * not configured. 87 */ 88 uint8_t socket_num = UMA_NO_CONFIG; 89 90 /* 91 * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs. 92 */ 93 uint8_t mp_anon = 0; 94 95 /* 96 * Store specified sockets on which memory pool to be used by ports 97 * is allocated. 98 */ 99 uint8_t port_numa[RTE_MAX_ETHPORTS]; 100 101 /* 102 * Store specified sockets on which RX ring to be used by ports 103 * is allocated. 104 */ 105 uint8_t rxring_numa[RTE_MAX_ETHPORTS]; 106 107 /* 108 * Store specified sockets on which TX ring to be used by ports 109 * is allocated. 110 */ 111 uint8_t txring_numa[RTE_MAX_ETHPORTS]; 112 113 /* 114 * Record the Ethernet address of peer target ports to which packets are 115 * forwarded. 116 * Must be instantiated with the ethernet addresses of peer traffic generator 117 * ports. 118 */ 119 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS]; 120 portid_t nb_peer_eth_addrs = 0; 121 122 /* 123 * Probed Target Environment. 124 */ 125 struct rte_port *ports; /**< For all probed ethernet ports. */ 126 portid_t nb_ports; /**< Number of probed ethernet ports. */ 127 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */ 128 lcoreid_t nb_lcores; /**< Number of probed logical cores. */ 129 130 /* 131 * Test Forwarding Configuration. 132 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores 133 * nb_fwd_ports <= nb_cfg_ports <= nb_ports 134 */ 135 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */ 136 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */ 137 portid_t nb_cfg_ports; /**< Number of configured ports. */ 138 portid_t nb_fwd_ports; /**< Number of forwarding ports. */ 139 140 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */ 141 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */ 142 143 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */ 144 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */ 145 146 /* 147 * Forwarding engines. 148 */ 149 struct fwd_engine * fwd_engines[] = { 150 &io_fwd_engine, 151 &mac_fwd_engine, 152 &mac_swap_engine, 153 &flow_gen_engine, 154 &rx_only_engine, 155 &tx_only_engine, 156 &csum_fwd_engine, 157 &icmp_echo_engine, 158 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED 159 &softnic_tm_engine, 160 &softnic_tm_bypass_engine, 161 #endif 162 #ifdef RTE_LIBRTE_IEEE1588 163 &ieee1588_fwd_engine, 164 #endif 165 NULL, 166 }; 167 168 struct fwd_config cur_fwd_config; 169 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */ 170 uint32_t retry_enabled; 171 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US; 172 uint32_t burst_tx_retry_num = BURST_TX_RETRIES; 173 174 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */ 175 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if 176 * specified on command-line. */ 177 uint16_t stats_period; /**< Period to show statistics (disabled by default) */ 178 179 /* 180 * In container, it cannot terminate the process which running with 'stats-period' 181 * option. Set flag to exit stats period loop after received SIGINT/SIGTERM. 182 */ 183 uint8_t f_quit; 184 185 /* 186 * Configuration of packet segments used by the "txonly" processing engine. 187 */ 188 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */ 189 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = { 190 TXONLY_DEF_PACKET_LEN, 191 }; 192 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */ 193 194 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF; 195 /**< Split policy for packets to TX. */ 196 197 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */ 198 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */ 199 200 /* current configuration is in DCB or not,0 means it is not in DCB mode */ 201 uint8_t dcb_config = 0; 202 203 /* Whether the dcb is in testing status */ 204 uint8_t dcb_test = 0; 205 206 /* 207 * Configurable number of RX/TX queues. 208 */ 209 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */ 210 queueid_t nb_txq = 1; /**< Number of TX queues per port. */ 211 212 /* 213 * Configurable number of RX/TX ring descriptors. 214 */ 215 #define RTE_TEST_RX_DESC_DEFAULT 1024 216 #define RTE_TEST_TX_DESC_DEFAULT 1024 217 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */ 218 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */ 219 220 #define RTE_PMD_PARAM_UNSET -1 221 /* 222 * Configurable values of RX and TX ring threshold registers. 223 */ 224 225 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET; 226 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET; 227 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET; 228 229 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET; 230 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET; 231 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET; 232 233 /* 234 * Configurable value of RX free threshold. 235 */ 236 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET; 237 238 /* 239 * Configurable value of RX drop enable. 240 */ 241 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET; 242 243 /* 244 * Configurable value of TX free threshold. 245 */ 246 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET; 247 248 /* 249 * Configurable value of TX RS bit threshold. 250 */ 251 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET; 252 253 /* 254 * Receive Side Scaling (RSS) configuration. 255 */ 256 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */ 257 258 /* 259 * Port topology configuration 260 */ 261 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */ 262 263 /* 264 * Avoids to flush all the RX streams before starts forwarding. 265 */ 266 uint8_t no_flush_rx = 0; /* flush by default */ 267 268 /* 269 * Flow API isolated mode. 270 */ 271 uint8_t flow_isolate_all; 272 273 /* 274 * Avoids to check link status when starting/stopping a port. 275 */ 276 uint8_t no_link_check = 0; /* check by default */ 277 278 /* 279 * Enable link status change notification 280 */ 281 uint8_t lsc_interrupt = 1; /* enabled by default */ 282 283 /* 284 * Enable device removal notification. 285 */ 286 uint8_t rmv_interrupt = 1; /* enabled by default */ 287 288 uint8_t hot_plug = 0; /**< hotplug disabled by default. */ 289 290 /* 291 * Display or mask ether events 292 * Default to all events except VF_MBOX 293 */ 294 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) | 295 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) | 296 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) | 297 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) | 298 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) | 299 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV); 300 301 /* 302 * NIC bypass mode configuration options. 303 */ 304 305 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS 306 /* The NIC bypass watchdog timeout. */ 307 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF; 308 #endif 309 310 311 #ifdef RTE_LIBRTE_LATENCY_STATS 312 313 /* 314 * Set when latency stats is enabled in the commandline 315 */ 316 uint8_t latencystats_enabled; 317 318 /* 319 * Lcore ID to serive latency statistics. 320 */ 321 lcoreid_t latencystats_lcore_id = -1; 322 323 #endif 324 325 /* 326 * Ethernet device configuration. 327 */ 328 struct rte_eth_rxmode rx_mode = { 329 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */ 330 .offloads = DEV_RX_OFFLOAD_CRC_STRIP, 331 .ignore_offload_bitfield = 1, 332 }; 333 334 struct rte_eth_txmode tx_mode = { 335 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE, 336 }; 337 338 struct rte_fdir_conf fdir_conf = { 339 .mode = RTE_FDIR_MODE_NONE, 340 .pballoc = RTE_FDIR_PBALLOC_64K, 341 .status = RTE_FDIR_REPORT_STATUS, 342 .mask = { 343 .vlan_tci_mask = 0x0, 344 .ipv4_mask = { 345 .src_ip = 0xFFFFFFFF, 346 .dst_ip = 0xFFFFFFFF, 347 }, 348 .ipv6_mask = { 349 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 350 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 351 }, 352 .src_port_mask = 0xFFFF, 353 .dst_port_mask = 0xFFFF, 354 .mac_addr_byte_mask = 0xFF, 355 .tunnel_type_mask = 1, 356 .tunnel_id_mask = 0xFFFFFFFF, 357 }, 358 .drop_queue = 127, 359 }; 360 361 volatile int test_done = 1; /* stop packet forwarding when set to 1. */ 362 363 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS]; 364 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS]; 365 366 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array; 367 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array; 368 369 uint16_t nb_tx_queue_stats_mappings = 0; 370 uint16_t nb_rx_queue_stats_mappings = 0; 371 372 /* 373 * Display zero values by default for xstats 374 */ 375 uint8_t xstats_hide_zero; 376 377 unsigned int num_sockets = 0; 378 unsigned int socket_ids[RTE_MAX_NUMA_NODES]; 379 380 #ifdef RTE_LIBRTE_BITRATE 381 /* Bitrate statistics */ 382 struct rte_stats_bitrates *bitrate_data; 383 lcoreid_t bitrate_lcore_id; 384 uint8_t bitrate_enabled; 385 #endif 386 387 struct gro_status gro_ports[RTE_MAX_ETHPORTS]; 388 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES; 389 390 /* Forward function declarations */ 391 static void map_port_queue_stats_mapping_registers(portid_t pi, 392 struct rte_port *port); 393 static void check_all_ports_link_status(uint32_t port_mask); 394 static int eth_event_callback(portid_t port_id, 395 enum rte_eth_event_type type, 396 void *param, void *ret_param); 397 static void eth_dev_event_callback(char *device_name, 398 enum rte_dev_event_type type, 399 void *param); 400 static int eth_dev_event_callback_register(void); 401 static int eth_dev_event_callback_unregister(void); 402 403 404 /* 405 * Check if all the ports are started. 406 * If yes, return positive value. If not, return zero. 407 */ 408 static int all_ports_started(void); 409 410 struct gso_status gso_ports[RTE_MAX_ETHPORTS]; 411 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN; 412 413 /* 414 * Helper function to check if socket is already discovered. 415 * If yes, return positive value. If not, return zero. 416 */ 417 int 418 new_socket_id(unsigned int socket_id) 419 { 420 unsigned int i; 421 422 for (i = 0; i < num_sockets; i++) { 423 if (socket_ids[i] == socket_id) 424 return 0; 425 } 426 return 1; 427 } 428 429 /* 430 * Setup default configuration. 431 */ 432 static void 433 set_default_fwd_lcores_config(void) 434 { 435 unsigned int i; 436 unsigned int nb_lc; 437 unsigned int sock_num; 438 439 nb_lc = 0; 440 for (i = 0; i < RTE_MAX_LCORE; i++) { 441 sock_num = rte_lcore_to_socket_id(i); 442 if (new_socket_id(sock_num)) { 443 if (num_sockets >= RTE_MAX_NUMA_NODES) { 444 rte_exit(EXIT_FAILURE, 445 "Total sockets greater than %u\n", 446 RTE_MAX_NUMA_NODES); 447 } 448 socket_ids[num_sockets++] = sock_num; 449 } 450 if (!rte_lcore_is_enabled(i)) 451 continue; 452 if (i == rte_get_master_lcore()) 453 continue; 454 fwd_lcores_cpuids[nb_lc++] = i; 455 } 456 nb_lcores = (lcoreid_t) nb_lc; 457 nb_cfg_lcores = nb_lcores; 458 nb_fwd_lcores = 1; 459 } 460 461 static void 462 set_def_peer_eth_addrs(void) 463 { 464 portid_t i; 465 466 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 467 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR; 468 peer_eth_addrs[i].addr_bytes[5] = i; 469 } 470 } 471 472 static void 473 set_default_fwd_ports_config(void) 474 { 475 portid_t pt_id; 476 int i = 0; 477 478 RTE_ETH_FOREACH_DEV(pt_id) 479 fwd_ports_ids[i++] = pt_id; 480 481 nb_cfg_ports = nb_ports; 482 nb_fwd_ports = nb_ports; 483 } 484 485 void 486 set_def_fwd_config(void) 487 { 488 set_default_fwd_lcores_config(); 489 set_def_peer_eth_addrs(); 490 set_default_fwd_ports_config(); 491 } 492 493 /* 494 * Configuration initialisation done once at init time. 495 */ 496 static void 497 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf, 498 unsigned int socket_id) 499 { 500 char pool_name[RTE_MEMPOOL_NAMESIZE]; 501 struct rte_mempool *rte_mp = NULL; 502 uint32_t mb_size; 503 504 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size; 505 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name)); 506 507 TESTPMD_LOG(INFO, 508 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n", 509 pool_name, nb_mbuf, mbuf_seg_size, socket_id); 510 511 if (mp_anon != 0) { 512 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf, 513 mb_size, (unsigned) mb_mempool_cache, 514 sizeof(struct rte_pktmbuf_pool_private), 515 socket_id, 0); 516 if (rte_mp == NULL) 517 goto err; 518 519 if (rte_mempool_populate_anon(rte_mp) == 0) { 520 rte_mempool_free(rte_mp); 521 rte_mp = NULL; 522 goto err; 523 } 524 rte_pktmbuf_pool_init(rte_mp, NULL); 525 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL); 526 } else { 527 /* wrapper to rte_mempool_create() */ 528 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n", 529 rte_mbuf_best_mempool_ops()); 530 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf, 531 mb_mempool_cache, 0, mbuf_seg_size, socket_id); 532 } 533 534 err: 535 if (rte_mp == NULL) { 536 rte_exit(EXIT_FAILURE, 537 "Creation of mbuf pool for socket %u failed: %s\n", 538 socket_id, rte_strerror(rte_errno)); 539 } else if (verbose_level > 0) { 540 rte_mempool_dump(stdout, rte_mp); 541 } 542 } 543 544 /* 545 * Check given socket id is valid or not with NUMA mode, 546 * if valid, return 0, else return -1 547 */ 548 static int 549 check_socket_id(const unsigned int socket_id) 550 { 551 static int warning_once = 0; 552 553 if (new_socket_id(socket_id)) { 554 if (!warning_once && numa_support) 555 printf("Warning: NUMA should be configured manually by" 556 " using --port-numa-config and" 557 " --ring-numa-config parameters along with" 558 " --numa.\n"); 559 warning_once = 1; 560 return -1; 561 } 562 return 0; 563 } 564 565 /* 566 * Get the allowed maximum number of RX queues. 567 * *pid return the port id which has minimal value of 568 * max_rx_queues in all ports. 569 */ 570 queueid_t 571 get_allowed_max_nb_rxq(portid_t *pid) 572 { 573 queueid_t allowed_max_rxq = MAX_QUEUE_ID; 574 portid_t pi; 575 struct rte_eth_dev_info dev_info; 576 577 RTE_ETH_FOREACH_DEV(pi) { 578 rte_eth_dev_info_get(pi, &dev_info); 579 if (dev_info.max_rx_queues < allowed_max_rxq) { 580 allowed_max_rxq = dev_info.max_rx_queues; 581 *pid = pi; 582 } 583 } 584 return allowed_max_rxq; 585 } 586 587 /* 588 * Check input rxq is valid or not. 589 * If input rxq is not greater than any of maximum number 590 * of RX queues of all ports, it is valid. 591 * if valid, return 0, else return -1 592 */ 593 int 594 check_nb_rxq(queueid_t rxq) 595 { 596 queueid_t allowed_max_rxq; 597 portid_t pid = 0; 598 599 allowed_max_rxq = get_allowed_max_nb_rxq(&pid); 600 if (rxq > allowed_max_rxq) { 601 printf("Fail: input rxq (%u) can't be greater " 602 "than max_rx_queues (%u) of port %u\n", 603 rxq, 604 allowed_max_rxq, 605 pid); 606 return -1; 607 } 608 return 0; 609 } 610 611 /* 612 * Get the allowed maximum number of TX queues. 613 * *pid return the port id which has minimal value of 614 * max_tx_queues in all ports. 615 */ 616 queueid_t 617 get_allowed_max_nb_txq(portid_t *pid) 618 { 619 queueid_t allowed_max_txq = MAX_QUEUE_ID; 620 portid_t pi; 621 struct rte_eth_dev_info dev_info; 622 623 RTE_ETH_FOREACH_DEV(pi) { 624 rte_eth_dev_info_get(pi, &dev_info); 625 if (dev_info.max_tx_queues < allowed_max_txq) { 626 allowed_max_txq = dev_info.max_tx_queues; 627 *pid = pi; 628 } 629 } 630 return allowed_max_txq; 631 } 632 633 /* 634 * Check input txq is valid or not. 635 * If input txq is not greater than any of maximum number 636 * of TX queues of all ports, it is valid. 637 * if valid, return 0, else return -1 638 */ 639 int 640 check_nb_txq(queueid_t txq) 641 { 642 queueid_t allowed_max_txq; 643 portid_t pid = 0; 644 645 allowed_max_txq = get_allowed_max_nb_txq(&pid); 646 if (txq > allowed_max_txq) { 647 printf("Fail: input txq (%u) can't be greater " 648 "than max_tx_queues (%u) of port %u\n", 649 txq, 650 allowed_max_txq, 651 pid); 652 return -1; 653 } 654 return 0; 655 } 656 657 static void 658 init_config(void) 659 { 660 portid_t pid; 661 struct rte_port *port; 662 struct rte_mempool *mbp; 663 unsigned int nb_mbuf_per_pool; 664 lcoreid_t lc_id; 665 uint8_t port_per_socket[RTE_MAX_NUMA_NODES]; 666 struct rte_gro_param gro_param; 667 uint32_t gso_types; 668 669 memset(port_per_socket,0,RTE_MAX_NUMA_NODES); 670 671 if (numa_support) { 672 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 673 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 674 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 675 } 676 677 /* Configuration of logical cores. */ 678 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores", 679 sizeof(struct fwd_lcore *) * nb_lcores, 680 RTE_CACHE_LINE_SIZE); 681 if (fwd_lcores == NULL) { 682 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) " 683 "failed\n", nb_lcores); 684 } 685 for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 686 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore", 687 sizeof(struct fwd_lcore), 688 RTE_CACHE_LINE_SIZE); 689 if (fwd_lcores[lc_id] == NULL) { 690 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) " 691 "failed\n"); 692 } 693 fwd_lcores[lc_id]->cpuid_idx = lc_id; 694 } 695 696 RTE_ETH_FOREACH_DEV(pid) { 697 port = &ports[pid]; 698 /* Apply default TxRx configuration for all ports */ 699 port->dev_conf.txmode = tx_mode; 700 port->dev_conf.rxmode = rx_mode; 701 rte_eth_dev_info_get(pid, &port->dev_info); 702 if (!(port->dev_info.tx_offload_capa & 703 DEV_TX_OFFLOAD_MBUF_FAST_FREE)) 704 port->dev_conf.txmode.offloads &= 705 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE; 706 if (numa_support) { 707 if (port_numa[pid] != NUMA_NO_CONFIG) 708 port_per_socket[port_numa[pid]]++; 709 else { 710 uint32_t socket_id = rte_eth_dev_socket_id(pid); 711 712 /* if socket_id is invalid, set to 0 */ 713 if (check_socket_id(socket_id) < 0) 714 socket_id = 0; 715 port_per_socket[socket_id]++; 716 } 717 } 718 719 /* set flag to initialize port/queue */ 720 port->need_reconfig = 1; 721 port->need_reconfig_queues = 1; 722 } 723 724 /* 725 * Create pools of mbuf. 726 * If NUMA support is disabled, create a single pool of mbuf in 727 * socket 0 memory by default. 728 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1. 729 * 730 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and 731 * nb_txd can be configured at run time. 732 */ 733 if (param_total_num_mbufs) 734 nb_mbuf_per_pool = param_total_num_mbufs; 735 else { 736 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + 737 (nb_lcores * mb_mempool_cache) + 738 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST; 739 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS; 740 } 741 742 if (numa_support) { 743 uint8_t i; 744 745 for (i = 0; i < num_sockets; i++) 746 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 747 socket_ids[i]); 748 } else { 749 if (socket_num == UMA_NO_CONFIG) 750 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0); 751 else 752 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 753 socket_num); 754 } 755 756 init_port_config(); 757 758 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO | 759 DEV_TX_OFFLOAD_GRE_TNL_TSO; 760 /* 761 * Records which Mbuf pool to use by each logical core, if needed. 762 */ 763 for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 764 mbp = mbuf_pool_find( 765 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id])); 766 767 if (mbp == NULL) 768 mbp = mbuf_pool_find(0); 769 fwd_lcores[lc_id]->mbp = mbp; 770 /* initialize GSO context */ 771 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp; 772 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp; 773 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types; 774 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN - 775 ETHER_CRC_LEN; 776 fwd_lcores[lc_id]->gso_ctx.flag = 0; 777 } 778 779 /* Configuration of packet forwarding streams. */ 780 if (init_fwd_streams() < 0) 781 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n"); 782 783 fwd_config_setup(); 784 785 /* create a gro context for each lcore */ 786 gro_param.gro_types = RTE_GRO_TCP_IPV4; 787 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES; 788 gro_param.max_item_per_flow = MAX_PKT_BURST; 789 for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 790 gro_param.socket_id = rte_lcore_to_socket_id( 791 fwd_lcores_cpuids[lc_id]); 792 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param); 793 if (fwd_lcores[lc_id]->gro_ctx == NULL) { 794 rte_exit(EXIT_FAILURE, 795 "rte_gro_ctx_create() failed\n"); 796 } 797 } 798 } 799 800 801 void 802 reconfig(portid_t new_port_id, unsigned socket_id) 803 { 804 struct rte_port *port; 805 806 /* Reconfiguration of Ethernet ports. */ 807 port = &ports[new_port_id]; 808 rte_eth_dev_info_get(new_port_id, &port->dev_info); 809 810 /* set flag to initialize port/queue */ 811 port->need_reconfig = 1; 812 port->need_reconfig_queues = 1; 813 port->socket_id = socket_id; 814 815 init_port_config(); 816 } 817 818 819 int 820 init_fwd_streams(void) 821 { 822 portid_t pid; 823 struct rte_port *port; 824 streamid_t sm_id, nb_fwd_streams_new; 825 queueid_t q; 826 827 /* set socket id according to numa or not */ 828 RTE_ETH_FOREACH_DEV(pid) { 829 port = &ports[pid]; 830 if (nb_rxq > port->dev_info.max_rx_queues) { 831 printf("Fail: nb_rxq(%d) is greater than " 832 "max_rx_queues(%d)\n", nb_rxq, 833 port->dev_info.max_rx_queues); 834 return -1; 835 } 836 if (nb_txq > port->dev_info.max_tx_queues) { 837 printf("Fail: nb_txq(%d) is greater than " 838 "max_tx_queues(%d)\n", nb_txq, 839 port->dev_info.max_tx_queues); 840 return -1; 841 } 842 if (numa_support) { 843 if (port_numa[pid] != NUMA_NO_CONFIG) 844 port->socket_id = port_numa[pid]; 845 else { 846 port->socket_id = rte_eth_dev_socket_id(pid); 847 848 /* if socket_id is invalid, set to 0 */ 849 if (check_socket_id(port->socket_id) < 0) 850 port->socket_id = 0; 851 } 852 } 853 else { 854 if (socket_num == UMA_NO_CONFIG) 855 port->socket_id = 0; 856 else 857 port->socket_id = socket_num; 858 } 859 } 860 861 q = RTE_MAX(nb_rxq, nb_txq); 862 if (q == 0) { 863 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n"); 864 return -1; 865 } 866 nb_fwd_streams_new = (streamid_t)(nb_ports * q); 867 if (nb_fwd_streams_new == nb_fwd_streams) 868 return 0; 869 /* clear the old */ 870 if (fwd_streams != NULL) { 871 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 872 if (fwd_streams[sm_id] == NULL) 873 continue; 874 rte_free(fwd_streams[sm_id]); 875 fwd_streams[sm_id] = NULL; 876 } 877 rte_free(fwd_streams); 878 fwd_streams = NULL; 879 } 880 881 /* init new */ 882 nb_fwd_streams = nb_fwd_streams_new; 883 fwd_streams = rte_zmalloc("testpmd: fwd_streams", 884 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE); 885 if (fwd_streams == NULL) 886 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) " 887 "failed\n", nb_fwd_streams); 888 889 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 890 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream", 891 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE); 892 if (fwd_streams[sm_id] == NULL) 893 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)" 894 " failed\n"); 895 } 896 897 return 0; 898 } 899 900 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 901 static void 902 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) 903 { 904 unsigned int total_burst; 905 unsigned int nb_burst; 906 unsigned int burst_stats[3]; 907 uint16_t pktnb_stats[3]; 908 uint16_t nb_pkt; 909 int burst_percent[3]; 910 911 /* 912 * First compute the total number of packet bursts and the 913 * two highest numbers of bursts of the same number of packets. 914 */ 915 total_burst = 0; 916 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0; 917 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0; 918 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) { 919 nb_burst = pbs->pkt_burst_spread[nb_pkt]; 920 if (nb_burst == 0) 921 continue; 922 total_burst += nb_burst; 923 if (nb_burst > burst_stats[0]) { 924 burst_stats[1] = burst_stats[0]; 925 pktnb_stats[1] = pktnb_stats[0]; 926 burst_stats[0] = nb_burst; 927 pktnb_stats[0] = nb_pkt; 928 } 929 } 930 if (total_burst == 0) 931 return; 932 burst_percent[0] = (burst_stats[0] * 100) / total_burst; 933 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst, 934 burst_percent[0], (int) pktnb_stats[0]); 935 if (burst_stats[0] == total_burst) { 936 printf("]\n"); 937 return; 938 } 939 if (burst_stats[0] + burst_stats[1] == total_burst) { 940 printf(" + %d%% of %d pkts]\n", 941 100 - burst_percent[0], pktnb_stats[1]); 942 return; 943 } 944 burst_percent[1] = (burst_stats[1] * 100) / total_burst; 945 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]); 946 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) { 947 printf(" + %d%% of others]\n", 100 - burst_percent[0]); 948 return; 949 } 950 printf(" + %d%% of %d pkts + %d%% of others]\n", 951 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]); 952 } 953 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */ 954 955 static void 956 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats) 957 { 958 struct rte_port *port; 959 uint8_t i; 960 961 static const char *fwd_stats_border = "----------------------"; 962 963 port = &ports[port_id]; 964 printf("\n %s Forward statistics for port %-2d %s\n", 965 fwd_stats_border, port_id, fwd_stats_border); 966 967 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 968 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 969 "%-"PRIu64"\n", 970 stats->ipackets, stats->imissed, 971 (uint64_t) (stats->ipackets + stats->imissed)); 972 973 if (cur_fwd_eng == &csum_fwd_engine) 974 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n", 975 port->rx_bad_ip_csum, port->rx_bad_l4_csum); 976 if ((stats->ierrors + stats->rx_nombuf) > 0) { 977 printf(" RX-error: %-"PRIu64"\n", stats->ierrors); 978 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf); 979 } 980 981 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 982 "%-"PRIu64"\n", 983 stats->opackets, port->tx_dropped, 984 (uint64_t) (stats->opackets + port->tx_dropped)); 985 } 986 else { 987 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:" 988 "%14"PRIu64"\n", 989 stats->ipackets, stats->imissed, 990 (uint64_t) (stats->ipackets + stats->imissed)); 991 992 if (cur_fwd_eng == &csum_fwd_engine) 993 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n", 994 port->rx_bad_ip_csum, port->rx_bad_l4_csum); 995 if ((stats->ierrors + stats->rx_nombuf) > 0) { 996 printf(" RX-error:%"PRIu64"\n", stats->ierrors); 997 printf(" RX-nombufs: %14"PRIu64"\n", 998 stats->rx_nombuf); 999 } 1000 1001 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:" 1002 "%14"PRIu64"\n", 1003 stats->opackets, port->tx_dropped, 1004 (uint64_t) (stats->opackets + port->tx_dropped)); 1005 } 1006 1007 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1008 if (port->rx_stream) 1009 pkt_burst_stats_display("RX", 1010 &port->rx_stream->rx_burst_stats); 1011 if (port->tx_stream) 1012 pkt_burst_stats_display("TX", 1013 &port->tx_stream->tx_burst_stats); 1014 #endif 1015 1016 if (port->rx_queue_stats_mapping_enabled) { 1017 printf("\n"); 1018 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 1019 printf(" Stats reg %2d RX-packets:%14"PRIu64 1020 " RX-errors:%14"PRIu64 1021 " RX-bytes:%14"PRIu64"\n", 1022 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]); 1023 } 1024 printf("\n"); 1025 } 1026 if (port->tx_queue_stats_mapping_enabled) { 1027 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 1028 printf(" Stats reg %2d TX-packets:%14"PRIu64 1029 " TX-bytes:%14"PRIu64"\n", 1030 i, stats->q_opackets[i], stats->q_obytes[i]); 1031 } 1032 } 1033 1034 printf(" %s--------------------------------%s\n", 1035 fwd_stats_border, fwd_stats_border); 1036 } 1037 1038 static void 1039 fwd_stream_stats_display(streamid_t stream_id) 1040 { 1041 struct fwd_stream *fs; 1042 static const char *fwd_top_stats_border = "-------"; 1043 1044 fs = fwd_streams[stream_id]; 1045 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) && 1046 (fs->fwd_dropped == 0)) 1047 return; 1048 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> " 1049 "TX Port=%2d/Queue=%2d %s\n", 1050 fwd_top_stats_border, fs->rx_port, fs->rx_queue, 1051 fs->tx_port, fs->tx_queue, fwd_top_stats_border); 1052 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u", 1053 fs->rx_packets, fs->tx_packets, fs->fwd_dropped); 1054 1055 /* if checksum mode */ 1056 if (cur_fwd_eng == &csum_fwd_engine) { 1057 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: " 1058 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum); 1059 } 1060 1061 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1062 pkt_burst_stats_display("RX", &fs->rx_burst_stats); 1063 pkt_burst_stats_display("TX", &fs->tx_burst_stats); 1064 #endif 1065 } 1066 1067 static void 1068 flush_fwd_rx_queues(void) 1069 { 1070 struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; 1071 portid_t rxp; 1072 portid_t port_id; 1073 queueid_t rxq; 1074 uint16_t nb_rx; 1075 uint16_t i; 1076 uint8_t j; 1077 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0; 1078 uint64_t timer_period; 1079 1080 /* convert to number of cycles */ 1081 timer_period = rte_get_timer_hz(); /* 1 second timeout */ 1082 1083 for (j = 0; j < 2; j++) { 1084 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) { 1085 for (rxq = 0; rxq < nb_rxq; rxq++) { 1086 port_id = fwd_ports_ids[rxp]; 1087 /** 1088 * testpmd can stuck in the below do while loop 1089 * if rte_eth_rx_burst() always returns nonzero 1090 * packets. So timer is added to exit this loop 1091 * after 1sec timer expiry. 1092 */ 1093 prev_tsc = rte_rdtsc(); 1094 do { 1095 nb_rx = rte_eth_rx_burst(port_id, rxq, 1096 pkts_burst, MAX_PKT_BURST); 1097 for (i = 0; i < nb_rx; i++) 1098 rte_pktmbuf_free(pkts_burst[i]); 1099 1100 cur_tsc = rte_rdtsc(); 1101 diff_tsc = cur_tsc - prev_tsc; 1102 timer_tsc += diff_tsc; 1103 } while ((nb_rx > 0) && 1104 (timer_tsc < timer_period)); 1105 timer_tsc = 0; 1106 } 1107 } 1108 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */ 1109 } 1110 } 1111 1112 static void 1113 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) 1114 { 1115 struct fwd_stream **fsm; 1116 streamid_t nb_fs; 1117 streamid_t sm_id; 1118 #ifdef RTE_LIBRTE_BITRATE 1119 uint64_t tics_per_1sec; 1120 uint64_t tics_datum; 1121 uint64_t tics_current; 1122 uint8_t idx_port, cnt_ports; 1123 1124 cnt_ports = rte_eth_dev_count(); 1125 tics_datum = rte_rdtsc(); 1126 tics_per_1sec = rte_get_timer_hz(); 1127 #endif 1128 fsm = &fwd_streams[fc->stream_idx]; 1129 nb_fs = fc->stream_nb; 1130 do { 1131 for (sm_id = 0; sm_id < nb_fs; sm_id++) 1132 (*pkt_fwd)(fsm[sm_id]); 1133 #ifdef RTE_LIBRTE_BITRATE 1134 if (bitrate_enabled != 0 && 1135 bitrate_lcore_id == rte_lcore_id()) { 1136 tics_current = rte_rdtsc(); 1137 if (tics_current - tics_datum >= tics_per_1sec) { 1138 /* Periodic bitrate calculation */ 1139 for (idx_port = 0; 1140 idx_port < cnt_ports; 1141 idx_port++) 1142 rte_stats_bitrate_calc(bitrate_data, 1143 idx_port); 1144 tics_datum = tics_current; 1145 } 1146 } 1147 #endif 1148 #ifdef RTE_LIBRTE_LATENCY_STATS 1149 if (latencystats_enabled != 0 && 1150 latencystats_lcore_id == rte_lcore_id()) 1151 rte_latencystats_update(); 1152 #endif 1153 1154 } while (! fc->stopped); 1155 } 1156 1157 static int 1158 start_pkt_forward_on_core(void *fwd_arg) 1159 { 1160 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg, 1161 cur_fwd_config.fwd_eng->packet_fwd); 1162 return 0; 1163 } 1164 1165 /* 1166 * Run the TXONLY packet forwarding engine to send a single burst of packets. 1167 * Used to start communication flows in network loopback test configurations. 1168 */ 1169 static int 1170 run_one_txonly_burst_on_core(void *fwd_arg) 1171 { 1172 struct fwd_lcore *fwd_lc; 1173 struct fwd_lcore tmp_lcore; 1174 1175 fwd_lc = (struct fwd_lcore *) fwd_arg; 1176 tmp_lcore = *fwd_lc; 1177 tmp_lcore.stopped = 1; 1178 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd); 1179 return 0; 1180 } 1181 1182 /* 1183 * Launch packet forwarding: 1184 * - Setup per-port forwarding context. 1185 * - launch logical cores with their forwarding configuration. 1186 */ 1187 static void 1188 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) 1189 { 1190 port_fwd_begin_t port_fwd_begin; 1191 unsigned int i; 1192 unsigned int lc_id; 1193 int diag; 1194 1195 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; 1196 if (port_fwd_begin != NULL) { 1197 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1198 (*port_fwd_begin)(fwd_ports_ids[i]); 1199 } 1200 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { 1201 lc_id = fwd_lcores_cpuids[i]; 1202 if ((interactive == 0) || (lc_id != rte_lcore_id())) { 1203 fwd_lcores[i]->stopped = 0; 1204 diag = rte_eal_remote_launch(pkt_fwd_on_lcore, 1205 fwd_lcores[i], lc_id); 1206 if (diag != 0) 1207 printf("launch lcore %u failed - diag=%d\n", 1208 lc_id, diag); 1209 } 1210 } 1211 } 1212 1213 /* 1214 * Launch packet forwarding configuration. 1215 */ 1216 void 1217 start_packet_forwarding(int with_tx_first) 1218 { 1219 port_fwd_begin_t port_fwd_begin; 1220 port_fwd_end_t port_fwd_end; 1221 struct rte_port *port; 1222 unsigned int i; 1223 portid_t pt_id; 1224 streamid_t sm_id; 1225 1226 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq) 1227 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n"); 1228 1229 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq) 1230 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n"); 1231 1232 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 && 1233 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) && 1234 (!nb_rxq || !nb_txq)) 1235 rte_exit(EXIT_FAILURE, 1236 "Either rxq or txq are 0, cannot use %s fwd mode\n", 1237 cur_fwd_eng->fwd_mode_name); 1238 1239 if (all_ports_started() == 0) { 1240 printf("Not all ports were started\n"); 1241 return; 1242 } 1243 if (test_done == 0) { 1244 printf("Packet forwarding already started\n"); 1245 return; 1246 } 1247 1248 if (init_fwd_streams() < 0) { 1249 printf("Fail from init_fwd_streams()\n"); 1250 return; 1251 } 1252 1253 if(dcb_test) { 1254 for (i = 0; i < nb_fwd_ports; i++) { 1255 pt_id = fwd_ports_ids[i]; 1256 port = &ports[pt_id]; 1257 if (!port->dcb_flag) { 1258 printf("In DCB mode, all forwarding ports must " 1259 "be configured in this mode.\n"); 1260 return; 1261 } 1262 } 1263 if (nb_fwd_lcores == 1) { 1264 printf("In DCB mode,the nb forwarding cores " 1265 "should be larger than 1.\n"); 1266 return; 1267 } 1268 } 1269 test_done = 0; 1270 1271 if(!no_flush_rx) 1272 flush_fwd_rx_queues(); 1273 1274 fwd_config_setup(); 1275 pkt_fwd_config_display(&cur_fwd_config); 1276 rxtx_config_display(); 1277 1278 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1279 pt_id = fwd_ports_ids[i]; 1280 port = &ports[pt_id]; 1281 rte_eth_stats_get(pt_id, &port->stats); 1282 port->tx_dropped = 0; 1283 1284 map_port_queue_stats_mapping_registers(pt_id, port); 1285 } 1286 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1287 fwd_streams[sm_id]->rx_packets = 0; 1288 fwd_streams[sm_id]->tx_packets = 0; 1289 fwd_streams[sm_id]->fwd_dropped = 0; 1290 fwd_streams[sm_id]->rx_bad_ip_csum = 0; 1291 fwd_streams[sm_id]->rx_bad_l4_csum = 0; 1292 1293 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1294 memset(&fwd_streams[sm_id]->rx_burst_stats, 0, 1295 sizeof(fwd_streams[sm_id]->rx_burst_stats)); 1296 memset(&fwd_streams[sm_id]->tx_burst_stats, 0, 1297 sizeof(fwd_streams[sm_id]->tx_burst_stats)); 1298 #endif 1299 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1300 fwd_streams[sm_id]->core_cycles = 0; 1301 #endif 1302 } 1303 if (with_tx_first) { 1304 port_fwd_begin = tx_only_engine.port_fwd_begin; 1305 if (port_fwd_begin != NULL) { 1306 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1307 (*port_fwd_begin)(fwd_ports_ids[i]); 1308 } 1309 while (with_tx_first--) { 1310 launch_packet_forwarding( 1311 run_one_txonly_burst_on_core); 1312 rte_eal_mp_wait_lcore(); 1313 } 1314 port_fwd_end = tx_only_engine.port_fwd_end; 1315 if (port_fwd_end != NULL) { 1316 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1317 (*port_fwd_end)(fwd_ports_ids[i]); 1318 } 1319 } 1320 launch_packet_forwarding(start_pkt_forward_on_core); 1321 } 1322 1323 void 1324 stop_packet_forwarding(void) 1325 { 1326 struct rte_eth_stats stats; 1327 struct rte_port *port; 1328 port_fwd_end_t port_fwd_end; 1329 int i; 1330 portid_t pt_id; 1331 streamid_t sm_id; 1332 lcoreid_t lc_id; 1333 uint64_t total_recv; 1334 uint64_t total_xmit; 1335 uint64_t total_rx_dropped; 1336 uint64_t total_tx_dropped; 1337 uint64_t total_rx_nombuf; 1338 uint64_t tx_dropped; 1339 uint64_t rx_bad_ip_csum; 1340 uint64_t rx_bad_l4_csum; 1341 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1342 uint64_t fwd_cycles; 1343 #endif 1344 1345 static const char *acc_stats_border = "+++++++++++++++"; 1346 1347 if (test_done) { 1348 printf("Packet forwarding not started\n"); 1349 return; 1350 } 1351 printf("Telling cores to stop..."); 1352 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) 1353 fwd_lcores[lc_id]->stopped = 1; 1354 printf("\nWaiting for lcores to finish...\n"); 1355 rte_eal_mp_wait_lcore(); 1356 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end; 1357 if (port_fwd_end != NULL) { 1358 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1359 pt_id = fwd_ports_ids[i]; 1360 (*port_fwd_end)(pt_id); 1361 } 1362 } 1363 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1364 fwd_cycles = 0; 1365 #endif 1366 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1367 if (cur_fwd_config.nb_fwd_streams > 1368 cur_fwd_config.nb_fwd_ports) { 1369 fwd_stream_stats_display(sm_id); 1370 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL; 1371 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL; 1372 } else { 1373 ports[fwd_streams[sm_id]->tx_port].tx_stream = 1374 fwd_streams[sm_id]; 1375 ports[fwd_streams[sm_id]->rx_port].rx_stream = 1376 fwd_streams[sm_id]; 1377 } 1378 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped; 1379 tx_dropped = (uint64_t) (tx_dropped + 1380 fwd_streams[sm_id]->fwd_dropped); 1381 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped; 1382 1383 rx_bad_ip_csum = 1384 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum; 1385 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum + 1386 fwd_streams[sm_id]->rx_bad_ip_csum); 1387 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum = 1388 rx_bad_ip_csum; 1389 1390 rx_bad_l4_csum = 1391 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum; 1392 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum + 1393 fwd_streams[sm_id]->rx_bad_l4_csum); 1394 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum = 1395 rx_bad_l4_csum; 1396 1397 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1398 fwd_cycles = (uint64_t) (fwd_cycles + 1399 fwd_streams[sm_id]->core_cycles); 1400 #endif 1401 } 1402 total_recv = 0; 1403 total_xmit = 0; 1404 total_rx_dropped = 0; 1405 total_tx_dropped = 0; 1406 total_rx_nombuf = 0; 1407 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1408 pt_id = fwd_ports_ids[i]; 1409 1410 port = &ports[pt_id]; 1411 rte_eth_stats_get(pt_id, &stats); 1412 stats.ipackets -= port->stats.ipackets; 1413 port->stats.ipackets = 0; 1414 stats.opackets -= port->stats.opackets; 1415 port->stats.opackets = 0; 1416 stats.ibytes -= port->stats.ibytes; 1417 port->stats.ibytes = 0; 1418 stats.obytes -= port->stats.obytes; 1419 port->stats.obytes = 0; 1420 stats.imissed -= port->stats.imissed; 1421 port->stats.imissed = 0; 1422 stats.oerrors -= port->stats.oerrors; 1423 port->stats.oerrors = 0; 1424 stats.rx_nombuf -= port->stats.rx_nombuf; 1425 port->stats.rx_nombuf = 0; 1426 1427 total_recv += stats.ipackets; 1428 total_xmit += stats.opackets; 1429 total_rx_dropped += stats.imissed; 1430 total_tx_dropped += port->tx_dropped; 1431 total_rx_nombuf += stats.rx_nombuf; 1432 1433 fwd_port_stats_display(pt_id, &stats); 1434 } 1435 1436 printf("\n %s Accumulated forward statistics for all ports" 1437 "%s\n", 1438 acc_stats_border, acc_stats_border); 1439 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 1440 "%-"PRIu64"\n" 1441 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 1442 "%-"PRIu64"\n", 1443 total_recv, total_rx_dropped, total_recv + total_rx_dropped, 1444 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); 1445 if (total_rx_nombuf > 0) 1446 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); 1447 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" 1448 "%s\n", 1449 acc_stats_border, acc_stats_border); 1450 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1451 if (total_recv > 0) 1452 printf("\n CPU cycles/packet=%u (total cycles=" 1453 "%"PRIu64" / total RX packets=%"PRIu64")\n", 1454 (unsigned int)(fwd_cycles / total_recv), 1455 fwd_cycles, total_recv); 1456 #endif 1457 printf("\nDone.\n"); 1458 test_done = 1; 1459 } 1460 1461 void 1462 dev_set_link_up(portid_t pid) 1463 { 1464 if (rte_eth_dev_set_link_up(pid) < 0) 1465 printf("\nSet link up fail.\n"); 1466 } 1467 1468 void 1469 dev_set_link_down(portid_t pid) 1470 { 1471 if (rte_eth_dev_set_link_down(pid) < 0) 1472 printf("\nSet link down fail.\n"); 1473 } 1474 1475 static int 1476 all_ports_started(void) 1477 { 1478 portid_t pi; 1479 struct rte_port *port; 1480 1481 RTE_ETH_FOREACH_DEV(pi) { 1482 port = &ports[pi]; 1483 /* Check if there is a port which is not started */ 1484 if ((port->port_status != RTE_PORT_STARTED) && 1485 (port->slave_flag == 0)) 1486 return 0; 1487 } 1488 1489 /* No port is not started */ 1490 return 1; 1491 } 1492 1493 int 1494 port_is_stopped(portid_t port_id) 1495 { 1496 struct rte_port *port = &ports[port_id]; 1497 1498 if ((port->port_status != RTE_PORT_STOPPED) && 1499 (port->slave_flag == 0)) 1500 return 0; 1501 return 1; 1502 } 1503 1504 int 1505 all_ports_stopped(void) 1506 { 1507 portid_t pi; 1508 1509 RTE_ETH_FOREACH_DEV(pi) { 1510 if (!port_is_stopped(pi)) 1511 return 0; 1512 } 1513 1514 return 1; 1515 } 1516 1517 int 1518 port_is_started(portid_t port_id) 1519 { 1520 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1521 return 0; 1522 1523 if (ports[port_id].port_status != RTE_PORT_STARTED) 1524 return 0; 1525 1526 return 1; 1527 } 1528 1529 static int 1530 port_is_closed(portid_t port_id) 1531 { 1532 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1533 return 0; 1534 1535 if (ports[port_id].port_status != RTE_PORT_CLOSED) 1536 return 0; 1537 1538 return 1; 1539 } 1540 1541 int 1542 start_port(portid_t pid) 1543 { 1544 int diag, need_check_link_status = -1; 1545 portid_t pi; 1546 queueid_t qi; 1547 struct rte_port *port; 1548 struct ether_addr mac_addr; 1549 enum rte_eth_event_type event_type; 1550 1551 if (port_id_is_invalid(pid, ENABLED_WARN)) 1552 return 0; 1553 1554 if(dcb_config) 1555 dcb_test = 1; 1556 RTE_ETH_FOREACH_DEV(pi) { 1557 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1558 continue; 1559 1560 need_check_link_status = 0; 1561 port = &ports[pi]; 1562 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED, 1563 RTE_PORT_HANDLING) == 0) { 1564 printf("Port %d is now not stopped\n", pi); 1565 continue; 1566 } 1567 1568 if (port->need_reconfig > 0) { 1569 port->need_reconfig = 0; 1570 1571 if (flow_isolate_all) { 1572 int ret = port_flow_isolate(pi, 1); 1573 if (ret) { 1574 printf("Failed to apply isolated" 1575 " mode on port %d\n", pi); 1576 return -1; 1577 } 1578 } 1579 1580 printf("Configuring Port %d (socket %u)\n", pi, 1581 port->socket_id); 1582 /* configure port */ 1583 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq, 1584 &(port->dev_conf)); 1585 if (diag != 0) { 1586 if (rte_atomic16_cmpset(&(port->port_status), 1587 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1588 printf("Port %d can not be set back " 1589 "to stopped\n", pi); 1590 printf("Fail to configure port %d\n", pi); 1591 /* try to reconfigure port next time */ 1592 port->need_reconfig = 1; 1593 return -1; 1594 } 1595 } 1596 if (port->need_reconfig_queues > 0) { 1597 port->need_reconfig_queues = 0; 1598 port->tx_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE; 1599 /* Apply Tx offloads configuration */ 1600 port->tx_conf.offloads = port->dev_conf.txmode.offloads; 1601 /* setup tx queues */ 1602 for (qi = 0; qi < nb_txq; qi++) { 1603 if ((numa_support) && 1604 (txring_numa[pi] != NUMA_NO_CONFIG)) 1605 diag = rte_eth_tx_queue_setup(pi, qi, 1606 nb_txd,txring_numa[pi], 1607 &(port->tx_conf)); 1608 else 1609 diag = rte_eth_tx_queue_setup(pi, qi, 1610 nb_txd,port->socket_id, 1611 &(port->tx_conf)); 1612 1613 if (diag == 0) 1614 continue; 1615 1616 /* Fail to setup tx queue, return */ 1617 if (rte_atomic16_cmpset(&(port->port_status), 1618 RTE_PORT_HANDLING, 1619 RTE_PORT_STOPPED) == 0) 1620 printf("Port %d can not be set back " 1621 "to stopped\n", pi); 1622 printf("Fail to configure port %d tx queues\n", pi); 1623 /* try to reconfigure queues next time */ 1624 port->need_reconfig_queues = 1; 1625 return -1; 1626 } 1627 /* Apply Rx offloads configuration */ 1628 port->rx_conf.offloads = port->dev_conf.rxmode.offloads; 1629 /* setup rx queues */ 1630 for (qi = 0; qi < nb_rxq; qi++) { 1631 if ((numa_support) && 1632 (rxring_numa[pi] != NUMA_NO_CONFIG)) { 1633 struct rte_mempool * mp = 1634 mbuf_pool_find(rxring_numa[pi]); 1635 if (mp == NULL) { 1636 printf("Failed to setup RX queue:" 1637 "No mempool allocation" 1638 " on the socket %d\n", 1639 rxring_numa[pi]); 1640 return -1; 1641 } 1642 1643 diag = rte_eth_rx_queue_setup(pi, qi, 1644 nb_rxd,rxring_numa[pi], 1645 &(port->rx_conf),mp); 1646 } else { 1647 struct rte_mempool *mp = 1648 mbuf_pool_find(port->socket_id); 1649 if (mp == NULL) { 1650 printf("Failed to setup RX queue:" 1651 "No mempool allocation" 1652 " on the socket %d\n", 1653 port->socket_id); 1654 return -1; 1655 } 1656 diag = rte_eth_rx_queue_setup(pi, qi, 1657 nb_rxd,port->socket_id, 1658 &(port->rx_conf), mp); 1659 } 1660 if (diag == 0) 1661 continue; 1662 1663 /* Fail to setup rx queue, return */ 1664 if (rte_atomic16_cmpset(&(port->port_status), 1665 RTE_PORT_HANDLING, 1666 RTE_PORT_STOPPED) == 0) 1667 printf("Port %d can not be set back " 1668 "to stopped\n", pi); 1669 printf("Fail to configure port %d rx queues\n", pi); 1670 /* try to reconfigure queues next time */ 1671 port->need_reconfig_queues = 1; 1672 return -1; 1673 } 1674 } 1675 1676 /* start port */ 1677 if (rte_eth_dev_start(pi) < 0) { 1678 printf("Fail to start port %d\n", pi); 1679 1680 /* Fail to setup rx queue, return */ 1681 if (rte_atomic16_cmpset(&(port->port_status), 1682 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1683 printf("Port %d can not be set back to " 1684 "stopped\n", pi); 1685 continue; 1686 } 1687 1688 if (rte_atomic16_cmpset(&(port->port_status), 1689 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0) 1690 printf("Port %d can not be set into started\n", pi); 1691 1692 rte_eth_macaddr_get(pi, &mac_addr); 1693 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi, 1694 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 1695 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 1696 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]); 1697 1698 /* at least one port started, need checking link status */ 1699 need_check_link_status = 1; 1700 } 1701 1702 for (event_type = RTE_ETH_EVENT_UNKNOWN; 1703 event_type < RTE_ETH_EVENT_MAX; 1704 event_type++) { 1705 diag = rte_eth_dev_callback_register(RTE_ETH_ALL, 1706 event_type, 1707 eth_event_callback, 1708 NULL); 1709 if (diag) { 1710 printf("Failed to setup even callback for event %d\n", 1711 event_type); 1712 return -1; 1713 } 1714 } 1715 1716 if (need_check_link_status == 1 && !no_link_check) 1717 check_all_ports_link_status(RTE_PORT_ALL); 1718 else if (need_check_link_status == 0) 1719 printf("Please stop the ports first\n"); 1720 1721 printf("Done\n"); 1722 return 0; 1723 } 1724 1725 void 1726 stop_port(portid_t pid) 1727 { 1728 portid_t pi; 1729 struct rte_port *port; 1730 int need_check_link_status = 0; 1731 1732 if (dcb_test) { 1733 dcb_test = 0; 1734 dcb_config = 0; 1735 } 1736 1737 if (port_id_is_invalid(pid, ENABLED_WARN)) 1738 return; 1739 1740 printf("Stopping ports...\n"); 1741 1742 RTE_ETH_FOREACH_DEV(pi) { 1743 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1744 continue; 1745 1746 if (port_is_forwarding(pi) != 0 && test_done == 0) { 1747 printf("Please remove port %d from forwarding configuration.\n", pi); 1748 continue; 1749 } 1750 1751 if (port_is_bonding_slave(pi)) { 1752 printf("Please remove port %d from bonded device.\n", pi); 1753 continue; 1754 } 1755 1756 port = &ports[pi]; 1757 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED, 1758 RTE_PORT_HANDLING) == 0) 1759 continue; 1760 1761 rte_eth_dev_stop(pi); 1762 1763 if (rte_atomic16_cmpset(&(port->port_status), 1764 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1765 printf("Port %d can not be set into stopped\n", pi); 1766 need_check_link_status = 1; 1767 } 1768 if (need_check_link_status && !no_link_check) 1769 check_all_ports_link_status(RTE_PORT_ALL); 1770 1771 printf("Done\n"); 1772 } 1773 1774 void 1775 close_port(portid_t pid) 1776 { 1777 portid_t pi; 1778 struct rte_port *port; 1779 1780 if (port_id_is_invalid(pid, ENABLED_WARN)) 1781 return; 1782 1783 printf("Closing ports...\n"); 1784 1785 RTE_ETH_FOREACH_DEV(pi) { 1786 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1787 continue; 1788 1789 if (port_is_forwarding(pi) != 0 && test_done == 0) { 1790 printf("Please remove port %d from forwarding configuration.\n", pi); 1791 continue; 1792 } 1793 1794 if (port_is_bonding_slave(pi)) { 1795 printf("Please remove port %d from bonded device.\n", pi); 1796 continue; 1797 } 1798 1799 port = &ports[pi]; 1800 if (rte_atomic16_cmpset(&(port->port_status), 1801 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) { 1802 printf("Port %d is already closed\n", pi); 1803 continue; 1804 } 1805 1806 if (rte_atomic16_cmpset(&(port->port_status), 1807 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) { 1808 printf("Port %d is now not stopped\n", pi); 1809 continue; 1810 } 1811 1812 if (port->flow_list) 1813 port_flow_flush(pi); 1814 rte_eth_dev_close(pi); 1815 1816 if (rte_atomic16_cmpset(&(port->port_status), 1817 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0) 1818 printf("Port %d cannot be set to closed\n", pi); 1819 } 1820 1821 printf("Done\n"); 1822 } 1823 1824 void 1825 reset_port(portid_t pid) 1826 { 1827 int diag; 1828 portid_t pi; 1829 struct rte_port *port; 1830 1831 if (port_id_is_invalid(pid, ENABLED_WARN)) 1832 return; 1833 1834 printf("Resetting ports...\n"); 1835 1836 RTE_ETH_FOREACH_DEV(pi) { 1837 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1838 continue; 1839 1840 if (port_is_forwarding(pi) != 0 && test_done == 0) { 1841 printf("Please remove port %d from forwarding " 1842 "configuration.\n", pi); 1843 continue; 1844 } 1845 1846 if (port_is_bonding_slave(pi)) { 1847 printf("Please remove port %d from bonded device.\n", 1848 pi); 1849 continue; 1850 } 1851 1852 diag = rte_eth_dev_reset(pi); 1853 if (diag == 0) { 1854 port = &ports[pi]; 1855 port->need_reconfig = 1; 1856 port->need_reconfig_queues = 1; 1857 } else { 1858 printf("Failed to reset port %d. diag=%d\n", pi, diag); 1859 } 1860 } 1861 1862 printf("Done\n"); 1863 } 1864 1865 static int 1866 eth_dev_event_callback_register(void) 1867 { 1868 int ret; 1869 1870 /* register the device event callback */ 1871 ret = rte_dev_event_callback_register(NULL, 1872 eth_dev_event_callback, NULL); 1873 if (ret) { 1874 printf("Failed to register device event callback\n"); 1875 return -1; 1876 } 1877 1878 return 0; 1879 } 1880 1881 1882 static int 1883 eth_dev_event_callback_unregister(void) 1884 { 1885 int ret; 1886 1887 /* unregister the device event callback */ 1888 ret = rte_dev_event_callback_unregister(NULL, 1889 eth_dev_event_callback, NULL); 1890 if (ret < 0) { 1891 printf("Failed to unregister device event callback\n"); 1892 return -1; 1893 } 1894 1895 return 0; 1896 } 1897 1898 void 1899 attach_port(char *identifier) 1900 { 1901 portid_t pi = 0; 1902 unsigned int socket_id; 1903 1904 printf("Attaching a new port...\n"); 1905 1906 if (identifier == NULL) { 1907 printf("Invalid parameters are specified\n"); 1908 return; 1909 } 1910 1911 if (rte_eth_dev_attach(identifier, &pi)) 1912 return; 1913 1914 socket_id = (unsigned)rte_eth_dev_socket_id(pi); 1915 /* if socket_id is invalid, set to 0 */ 1916 if (check_socket_id(socket_id) < 0) 1917 socket_id = 0; 1918 reconfig(pi, socket_id); 1919 rte_eth_promiscuous_enable(pi); 1920 1921 nb_ports = rte_eth_dev_count(); 1922 1923 ports[pi].port_status = RTE_PORT_STOPPED; 1924 1925 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports); 1926 printf("Done\n"); 1927 } 1928 1929 void 1930 detach_port(portid_t port_id) 1931 { 1932 char name[RTE_ETH_NAME_MAX_LEN]; 1933 1934 printf("Detaching a port...\n"); 1935 1936 if (!port_is_closed(port_id)) { 1937 printf("Please close port first\n"); 1938 return; 1939 } 1940 1941 if (ports[port_id].flow_list) 1942 port_flow_flush(port_id); 1943 1944 if (rte_eth_dev_detach(port_id, name)) { 1945 TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name); 1946 return; 1947 } 1948 1949 nb_ports = rte_eth_dev_count(); 1950 1951 printf("Port '%s' is detached. Now total ports is %d\n", 1952 name, nb_ports); 1953 printf("Done\n"); 1954 return; 1955 } 1956 1957 void 1958 pmd_test_exit(void) 1959 { 1960 portid_t pt_id; 1961 int ret; 1962 1963 if (test_done == 0) 1964 stop_packet_forwarding(); 1965 1966 if (ports != NULL) { 1967 no_link_check = 1; 1968 RTE_ETH_FOREACH_DEV(pt_id) { 1969 printf("\nShutting down port %d...\n", pt_id); 1970 fflush(stdout); 1971 stop_port(pt_id); 1972 close_port(pt_id); 1973 } 1974 } 1975 1976 if (hot_plug) { 1977 ret = rte_dev_event_monitor_stop(); 1978 if (ret) 1979 RTE_LOG(ERR, EAL, 1980 "fail to stop device event monitor."); 1981 1982 ret = eth_dev_event_callback_unregister(); 1983 if (ret) 1984 RTE_LOG(ERR, EAL, 1985 "fail to unregister all event callbacks."); 1986 } 1987 1988 printf("\nBye...\n"); 1989 } 1990 1991 typedef void (*cmd_func_t)(void); 1992 struct pmd_test_command { 1993 const char *cmd_name; 1994 cmd_func_t cmd_func; 1995 }; 1996 1997 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0])) 1998 1999 /* Check the link status of all ports in up to 9s, and print them finally */ 2000 static void 2001 check_all_ports_link_status(uint32_t port_mask) 2002 { 2003 #define CHECK_INTERVAL 100 /* 100ms */ 2004 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 2005 portid_t portid; 2006 uint8_t count, all_ports_up, print_flag = 0; 2007 struct rte_eth_link link; 2008 2009 printf("Checking link statuses...\n"); 2010 fflush(stdout); 2011 for (count = 0; count <= MAX_CHECK_TIME; count++) { 2012 all_ports_up = 1; 2013 RTE_ETH_FOREACH_DEV(portid) { 2014 if ((port_mask & (1 << portid)) == 0) 2015 continue; 2016 memset(&link, 0, sizeof(link)); 2017 rte_eth_link_get_nowait(portid, &link); 2018 /* print link status if flag set */ 2019 if (print_flag == 1) { 2020 if (link.link_status) 2021 printf( 2022 "Port%d Link Up. speed %u Mbps- %s\n", 2023 portid, link.link_speed, 2024 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 2025 ("full-duplex") : ("half-duplex\n")); 2026 else 2027 printf("Port %d Link Down\n", portid); 2028 continue; 2029 } 2030 /* clear all_ports_up flag if any link down */ 2031 if (link.link_status == ETH_LINK_DOWN) { 2032 all_ports_up = 0; 2033 break; 2034 } 2035 } 2036 /* after finally printing all link status, get out */ 2037 if (print_flag == 1) 2038 break; 2039 2040 if (all_ports_up == 0) { 2041 fflush(stdout); 2042 rte_delay_ms(CHECK_INTERVAL); 2043 } 2044 2045 /* set the print_flag if all ports up or timeout */ 2046 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 2047 print_flag = 1; 2048 } 2049 2050 if (lsc_interrupt) 2051 break; 2052 } 2053 } 2054 2055 static void 2056 rmv_event_callback(void *arg) 2057 { 2058 struct rte_eth_dev *dev; 2059 portid_t port_id = (intptr_t)arg; 2060 2061 RTE_ETH_VALID_PORTID_OR_RET(port_id); 2062 dev = &rte_eth_devices[port_id]; 2063 2064 stop_port(port_id); 2065 close_port(port_id); 2066 printf("removing device %s\n", dev->device->name); 2067 if (rte_eal_dev_detach(dev->device)) 2068 TESTPMD_LOG(ERR, "Failed to detach device %s\n", 2069 dev->device->name); 2070 } 2071 2072 /* This function is used by the interrupt thread */ 2073 static int 2074 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param, 2075 void *ret_param) 2076 { 2077 static const char * const event_desc[] = { 2078 [RTE_ETH_EVENT_UNKNOWN] = "Unknown", 2079 [RTE_ETH_EVENT_INTR_LSC] = "LSC", 2080 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state", 2081 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset", 2082 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox", 2083 [RTE_ETH_EVENT_MACSEC] = "MACsec", 2084 [RTE_ETH_EVENT_INTR_RMV] = "device removal", 2085 [RTE_ETH_EVENT_NEW] = "device probed", 2086 [RTE_ETH_EVENT_DESTROY] = "device released", 2087 [RTE_ETH_EVENT_MAX] = NULL, 2088 }; 2089 2090 RTE_SET_USED(param); 2091 RTE_SET_USED(ret_param); 2092 2093 if (type >= RTE_ETH_EVENT_MAX) { 2094 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n", 2095 port_id, __func__, type); 2096 fflush(stderr); 2097 } else if (event_print_mask & (UINT32_C(1) << type)) { 2098 printf("\nPort %" PRIu8 ": %s event\n", port_id, 2099 event_desc[type]); 2100 fflush(stdout); 2101 } 2102 2103 if (port_id_is_invalid(port_id, DISABLED_WARN)) 2104 return 0; 2105 2106 switch (type) { 2107 case RTE_ETH_EVENT_INTR_RMV: 2108 if (rte_eal_alarm_set(100000, 2109 rmv_event_callback, (void *)(intptr_t)port_id)) 2110 fprintf(stderr, "Could not set up deferred device removal\n"); 2111 break; 2112 default: 2113 break; 2114 } 2115 return 0; 2116 } 2117 2118 /* This function is used by the interrupt thread */ 2119 static void 2120 eth_dev_event_callback(char *device_name, enum rte_dev_event_type type, 2121 __rte_unused void *arg) 2122 { 2123 if (type >= RTE_DEV_EVENT_MAX) { 2124 fprintf(stderr, "%s called upon invalid event %d\n", 2125 __func__, type); 2126 fflush(stderr); 2127 } 2128 2129 switch (type) { 2130 case RTE_DEV_EVENT_REMOVE: 2131 RTE_LOG(ERR, EAL, "The device: %s has been removed!\n", 2132 device_name); 2133 /* TODO: After finish failure handle, begin to stop 2134 * packet forward, stop port, close port, detach port. 2135 */ 2136 break; 2137 case RTE_DEV_EVENT_ADD: 2138 RTE_LOG(ERR, EAL, "The device: %s has been added!\n", 2139 device_name); 2140 /* TODO: After finish kernel driver binding, 2141 * begin to attach port. 2142 */ 2143 break; 2144 default: 2145 break; 2146 } 2147 } 2148 2149 static int 2150 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) 2151 { 2152 uint16_t i; 2153 int diag; 2154 uint8_t mapping_found = 0; 2155 2156 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 2157 if ((tx_queue_stats_mappings[i].port_id == port_id) && 2158 (tx_queue_stats_mappings[i].queue_id < nb_txq )) { 2159 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id, 2160 tx_queue_stats_mappings[i].queue_id, 2161 tx_queue_stats_mappings[i].stats_counter_id); 2162 if (diag != 0) 2163 return diag; 2164 mapping_found = 1; 2165 } 2166 } 2167 if (mapping_found) 2168 port->tx_queue_stats_mapping_enabled = 1; 2169 return 0; 2170 } 2171 2172 static int 2173 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) 2174 { 2175 uint16_t i; 2176 int diag; 2177 uint8_t mapping_found = 0; 2178 2179 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 2180 if ((rx_queue_stats_mappings[i].port_id == port_id) && 2181 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) { 2182 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 2183 rx_queue_stats_mappings[i].queue_id, 2184 rx_queue_stats_mappings[i].stats_counter_id); 2185 if (diag != 0) 2186 return diag; 2187 mapping_found = 1; 2188 } 2189 } 2190 if (mapping_found) 2191 port->rx_queue_stats_mapping_enabled = 1; 2192 return 0; 2193 } 2194 2195 static void 2196 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port) 2197 { 2198 int diag = 0; 2199 2200 diag = set_tx_queue_stats_mapping_registers(pi, port); 2201 if (diag != 0) { 2202 if (diag == -ENOTSUP) { 2203 port->tx_queue_stats_mapping_enabled = 0; 2204 printf("TX queue stats mapping not supported port id=%d\n", pi); 2205 } 2206 else 2207 rte_exit(EXIT_FAILURE, 2208 "set_tx_queue_stats_mapping_registers " 2209 "failed for port id=%d diag=%d\n", 2210 pi, diag); 2211 } 2212 2213 diag = set_rx_queue_stats_mapping_registers(pi, port); 2214 if (diag != 0) { 2215 if (diag == -ENOTSUP) { 2216 port->rx_queue_stats_mapping_enabled = 0; 2217 printf("RX queue stats mapping not supported port id=%d\n", pi); 2218 } 2219 else 2220 rte_exit(EXIT_FAILURE, 2221 "set_rx_queue_stats_mapping_registers " 2222 "failed for port id=%d diag=%d\n", 2223 pi, diag); 2224 } 2225 } 2226 2227 static void 2228 rxtx_port_config(struct rte_port *port) 2229 { 2230 port->rx_conf = port->dev_info.default_rxconf; 2231 port->tx_conf = port->dev_info.default_txconf; 2232 2233 /* Check if any RX/TX parameters have been passed */ 2234 if (rx_pthresh != RTE_PMD_PARAM_UNSET) 2235 port->rx_conf.rx_thresh.pthresh = rx_pthresh; 2236 2237 if (rx_hthresh != RTE_PMD_PARAM_UNSET) 2238 port->rx_conf.rx_thresh.hthresh = rx_hthresh; 2239 2240 if (rx_wthresh != RTE_PMD_PARAM_UNSET) 2241 port->rx_conf.rx_thresh.wthresh = rx_wthresh; 2242 2243 if (rx_free_thresh != RTE_PMD_PARAM_UNSET) 2244 port->rx_conf.rx_free_thresh = rx_free_thresh; 2245 2246 if (rx_drop_en != RTE_PMD_PARAM_UNSET) 2247 port->rx_conf.rx_drop_en = rx_drop_en; 2248 2249 if (tx_pthresh != RTE_PMD_PARAM_UNSET) 2250 port->tx_conf.tx_thresh.pthresh = tx_pthresh; 2251 2252 if (tx_hthresh != RTE_PMD_PARAM_UNSET) 2253 port->tx_conf.tx_thresh.hthresh = tx_hthresh; 2254 2255 if (tx_wthresh != RTE_PMD_PARAM_UNSET) 2256 port->tx_conf.tx_thresh.wthresh = tx_wthresh; 2257 2258 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET) 2259 port->tx_conf.tx_rs_thresh = tx_rs_thresh; 2260 2261 if (tx_free_thresh != RTE_PMD_PARAM_UNSET) 2262 port->tx_conf.tx_free_thresh = tx_free_thresh; 2263 } 2264 2265 void 2266 init_port_config(void) 2267 { 2268 portid_t pid; 2269 struct rte_port *port; 2270 2271 RTE_ETH_FOREACH_DEV(pid) { 2272 port = &ports[pid]; 2273 port->dev_conf.fdir_conf = fdir_conf; 2274 if (nb_rxq > 1) { 2275 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 2276 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf; 2277 } else { 2278 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 2279 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; 2280 } 2281 2282 if (port->dcb_flag == 0) { 2283 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) 2284 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS; 2285 else 2286 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE; 2287 } 2288 2289 rxtx_port_config(port); 2290 2291 rte_eth_macaddr_get(pid, &port->eth_addr); 2292 2293 map_port_queue_stats_mapping_registers(pid, port); 2294 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS 2295 rte_pmd_ixgbe_bypass_init(pid); 2296 #endif 2297 2298 if (lsc_interrupt && 2299 (rte_eth_devices[pid].data->dev_flags & 2300 RTE_ETH_DEV_INTR_LSC)) 2301 port->dev_conf.intr_conf.lsc = 1; 2302 if (rmv_interrupt && 2303 (rte_eth_devices[pid].data->dev_flags & 2304 RTE_ETH_DEV_INTR_RMV)) 2305 port->dev_conf.intr_conf.rmv = 1; 2306 2307 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED 2308 /* Detect softnic port */ 2309 if (!strcmp(port->dev_info.driver_name, "net_softnic")) { 2310 port->softnic_enable = 1; 2311 memset(&port->softport, 0, sizeof(struct softnic_port)); 2312 2313 if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm")) 2314 port->softport.tm_flag = 1; 2315 } 2316 #endif 2317 } 2318 } 2319 2320 void set_port_slave_flag(portid_t slave_pid) 2321 { 2322 struct rte_port *port; 2323 2324 port = &ports[slave_pid]; 2325 port->slave_flag = 1; 2326 } 2327 2328 void clear_port_slave_flag(portid_t slave_pid) 2329 { 2330 struct rte_port *port; 2331 2332 port = &ports[slave_pid]; 2333 port->slave_flag = 0; 2334 } 2335 2336 uint8_t port_is_bonding_slave(portid_t slave_pid) 2337 { 2338 struct rte_port *port; 2339 2340 port = &ports[slave_pid]; 2341 return port->slave_flag; 2342 } 2343 2344 const uint16_t vlan_tags[] = { 2345 0, 1, 2, 3, 4, 5, 6, 7, 2346 8, 9, 10, 11, 12, 13, 14, 15, 2347 16, 17, 18, 19, 20, 21, 22, 23, 2348 24, 25, 26, 27, 28, 29, 30, 31 2349 }; 2350 2351 static int 2352 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, 2353 enum dcb_mode_enable dcb_mode, 2354 enum rte_eth_nb_tcs num_tcs, 2355 uint8_t pfc_en) 2356 { 2357 uint8_t i; 2358 2359 /* 2360 * Builds up the correct configuration for dcb+vt based on the vlan tags array 2361 * given above, and the number of traffic classes available for use. 2362 */ 2363 if (dcb_mode == DCB_VT_ENABLED) { 2364 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 2365 ð_conf->rx_adv_conf.vmdq_dcb_conf; 2366 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = 2367 ð_conf->tx_adv_conf.vmdq_dcb_tx_conf; 2368 2369 /* VMDQ+DCB RX and TX configurations */ 2370 vmdq_rx_conf->enable_default_pool = 0; 2371 vmdq_rx_conf->default_pool = 0; 2372 vmdq_rx_conf->nb_queue_pools = 2373 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 2374 vmdq_tx_conf->nb_queue_pools = 2375 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 2376 2377 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools; 2378 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) { 2379 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i]; 2380 vmdq_rx_conf->pool_map[i].pools = 2381 1 << (i % vmdq_rx_conf->nb_queue_pools); 2382 } 2383 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 2384 vmdq_rx_conf->dcb_tc[i] = i % num_tcs; 2385 vmdq_tx_conf->dcb_tc[i] = i % num_tcs; 2386 } 2387 2388 /* set DCB mode of RX and TX of multiple queues */ 2389 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB; 2390 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; 2391 } else { 2392 struct rte_eth_dcb_rx_conf *rx_conf = 2393 ð_conf->rx_adv_conf.dcb_rx_conf; 2394 struct rte_eth_dcb_tx_conf *tx_conf = 2395 ð_conf->tx_adv_conf.dcb_tx_conf; 2396 2397 rx_conf->nb_tcs = num_tcs; 2398 tx_conf->nb_tcs = num_tcs; 2399 2400 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 2401 rx_conf->dcb_tc[i] = i % num_tcs; 2402 tx_conf->dcb_tc[i] = i % num_tcs; 2403 } 2404 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS; 2405 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf; 2406 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB; 2407 } 2408 2409 if (pfc_en) 2410 eth_conf->dcb_capability_en = 2411 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT; 2412 else 2413 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; 2414 2415 return 0; 2416 } 2417 2418 int 2419 init_port_dcb_config(portid_t pid, 2420 enum dcb_mode_enable dcb_mode, 2421 enum rte_eth_nb_tcs num_tcs, 2422 uint8_t pfc_en) 2423 { 2424 struct rte_eth_conf port_conf; 2425 struct rte_port *rte_port; 2426 int retval; 2427 uint16_t i; 2428 2429 rte_port = &ports[pid]; 2430 2431 memset(&port_conf, 0, sizeof(struct rte_eth_conf)); 2432 /* Enter DCB configuration status */ 2433 dcb_config = 1; 2434 2435 port_conf.rxmode = rte_port->dev_conf.rxmode; 2436 port_conf.txmode = rte_port->dev_conf.txmode; 2437 2438 /*set configuration of DCB in vt mode and DCB in non-vt mode*/ 2439 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en); 2440 if (retval < 0) 2441 return retval; 2442 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 2443 2444 /** 2445 * Write the configuration into the device. 2446 * Set the numbers of RX & TX queues to 0, so 2447 * the RX & TX queues will not be setup. 2448 */ 2449 rte_eth_dev_configure(pid, 0, 0, &port_conf); 2450 2451 rte_eth_dev_info_get(pid, &rte_port->dev_info); 2452 2453 /* If dev_info.vmdq_pool_base is greater than 0, 2454 * the queue id of vmdq pools is started after pf queues. 2455 */ 2456 if (dcb_mode == DCB_VT_ENABLED && 2457 rte_port->dev_info.vmdq_pool_base > 0) { 2458 printf("VMDQ_DCB multi-queue mode is nonsensical" 2459 " for port %d.", pid); 2460 return -1; 2461 } 2462 2463 /* Assume the ports in testpmd have the same dcb capability 2464 * and has the same number of rxq and txq in dcb mode 2465 */ 2466 if (dcb_mode == DCB_VT_ENABLED) { 2467 if (rte_port->dev_info.max_vfs > 0) { 2468 nb_rxq = rte_port->dev_info.nb_rx_queues; 2469 nb_txq = rte_port->dev_info.nb_tx_queues; 2470 } else { 2471 nb_rxq = rte_port->dev_info.max_rx_queues; 2472 nb_txq = rte_port->dev_info.max_tx_queues; 2473 } 2474 } else { 2475 /*if vt is disabled, use all pf queues */ 2476 if (rte_port->dev_info.vmdq_pool_base == 0) { 2477 nb_rxq = rte_port->dev_info.max_rx_queues; 2478 nb_txq = rte_port->dev_info.max_tx_queues; 2479 } else { 2480 nb_rxq = (queueid_t)num_tcs; 2481 nb_txq = (queueid_t)num_tcs; 2482 2483 } 2484 } 2485 rx_free_thresh = 64; 2486 2487 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf)); 2488 2489 rxtx_port_config(rte_port); 2490 /* VLAN filter */ 2491 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 2492 for (i = 0; i < RTE_DIM(vlan_tags); i++) 2493 rx_vft_set(pid, vlan_tags[i], 1); 2494 2495 rte_eth_macaddr_get(pid, &rte_port->eth_addr); 2496 map_port_queue_stats_mapping_registers(pid, rte_port); 2497 2498 rte_port->dcb_flag = 1; 2499 2500 return 0; 2501 } 2502 2503 static void 2504 init_port(void) 2505 { 2506 /* Configuration of Ethernet ports. */ 2507 ports = rte_zmalloc("testpmd: ports", 2508 sizeof(struct rte_port) * RTE_MAX_ETHPORTS, 2509 RTE_CACHE_LINE_SIZE); 2510 if (ports == NULL) { 2511 rte_exit(EXIT_FAILURE, 2512 "rte_zmalloc(%d struct rte_port) failed\n", 2513 RTE_MAX_ETHPORTS); 2514 } 2515 } 2516 2517 static void 2518 force_quit(void) 2519 { 2520 pmd_test_exit(); 2521 prompt_exit(); 2522 } 2523 2524 static void 2525 print_stats(void) 2526 { 2527 uint8_t i; 2528 const char clr[] = { 27, '[', '2', 'J', '\0' }; 2529 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' }; 2530 2531 /* Clear screen and move to top left */ 2532 printf("%s%s", clr, top_left); 2533 2534 printf("\nPort statistics ===================================="); 2535 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 2536 nic_stats_display(fwd_ports_ids[i]); 2537 } 2538 2539 static void 2540 signal_handler(int signum) 2541 { 2542 if (signum == SIGINT || signum == SIGTERM) { 2543 printf("\nSignal %d received, preparing to exit...\n", 2544 signum); 2545 #ifdef RTE_LIBRTE_PDUMP 2546 /* uninitialize packet capture framework */ 2547 rte_pdump_uninit(); 2548 #endif 2549 #ifdef RTE_LIBRTE_LATENCY_STATS 2550 rte_latencystats_uninit(); 2551 #endif 2552 force_quit(); 2553 /* Set flag to indicate the force termination. */ 2554 f_quit = 1; 2555 /* exit with the expected status */ 2556 signal(signum, SIG_DFL); 2557 kill(getpid(), signum); 2558 } 2559 } 2560 2561 int 2562 main(int argc, char** argv) 2563 { 2564 int diag; 2565 portid_t port_id; 2566 int ret; 2567 2568 signal(SIGINT, signal_handler); 2569 signal(SIGTERM, signal_handler); 2570 2571 diag = rte_eal_init(argc, argv); 2572 if (diag < 0) 2573 rte_panic("Cannot init EAL\n"); 2574 2575 testpmd_logtype = rte_log_register("testpmd"); 2576 if (testpmd_logtype < 0) 2577 rte_panic("Cannot register log type"); 2578 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG); 2579 2580 if (mlockall(MCL_CURRENT | MCL_FUTURE)) { 2581 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n", 2582 strerror(errno)); 2583 } 2584 2585 #ifdef RTE_LIBRTE_PDUMP 2586 /* initialize packet capture framework */ 2587 rte_pdump_init(NULL); 2588 #endif 2589 2590 nb_ports = (portid_t) rte_eth_dev_count(); 2591 if (nb_ports == 0) 2592 TESTPMD_LOG(WARNING, "No probed ethernet devices\n"); 2593 2594 /* allocate port structures, and init them */ 2595 init_port(); 2596 2597 set_def_fwd_config(); 2598 if (nb_lcores == 0) 2599 rte_panic("Empty set of forwarding logical cores - check the " 2600 "core mask supplied in the command parameters\n"); 2601 2602 /* Bitrate/latency stats disabled by default */ 2603 #ifdef RTE_LIBRTE_BITRATE 2604 bitrate_enabled = 0; 2605 #endif 2606 #ifdef RTE_LIBRTE_LATENCY_STATS 2607 latencystats_enabled = 0; 2608 #endif 2609 2610 argc -= diag; 2611 argv += diag; 2612 if (argc > 1) 2613 launch_args_parse(argc, argv); 2614 2615 if (tx_first && interactive) 2616 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on " 2617 "interactive mode.\n"); 2618 2619 if (tx_first && lsc_interrupt) { 2620 printf("Warning: lsc_interrupt needs to be off when " 2621 " using tx_first. Disabling.\n"); 2622 lsc_interrupt = 0; 2623 } 2624 2625 if (!nb_rxq && !nb_txq) 2626 printf("Warning: Either rx or tx queues should be non-zero\n"); 2627 2628 if (nb_rxq > 1 && nb_rxq > nb_txq) 2629 printf("Warning: nb_rxq=%d enables RSS configuration, " 2630 "but nb_txq=%d will prevent to fully test it.\n", 2631 nb_rxq, nb_txq); 2632 2633 init_config(); 2634 2635 if (hot_plug) { 2636 /* enable hot plug monitoring */ 2637 ret = rte_dev_event_monitor_start(); 2638 if (ret) { 2639 rte_errno = EINVAL; 2640 return -1; 2641 } 2642 eth_dev_event_callback_register(); 2643 2644 } 2645 2646 if (start_port(RTE_PORT_ALL) != 0) 2647 rte_exit(EXIT_FAILURE, "Start ports failed\n"); 2648 2649 /* set all ports to promiscuous mode by default */ 2650 RTE_ETH_FOREACH_DEV(port_id) 2651 rte_eth_promiscuous_enable(port_id); 2652 2653 /* Init metrics library */ 2654 rte_metrics_init(rte_socket_id()); 2655 2656 #ifdef RTE_LIBRTE_LATENCY_STATS 2657 if (latencystats_enabled != 0) { 2658 int ret = rte_latencystats_init(1, NULL); 2659 if (ret) 2660 printf("Warning: latencystats init()" 2661 " returned error %d\n", ret); 2662 printf("Latencystats running on lcore %d\n", 2663 latencystats_lcore_id); 2664 } 2665 #endif 2666 2667 /* Setup bitrate stats */ 2668 #ifdef RTE_LIBRTE_BITRATE 2669 if (bitrate_enabled != 0) { 2670 bitrate_data = rte_stats_bitrate_create(); 2671 if (bitrate_data == NULL) 2672 rte_exit(EXIT_FAILURE, 2673 "Could not allocate bitrate data.\n"); 2674 rte_stats_bitrate_reg(bitrate_data); 2675 } 2676 #endif 2677 2678 #ifdef RTE_LIBRTE_CMDLINE 2679 if (strlen(cmdline_filename) != 0) 2680 cmdline_read_from_file(cmdline_filename); 2681 2682 if (interactive == 1) { 2683 if (auto_start) { 2684 printf("Start automatic packet forwarding\n"); 2685 start_packet_forwarding(0); 2686 } 2687 prompt(); 2688 pmd_test_exit(); 2689 } else 2690 #endif 2691 { 2692 char c; 2693 int rc; 2694 2695 f_quit = 0; 2696 2697 printf("No commandline core given, start packet forwarding\n"); 2698 start_packet_forwarding(tx_first); 2699 if (stats_period != 0) { 2700 uint64_t prev_time = 0, cur_time, diff_time = 0; 2701 uint64_t timer_period; 2702 2703 /* Convert to number of cycles */ 2704 timer_period = stats_period * rte_get_timer_hz(); 2705 2706 while (f_quit == 0) { 2707 cur_time = rte_get_timer_cycles(); 2708 diff_time += cur_time - prev_time; 2709 2710 if (diff_time >= timer_period) { 2711 print_stats(); 2712 /* Reset the timer */ 2713 diff_time = 0; 2714 } 2715 /* Sleep to avoid unnecessary checks */ 2716 prev_time = cur_time; 2717 sleep(1); 2718 } 2719 } 2720 2721 printf("Press enter to exit\n"); 2722 rc = read(0, &c, 1); 2723 pmd_test_exit(); 2724 if (rc < 0) 2725 return 1; 2726 } 2727 2728 return 0; 2729 } 2730