1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2017 Intel Corporation 3 */ 4 5 #include <stdarg.h> 6 #include <stdio.h> 7 #include <stdlib.h> 8 #include <signal.h> 9 #include <string.h> 10 #include <time.h> 11 #include <fcntl.h> 12 #include <sys/mman.h> 13 #include <sys/types.h> 14 #include <errno.h> 15 16 #include <sys/queue.h> 17 #include <sys/stat.h> 18 19 #include <stdint.h> 20 #include <unistd.h> 21 #include <inttypes.h> 22 23 #include <rte_common.h> 24 #include <rte_errno.h> 25 #include <rte_byteorder.h> 26 #include <rte_log.h> 27 #include <rte_debug.h> 28 #include <rte_cycles.h> 29 #include <rte_memory.h> 30 #include <rte_memcpy.h> 31 #include <rte_launch.h> 32 #include <rte_eal.h> 33 #include <rte_alarm.h> 34 #include <rte_per_lcore.h> 35 #include <rte_lcore.h> 36 #include <rte_atomic.h> 37 #include <rte_branch_prediction.h> 38 #include <rte_mempool.h> 39 #include <rte_malloc.h> 40 #include <rte_mbuf.h> 41 #include <rte_interrupts.h> 42 #include <rte_pci.h> 43 #include <rte_ether.h> 44 #include <rte_ethdev.h> 45 #include <rte_dev.h> 46 #include <rte_string_fns.h> 47 #ifdef RTE_LIBRTE_IXGBE_PMD 48 #include <rte_pmd_ixgbe.h> 49 #endif 50 #ifdef RTE_LIBRTE_PDUMP 51 #include <rte_pdump.h> 52 #endif 53 #include <rte_flow.h> 54 #include <rte_metrics.h> 55 #ifdef RTE_LIBRTE_BITRATE 56 #include <rte_bitrate.h> 57 #endif 58 #ifdef RTE_LIBRTE_LATENCY_STATS 59 #include <rte_latencystats.h> 60 #endif 61 62 #include "testpmd.h" 63 64 uint16_t verbose_level = 0; /**< Silent by default. */ 65 int testpmd_logtype; /**< Log type for testpmd logs */ 66 67 /* use master core for command line ? */ 68 uint8_t interactive = 0; 69 uint8_t auto_start = 0; 70 uint8_t tx_first; 71 char cmdline_filename[PATH_MAX] = {0}; 72 73 /* 74 * NUMA support configuration. 75 * When set, the NUMA support attempts to dispatch the allocation of the 76 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the 77 * probed ports among the CPU sockets 0 and 1. 78 * Otherwise, all memory is allocated from CPU socket 0. 79 */ 80 uint8_t numa_support = 1; /**< numa enabled by default */ 81 82 /* 83 * In UMA mode,all memory is allocated from socket 0 if --socket-num is 84 * not configured. 85 */ 86 uint8_t socket_num = UMA_NO_CONFIG; 87 88 /* 89 * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs. 90 */ 91 uint8_t mp_anon = 0; 92 93 /* 94 * Record the Ethernet address of peer target ports to which packets are 95 * forwarded. 96 * Must be instantiated with the ethernet addresses of peer traffic generator 97 * ports. 98 */ 99 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS]; 100 portid_t nb_peer_eth_addrs = 0; 101 102 /* 103 * Probed Target Environment. 104 */ 105 struct rte_port *ports; /**< For all probed ethernet ports. */ 106 portid_t nb_ports; /**< Number of probed ethernet ports. */ 107 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */ 108 lcoreid_t nb_lcores; /**< Number of probed logical cores. */ 109 110 /* 111 * Test Forwarding Configuration. 112 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores 113 * nb_fwd_ports <= nb_cfg_ports <= nb_ports 114 */ 115 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */ 116 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */ 117 portid_t nb_cfg_ports; /**< Number of configured ports. */ 118 portid_t nb_fwd_ports; /**< Number of forwarding ports. */ 119 120 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */ 121 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */ 122 123 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */ 124 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */ 125 126 /* 127 * Forwarding engines. 128 */ 129 struct fwd_engine * fwd_engines[] = { 130 &io_fwd_engine, 131 &mac_fwd_engine, 132 &mac_swap_engine, 133 &flow_gen_engine, 134 &rx_only_engine, 135 &tx_only_engine, 136 &csum_fwd_engine, 137 &icmp_echo_engine, 138 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED 139 &softnic_tm_engine, 140 &softnic_tm_bypass_engine, 141 #endif 142 #ifdef RTE_LIBRTE_IEEE1588 143 &ieee1588_fwd_engine, 144 #endif 145 NULL, 146 }; 147 148 struct fwd_config cur_fwd_config; 149 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */ 150 uint32_t retry_enabled; 151 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US; 152 uint32_t burst_tx_retry_num = BURST_TX_RETRIES; 153 154 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */ 155 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if 156 * specified on command-line. */ 157 uint16_t stats_period; /**< Period to show statistics (disabled by default) */ 158 159 /* 160 * In container, it cannot terminate the process which running with 'stats-period' 161 * option. Set flag to exit stats period loop after received SIGINT/SIGTERM. 162 */ 163 uint8_t f_quit; 164 165 /* 166 * Configuration of packet segments used by the "txonly" processing engine. 167 */ 168 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */ 169 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = { 170 TXONLY_DEF_PACKET_LEN, 171 }; 172 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */ 173 174 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF; 175 /**< Split policy for packets to TX. */ 176 177 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */ 178 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */ 179 180 /* current configuration is in DCB or not,0 means it is not in DCB mode */ 181 uint8_t dcb_config = 0; 182 183 /* Whether the dcb is in testing status */ 184 uint8_t dcb_test = 0; 185 186 /* 187 * Configurable number of RX/TX queues. 188 */ 189 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */ 190 queueid_t nb_txq = 1; /**< Number of TX queues per port. */ 191 192 /* 193 * Configurable number of RX/TX ring descriptors. 194 */ 195 #define RTE_TEST_RX_DESC_DEFAULT 128 196 #define RTE_TEST_TX_DESC_DEFAULT 512 197 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */ 198 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */ 199 200 #define RTE_PMD_PARAM_UNSET -1 201 /* 202 * Configurable values of RX and TX ring threshold registers. 203 */ 204 205 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET; 206 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET; 207 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET; 208 209 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET; 210 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET; 211 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET; 212 213 /* 214 * Configurable value of RX free threshold. 215 */ 216 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET; 217 218 /* 219 * Configurable value of RX drop enable. 220 */ 221 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET; 222 223 /* 224 * Configurable value of TX free threshold. 225 */ 226 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET; 227 228 /* 229 * Configurable value of TX RS bit threshold. 230 */ 231 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET; 232 233 /* 234 * Receive Side Scaling (RSS) configuration. 235 */ 236 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */ 237 238 /* 239 * Port topology configuration 240 */ 241 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */ 242 243 /* 244 * Avoids to flush all the RX streams before starts forwarding. 245 */ 246 uint8_t no_flush_rx = 0; /* flush by default */ 247 248 /* 249 * Flow API isolated mode. 250 */ 251 uint8_t flow_isolate_all; 252 253 /* 254 * Avoids to check link status when starting/stopping a port. 255 */ 256 uint8_t no_link_check = 0; /* check by default */ 257 258 /* 259 * Enable link status change notification 260 */ 261 uint8_t lsc_interrupt = 1; /* enabled by default */ 262 263 /* 264 * Enable device removal notification. 265 */ 266 uint8_t rmv_interrupt = 1; /* enabled by default */ 267 268 /* 269 * Display or mask ether events 270 * Default to all events except VF_MBOX 271 */ 272 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) | 273 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) | 274 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) | 275 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) | 276 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) | 277 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV); 278 279 /* 280 * NIC bypass mode configuration options. 281 */ 282 283 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS 284 /* The NIC bypass watchdog timeout. */ 285 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF; 286 #endif 287 288 289 #ifdef RTE_LIBRTE_LATENCY_STATS 290 291 /* 292 * Set when latency stats is enabled in the commandline 293 */ 294 uint8_t latencystats_enabled; 295 296 /* 297 * Lcore ID to serive latency statistics. 298 */ 299 lcoreid_t latencystats_lcore_id = -1; 300 301 #endif 302 303 /* 304 * Ethernet device configuration. 305 */ 306 struct rte_eth_rxmode rx_mode = { 307 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */ 308 .offloads = (DEV_RX_OFFLOAD_VLAN_FILTER | 309 DEV_RX_OFFLOAD_VLAN_STRIP | 310 DEV_RX_OFFLOAD_CRC_STRIP), 311 .ignore_offload_bitfield = 1, 312 }; 313 314 struct rte_eth_txmode tx_mode; 315 316 struct rte_fdir_conf fdir_conf = { 317 .mode = RTE_FDIR_MODE_NONE, 318 .pballoc = RTE_FDIR_PBALLOC_64K, 319 .status = RTE_FDIR_REPORT_STATUS, 320 .mask = { 321 .vlan_tci_mask = 0x0, 322 .ipv4_mask = { 323 .src_ip = 0xFFFFFFFF, 324 .dst_ip = 0xFFFFFFFF, 325 }, 326 .ipv6_mask = { 327 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 328 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 329 }, 330 .src_port_mask = 0xFFFF, 331 .dst_port_mask = 0xFFFF, 332 .mac_addr_byte_mask = 0xFF, 333 .tunnel_type_mask = 1, 334 .tunnel_id_mask = 0xFFFFFFFF, 335 }, 336 .drop_queue = 127, 337 }; 338 339 volatile int test_done = 1; /* stop packet forwarding when set to 1. */ 340 341 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS]; 342 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS]; 343 344 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array; 345 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array; 346 347 uint16_t nb_tx_queue_stats_mappings = 0; 348 uint16_t nb_rx_queue_stats_mappings = 0; 349 350 /* 351 * Display zero values by default for xstats 352 */ 353 uint8_t xstats_hide_zero; 354 355 unsigned int num_sockets = 0; 356 unsigned int socket_ids[RTE_MAX_NUMA_NODES]; 357 358 #ifdef RTE_LIBRTE_BITRATE 359 /* Bitrate statistics */ 360 struct rte_stats_bitrates *bitrate_data; 361 lcoreid_t bitrate_lcore_id; 362 uint8_t bitrate_enabled; 363 #endif 364 365 struct gro_status gro_ports[RTE_MAX_ETHPORTS]; 366 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES; 367 368 /* Forward function declarations */ 369 static void map_port_queue_stats_mapping_registers(portid_t pi, 370 struct rte_port *port); 371 static void check_all_ports_link_status(uint32_t port_mask); 372 static int eth_event_callback(portid_t port_id, 373 enum rte_eth_event_type type, 374 void *param, void *ret_param); 375 376 /* 377 * Check if all the ports are started. 378 * If yes, return positive value. If not, return zero. 379 */ 380 static int all_ports_started(void); 381 382 struct gso_status gso_ports[RTE_MAX_ETHPORTS]; 383 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN; 384 385 /* 386 * Helper function to check if socket is already discovered. 387 * If yes, return positive value. If not, return zero. 388 */ 389 int 390 new_socket_id(unsigned int socket_id) 391 { 392 unsigned int i; 393 394 for (i = 0; i < num_sockets; i++) { 395 if (socket_ids[i] == socket_id) 396 return 0; 397 } 398 return 1; 399 } 400 401 /* 402 * Setup default configuration. 403 */ 404 static void 405 set_default_fwd_lcores_config(void) 406 { 407 unsigned int i; 408 unsigned int nb_lc; 409 unsigned int sock_num; 410 411 nb_lc = 0; 412 for (i = 0; i < RTE_MAX_LCORE; i++) { 413 sock_num = rte_lcore_to_socket_id(i); 414 if (new_socket_id(sock_num)) { 415 if (num_sockets >= RTE_MAX_NUMA_NODES) { 416 rte_exit(EXIT_FAILURE, 417 "Total sockets greater than %u\n", 418 RTE_MAX_NUMA_NODES); 419 } 420 socket_ids[num_sockets++] = sock_num; 421 } 422 if (!rte_lcore_is_enabled(i)) 423 continue; 424 if (i == rte_get_master_lcore()) 425 continue; 426 fwd_lcores_cpuids[nb_lc++] = i; 427 } 428 nb_lcores = (lcoreid_t) nb_lc; 429 nb_cfg_lcores = nb_lcores; 430 nb_fwd_lcores = 1; 431 } 432 433 static void 434 set_def_peer_eth_addrs(void) 435 { 436 portid_t i; 437 438 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 439 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR; 440 peer_eth_addrs[i].addr_bytes[5] = i; 441 } 442 } 443 444 static void 445 set_default_fwd_ports_config(void) 446 { 447 portid_t pt_id; 448 int i = 0; 449 450 RTE_ETH_FOREACH_DEV(pt_id) 451 fwd_ports_ids[i++] = pt_id; 452 453 nb_cfg_ports = nb_ports; 454 nb_fwd_ports = nb_ports; 455 } 456 457 void 458 set_def_fwd_config(void) 459 { 460 set_default_fwd_lcores_config(); 461 set_def_peer_eth_addrs(); 462 set_default_fwd_ports_config(); 463 } 464 465 /* 466 * Configuration initialisation done once at init time. 467 */ 468 static void 469 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf, 470 unsigned int socket_id) 471 { 472 char pool_name[RTE_MEMPOOL_NAMESIZE]; 473 struct rte_mempool *rte_mp = NULL; 474 uint32_t mb_size; 475 476 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size; 477 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name)); 478 479 TESTPMD_LOG(INFO, 480 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n", 481 pool_name, nb_mbuf, mbuf_seg_size, socket_id); 482 483 if (mp_anon != 0) { 484 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf, 485 mb_size, (unsigned) mb_mempool_cache, 486 sizeof(struct rte_pktmbuf_pool_private), 487 socket_id, 0); 488 if (rte_mp == NULL) 489 goto err; 490 491 if (rte_mempool_populate_anon(rte_mp) == 0) { 492 rte_mempool_free(rte_mp); 493 rte_mp = NULL; 494 goto err; 495 } 496 rte_pktmbuf_pool_init(rte_mp, NULL); 497 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL); 498 } else { 499 /* wrapper to rte_mempool_create() */ 500 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf, 501 mb_mempool_cache, 0, mbuf_seg_size, socket_id); 502 } 503 504 err: 505 if (rte_mp == NULL) { 506 rte_exit(EXIT_FAILURE, 507 "Creation of mbuf pool for socket %u failed: %s\n", 508 socket_id, rte_strerror(rte_errno)); 509 } else if (verbose_level > 0) { 510 rte_mempool_dump(stdout, rte_mp); 511 } 512 } 513 514 /* 515 * Check given socket id is valid or not with NUMA mode, 516 * if valid, return 0, else return -1 517 */ 518 static int 519 check_socket_id(const unsigned int socket_id) 520 { 521 static int warning_once = 0; 522 523 if (new_socket_id(socket_id)) { 524 if (!warning_once && numa_support) 525 printf("Warning: NUMA should be configured manually by" 526 " using --port-numa-config and" 527 " --ring-numa-config parameters along with" 528 " --numa.\n"); 529 warning_once = 1; 530 return -1; 531 } 532 return 0; 533 } 534 535 static void 536 init_config(void) 537 { 538 portid_t pid; 539 struct rte_port *port; 540 struct rte_mempool *mbp; 541 unsigned int nb_mbuf_per_pool; 542 lcoreid_t lc_id; 543 uint8_t port_per_socket[RTE_MAX_NUMA_NODES]; 544 struct rte_gro_param gro_param; 545 uint32_t gso_types; 546 547 memset(port_per_socket,0,RTE_MAX_NUMA_NODES); 548 549 if (numa_support) { 550 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 551 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 552 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 553 } 554 555 /* Configuration of logical cores. */ 556 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores", 557 sizeof(struct fwd_lcore *) * nb_lcores, 558 RTE_CACHE_LINE_SIZE); 559 if (fwd_lcores == NULL) { 560 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) " 561 "failed\n", nb_lcores); 562 } 563 for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 564 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore", 565 sizeof(struct fwd_lcore), 566 RTE_CACHE_LINE_SIZE); 567 if (fwd_lcores[lc_id] == NULL) { 568 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) " 569 "failed\n"); 570 } 571 fwd_lcores[lc_id]->cpuid_idx = lc_id; 572 } 573 574 RTE_ETH_FOREACH_DEV(pid) { 575 port = &ports[pid]; 576 /* Apply default Tx configuration for all ports */ 577 port->dev_conf.txmode = tx_mode; 578 rte_eth_dev_info_get(pid, &port->dev_info); 579 580 if (numa_support) { 581 if (port_numa[pid] != NUMA_NO_CONFIG) 582 port_per_socket[port_numa[pid]]++; 583 else { 584 uint32_t socket_id = rte_eth_dev_socket_id(pid); 585 586 /* if socket_id is invalid, set to 0 */ 587 if (check_socket_id(socket_id) < 0) 588 socket_id = 0; 589 port_per_socket[socket_id]++; 590 } 591 } 592 593 /* set flag to initialize port/queue */ 594 port->need_reconfig = 1; 595 port->need_reconfig_queues = 1; 596 } 597 598 /* 599 * Create pools of mbuf. 600 * If NUMA support is disabled, create a single pool of mbuf in 601 * socket 0 memory by default. 602 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1. 603 * 604 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and 605 * nb_txd can be configured at run time. 606 */ 607 if (param_total_num_mbufs) 608 nb_mbuf_per_pool = param_total_num_mbufs; 609 else { 610 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + 611 (nb_lcores * mb_mempool_cache) + 612 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST; 613 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS; 614 } 615 616 if (numa_support) { 617 uint8_t i; 618 619 for (i = 0; i < num_sockets; i++) 620 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 621 socket_ids[i]); 622 } else { 623 if (socket_num == UMA_NO_CONFIG) 624 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0); 625 else 626 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 627 socket_num); 628 } 629 630 init_port_config(); 631 632 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO | 633 DEV_TX_OFFLOAD_GRE_TNL_TSO; 634 /* 635 * Records which Mbuf pool to use by each logical core, if needed. 636 */ 637 for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 638 mbp = mbuf_pool_find( 639 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id])); 640 641 if (mbp == NULL) 642 mbp = mbuf_pool_find(0); 643 fwd_lcores[lc_id]->mbp = mbp; 644 /* initialize GSO context */ 645 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp; 646 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp; 647 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types; 648 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN - 649 ETHER_CRC_LEN; 650 fwd_lcores[lc_id]->gso_ctx.flag = 0; 651 } 652 653 /* Configuration of packet forwarding streams. */ 654 if (init_fwd_streams() < 0) 655 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n"); 656 657 fwd_config_setup(); 658 659 /* create a gro context for each lcore */ 660 gro_param.gro_types = RTE_GRO_TCP_IPV4; 661 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES; 662 gro_param.max_item_per_flow = MAX_PKT_BURST; 663 for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 664 gro_param.socket_id = rte_lcore_to_socket_id( 665 fwd_lcores_cpuids[lc_id]); 666 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param); 667 if (fwd_lcores[lc_id]->gro_ctx == NULL) { 668 rte_exit(EXIT_FAILURE, 669 "rte_gro_ctx_create() failed\n"); 670 } 671 } 672 } 673 674 675 void 676 reconfig(portid_t new_port_id, unsigned socket_id) 677 { 678 struct rte_port *port; 679 680 /* Reconfiguration of Ethernet ports. */ 681 port = &ports[new_port_id]; 682 rte_eth_dev_info_get(new_port_id, &port->dev_info); 683 684 /* set flag to initialize port/queue */ 685 port->need_reconfig = 1; 686 port->need_reconfig_queues = 1; 687 port->socket_id = socket_id; 688 689 init_port_config(); 690 } 691 692 693 int 694 init_fwd_streams(void) 695 { 696 portid_t pid; 697 struct rte_port *port; 698 streamid_t sm_id, nb_fwd_streams_new; 699 queueid_t q; 700 701 /* set socket id according to numa or not */ 702 RTE_ETH_FOREACH_DEV(pid) { 703 port = &ports[pid]; 704 if (nb_rxq > port->dev_info.max_rx_queues) { 705 printf("Fail: nb_rxq(%d) is greater than " 706 "max_rx_queues(%d)\n", nb_rxq, 707 port->dev_info.max_rx_queues); 708 return -1; 709 } 710 if (nb_txq > port->dev_info.max_tx_queues) { 711 printf("Fail: nb_txq(%d) is greater than " 712 "max_tx_queues(%d)\n", nb_txq, 713 port->dev_info.max_tx_queues); 714 return -1; 715 } 716 if (numa_support) { 717 if (port_numa[pid] != NUMA_NO_CONFIG) 718 port->socket_id = port_numa[pid]; 719 else { 720 port->socket_id = rte_eth_dev_socket_id(pid); 721 722 /* if socket_id is invalid, set to 0 */ 723 if (check_socket_id(port->socket_id) < 0) 724 port->socket_id = 0; 725 } 726 } 727 else { 728 if (socket_num == UMA_NO_CONFIG) 729 port->socket_id = 0; 730 else 731 port->socket_id = socket_num; 732 } 733 } 734 735 q = RTE_MAX(nb_rxq, nb_txq); 736 if (q == 0) { 737 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n"); 738 return -1; 739 } 740 nb_fwd_streams_new = (streamid_t)(nb_ports * q); 741 if (nb_fwd_streams_new == nb_fwd_streams) 742 return 0; 743 /* clear the old */ 744 if (fwd_streams != NULL) { 745 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 746 if (fwd_streams[sm_id] == NULL) 747 continue; 748 rte_free(fwd_streams[sm_id]); 749 fwd_streams[sm_id] = NULL; 750 } 751 rte_free(fwd_streams); 752 fwd_streams = NULL; 753 } 754 755 /* init new */ 756 nb_fwd_streams = nb_fwd_streams_new; 757 fwd_streams = rte_zmalloc("testpmd: fwd_streams", 758 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE); 759 if (fwd_streams == NULL) 760 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) " 761 "failed\n", nb_fwd_streams); 762 763 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 764 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream", 765 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE); 766 if (fwd_streams[sm_id] == NULL) 767 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)" 768 " failed\n"); 769 } 770 771 return 0; 772 } 773 774 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 775 static void 776 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) 777 { 778 unsigned int total_burst; 779 unsigned int nb_burst; 780 unsigned int burst_stats[3]; 781 uint16_t pktnb_stats[3]; 782 uint16_t nb_pkt; 783 int burst_percent[3]; 784 785 /* 786 * First compute the total number of packet bursts and the 787 * two highest numbers of bursts of the same number of packets. 788 */ 789 total_burst = 0; 790 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0; 791 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0; 792 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) { 793 nb_burst = pbs->pkt_burst_spread[nb_pkt]; 794 if (nb_burst == 0) 795 continue; 796 total_burst += nb_burst; 797 if (nb_burst > burst_stats[0]) { 798 burst_stats[1] = burst_stats[0]; 799 pktnb_stats[1] = pktnb_stats[0]; 800 burst_stats[0] = nb_burst; 801 pktnb_stats[0] = nb_pkt; 802 } 803 } 804 if (total_burst == 0) 805 return; 806 burst_percent[0] = (burst_stats[0] * 100) / total_burst; 807 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst, 808 burst_percent[0], (int) pktnb_stats[0]); 809 if (burst_stats[0] == total_burst) { 810 printf("]\n"); 811 return; 812 } 813 if (burst_stats[0] + burst_stats[1] == total_burst) { 814 printf(" + %d%% of %d pkts]\n", 815 100 - burst_percent[0], pktnb_stats[1]); 816 return; 817 } 818 burst_percent[1] = (burst_stats[1] * 100) / total_burst; 819 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]); 820 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) { 821 printf(" + %d%% of others]\n", 100 - burst_percent[0]); 822 return; 823 } 824 printf(" + %d%% of %d pkts + %d%% of others]\n", 825 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]); 826 } 827 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */ 828 829 static void 830 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats) 831 { 832 struct rte_port *port; 833 uint8_t i; 834 835 static const char *fwd_stats_border = "----------------------"; 836 837 port = &ports[port_id]; 838 printf("\n %s Forward statistics for port %-2d %s\n", 839 fwd_stats_border, port_id, fwd_stats_border); 840 841 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 842 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 843 "%-"PRIu64"\n", 844 stats->ipackets, stats->imissed, 845 (uint64_t) (stats->ipackets + stats->imissed)); 846 847 if (cur_fwd_eng == &csum_fwd_engine) 848 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n", 849 port->rx_bad_ip_csum, port->rx_bad_l4_csum); 850 if ((stats->ierrors + stats->rx_nombuf) > 0) { 851 printf(" RX-error: %-"PRIu64"\n", stats->ierrors); 852 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf); 853 } 854 855 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 856 "%-"PRIu64"\n", 857 stats->opackets, port->tx_dropped, 858 (uint64_t) (stats->opackets + port->tx_dropped)); 859 } 860 else { 861 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:" 862 "%14"PRIu64"\n", 863 stats->ipackets, stats->imissed, 864 (uint64_t) (stats->ipackets + stats->imissed)); 865 866 if (cur_fwd_eng == &csum_fwd_engine) 867 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n", 868 port->rx_bad_ip_csum, port->rx_bad_l4_csum); 869 if ((stats->ierrors + stats->rx_nombuf) > 0) { 870 printf(" RX-error:%"PRIu64"\n", stats->ierrors); 871 printf(" RX-nombufs: %14"PRIu64"\n", 872 stats->rx_nombuf); 873 } 874 875 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:" 876 "%14"PRIu64"\n", 877 stats->opackets, port->tx_dropped, 878 (uint64_t) (stats->opackets + port->tx_dropped)); 879 } 880 881 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 882 if (port->rx_stream) 883 pkt_burst_stats_display("RX", 884 &port->rx_stream->rx_burst_stats); 885 if (port->tx_stream) 886 pkt_burst_stats_display("TX", 887 &port->tx_stream->tx_burst_stats); 888 #endif 889 890 if (port->rx_queue_stats_mapping_enabled) { 891 printf("\n"); 892 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 893 printf(" Stats reg %2d RX-packets:%14"PRIu64 894 " RX-errors:%14"PRIu64 895 " RX-bytes:%14"PRIu64"\n", 896 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]); 897 } 898 printf("\n"); 899 } 900 if (port->tx_queue_stats_mapping_enabled) { 901 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 902 printf(" Stats reg %2d TX-packets:%14"PRIu64 903 " TX-bytes:%14"PRIu64"\n", 904 i, stats->q_opackets[i], stats->q_obytes[i]); 905 } 906 } 907 908 printf(" %s--------------------------------%s\n", 909 fwd_stats_border, fwd_stats_border); 910 } 911 912 static void 913 fwd_stream_stats_display(streamid_t stream_id) 914 { 915 struct fwd_stream *fs; 916 static const char *fwd_top_stats_border = "-------"; 917 918 fs = fwd_streams[stream_id]; 919 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) && 920 (fs->fwd_dropped == 0)) 921 return; 922 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> " 923 "TX Port=%2d/Queue=%2d %s\n", 924 fwd_top_stats_border, fs->rx_port, fs->rx_queue, 925 fs->tx_port, fs->tx_queue, fwd_top_stats_border); 926 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u", 927 fs->rx_packets, fs->tx_packets, fs->fwd_dropped); 928 929 /* if checksum mode */ 930 if (cur_fwd_eng == &csum_fwd_engine) { 931 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: " 932 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum); 933 } 934 935 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 936 pkt_burst_stats_display("RX", &fs->rx_burst_stats); 937 pkt_burst_stats_display("TX", &fs->tx_burst_stats); 938 #endif 939 } 940 941 static void 942 flush_fwd_rx_queues(void) 943 { 944 struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; 945 portid_t rxp; 946 portid_t port_id; 947 queueid_t rxq; 948 uint16_t nb_rx; 949 uint16_t i; 950 uint8_t j; 951 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0; 952 uint64_t timer_period; 953 954 /* convert to number of cycles */ 955 timer_period = rte_get_timer_hz(); /* 1 second timeout */ 956 957 for (j = 0; j < 2; j++) { 958 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) { 959 for (rxq = 0; rxq < nb_rxq; rxq++) { 960 port_id = fwd_ports_ids[rxp]; 961 /** 962 * testpmd can stuck in the below do while loop 963 * if rte_eth_rx_burst() always returns nonzero 964 * packets. So timer is added to exit this loop 965 * after 1sec timer expiry. 966 */ 967 prev_tsc = rte_rdtsc(); 968 do { 969 nb_rx = rte_eth_rx_burst(port_id, rxq, 970 pkts_burst, MAX_PKT_BURST); 971 for (i = 0; i < nb_rx; i++) 972 rte_pktmbuf_free(pkts_burst[i]); 973 974 cur_tsc = rte_rdtsc(); 975 diff_tsc = cur_tsc - prev_tsc; 976 timer_tsc += diff_tsc; 977 } while ((nb_rx > 0) && 978 (timer_tsc < timer_period)); 979 timer_tsc = 0; 980 } 981 } 982 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */ 983 } 984 } 985 986 static void 987 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) 988 { 989 struct fwd_stream **fsm; 990 streamid_t nb_fs; 991 streamid_t sm_id; 992 #ifdef RTE_LIBRTE_BITRATE 993 uint64_t tics_per_1sec; 994 uint64_t tics_datum; 995 uint64_t tics_current; 996 uint8_t idx_port, cnt_ports; 997 998 cnt_ports = rte_eth_dev_count(); 999 tics_datum = rte_rdtsc(); 1000 tics_per_1sec = rte_get_timer_hz(); 1001 #endif 1002 fsm = &fwd_streams[fc->stream_idx]; 1003 nb_fs = fc->stream_nb; 1004 do { 1005 for (sm_id = 0; sm_id < nb_fs; sm_id++) 1006 (*pkt_fwd)(fsm[sm_id]); 1007 #ifdef RTE_LIBRTE_BITRATE 1008 if (bitrate_enabled != 0 && 1009 bitrate_lcore_id == rte_lcore_id()) { 1010 tics_current = rte_rdtsc(); 1011 if (tics_current - tics_datum >= tics_per_1sec) { 1012 /* Periodic bitrate calculation */ 1013 for (idx_port = 0; 1014 idx_port < cnt_ports; 1015 idx_port++) 1016 rte_stats_bitrate_calc(bitrate_data, 1017 idx_port); 1018 tics_datum = tics_current; 1019 } 1020 } 1021 #endif 1022 #ifdef RTE_LIBRTE_LATENCY_STATS 1023 if (latencystats_enabled != 0 && 1024 latencystats_lcore_id == rte_lcore_id()) 1025 rte_latencystats_update(); 1026 #endif 1027 1028 } while (! fc->stopped); 1029 } 1030 1031 static int 1032 start_pkt_forward_on_core(void *fwd_arg) 1033 { 1034 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg, 1035 cur_fwd_config.fwd_eng->packet_fwd); 1036 return 0; 1037 } 1038 1039 /* 1040 * Run the TXONLY packet forwarding engine to send a single burst of packets. 1041 * Used to start communication flows in network loopback test configurations. 1042 */ 1043 static int 1044 run_one_txonly_burst_on_core(void *fwd_arg) 1045 { 1046 struct fwd_lcore *fwd_lc; 1047 struct fwd_lcore tmp_lcore; 1048 1049 fwd_lc = (struct fwd_lcore *) fwd_arg; 1050 tmp_lcore = *fwd_lc; 1051 tmp_lcore.stopped = 1; 1052 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd); 1053 return 0; 1054 } 1055 1056 /* 1057 * Launch packet forwarding: 1058 * - Setup per-port forwarding context. 1059 * - launch logical cores with their forwarding configuration. 1060 */ 1061 static void 1062 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) 1063 { 1064 port_fwd_begin_t port_fwd_begin; 1065 unsigned int i; 1066 unsigned int lc_id; 1067 int diag; 1068 1069 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; 1070 if (port_fwd_begin != NULL) { 1071 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1072 (*port_fwd_begin)(fwd_ports_ids[i]); 1073 } 1074 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { 1075 lc_id = fwd_lcores_cpuids[i]; 1076 if ((interactive == 0) || (lc_id != rte_lcore_id())) { 1077 fwd_lcores[i]->stopped = 0; 1078 diag = rte_eal_remote_launch(pkt_fwd_on_lcore, 1079 fwd_lcores[i], lc_id); 1080 if (diag != 0) 1081 printf("launch lcore %u failed - diag=%d\n", 1082 lc_id, diag); 1083 } 1084 } 1085 } 1086 1087 /* 1088 * Launch packet forwarding configuration. 1089 */ 1090 void 1091 start_packet_forwarding(int with_tx_first) 1092 { 1093 port_fwd_begin_t port_fwd_begin; 1094 port_fwd_end_t port_fwd_end; 1095 struct rte_port *port; 1096 unsigned int i; 1097 portid_t pt_id; 1098 streamid_t sm_id; 1099 1100 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq) 1101 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n"); 1102 1103 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq) 1104 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n"); 1105 1106 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 && 1107 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) && 1108 (!nb_rxq || !nb_txq)) 1109 rte_exit(EXIT_FAILURE, 1110 "Either rxq or txq are 0, cannot use %s fwd mode\n", 1111 cur_fwd_eng->fwd_mode_name); 1112 1113 if (all_ports_started() == 0) { 1114 printf("Not all ports were started\n"); 1115 return; 1116 } 1117 if (test_done == 0) { 1118 printf("Packet forwarding already started\n"); 1119 return; 1120 } 1121 1122 if (init_fwd_streams() < 0) { 1123 printf("Fail from init_fwd_streams()\n"); 1124 return; 1125 } 1126 1127 if(dcb_test) { 1128 for (i = 0; i < nb_fwd_ports; i++) { 1129 pt_id = fwd_ports_ids[i]; 1130 port = &ports[pt_id]; 1131 if (!port->dcb_flag) { 1132 printf("In DCB mode, all forwarding ports must " 1133 "be configured in this mode.\n"); 1134 return; 1135 } 1136 } 1137 if (nb_fwd_lcores == 1) { 1138 printf("In DCB mode,the nb forwarding cores " 1139 "should be larger than 1.\n"); 1140 return; 1141 } 1142 } 1143 test_done = 0; 1144 1145 if(!no_flush_rx) 1146 flush_fwd_rx_queues(); 1147 1148 fwd_config_setup(); 1149 pkt_fwd_config_display(&cur_fwd_config); 1150 rxtx_config_display(); 1151 1152 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1153 pt_id = fwd_ports_ids[i]; 1154 port = &ports[pt_id]; 1155 rte_eth_stats_get(pt_id, &port->stats); 1156 port->tx_dropped = 0; 1157 1158 map_port_queue_stats_mapping_registers(pt_id, port); 1159 } 1160 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1161 fwd_streams[sm_id]->rx_packets = 0; 1162 fwd_streams[sm_id]->tx_packets = 0; 1163 fwd_streams[sm_id]->fwd_dropped = 0; 1164 fwd_streams[sm_id]->rx_bad_ip_csum = 0; 1165 fwd_streams[sm_id]->rx_bad_l4_csum = 0; 1166 1167 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1168 memset(&fwd_streams[sm_id]->rx_burst_stats, 0, 1169 sizeof(fwd_streams[sm_id]->rx_burst_stats)); 1170 memset(&fwd_streams[sm_id]->tx_burst_stats, 0, 1171 sizeof(fwd_streams[sm_id]->tx_burst_stats)); 1172 #endif 1173 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1174 fwd_streams[sm_id]->core_cycles = 0; 1175 #endif 1176 } 1177 if (with_tx_first) { 1178 port_fwd_begin = tx_only_engine.port_fwd_begin; 1179 if (port_fwd_begin != NULL) { 1180 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1181 (*port_fwd_begin)(fwd_ports_ids[i]); 1182 } 1183 while (with_tx_first--) { 1184 launch_packet_forwarding( 1185 run_one_txonly_burst_on_core); 1186 rte_eal_mp_wait_lcore(); 1187 } 1188 port_fwd_end = tx_only_engine.port_fwd_end; 1189 if (port_fwd_end != NULL) { 1190 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1191 (*port_fwd_end)(fwd_ports_ids[i]); 1192 } 1193 } 1194 launch_packet_forwarding(start_pkt_forward_on_core); 1195 } 1196 1197 void 1198 stop_packet_forwarding(void) 1199 { 1200 struct rte_eth_stats stats; 1201 struct rte_port *port; 1202 port_fwd_end_t port_fwd_end; 1203 int i; 1204 portid_t pt_id; 1205 streamid_t sm_id; 1206 lcoreid_t lc_id; 1207 uint64_t total_recv; 1208 uint64_t total_xmit; 1209 uint64_t total_rx_dropped; 1210 uint64_t total_tx_dropped; 1211 uint64_t total_rx_nombuf; 1212 uint64_t tx_dropped; 1213 uint64_t rx_bad_ip_csum; 1214 uint64_t rx_bad_l4_csum; 1215 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1216 uint64_t fwd_cycles; 1217 #endif 1218 1219 static const char *acc_stats_border = "+++++++++++++++"; 1220 1221 if (test_done) { 1222 printf("Packet forwarding not started\n"); 1223 return; 1224 } 1225 printf("Telling cores to stop..."); 1226 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) 1227 fwd_lcores[lc_id]->stopped = 1; 1228 printf("\nWaiting for lcores to finish...\n"); 1229 rte_eal_mp_wait_lcore(); 1230 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end; 1231 if (port_fwd_end != NULL) { 1232 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1233 pt_id = fwd_ports_ids[i]; 1234 (*port_fwd_end)(pt_id); 1235 } 1236 } 1237 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1238 fwd_cycles = 0; 1239 #endif 1240 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1241 if (cur_fwd_config.nb_fwd_streams > 1242 cur_fwd_config.nb_fwd_ports) { 1243 fwd_stream_stats_display(sm_id); 1244 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL; 1245 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL; 1246 } else { 1247 ports[fwd_streams[sm_id]->tx_port].tx_stream = 1248 fwd_streams[sm_id]; 1249 ports[fwd_streams[sm_id]->rx_port].rx_stream = 1250 fwd_streams[sm_id]; 1251 } 1252 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped; 1253 tx_dropped = (uint64_t) (tx_dropped + 1254 fwd_streams[sm_id]->fwd_dropped); 1255 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped; 1256 1257 rx_bad_ip_csum = 1258 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum; 1259 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum + 1260 fwd_streams[sm_id]->rx_bad_ip_csum); 1261 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum = 1262 rx_bad_ip_csum; 1263 1264 rx_bad_l4_csum = 1265 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum; 1266 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum + 1267 fwd_streams[sm_id]->rx_bad_l4_csum); 1268 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum = 1269 rx_bad_l4_csum; 1270 1271 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1272 fwd_cycles = (uint64_t) (fwd_cycles + 1273 fwd_streams[sm_id]->core_cycles); 1274 #endif 1275 } 1276 total_recv = 0; 1277 total_xmit = 0; 1278 total_rx_dropped = 0; 1279 total_tx_dropped = 0; 1280 total_rx_nombuf = 0; 1281 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1282 pt_id = fwd_ports_ids[i]; 1283 1284 port = &ports[pt_id]; 1285 rte_eth_stats_get(pt_id, &stats); 1286 stats.ipackets -= port->stats.ipackets; 1287 port->stats.ipackets = 0; 1288 stats.opackets -= port->stats.opackets; 1289 port->stats.opackets = 0; 1290 stats.ibytes -= port->stats.ibytes; 1291 port->stats.ibytes = 0; 1292 stats.obytes -= port->stats.obytes; 1293 port->stats.obytes = 0; 1294 stats.imissed -= port->stats.imissed; 1295 port->stats.imissed = 0; 1296 stats.oerrors -= port->stats.oerrors; 1297 port->stats.oerrors = 0; 1298 stats.rx_nombuf -= port->stats.rx_nombuf; 1299 port->stats.rx_nombuf = 0; 1300 1301 total_recv += stats.ipackets; 1302 total_xmit += stats.opackets; 1303 total_rx_dropped += stats.imissed; 1304 total_tx_dropped += port->tx_dropped; 1305 total_rx_nombuf += stats.rx_nombuf; 1306 1307 fwd_port_stats_display(pt_id, &stats); 1308 } 1309 1310 printf("\n %s Accumulated forward statistics for all ports" 1311 "%s\n", 1312 acc_stats_border, acc_stats_border); 1313 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 1314 "%-"PRIu64"\n" 1315 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 1316 "%-"PRIu64"\n", 1317 total_recv, total_rx_dropped, total_recv + total_rx_dropped, 1318 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); 1319 if (total_rx_nombuf > 0) 1320 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); 1321 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" 1322 "%s\n", 1323 acc_stats_border, acc_stats_border); 1324 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1325 if (total_recv > 0) 1326 printf("\n CPU cycles/packet=%u (total cycles=" 1327 "%"PRIu64" / total RX packets=%"PRIu64")\n", 1328 (unsigned int)(fwd_cycles / total_recv), 1329 fwd_cycles, total_recv); 1330 #endif 1331 printf("\nDone.\n"); 1332 test_done = 1; 1333 } 1334 1335 void 1336 dev_set_link_up(portid_t pid) 1337 { 1338 if (rte_eth_dev_set_link_up(pid) < 0) 1339 printf("\nSet link up fail.\n"); 1340 } 1341 1342 void 1343 dev_set_link_down(portid_t pid) 1344 { 1345 if (rte_eth_dev_set_link_down(pid) < 0) 1346 printf("\nSet link down fail.\n"); 1347 } 1348 1349 static int 1350 all_ports_started(void) 1351 { 1352 portid_t pi; 1353 struct rte_port *port; 1354 1355 RTE_ETH_FOREACH_DEV(pi) { 1356 port = &ports[pi]; 1357 /* Check if there is a port which is not started */ 1358 if ((port->port_status != RTE_PORT_STARTED) && 1359 (port->slave_flag == 0)) 1360 return 0; 1361 } 1362 1363 /* No port is not started */ 1364 return 1; 1365 } 1366 1367 int 1368 port_is_stopped(portid_t port_id) 1369 { 1370 struct rte_port *port = &ports[port_id]; 1371 1372 if ((port->port_status != RTE_PORT_STOPPED) && 1373 (port->slave_flag == 0)) 1374 return 0; 1375 return 1; 1376 } 1377 1378 int 1379 all_ports_stopped(void) 1380 { 1381 portid_t pi; 1382 1383 RTE_ETH_FOREACH_DEV(pi) { 1384 if (!port_is_stopped(pi)) 1385 return 0; 1386 } 1387 1388 return 1; 1389 } 1390 1391 int 1392 port_is_started(portid_t port_id) 1393 { 1394 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1395 return 0; 1396 1397 if (ports[port_id].port_status != RTE_PORT_STARTED) 1398 return 0; 1399 1400 return 1; 1401 } 1402 1403 static int 1404 port_is_closed(portid_t port_id) 1405 { 1406 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1407 return 0; 1408 1409 if (ports[port_id].port_status != RTE_PORT_CLOSED) 1410 return 0; 1411 1412 return 1; 1413 } 1414 1415 int 1416 start_port(portid_t pid) 1417 { 1418 int diag, need_check_link_status = -1; 1419 portid_t pi; 1420 queueid_t qi; 1421 struct rte_port *port; 1422 struct ether_addr mac_addr; 1423 enum rte_eth_event_type event_type; 1424 1425 if (port_id_is_invalid(pid, ENABLED_WARN)) 1426 return 0; 1427 1428 if(dcb_config) 1429 dcb_test = 1; 1430 RTE_ETH_FOREACH_DEV(pi) { 1431 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1432 continue; 1433 1434 need_check_link_status = 0; 1435 port = &ports[pi]; 1436 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED, 1437 RTE_PORT_HANDLING) == 0) { 1438 printf("Port %d is now not stopped\n", pi); 1439 continue; 1440 } 1441 1442 if (port->need_reconfig > 0) { 1443 port->need_reconfig = 0; 1444 1445 if (flow_isolate_all) { 1446 int ret = port_flow_isolate(pi, 1); 1447 if (ret) { 1448 printf("Failed to apply isolated" 1449 " mode on port %d\n", pi); 1450 return -1; 1451 } 1452 } 1453 1454 printf("Configuring Port %d (socket %u)\n", pi, 1455 port->socket_id); 1456 /* configure port */ 1457 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq, 1458 &(port->dev_conf)); 1459 if (diag != 0) { 1460 if (rte_atomic16_cmpset(&(port->port_status), 1461 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1462 printf("Port %d can not be set back " 1463 "to stopped\n", pi); 1464 printf("Fail to configure port %d\n", pi); 1465 /* try to reconfigure port next time */ 1466 port->need_reconfig = 1; 1467 return -1; 1468 } 1469 } 1470 if (port->need_reconfig_queues > 0) { 1471 port->need_reconfig_queues = 0; 1472 port->tx_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE; 1473 /* Apply Tx offloads configuration */ 1474 port->tx_conf.offloads = port->dev_conf.txmode.offloads; 1475 /* setup tx queues */ 1476 for (qi = 0; qi < nb_txq; qi++) { 1477 if ((numa_support) && 1478 (txring_numa[pi] != NUMA_NO_CONFIG)) 1479 diag = rte_eth_tx_queue_setup(pi, qi, 1480 nb_txd,txring_numa[pi], 1481 &(port->tx_conf)); 1482 else 1483 diag = rte_eth_tx_queue_setup(pi, qi, 1484 nb_txd,port->socket_id, 1485 &(port->tx_conf)); 1486 1487 if (diag == 0) 1488 continue; 1489 1490 /* Fail to setup tx queue, return */ 1491 if (rte_atomic16_cmpset(&(port->port_status), 1492 RTE_PORT_HANDLING, 1493 RTE_PORT_STOPPED) == 0) 1494 printf("Port %d can not be set back " 1495 "to stopped\n", pi); 1496 printf("Fail to configure port %d tx queues\n", pi); 1497 /* try to reconfigure queues next time */ 1498 port->need_reconfig_queues = 1; 1499 return -1; 1500 } 1501 /* Apply Rx offloads configuration */ 1502 port->rx_conf.offloads = port->dev_conf.rxmode.offloads; 1503 /* setup rx queues */ 1504 for (qi = 0; qi < nb_rxq; qi++) { 1505 if ((numa_support) && 1506 (rxring_numa[pi] != NUMA_NO_CONFIG)) { 1507 struct rte_mempool * mp = 1508 mbuf_pool_find(rxring_numa[pi]); 1509 if (mp == NULL) { 1510 printf("Failed to setup RX queue:" 1511 "No mempool allocation" 1512 " on the socket %d\n", 1513 rxring_numa[pi]); 1514 return -1; 1515 } 1516 1517 diag = rte_eth_rx_queue_setup(pi, qi, 1518 nb_rxd,rxring_numa[pi], 1519 &(port->rx_conf),mp); 1520 } else { 1521 struct rte_mempool *mp = 1522 mbuf_pool_find(port->socket_id); 1523 if (mp == NULL) { 1524 printf("Failed to setup RX queue:" 1525 "No mempool allocation" 1526 " on the socket %d\n", 1527 port->socket_id); 1528 return -1; 1529 } 1530 diag = rte_eth_rx_queue_setup(pi, qi, 1531 nb_rxd,port->socket_id, 1532 &(port->rx_conf), mp); 1533 } 1534 if (diag == 0) 1535 continue; 1536 1537 /* Fail to setup rx queue, return */ 1538 if (rte_atomic16_cmpset(&(port->port_status), 1539 RTE_PORT_HANDLING, 1540 RTE_PORT_STOPPED) == 0) 1541 printf("Port %d can not be set back " 1542 "to stopped\n", pi); 1543 printf("Fail to configure port %d rx queues\n", pi); 1544 /* try to reconfigure queues next time */ 1545 port->need_reconfig_queues = 1; 1546 return -1; 1547 } 1548 } 1549 1550 for (event_type = RTE_ETH_EVENT_UNKNOWN; 1551 event_type < RTE_ETH_EVENT_MAX; 1552 event_type++) { 1553 diag = rte_eth_dev_callback_register(pi, 1554 event_type, 1555 eth_event_callback, 1556 NULL); 1557 if (diag) { 1558 printf("Failed to setup even callback for event %d\n", 1559 event_type); 1560 return -1; 1561 } 1562 } 1563 1564 /* start port */ 1565 if (rte_eth_dev_start(pi) < 0) { 1566 printf("Fail to start port %d\n", pi); 1567 1568 /* Fail to setup rx queue, return */ 1569 if (rte_atomic16_cmpset(&(port->port_status), 1570 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1571 printf("Port %d can not be set back to " 1572 "stopped\n", pi); 1573 continue; 1574 } 1575 1576 if (rte_atomic16_cmpset(&(port->port_status), 1577 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0) 1578 printf("Port %d can not be set into started\n", pi); 1579 1580 rte_eth_macaddr_get(pi, &mac_addr); 1581 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi, 1582 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 1583 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 1584 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]); 1585 1586 /* at least one port started, need checking link status */ 1587 need_check_link_status = 1; 1588 } 1589 1590 if (need_check_link_status == 1 && !no_link_check) 1591 check_all_ports_link_status(RTE_PORT_ALL); 1592 else if (need_check_link_status == 0) 1593 printf("Please stop the ports first\n"); 1594 1595 printf("Done\n"); 1596 return 0; 1597 } 1598 1599 void 1600 stop_port(portid_t pid) 1601 { 1602 portid_t pi; 1603 struct rte_port *port; 1604 int need_check_link_status = 0; 1605 1606 if (dcb_test) { 1607 dcb_test = 0; 1608 dcb_config = 0; 1609 } 1610 1611 if (port_id_is_invalid(pid, ENABLED_WARN)) 1612 return; 1613 1614 printf("Stopping ports...\n"); 1615 1616 RTE_ETH_FOREACH_DEV(pi) { 1617 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1618 continue; 1619 1620 if (port_is_forwarding(pi) != 0 && test_done == 0) { 1621 printf("Please remove port %d from forwarding configuration.\n", pi); 1622 continue; 1623 } 1624 1625 if (port_is_bonding_slave(pi)) { 1626 printf("Please remove port %d from bonded device.\n", pi); 1627 continue; 1628 } 1629 1630 port = &ports[pi]; 1631 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED, 1632 RTE_PORT_HANDLING) == 0) 1633 continue; 1634 1635 rte_eth_dev_stop(pi); 1636 1637 if (rte_atomic16_cmpset(&(port->port_status), 1638 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1639 printf("Port %d can not be set into stopped\n", pi); 1640 need_check_link_status = 1; 1641 } 1642 if (need_check_link_status && !no_link_check) 1643 check_all_ports_link_status(RTE_PORT_ALL); 1644 1645 printf("Done\n"); 1646 } 1647 1648 void 1649 close_port(portid_t pid) 1650 { 1651 portid_t pi; 1652 struct rte_port *port; 1653 1654 if (port_id_is_invalid(pid, ENABLED_WARN)) 1655 return; 1656 1657 printf("Closing ports...\n"); 1658 1659 RTE_ETH_FOREACH_DEV(pi) { 1660 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1661 continue; 1662 1663 if (port_is_forwarding(pi) != 0 && test_done == 0) { 1664 printf("Please remove port %d from forwarding configuration.\n", pi); 1665 continue; 1666 } 1667 1668 if (port_is_bonding_slave(pi)) { 1669 printf("Please remove port %d from bonded device.\n", pi); 1670 continue; 1671 } 1672 1673 port = &ports[pi]; 1674 if (rte_atomic16_cmpset(&(port->port_status), 1675 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) { 1676 printf("Port %d is already closed\n", pi); 1677 continue; 1678 } 1679 1680 if (rte_atomic16_cmpset(&(port->port_status), 1681 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) { 1682 printf("Port %d is now not stopped\n", pi); 1683 continue; 1684 } 1685 1686 if (port->flow_list) 1687 port_flow_flush(pi); 1688 rte_eth_dev_close(pi); 1689 1690 if (rte_atomic16_cmpset(&(port->port_status), 1691 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0) 1692 printf("Port %d cannot be set to closed\n", pi); 1693 } 1694 1695 printf("Done\n"); 1696 } 1697 1698 void 1699 reset_port(portid_t pid) 1700 { 1701 int diag; 1702 portid_t pi; 1703 struct rte_port *port; 1704 1705 if (port_id_is_invalid(pid, ENABLED_WARN)) 1706 return; 1707 1708 printf("Resetting ports...\n"); 1709 1710 RTE_ETH_FOREACH_DEV(pi) { 1711 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1712 continue; 1713 1714 if (port_is_forwarding(pi) != 0 && test_done == 0) { 1715 printf("Please remove port %d from forwarding " 1716 "configuration.\n", pi); 1717 continue; 1718 } 1719 1720 if (port_is_bonding_slave(pi)) { 1721 printf("Please remove port %d from bonded device.\n", 1722 pi); 1723 continue; 1724 } 1725 1726 diag = rte_eth_dev_reset(pi); 1727 if (diag == 0) { 1728 port = &ports[pi]; 1729 port->need_reconfig = 1; 1730 port->need_reconfig_queues = 1; 1731 } else { 1732 printf("Failed to reset port %d. diag=%d\n", pi, diag); 1733 } 1734 } 1735 1736 printf("Done\n"); 1737 } 1738 1739 void 1740 attach_port(char *identifier) 1741 { 1742 portid_t pi = 0; 1743 unsigned int socket_id; 1744 1745 printf("Attaching a new port...\n"); 1746 1747 if (identifier == NULL) { 1748 printf("Invalid parameters are specified\n"); 1749 return; 1750 } 1751 1752 if (rte_eth_dev_attach(identifier, &pi)) 1753 return; 1754 1755 socket_id = (unsigned)rte_eth_dev_socket_id(pi); 1756 /* if socket_id is invalid, set to 0 */ 1757 if (check_socket_id(socket_id) < 0) 1758 socket_id = 0; 1759 reconfig(pi, socket_id); 1760 rte_eth_promiscuous_enable(pi); 1761 1762 nb_ports = rte_eth_dev_count(); 1763 1764 ports[pi].port_status = RTE_PORT_STOPPED; 1765 1766 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports); 1767 printf("Done\n"); 1768 } 1769 1770 void 1771 detach_port(portid_t port_id) 1772 { 1773 char name[RTE_ETH_NAME_MAX_LEN]; 1774 1775 printf("Detaching a port...\n"); 1776 1777 if (!port_is_closed(port_id)) { 1778 printf("Please close port first\n"); 1779 return; 1780 } 1781 1782 if (ports[port_id].flow_list) 1783 port_flow_flush(port_id); 1784 1785 if (rte_eth_dev_detach(port_id, name)) { 1786 TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name); 1787 return; 1788 } 1789 1790 nb_ports = rte_eth_dev_count(); 1791 1792 printf("Port '%s' is detached. Now total ports is %d\n", 1793 name, nb_ports); 1794 printf("Done\n"); 1795 return; 1796 } 1797 1798 void 1799 pmd_test_exit(void) 1800 { 1801 portid_t pt_id; 1802 1803 if (test_done == 0) 1804 stop_packet_forwarding(); 1805 1806 if (ports != NULL) { 1807 no_link_check = 1; 1808 RTE_ETH_FOREACH_DEV(pt_id) { 1809 printf("\nShutting down port %d...\n", pt_id); 1810 fflush(stdout); 1811 stop_port(pt_id); 1812 close_port(pt_id); 1813 } 1814 } 1815 printf("\nBye...\n"); 1816 } 1817 1818 typedef void (*cmd_func_t)(void); 1819 struct pmd_test_command { 1820 const char *cmd_name; 1821 cmd_func_t cmd_func; 1822 }; 1823 1824 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0])) 1825 1826 /* Check the link status of all ports in up to 9s, and print them finally */ 1827 static void 1828 check_all_ports_link_status(uint32_t port_mask) 1829 { 1830 #define CHECK_INTERVAL 100 /* 100ms */ 1831 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 1832 portid_t portid; 1833 uint8_t count, all_ports_up, print_flag = 0; 1834 struct rte_eth_link link; 1835 1836 printf("Checking link statuses...\n"); 1837 fflush(stdout); 1838 for (count = 0; count <= MAX_CHECK_TIME; count++) { 1839 all_ports_up = 1; 1840 RTE_ETH_FOREACH_DEV(portid) { 1841 if ((port_mask & (1 << portid)) == 0) 1842 continue; 1843 memset(&link, 0, sizeof(link)); 1844 rte_eth_link_get_nowait(portid, &link); 1845 /* print link status if flag set */ 1846 if (print_flag == 1) { 1847 if (link.link_status) 1848 printf( 1849 "Port%d Link Up. speed %u Mbps- %s\n", 1850 portid, link.link_speed, 1851 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 1852 ("full-duplex") : ("half-duplex\n")); 1853 else 1854 printf("Port %d Link Down\n", portid); 1855 continue; 1856 } 1857 /* clear all_ports_up flag if any link down */ 1858 if (link.link_status == ETH_LINK_DOWN) { 1859 all_ports_up = 0; 1860 break; 1861 } 1862 } 1863 /* after finally printing all link status, get out */ 1864 if (print_flag == 1) 1865 break; 1866 1867 if (all_ports_up == 0) { 1868 fflush(stdout); 1869 rte_delay_ms(CHECK_INTERVAL); 1870 } 1871 1872 /* set the print_flag if all ports up or timeout */ 1873 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 1874 print_flag = 1; 1875 } 1876 1877 if (lsc_interrupt) 1878 break; 1879 } 1880 } 1881 1882 static void 1883 rmv_event_callback(void *arg) 1884 { 1885 struct rte_eth_dev *dev; 1886 portid_t port_id = (intptr_t)arg; 1887 1888 RTE_ETH_VALID_PORTID_OR_RET(port_id); 1889 dev = &rte_eth_devices[port_id]; 1890 1891 stop_port(port_id); 1892 close_port(port_id); 1893 printf("removing device %s\n", dev->device->name); 1894 if (rte_eal_dev_detach(dev->device)) 1895 TESTPMD_LOG(ERR, "Failed to detach device %s\n", 1896 dev->device->name); 1897 } 1898 1899 /* This function is used by the interrupt thread */ 1900 static int 1901 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param, 1902 void *ret_param) 1903 { 1904 static const char * const event_desc[] = { 1905 [RTE_ETH_EVENT_UNKNOWN] = "Unknown", 1906 [RTE_ETH_EVENT_INTR_LSC] = "LSC", 1907 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state", 1908 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset", 1909 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox", 1910 [RTE_ETH_EVENT_MACSEC] = "MACsec", 1911 [RTE_ETH_EVENT_INTR_RMV] = "device removal", 1912 [RTE_ETH_EVENT_MAX] = NULL, 1913 }; 1914 1915 RTE_SET_USED(param); 1916 RTE_SET_USED(ret_param); 1917 1918 if (type >= RTE_ETH_EVENT_MAX) { 1919 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n", 1920 port_id, __func__, type); 1921 fflush(stderr); 1922 } else if (event_print_mask & (UINT32_C(1) << type)) { 1923 printf("\nPort %" PRIu8 ": %s event\n", port_id, 1924 event_desc[type]); 1925 fflush(stdout); 1926 } 1927 1928 switch (type) { 1929 case RTE_ETH_EVENT_INTR_RMV: 1930 if (rte_eal_alarm_set(100000, 1931 rmv_event_callback, (void *)(intptr_t)port_id)) 1932 fprintf(stderr, "Could not set up deferred device removal\n"); 1933 break; 1934 default: 1935 break; 1936 } 1937 return 0; 1938 } 1939 1940 static int 1941 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) 1942 { 1943 uint16_t i; 1944 int diag; 1945 uint8_t mapping_found = 0; 1946 1947 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 1948 if ((tx_queue_stats_mappings[i].port_id == port_id) && 1949 (tx_queue_stats_mappings[i].queue_id < nb_txq )) { 1950 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id, 1951 tx_queue_stats_mappings[i].queue_id, 1952 tx_queue_stats_mappings[i].stats_counter_id); 1953 if (diag != 0) 1954 return diag; 1955 mapping_found = 1; 1956 } 1957 } 1958 if (mapping_found) 1959 port->tx_queue_stats_mapping_enabled = 1; 1960 return 0; 1961 } 1962 1963 static int 1964 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) 1965 { 1966 uint16_t i; 1967 int diag; 1968 uint8_t mapping_found = 0; 1969 1970 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 1971 if ((rx_queue_stats_mappings[i].port_id == port_id) && 1972 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) { 1973 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 1974 rx_queue_stats_mappings[i].queue_id, 1975 rx_queue_stats_mappings[i].stats_counter_id); 1976 if (diag != 0) 1977 return diag; 1978 mapping_found = 1; 1979 } 1980 } 1981 if (mapping_found) 1982 port->rx_queue_stats_mapping_enabled = 1; 1983 return 0; 1984 } 1985 1986 static void 1987 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port) 1988 { 1989 int diag = 0; 1990 1991 diag = set_tx_queue_stats_mapping_registers(pi, port); 1992 if (diag != 0) { 1993 if (diag == -ENOTSUP) { 1994 port->tx_queue_stats_mapping_enabled = 0; 1995 printf("TX queue stats mapping not supported port id=%d\n", pi); 1996 } 1997 else 1998 rte_exit(EXIT_FAILURE, 1999 "set_tx_queue_stats_mapping_registers " 2000 "failed for port id=%d diag=%d\n", 2001 pi, diag); 2002 } 2003 2004 diag = set_rx_queue_stats_mapping_registers(pi, port); 2005 if (diag != 0) { 2006 if (diag == -ENOTSUP) { 2007 port->rx_queue_stats_mapping_enabled = 0; 2008 printf("RX queue stats mapping not supported port id=%d\n", pi); 2009 } 2010 else 2011 rte_exit(EXIT_FAILURE, 2012 "set_rx_queue_stats_mapping_registers " 2013 "failed for port id=%d diag=%d\n", 2014 pi, diag); 2015 } 2016 } 2017 2018 static void 2019 rxtx_port_config(struct rte_port *port) 2020 { 2021 port->rx_conf = port->dev_info.default_rxconf; 2022 port->tx_conf = port->dev_info.default_txconf; 2023 2024 /* Check if any RX/TX parameters have been passed */ 2025 if (rx_pthresh != RTE_PMD_PARAM_UNSET) 2026 port->rx_conf.rx_thresh.pthresh = rx_pthresh; 2027 2028 if (rx_hthresh != RTE_PMD_PARAM_UNSET) 2029 port->rx_conf.rx_thresh.hthresh = rx_hthresh; 2030 2031 if (rx_wthresh != RTE_PMD_PARAM_UNSET) 2032 port->rx_conf.rx_thresh.wthresh = rx_wthresh; 2033 2034 if (rx_free_thresh != RTE_PMD_PARAM_UNSET) 2035 port->rx_conf.rx_free_thresh = rx_free_thresh; 2036 2037 if (rx_drop_en != RTE_PMD_PARAM_UNSET) 2038 port->rx_conf.rx_drop_en = rx_drop_en; 2039 2040 if (tx_pthresh != RTE_PMD_PARAM_UNSET) 2041 port->tx_conf.tx_thresh.pthresh = tx_pthresh; 2042 2043 if (tx_hthresh != RTE_PMD_PARAM_UNSET) 2044 port->tx_conf.tx_thresh.hthresh = tx_hthresh; 2045 2046 if (tx_wthresh != RTE_PMD_PARAM_UNSET) 2047 port->tx_conf.tx_thresh.wthresh = tx_wthresh; 2048 2049 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET) 2050 port->tx_conf.tx_rs_thresh = tx_rs_thresh; 2051 2052 if (tx_free_thresh != RTE_PMD_PARAM_UNSET) 2053 port->tx_conf.tx_free_thresh = tx_free_thresh; 2054 } 2055 2056 void 2057 init_port_config(void) 2058 { 2059 portid_t pid; 2060 struct rte_port *port; 2061 2062 RTE_ETH_FOREACH_DEV(pid) { 2063 port = &ports[pid]; 2064 port->dev_conf.rxmode = rx_mode; 2065 port->dev_conf.fdir_conf = fdir_conf; 2066 if (nb_rxq > 1) { 2067 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 2068 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf; 2069 } else { 2070 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 2071 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; 2072 } 2073 2074 if (port->dcb_flag == 0) { 2075 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) 2076 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS; 2077 else 2078 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE; 2079 } 2080 2081 rxtx_port_config(port); 2082 2083 rte_eth_macaddr_get(pid, &port->eth_addr); 2084 2085 map_port_queue_stats_mapping_registers(pid, port); 2086 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS 2087 rte_pmd_ixgbe_bypass_init(pid); 2088 #endif 2089 2090 if (lsc_interrupt && 2091 (rte_eth_devices[pid].data->dev_flags & 2092 RTE_ETH_DEV_INTR_LSC)) 2093 port->dev_conf.intr_conf.lsc = 1; 2094 if (rmv_interrupt && 2095 (rte_eth_devices[pid].data->dev_flags & 2096 RTE_ETH_DEV_INTR_RMV)) 2097 port->dev_conf.intr_conf.rmv = 1; 2098 2099 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED 2100 /* Detect softnic port */ 2101 if (!strcmp(port->dev_info.driver_name, "net_softnic")) { 2102 port->softnic_enable = 1; 2103 memset(&port->softport, 0, sizeof(struct softnic_port)); 2104 2105 if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm")) 2106 port->softport.tm_flag = 1; 2107 } 2108 #endif 2109 } 2110 } 2111 2112 void set_port_slave_flag(portid_t slave_pid) 2113 { 2114 struct rte_port *port; 2115 2116 port = &ports[slave_pid]; 2117 port->slave_flag = 1; 2118 } 2119 2120 void clear_port_slave_flag(portid_t slave_pid) 2121 { 2122 struct rte_port *port; 2123 2124 port = &ports[slave_pid]; 2125 port->slave_flag = 0; 2126 } 2127 2128 uint8_t port_is_bonding_slave(portid_t slave_pid) 2129 { 2130 struct rte_port *port; 2131 2132 port = &ports[slave_pid]; 2133 return port->slave_flag; 2134 } 2135 2136 const uint16_t vlan_tags[] = { 2137 0, 1, 2, 3, 4, 5, 6, 7, 2138 8, 9, 10, 11, 12, 13, 14, 15, 2139 16, 17, 18, 19, 20, 21, 22, 23, 2140 24, 25, 26, 27, 28, 29, 30, 31 2141 }; 2142 2143 static int 2144 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, 2145 enum dcb_mode_enable dcb_mode, 2146 enum rte_eth_nb_tcs num_tcs, 2147 uint8_t pfc_en) 2148 { 2149 uint8_t i; 2150 2151 /* 2152 * Builds up the correct configuration for dcb+vt based on the vlan tags array 2153 * given above, and the number of traffic classes available for use. 2154 */ 2155 if (dcb_mode == DCB_VT_ENABLED) { 2156 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 2157 ð_conf->rx_adv_conf.vmdq_dcb_conf; 2158 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = 2159 ð_conf->tx_adv_conf.vmdq_dcb_tx_conf; 2160 2161 /* VMDQ+DCB RX and TX configurations */ 2162 vmdq_rx_conf->enable_default_pool = 0; 2163 vmdq_rx_conf->default_pool = 0; 2164 vmdq_rx_conf->nb_queue_pools = 2165 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 2166 vmdq_tx_conf->nb_queue_pools = 2167 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 2168 2169 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools; 2170 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) { 2171 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i]; 2172 vmdq_rx_conf->pool_map[i].pools = 2173 1 << (i % vmdq_rx_conf->nb_queue_pools); 2174 } 2175 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 2176 vmdq_rx_conf->dcb_tc[i] = i % num_tcs; 2177 vmdq_tx_conf->dcb_tc[i] = i % num_tcs; 2178 } 2179 2180 /* set DCB mode of RX and TX of multiple queues */ 2181 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB; 2182 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; 2183 } else { 2184 struct rte_eth_dcb_rx_conf *rx_conf = 2185 ð_conf->rx_adv_conf.dcb_rx_conf; 2186 struct rte_eth_dcb_tx_conf *tx_conf = 2187 ð_conf->tx_adv_conf.dcb_tx_conf; 2188 2189 rx_conf->nb_tcs = num_tcs; 2190 tx_conf->nb_tcs = num_tcs; 2191 2192 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 2193 rx_conf->dcb_tc[i] = i % num_tcs; 2194 tx_conf->dcb_tc[i] = i % num_tcs; 2195 } 2196 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS; 2197 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf; 2198 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB; 2199 } 2200 2201 if (pfc_en) 2202 eth_conf->dcb_capability_en = 2203 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT; 2204 else 2205 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; 2206 2207 return 0; 2208 } 2209 2210 int 2211 init_port_dcb_config(portid_t pid, 2212 enum dcb_mode_enable dcb_mode, 2213 enum rte_eth_nb_tcs num_tcs, 2214 uint8_t pfc_en) 2215 { 2216 struct rte_eth_conf port_conf; 2217 struct rte_port *rte_port; 2218 int retval; 2219 uint16_t i; 2220 2221 rte_port = &ports[pid]; 2222 2223 memset(&port_conf, 0, sizeof(struct rte_eth_conf)); 2224 /* Enter DCB configuration status */ 2225 dcb_config = 1; 2226 2227 /*set configuration of DCB in vt mode and DCB in non-vt mode*/ 2228 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en); 2229 if (retval < 0) 2230 return retval; 2231 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 2232 2233 /** 2234 * Write the configuration into the device. 2235 * Set the numbers of RX & TX queues to 0, so 2236 * the RX & TX queues will not be setup. 2237 */ 2238 rte_eth_dev_configure(pid, 0, 0, &port_conf); 2239 2240 rte_eth_dev_info_get(pid, &rte_port->dev_info); 2241 2242 /* If dev_info.vmdq_pool_base is greater than 0, 2243 * the queue id of vmdq pools is started after pf queues. 2244 */ 2245 if (dcb_mode == DCB_VT_ENABLED && 2246 rte_port->dev_info.vmdq_pool_base > 0) { 2247 printf("VMDQ_DCB multi-queue mode is nonsensical" 2248 " for port %d.", pid); 2249 return -1; 2250 } 2251 2252 /* Assume the ports in testpmd have the same dcb capability 2253 * and has the same number of rxq and txq in dcb mode 2254 */ 2255 if (dcb_mode == DCB_VT_ENABLED) { 2256 if (rte_port->dev_info.max_vfs > 0) { 2257 nb_rxq = rte_port->dev_info.nb_rx_queues; 2258 nb_txq = rte_port->dev_info.nb_tx_queues; 2259 } else { 2260 nb_rxq = rte_port->dev_info.max_rx_queues; 2261 nb_txq = rte_port->dev_info.max_tx_queues; 2262 } 2263 } else { 2264 /*if vt is disabled, use all pf queues */ 2265 if (rte_port->dev_info.vmdq_pool_base == 0) { 2266 nb_rxq = rte_port->dev_info.max_rx_queues; 2267 nb_txq = rte_port->dev_info.max_tx_queues; 2268 } else { 2269 nb_rxq = (queueid_t)num_tcs; 2270 nb_txq = (queueid_t)num_tcs; 2271 2272 } 2273 } 2274 rx_free_thresh = 64; 2275 2276 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf)); 2277 2278 rxtx_port_config(rte_port); 2279 /* VLAN filter */ 2280 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 2281 for (i = 0; i < RTE_DIM(vlan_tags); i++) 2282 rx_vft_set(pid, vlan_tags[i], 1); 2283 2284 rte_eth_macaddr_get(pid, &rte_port->eth_addr); 2285 map_port_queue_stats_mapping_registers(pid, rte_port); 2286 2287 rte_port->dcb_flag = 1; 2288 2289 return 0; 2290 } 2291 2292 static void 2293 init_port(void) 2294 { 2295 /* Configuration of Ethernet ports. */ 2296 ports = rte_zmalloc("testpmd: ports", 2297 sizeof(struct rte_port) * RTE_MAX_ETHPORTS, 2298 RTE_CACHE_LINE_SIZE); 2299 if (ports == NULL) { 2300 rte_exit(EXIT_FAILURE, 2301 "rte_zmalloc(%d struct rte_port) failed\n", 2302 RTE_MAX_ETHPORTS); 2303 } 2304 } 2305 2306 static void 2307 force_quit(void) 2308 { 2309 pmd_test_exit(); 2310 prompt_exit(); 2311 } 2312 2313 static void 2314 print_stats(void) 2315 { 2316 uint8_t i; 2317 const char clr[] = { 27, '[', '2', 'J', '\0' }; 2318 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' }; 2319 2320 /* Clear screen and move to top left */ 2321 printf("%s%s", clr, top_left); 2322 2323 printf("\nPort statistics ===================================="); 2324 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 2325 nic_stats_display(fwd_ports_ids[i]); 2326 } 2327 2328 static void 2329 signal_handler(int signum) 2330 { 2331 if (signum == SIGINT || signum == SIGTERM) { 2332 printf("\nSignal %d received, preparing to exit...\n", 2333 signum); 2334 #ifdef RTE_LIBRTE_PDUMP 2335 /* uninitialize packet capture framework */ 2336 rte_pdump_uninit(); 2337 #endif 2338 #ifdef RTE_LIBRTE_LATENCY_STATS 2339 rte_latencystats_uninit(); 2340 #endif 2341 force_quit(); 2342 /* Set flag to indicate the force termination. */ 2343 f_quit = 1; 2344 /* exit with the expected status */ 2345 signal(signum, SIG_DFL); 2346 kill(getpid(), signum); 2347 } 2348 } 2349 2350 int 2351 main(int argc, char** argv) 2352 { 2353 int diag; 2354 portid_t port_id; 2355 2356 signal(SIGINT, signal_handler); 2357 signal(SIGTERM, signal_handler); 2358 2359 diag = rte_eal_init(argc, argv); 2360 if (diag < 0) 2361 rte_panic("Cannot init EAL\n"); 2362 2363 testpmd_logtype = rte_log_register("testpmd"); 2364 if (testpmd_logtype < 0) 2365 rte_panic("Cannot register log type"); 2366 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG); 2367 2368 if (mlockall(MCL_CURRENT | MCL_FUTURE)) { 2369 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n", 2370 strerror(errno)); 2371 } 2372 2373 #ifdef RTE_LIBRTE_PDUMP 2374 /* initialize packet capture framework */ 2375 rte_pdump_init(NULL); 2376 #endif 2377 2378 nb_ports = (portid_t) rte_eth_dev_count(); 2379 if (nb_ports == 0) 2380 TESTPMD_LOG(WARNING, "No probed ethernet devices\n"); 2381 2382 /* allocate port structures, and init them */ 2383 init_port(); 2384 2385 set_def_fwd_config(); 2386 if (nb_lcores == 0) 2387 rte_panic("Empty set of forwarding logical cores - check the " 2388 "core mask supplied in the command parameters\n"); 2389 2390 /* Bitrate/latency stats disabled by default */ 2391 #ifdef RTE_LIBRTE_BITRATE 2392 bitrate_enabled = 0; 2393 #endif 2394 #ifdef RTE_LIBRTE_LATENCY_STATS 2395 latencystats_enabled = 0; 2396 #endif 2397 2398 argc -= diag; 2399 argv += diag; 2400 if (argc > 1) 2401 launch_args_parse(argc, argv); 2402 2403 if (tx_first && interactive) 2404 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on " 2405 "interactive mode.\n"); 2406 2407 if (tx_first && lsc_interrupt) { 2408 printf("Warning: lsc_interrupt needs to be off when " 2409 " using tx_first. Disabling.\n"); 2410 lsc_interrupt = 0; 2411 } 2412 2413 if (!nb_rxq && !nb_txq) 2414 printf("Warning: Either rx or tx queues should be non-zero\n"); 2415 2416 if (nb_rxq > 1 && nb_rxq > nb_txq) 2417 printf("Warning: nb_rxq=%d enables RSS configuration, " 2418 "but nb_txq=%d will prevent to fully test it.\n", 2419 nb_rxq, nb_txq); 2420 2421 init_config(); 2422 if (start_port(RTE_PORT_ALL) != 0) 2423 rte_exit(EXIT_FAILURE, "Start ports failed\n"); 2424 2425 /* set all ports to promiscuous mode by default */ 2426 RTE_ETH_FOREACH_DEV(port_id) 2427 rte_eth_promiscuous_enable(port_id); 2428 2429 /* Init metrics library */ 2430 rte_metrics_init(rte_socket_id()); 2431 2432 #ifdef RTE_LIBRTE_LATENCY_STATS 2433 if (latencystats_enabled != 0) { 2434 int ret = rte_latencystats_init(1, NULL); 2435 if (ret) 2436 printf("Warning: latencystats init()" 2437 " returned error %d\n", ret); 2438 printf("Latencystats running on lcore %d\n", 2439 latencystats_lcore_id); 2440 } 2441 #endif 2442 2443 /* Setup bitrate stats */ 2444 #ifdef RTE_LIBRTE_BITRATE 2445 if (bitrate_enabled != 0) { 2446 bitrate_data = rte_stats_bitrate_create(); 2447 if (bitrate_data == NULL) 2448 rte_exit(EXIT_FAILURE, 2449 "Could not allocate bitrate data.\n"); 2450 rte_stats_bitrate_reg(bitrate_data); 2451 } 2452 #endif 2453 2454 #ifdef RTE_LIBRTE_CMDLINE 2455 if (strlen(cmdline_filename) != 0) 2456 cmdline_read_from_file(cmdline_filename); 2457 2458 if (interactive == 1) { 2459 if (auto_start) { 2460 printf("Start automatic packet forwarding\n"); 2461 start_packet_forwarding(0); 2462 } 2463 prompt(); 2464 pmd_test_exit(); 2465 } else 2466 #endif 2467 { 2468 char c; 2469 int rc; 2470 2471 f_quit = 0; 2472 2473 printf("No commandline core given, start packet forwarding\n"); 2474 start_packet_forwarding(tx_first); 2475 if (stats_period != 0) { 2476 uint64_t prev_time = 0, cur_time, diff_time = 0; 2477 uint64_t timer_period; 2478 2479 /* Convert to number of cycles */ 2480 timer_period = stats_period * rte_get_timer_hz(); 2481 2482 while (f_quit == 0) { 2483 cur_time = rte_get_timer_cycles(); 2484 diff_time += cur_time - prev_time; 2485 2486 if (diff_time >= timer_period) { 2487 print_stats(); 2488 /* Reset the timer */ 2489 diff_time = 0; 2490 } 2491 /* Sleep to avoid unnecessary checks */ 2492 prev_time = cur_time; 2493 sleep(1); 2494 } 2495 } 2496 2497 printf("Press enter to exit\n"); 2498 rc = read(0, &c, 1); 2499 pmd_test_exit(); 2500 if (rc < 0) 2501 return 1; 2502 } 2503 2504 return 0; 2505 } 2506